Dataset Viewer
code (string, lengths 208–42.9k) | apis (sequence) | extract_api (string, lengths 129–69.9k) |
---|---|---|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine.functional as F
from megengine.core import Tensor
from official.vision.detection import layers
def get_focal_loss(
logits: Tensor,
labels: Tensor,
ignore_label: int = -1,
background: int = 0,
alpha: float = 0.5,
gamma: float = 0,
norm_type: str = "fg",
) -> Tensor:
r"""Focal Loss for Dense Object Detection:
<https://arxiv.org/pdf/1708.02002.pdf>
.. math::
FL(p_t) = -\alpha_t(1-p_t)^\gamma \log(p_t)
Args:
logits (Tensor):
the predicted logits with the shape of :math:`(B, A, C)`
labels (Tensor):
the assigned labels of boxes with shape of :math:`(B, A)`
ignore_label (int):
the value of ignore class. Default: -1
background (int):
the value of background class. Default: 0
alpha (float):
parameter to mitigate class imbalance. Default: 0.5
gamma (float):
parameter to mitigate easy/hard loss imbalance. Default: 0
norm_type (str): currently supports "fg" and "none":
"fg": loss will be normalized by the number of foreground samples
"none": no normalization
Returns:
the calculated focal loss.
"""
class_range = F.arange(1, logits.shape[2] + 1)
labels = F.add_axis(labels, axis=2)
scores = F.sigmoid(logits)
pos_part = (1 - scores) ** gamma * layers.logsigmoid(logits)
neg_part = scores ** gamma * layers.logsigmoid(-logits)
pos_loss = -(labels == class_range) * pos_part * alpha
neg_loss = (
-(labels != class_range) * (labels != ignore_label) * neg_part * (1 - alpha)
)
loss = (pos_loss + neg_loss).sum()
if norm_type == "fg":
fg_mask = (labels != background) * (labels != ignore_label)
return loss / F.maximum(fg_mask.sum(), 1)
elif norm_type == "none":
return loss
else:
raise NotImplementedError
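# A minimal usage sketch (not part of the original module). It assumes numpy and
# megengine are importable and that labels follow the convention documented above:
# 0 for background, -1 for ignored anchors, 1..C for foreground classes.
# `_focal_loss_example` is a hypothetical helper added only for illustration.
def _focal_loss_example():
    import numpy as np
    import megengine as mge

    num_classes = 80
    logits = mge.tensor(np.random.randn(2, 100, num_classes).astype("float32"))
    labels = mge.tensor(
        np.random.randint(0, num_classes + 1, size=(2, 100)).astype("float32")
    )
    # alpha=0.25, gamma=2 are the RetinaNet paper values; the defaults above differ.
    return get_focal_loss(logits, labels, alpha=0.25, gamma=2.0)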
def get_smooth_l1_loss(
pred_bbox: Tensor,
gt_bbox: Tensor,
labels: Tensor,
beta: int = 1,
background: int = 0,
ignore_label: int = -1,
norm_type: str = "fg",
) -> Tensor:
r"""Smooth l1 loss used in RetinaNet.
Args:
pred_bbox (Tensor):
the predicted bbox with the shape of :math:`(B, A, 4)`
gt_bbox (Tensor):
the ground-truth bbox with the shape of :math:`(B, A, 4)`
labels (Tensor):
the assigned labels of boxes with shape of :math:`(B, A)`
beta (int):
the parameter of smooth l1 loss. Default: 1
background (int):
the value of background class. Default: 0
ignore_label (int):
the value of ignore class. Default: -1
norm_type (str): currently supports "fg", "all" and "none":
"fg": loss will be normalized by the number of foreground samples
"all": loss will be normalized by the number of all non-ignored samples
"none": no normalization
Returns:
the calculated smooth l1 loss.
"""
pred_bbox = pred_bbox.reshape(-1, 4)
gt_bbox = gt_bbox.reshape(-1, 4)
labels = labels.reshape(-1)
fg_mask = (labels != background) * (labels != ignore_label)
loss = get_smooth_l1_base(pred_bbox, gt_bbox, beta)
loss = (loss.sum(axis=1) * fg_mask).sum()
if norm_type == "fg":
loss = loss / F.maximum(fg_mask.sum(), 1)
elif norm_type == "all":
all_mask = labels != ignore_label
loss = loss / F.maximum(all_mask.sum(), 1)
elif norm_type == "none":
return loss
else:
raise NotImplementedError
return loss
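# A minimal usage sketch (not part of the original module), assuming numpy and
# megengine are available. Boxes are random; labels use 0 for background and -1
# for ignore, as documented above. `_smooth_l1_example` is a hypothetical helper.
def _smooth_l1_example():
    import numpy as np
    import megengine as mge

    pred_bbox = mge.tensor(np.random.randn(2, 100, 4).astype("float32"))
    gt_bbox = mge.tensor(np.random.randn(2, 100, 4).astype("float32"))
    labels = mge.tensor(np.random.randint(-1, 81, size=(2, 100)).astype("float32"))
    return get_smooth_l1_loss(pred_bbox, gt_bbox, labels, beta=1)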
def get_smooth_l1_base(pred_bbox: Tensor, gt_bbox: Tensor, beta: float) -> Tensor:
r"""
Args:
pred_bbox (Tensor):
the predicted bbox with the shape of :math:`(N, 4)`
gt_bbox (Tensor):
the ground-truth bbox with the shape of :math:`(N, 4)`
beta (float):
the parameter of smooth l1 loss.
Returns:
the calculated smooth l1 loss.
"""
x = pred_bbox - gt_bbox
abs_x = F.abs(x)
if beta < 1e-5:
loss = abs_x
else:
in_loss = 0.5 * x ** 2 / beta
out_loss = abs_x - 0.5 * beta
# FIXME: F.where cannot handle 0-shape tensor yet
# loss = F.where(abs_x < beta, in_loss, out_loss)
in_mask = abs_x < beta
loss = in_loss * in_mask + out_loss * (1 - in_mask)
return loss
def softmax_loss(scores: Tensor, labels: Tensor, ignore_label: int = -1) -> Tensor:
max_scores = F.zero_grad(scores.max(axis=1, keepdims=True))
scores -= max_scores
log_prob = scores - F.log(F.exp(scores).sum(axis=1, keepdims=True))
mask = labels != ignore_label
vlabels = labels * mask
loss = -(F.indexing_one_hot(log_prob, vlabels.astype("int32"), 1) * mask).sum()
loss = loss / F.maximum(mask.sum(), 1)
return loss
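# A minimal usage sketch (not part of the original module), assuming numpy and
# megengine are available: `scores` are per-sample class logits of shape (N, C)
# and `labels` are integer class indices, with -1 marking ignored samples.
# `_softmax_loss_example` is a hypothetical helper added only for illustration.
def _softmax_loss_example():
    import numpy as np
    import megengine as mge

    scores = mge.tensor(np.random.randn(8, 81).astype("float32"))
    labels = mge.tensor(np.random.randint(-1, 81, size=(8,)).astype("int32"))
    return softmax_loss(scores, labels)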
|
[
"megengine.functional.add_axis",
"megengine.functional.arange",
"megengine.functional.sigmoid",
"megengine.functional.abs",
"megengine.functional.exp"
] |
[((1623, 1655), 'megengine.functional.arange', 'F.arange', (['(1)', '(logits.shape[2] + 1)'], {}), '(1, logits.shape[2] + 1)\n', (1631, 1655), True, 'import megengine.functional as F\n'), ((1670, 1696), 'megengine.functional.add_axis', 'F.add_axis', (['labels'], {'axis': '(2)'}), '(labels, axis=2)\n', (1680, 1696), True, 'import megengine.functional as F\n'), ((1710, 1727), 'megengine.functional.sigmoid', 'F.sigmoid', (['logits'], {}), '(logits)\n', (1719, 1727), True, 'import megengine.functional as F\n'), ((4412, 4420), 'megengine.functional.abs', 'F.abs', (['x'], {}), '(x)\n', (4417, 4420), True, 'import megengine.functional as F\n'), ((1767, 1792), 'official.vision.detection.layers.logsigmoid', 'layers.logsigmoid', (['logits'], {}), '(logits)\n', (1784, 1792), False, 'from official.vision.detection import layers\n'), ((1826, 1852), 'official.vision.detection.layers.logsigmoid', 'layers.logsigmoid', (['(-logits)'], {}), '(-logits)\n', (1843, 1852), False, 'from official.vision.detection import layers\n'), ((4977, 4990), 'megengine.functional.exp', 'F.exp', (['scores'], {}), '(scores)\n', (4982, 4990), True, 'import megengine.functional as F\n')]
|
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
import megengine as mge
import megengine.module as M
import numpy as np
import pytest
from basecls.models.repvgg import RepVGGBlock
@pytest.mark.parametrize("w_in", [32, 64])
@pytest.mark.parametrize("w_out", [64])
@pytest.mark.parametrize("stride", [1, 2])
@pytest.mark.parametrize("groups", [1, 2, 4])
@pytest.mark.parametrize("se_r", [0.0, 0.25])
@pytest.mark.parametrize("act_name", ["relu"])
def test_block(w_in, w_out, stride, groups, se_r, act_name):
m = RepVGGBlock(w_in, w_out, stride, groups, se_r, act_name, deploy=False)
assert isinstance(m, M.Module)
m.eval()
x = mge.random.uniform(size=(2, w_in, 8, 8))
y0 = m(x)
m = RepVGGBlock.convert_to_deploy(m)
y1 = m(x)
np.testing.assert_allclose(y1.numpy(), y0.numpy(), rtol=1e-4, atol=1e-6)
|
[
"megengine.random.uniform"
] |
[((218, 259), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""w_in"""', '[32, 64]'], {}), "('w_in', [32, 64])\n", (241, 259), False, 'import pytest\n'), ((261, 299), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""w_out"""', '[64]'], {}), "('w_out', [64])\n", (284, 299), False, 'import pytest\n'), ((301, 342), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""stride"""', '[1, 2]'], {}), "('stride', [1, 2])\n", (324, 342), False, 'import pytest\n'), ((344, 388), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""groups"""', '[1, 2, 4]'], {}), "('groups', [1, 2, 4])\n", (367, 388), False, 'import pytest\n'), ((390, 434), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""se_r"""', '[0.0, 0.25]'], {}), "('se_r', [0.0, 0.25])\n", (413, 434), False, 'import pytest\n'), ((436, 481), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""act_name"""', "['relu']"], {}), "('act_name', ['relu'])\n", (459, 481), False, 'import pytest\n'), ((551, 621), 'basecls.models.repvgg.RepVGGBlock', 'RepVGGBlock', (['w_in', 'w_out', 'stride', 'groups', 'se_r', 'act_name'], {'deploy': '(False)'}), '(w_in, w_out, stride, groups, se_r, act_name, deploy=False)\n', (562, 621), False, 'from basecls.models.repvgg import RepVGGBlock\n'), ((679, 719), 'megengine.random.uniform', 'mge.random.uniform', ([], {'size': '(2, w_in, 8, 8)'}), '(size=(2, w_in, 8, 8))\n', (697, 719), True, 'import megengine as mge\n'), ((743, 775), 'basecls.models.repvgg.RepVGGBlock.convert_to_deploy', 'RepVGGBlock.convert_to_deploy', (['m'], {}), '(m)\n', (772, 775), False, 'from basecls.models.repvgg import RepVGGBlock\n')]
|
import math
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
# ================================= GRU Implementation ==========================================================
class GRUCell(M.Module):
"""
An implementation of GRUCell.
"""
def __init__(self, input_size, hidden_size, bias=True):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.ih = M.Linear(input_size, 3 * hidden_size, bias=bias)
self.hh = M.Linear(hidden_size, 3 * hidden_size, bias=bias)
self.reset_parameters()
def reset_parameters(self):
std = 1.0 / math.sqrt(self.hidden_size)
for w in self.parameters():
M.init.uniform_(w, -std, std)
def forward(self, x, hidden):
x = F.reshape(x, (-1, x.shape[1]))
gate_x = self.ih(x)
gate_h = self.hh(hidden)
i_r, i_i, i_n = F.split(gate_x, 3, axis=1)
h_r, h_i, h_n = F.split(gate_h, 3, axis=1)
resetgate = F.sigmoid(i_r + h_r)
inputgate = F.sigmoid(i_i + h_i)
newgate = F.tanh(i_n + (resetgate * h_n))
hy = newgate + inputgate * (hidden - newgate)
return hy
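# A minimal usage sketch (not part of the original code): a single GRUCell step on
# a random batch with an explicit hidden state. `_gru_cell_example` is a
# hypothetical helper added only for illustration.
def _gru_cell_example():
    cell = GRUCell(input_size=16, hidden_size=32)
    x = mge.tensor(np.random.randn(4, 16).astype("float32"))
    h = F.zeros((4, 32))
    return cell(x, h)  # next hidden state, shape (4, 32)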
class GRU(M.Module):
"""
An implementation of a multi-layer GRU module.
"""
def __init__(
self,
input_size,
hidden_size,
num_layers,
bias=True,
batch_first=False,
dropout=0,
):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layers = num_layers
self.bias = bias
self.batch_first = batch_first
self.dropout = dropout
self.rnn_cell_list = []
self.rnn_cell_list.append(GRUCell(self.input_size, self.hidden_size, self.bias))
for l in range(1, self.num_layers):
self.rnn_cell_list.append(
GRUCell(self.hidden_size, self.hidden_size, self.bias)
)
def forward(self, input, hx=None):
if hx is None:
batch = input.shape[0] if self.batch_first else input.shape[1]
h0 = F.zeros((self.num_layers, batch, self.hidden_size))
else:
h0 = hx
outs = []
hidden = list()
for layer in range(self.num_layers):
hidden.append(h0[layer, :, :])
length = input.shape[1] if self.batch_first else input.shape[0]
for t in range(length):
for layer in range(self.num_layers):
if layer == 0:
if self.batch_first:
hidden_l = self.rnn_cell_list[layer](
input[:, t, :], hidden[layer]
)
else:
hidden_l = self.rnn_cell_list[layer](
input[t, :, :], hidden[layer]
)
else:
hidden_l = self.rnn_cell_list[layer](
hidden[layer - 1], hidden[layer]
)
if self.dropout and (layer != self.num_layers - 1):
hidden_l = F.dropout(hidden_l, self.dropout)
hidden[layer] = hidden_l
outs.append(hidden_l)
if self.batch_first:
output = F.stack(outs, axis=1)
else:
output = F.stack(outs, axis=0)
return output
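# A minimal usage sketch (not part of the original code): run the multi-layer GRU
# over a random (time, batch, feature) sequence; batch_first=False is the default
# above. `_gru_example` is a hypothetical helper added only for illustration.
def _gru_example():
    gru = GRU(input_size=16, hidden_size=32, num_layers=2)
    seq = mge.tensor(np.random.randn(10, 4, 16).astype("float32"))  # (T, B, C)
    return gru(seq)  # stacked last-layer hidden states, shape (10, 4, 32)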
# ================================= LSTM Implementation ==========================================================
class LSTMCell(M.Module):
"""
An implementation of LSTMCell.
"""
def __init__(self, input_size, hidden_size, bias=True):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.x2h = M.Linear(input_size, 4 * hidden_size, bias=bias)
self.h2h = M.Linear(hidden_size, 4 * hidden_size, bias=bias)
self.reset_parameters()
def reset_parameters(self):
std = 1.0 / math.sqrt(self.hidden_size)
for w in self.parameters():
M.init.uniform_(w, -std, std)
def forward(self, x, hidden):
hx, cx = hidden
x = F.reshape(x, (-1, x.shape[1]))
gates = self.x2h(x) + self.h2h(hx)
ingate, forgetgate, cellgate, outgate = F.split(gates, 4, axis=1)
ingate = F.sigmoid(ingate)
forgetgate = F.sigmoid(forgetgate)
cellgate = F.tanh(cellgate)
outgate = F.sigmoid(outgate)
cy = F.mul(cx, forgetgate) + F.mul(ingate, cellgate)
hy = F.mul(outgate, F.tanh(cy))
return (hy, cy)
class LSTM(M.Module):
"""
An implementation of a multi-layer LSTM module.
"""
def __init__(
self,
input_size,
hidden_size,
num_layers,
bias=True,
batch_first=False,
dropout=0,
):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layers = num_layers
self.bias = bias
self.batch_first = batch_first
self.dropout = dropout
self.rnn_cell_list = []
self.rnn_cell_list.append(
LSTMCell(self.input_size, self.hidden_size, self.bias)
)
for l in range(1, self.num_layers):
self.rnn_cell_list.append(
LSTMCell(self.hidden_size, self.hidden_size, self.bias)
)
def forward(self, input, hx=None):
if hx is None:
batch = input.shape[0] if self.batch_first else input.shape[1]
h0 = F.zeros((self.num_layers, batch, self.hidden_size))
c0 = F.zeros((self.num_layers, batch, self.hidden_size))
else:
h0 = hx[0]
c0 = hx[1]
outs = []
hidden = list()
for layer in range(self.num_layers):
hidden.append((h0[layer, :, :], c0[layer, :, :]))
length = input.shape[1] if self.batch_first else input.shape[0]
for t in range(length):
for layer in range(self.num_layers):
if layer == 0:
inp = input[:, t, :] if self.batch_first else input[t, :, :]
hidden_l = self.rnn_cell_list[layer](
inp, (hidden[layer][0], hidden[layer][1])
)
else:
hidden_l = self.rnn_cell_list[layer](
hidden[layer - 1][0], (hidden[layer][0], hidden[layer][1])
)
if self.dropout and (layer != self.num_layers - 1):
hidden_l = (
F.dropout(hidden_l[0], self.dropout),
F.dropout(hidden_l[1], self.dropout),
)
hidden[layer] = hidden_l
outs.append(hidden_l[0])
if self.batch_first:
output = F.stack(outs, axis=1)
else:
output = F.stack(outs, axis=0)
return output
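# A minimal usage sketch (not part of the original code): run the multi-layer LSTM
# over a random batch-first sequence. `_lstm_example` is a hypothetical helper
# added only for illustration.
def _lstm_example():
    lstm = LSTM(input_size=16, hidden_size=32, num_layers=2, batch_first=True)
    seq = mge.tensor(np.random.randn(4, 10, 16).astype("float32"))  # (B, T, C)
    return lstm(seq)  # stacked last-layer hidden states, shape (4, 10, 32)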
|
[
"megengine.functional.zeros",
"megengine.functional.mul",
"megengine.functional.sigmoid",
"megengine.functional.split",
"megengine.functional.stack",
"megengine.module.Linear",
"megengine.functional.dropout",
"megengine.functional.tanh",
"megengine.functional.reshape",
"megengine.module.init.uniform_"
] |
[((520, 568), 'megengine.module.Linear', 'M.Linear', (['input_size', '(3 * hidden_size)'], {'bias': 'bias'}), '(input_size, 3 * hidden_size, bias=bias)\n', (528, 568), True, 'import megengine.module as M\n'), ((587, 636), 'megengine.module.Linear', 'M.Linear', (['hidden_size', '(3 * hidden_size)'], {'bias': 'bias'}), '(hidden_size, 3 * hidden_size, bias=bias)\n', (595, 636), True, 'import megengine.module as M\n'), ((876, 906), 'megengine.functional.reshape', 'F.reshape', (['x', '(-1, x.shape[1])'], {}), '(x, (-1, x.shape[1]))\n', (885, 906), True, 'import megengine.functional as F\n'), ((994, 1020), 'megengine.functional.split', 'F.split', (['gate_x', '(3)'], {'axis': '(1)'}), '(gate_x, 3, axis=1)\n', (1001, 1020), True, 'import megengine.functional as F\n'), ((1045, 1071), 'megengine.functional.split', 'F.split', (['gate_h', '(3)'], {'axis': '(1)'}), '(gate_h, 3, axis=1)\n', (1052, 1071), True, 'import megengine.functional as F\n'), ((1093, 1113), 'megengine.functional.sigmoid', 'F.sigmoid', (['(i_r + h_r)'], {}), '(i_r + h_r)\n', (1102, 1113), True, 'import megengine.functional as F\n'), ((1134, 1154), 'megengine.functional.sigmoid', 'F.sigmoid', (['(i_i + h_i)'], {}), '(i_i + h_i)\n', (1143, 1154), True, 'import megengine.functional as F\n'), ((1173, 1202), 'megengine.functional.tanh', 'F.tanh', (['(i_n + resetgate * h_n)'], {}), '(i_n + resetgate * h_n)\n', (1179, 1202), True, 'import megengine.functional as F\n'), ((3899, 3947), 'megengine.module.Linear', 'M.Linear', (['input_size', '(4 * hidden_size)'], {'bias': 'bias'}), '(input_size, 4 * hidden_size, bias=bias)\n', (3907, 3947), True, 'import megengine.module as M\n'), ((3967, 4016), 'megengine.module.Linear', 'M.Linear', (['hidden_size', '(4 * hidden_size)'], {'bias': 'bias'}), '(hidden_size, 4 * hidden_size, bias=bias)\n', (3975, 4016), True, 'import megengine.module as M\n'), ((4281, 4311), 'megengine.functional.reshape', 'F.reshape', (['x', '(-1, x.shape[1])'], {}), '(x, (-1, x.shape[1]))\n', (4290, 4311), True, 'import megengine.functional as F\n'), ((4405, 4430), 'megengine.functional.split', 'F.split', (['gates', '(4)'], {'axis': '(1)'}), '(gates, 4, axis=1)\n', (4412, 4430), True, 'import megengine.functional as F\n'), ((4449, 4466), 'megengine.functional.sigmoid', 'F.sigmoid', (['ingate'], {}), '(ingate)\n', (4458, 4466), True, 'import megengine.functional as F\n'), ((4488, 4509), 'megengine.functional.sigmoid', 'F.sigmoid', (['forgetgate'], {}), '(forgetgate)\n', (4497, 4509), True, 'import megengine.functional as F\n'), ((4529, 4545), 'megengine.functional.tanh', 'F.tanh', (['cellgate'], {}), '(cellgate)\n', (4535, 4545), True, 'import megengine.functional as F\n'), ((4564, 4582), 'megengine.functional.sigmoid', 'F.sigmoid', (['outgate'], {}), '(outgate)\n', (4573, 4582), True, 'import megengine.functional as F\n'), ((722, 749), 'math.sqrt', 'math.sqrt', (['self.hidden_size'], {}), '(self.hidden_size)\n', (731, 749), False, 'import math\n'), ((798, 827), 'megengine.module.init.uniform_', 'M.init.uniform_', (['w', '(-std)', 'std'], {}), '(w, -std, std)\n', (813, 827), True, 'import megengine.module as M\n'), ((2204, 2255), 'megengine.functional.zeros', 'F.zeros', (['(self.num_layers, batch, self.hidden_size)'], {}), '((self.num_layers, batch, self.hidden_size))\n', (2211, 2255), True, 'import megengine.functional as F\n'), ((3392, 3413), 'megengine.functional.stack', 'F.stack', (['outs'], {'axis': '(1)'}), '(outs, axis=1)\n', (3399, 3413), True, 'import megengine.functional as F\n'), ((3449, 3470), 
'megengine.functional.stack', 'F.stack', (['outs'], {'axis': '(0)'}), '(outs, axis=0)\n', (3456, 3470), True, 'import megengine.functional as F\n'), ((4102, 4129), 'math.sqrt', 'math.sqrt', (['self.hidden_size'], {}), '(self.hidden_size)\n', (4111, 4129), False, 'import math\n'), ((4178, 4207), 'megengine.module.init.uniform_', 'M.init.uniform_', (['w', '(-std)', 'std'], {}), '(w, -std, std)\n', (4193, 4207), True, 'import megengine.module as M\n'), ((4597, 4618), 'megengine.functional.mul', 'F.mul', (['cx', 'forgetgate'], {}), '(cx, forgetgate)\n', (4602, 4618), True, 'import megengine.functional as F\n'), ((4621, 4644), 'megengine.functional.mul', 'F.mul', (['ingate', 'cellgate'], {}), '(ingate, cellgate)\n', (4626, 4644), True, 'import megengine.functional as F\n'), ((4674, 4684), 'megengine.functional.tanh', 'F.tanh', (['cy'], {}), '(cy)\n', (4680, 4684), True, 'import megengine.functional as F\n'), ((5661, 5712), 'megengine.functional.zeros', 'F.zeros', (['(self.num_layers, batch, self.hidden_size)'], {}), '((self.num_layers, batch, self.hidden_size))\n', (5668, 5712), True, 'import megengine.functional as F\n'), ((5730, 5781), 'megengine.functional.zeros', 'F.zeros', (['(self.num_layers, batch, self.hidden_size)'], {}), '((self.num_layers, batch, self.hidden_size))\n', (5737, 5781), True, 'import megengine.functional as F\n'), ((6974, 6995), 'megengine.functional.stack', 'F.stack', (['outs'], {'axis': '(1)'}), '(outs, axis=1)\n', (6981, 6995), True, 'import megengine.functional as F\n'), ((7031, 7052), 'megengine.functional.stack', 'F.stack', (['outs'], {'axis': '(0)'}), '(outs, axis=0)\n', (7038, 7052), True, 'import megengine.functional as F\n'), ((3230, 3263), 'megengine.functional.dropout', 'F.dropout', (['hidden_l', 'self.dropout'], {}), '(hidden_l, self.dropout)\n', (3239, 3263), True, 'import megengine.functional as F\n'), ((6721, 6757), 'megengine.functional.dropout', 'F.dropout', (['hidden_l[0]', 'self.dropout'], {}), '(hidden_l[0], self.dropout)\n', (6730, 6757), True, 'import megengine.functional as F\n'), ((6783, 6819), 'megengine.functional.dropout', 'F.dropout', (['hidden_l[1]', 'self.dropout'], {}), '(hidden_l[1], self.dropout)\n', (6792, 6819), True, 'import megengine.functional as F\n')]
|
import megengine.module as M
import megengine.functional as F
class FlowHead(M.Module):
def __init__(self, input_dim=128, hidden_dim=256):
super(FlowHead, self).__init__()
self.conv1 = M.Conv2d(input_dim, hidden_dim, 3, padding=1)
self.conv2 = M.Conv2d(hidden_dim, 2, 3, padding=1)
self.relu = M.ReLU()
def forward(self, x):
return self.conv2(self.relu(self.conv1(x)))
class SepConvGRU(M.Module):
def __init__(self, hidden_dim=128, input_dim=192 + 128):
super(SepConvGRU, self).__init__()
self.convz1 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2)
)
self.convr1 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2)
)
self.convq1 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2)
)
self.convz2 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0)
)
self.convr2 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0)
)
self.convq2 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0)
)
def forward(self, h, x):
# horizontal
hx = F.concat([h, x], axis=1)
z = F.sigmoid(self.convz1(hx))
r = F.sigmoid(self.convr1(hx))
q = F.tanh(self.convq1(F.concat([r * h, x], axis=1)))
h = (1 - z) * h + z * q
# vertical
hx = F.concat([h, x], axis=1)
z = F.sigmoid(self.convz2(hx))
r = F.sigmoid(self.convr2(hx))
q = F.tanh(self.convq2(F.concat([r * h, x], axis=1)))
h = (1 - z) * h + z * q
return h
class BasicMotionEncoder(M.Module):
def __init__(self, cor_planes):
super(BasicMotionEncoder, self).__init__()
self.convc1 = M.Conv2d(cor_planes, 256, 1, padding=0)
self.convc2 = M.Conv2d(256, 192, 3, padding=1)
self.convf1 = M.Conv2d(2, 128, 7, padding=3)
self.convf2 = M.Conv2d(128, 64, 3, padding=1)
self.conv = M.Conv2d(64 + 192, 128 - 2, 3, padding=1)
def forward(self, flow, corr):
cor = F.relu(self.convc1(corr))
cor = F.relu(self.convc2(cor))
flo = F.relu(self.convf1(flow))
flo = F.relu(self.convf2(flo))
cor_flo = F.concat([cor, flo], axis=1)
out = F.relu(self.conv(cor_flo))
return F.concat([out, flow], axis=1)
class BasicUpdateBlock(M.Module):
def __init__(self, hidden_dim, cor_planes, mask_size=8):
super(BasicUpdateBlock, self).__init__()
self.encoder = BasicMotionEncoder(cor_planes)
self.gru = SepConvGRU(hidden_dim=hidden_dim, input_dim=128 + hidden_dim)
self.flow_head = FlowHead(hidden_dim, hidden_dim=256)
self.mask = M.Sequential(
M.Conv2d(128, 256, 3, padding=1),
M.ReLU(),
M.Conv2d(256, mask_size**2 * 9, 1, padding=0),
)
def forward(self, net, inp, corr, flow, upsample=True):
motion_features = self.encoder(flow, corr)
inp = F.concat([inp, motion_features], axis=1)
net = self.gru(net, inp)
delta_flow = self.flow_head(net)
# scale mask to balance gradients
mask = 0.25 * self.mask(net)
return net, mask, delta_flow
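# A minimal shape-check sketch (not part of the original code). It assumes
# hidden_dim=128, a RAFT-style correlation volume with 4 * 9 * 9 channels, and a
# context feature `inp` with the same channel count as the hidden state.
# `_update_block_example` is a hypothetical helper added only for illustration.
def _update_block_example():
    hidden_dim, cor_planes = 128, 4 * 9 * 9
    block = BasicUpdateBlock(hidden_dim, cor_planes)
    net = F.zeros((1, hidden_dim, 32, 32))   # hidden state
    inp = F.zeros((1, hidden_dim, 32, 32))   # context features
    corr = F.zeros((1, cor_planes, 32, 32))   # correlation lookup
    flow = F.zeros((1, 2, 32, 32))            # current flow estimate
    net, mask, delta_flow = block(net, inp, corr, flow)
    # shapes: net (1, 128, 32, 32), mask (1, 576, 32, 32), delta_flow (1, 2, 32, 32)
    return net, mask, delta_flow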
|
[
"megengine.functional.concat",
"megengine.module.ReLU",
"megengine.module.Conv2d"
] |
[((207, 252), 'megengine.module.Conv2d', 'M.Conv2d', (['input_dim', 'hidden_dim', '(3)'], {'padding': '(1)'}), '(input_dim, hidden_dim, 3, padding=1)\n', (215, 252), True, 'import megengine.module as M\n'), ((274, 311), 'megengine.module.Conv2d', 'M.Conv2d', (['hidden_dim', '(2)', '(3)'], {'padding': '(1)'}), '(hidden_dim, 2, 3, padding=1)\n', (282, 311), True, 'import megengine.module as M\n'), ((332, 340), 'megengine.module.ReLU', 'M.ReLU', ([], {}), '()\n', (338, 340), True, 'import megengine.module as M\n'), ((576, 644), 'megengine.module.Conv2d', 'M.Conv2d', (['(hidden_dim + input_dim)', 'hidden_dim', '(1, 5)'], {'padding': '(0, 2)'}), '(hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2))\n', (584, 644), True, 'import megengine.module as M\n'), ((689, 757), 'megengine.module.Conv2d', 'M.Conv2d', (['(hidden_dim + input_dim)', 'hidden_dim', '(1, 5)'], {'padding': '(0, 2)'}), '(hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2))\n', (697, 757), True, 'import megengine.module as M\n'), ((802, 870), 'megengine.module.Conv2d', 'M.Conv2d', (['(hidden_dim + input_dim)', 'hidden_dim', '(1, 5)'], {'padding': '(0, 2)'}), '(hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2))\n', (810, 870), True, 'import megengine.module as M\n'), ((916, 984), 'megengine.module.Conv2d', 'M.Conv2d', (['(hidden_dim + input_dim)', 'hidden_dim', '(5, 1)'], {'padding': '(2, 0)'}), '(hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0))\n', (924, 984), True, 'import megengine.module as M\n'), ((1029, 1097), 'megengine.module.Conv2d', 'M.Conv2d', (['(hidden_dim + input_dim)', 'hidden_dim', '(5, 1)'], {'padding': '(2, 0)'}), '(hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0))\n', (1037, 1097), True, 'import megengine.module as M\n'), ((1142, 1210), 'megengine.module.Conv2d', 'M.Conv2d', (['(hidden_dim + input_dim)', 'hidden_dim', '(5, 1)'], {'padding': '(2, 0)'}), '(hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0))\n', (1150, 1210), True, 'import megengine.module as M\n'), ((1297, 1321), 'megengine.functional.concat', 'F.concat', (['[h, x]'], {'axis': '(1)'}), '([h, x], axis=1)\n', (1305, 1321), True, 'import megengine.functional as F\n'), ((1527, 1551), 'megengine.functional.concat', 'F.concat', (['[h, x]'], {'axis': '(1)'}), '([h, x], axis=1)\n', (1535, 1551), True, 'import megengine.functional as F\n'), ((1890, 1929), 'megengine.module.Conv2d', 'M.Conv2d', (['cor_planes', '(256)', '(1)'], {'padding': '(0)'}), '(cor_planes, 256, 1, padding=0)\n', (1898, 1929), True, 'import megengine.module as M\n'), ((1952, 1984), 'megengine.module.Conv2d', 'M.Conv2d', (['(256)', '(192)', '(3)'], {'padding': '(1)'}), '(256, 192, 3, padding=1)\n', (1960, 1984), True, 'import megengine.module as M\n'), ((2007, 2037), 'megengine.module.Conv2d', 'M.Conv2d', (['(2)', '(128)', '(7)'], {'padding': '(3)'}), '(2, 128, 7, padding=3)\n', (2015, 2037), True, 'import megengine.module as M\n'), ((2060, 2091), 'megengine.module.Conv2d', 'M.Conv2d', (['(128)', '(64)', '(3)'], {'padding': '(1)'}), '(128, 64, 3, padding=1)\n', (2068, 2091), True, 'import megengine.module as M\n'), ((2112, 2153), 'megengine.module.Conv2d', 'M.Conv2d', (['(64 + 192)', '(128 - 2)', '(3)'], {'padding': '(1)'}), '(64 + 192, 128 - 2, 3, padding=1)\n', (2120, 2153), True, 'import megengine.module as M\n'), ((2367, 2395), 'megengine.functional.concat', 'F.concat', (['[cor, flo]'], {'axis': '(1)'}), '([cor, flo], axis=1)\n', (2375, 2395), True, 'import megengine.functional as F\n'), ((2452, 2481), 'megengine.functional.concat', 
'F.concat', (['[out, flow]'], {'axis': '(1)'}), '([out, flow], axis=1)\n', (2460, 2481), True, 'import megengine.functional as F\n'), ((3123, 3163), 'megengine.functional.concat', 'F.concat', (['[inp, motion_features]'], {'axis': '(1)'}), '([inp, motion_features], axis=1)\n', (3131, 3163), True, 'import megengine.functional as F\n'), ((2872, 2904), 'megengine.module.Conv2d', 'M.Conv2d', (['(128)', '(256)', '(3)'], {'padding': '(1)'}), '(128, 256, 3, padding=1)\n', (2880, 2904), True, 'import megengine.module as M\n'), ((2918, 2926), 'megengine.module.ReLU', 'M.ReLU', ([], {}), '()\n', (2924, 2926), True, 'import megengine.module as M\n'), ((2940, 2987), 'megengine.module.Conv2d', 'M.Conv2d', (['(256)', '(mask_size ** 2 * 9)', '(1)'], {'padding': '(0)'}), '(256, mask_size ** 2 * 9, 1, padding=0)\n', (2948, 2987), True, 'import megengine.module as M\n'), ((1431, 1459), 'megengine.functional.concat', 'F.concat', (['[r * h, x]'], {'axis': '(1)'}), '([r * h, x], axis=1)\n', (1439, 1459), True, 'import megengine.functional as F\n'), ((1661, 1689), 'megengine.functional.concat', 'F.concat', (['[r * h, x]'], {'axis': '(1)'}), '([r * h, x], axis=1)\n', (1669, 1689), True, 'import megengine.functional as F\n')]
|
#!/usr/bin/env python3
from dataset import SIDDValData
from model import UNetD
import megengine.data as data
from utils import batch_PSNR
from tqdm import tqdm
import argparse
import pickle
import megengine
def test(args):
valid_dataset = SIDDValData(args.data)
valid_sampler = data.SequentialSampler(
valid_dataset, batch_size=1, drop_last=False
)
valid_dataloader = data.DataLoader(
valid_dataset,
sampler=valid_sampler,
num_workers=8,
)
model = UNetD(3)
with open(args.checkpoint, "rb") as f:
state = pickle.load(f)
model.load_state_dict(state["state_dict"])
model.eval()
def valid_step(image, label):
pred = model(image)
pred = image - pred
psnr_it = batch_PSNR(pred, label)
return psnr_it
def valid(func, data_queue):
psnr_v = 0.
for step, (image, label) in tqdm(enumerate(data_queue)):
image = megengine.tensor(image)
label = megengine.tensor(label)
psnr_it = func(image, label)
psnr_v += psnr_it
psnr_v /= step + 1
return psnr_v
psnr_v = valid(valid_step, valid_dataloader)
print("PSNR: {:.3f}".format(psnr_v.item()) )
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="MegEngine NBNet")
parser.add_argument("-d", "--data", default="/data/sidd", metavar="DIR", help="path to imagenet dataset")
parser.add_argument("-c", "--checkpoint", help="path to checkpoint")
args = parser.parse_args()
test(args)
# vim: ts=4 sw=4 sts=4 expandtab
|
[
"megengine.data.DataLoader",
"megengine.tensor",
"megengine.data.SequentialSampler"
] |
[((245, 267), 'dataset.SIDDValData', 'SIDDValData', (['args.data'], {}), '(args.data)\n', (256, 267), False, 'from dataset import SIDDValData\n'), ((288, 356), 'megengine.data.SequentialSampler', 'data.SequentialSampler', (['valid_dataset'], {'batch_size': '(1)', 'drop_last': '(False)'}), '(valid_dataset, batch_size=1, drop_last=False)\n', (310, 356), True, 'import megengine.data as data\n'), ((394, 462), 'megengine.data.DataLoader', 'data.DataLoader', (['valid_dataset'], {'sampler': 'valid_sampler', 'num_workers': '(8)'}), '(valid_dataset, sampler=valid_sampler, num_workers=8)\n', (409, 462), True, 'import megengine.data as data\n'), ((506, 514), 'model.UNetD', 'UNetD', (['(3)'], {}), '(3)\n', (511, 514), False, 'from model import UNetD\n'), ((1276, 1330), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""MegEngine NBNet"""'}), "(description='MegEngine NBNet')\n", (1299, 1330), False, 'import argparse\n'), ((574, 588), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (585, 588), False, 'import pickle\n'), ((762, 785), 'utils.batch_PSNR', 'batch_PSNR', (['pred', 'label'], {}), '(pred, label)\n', (772, 785), False, 'from utils import batch_PSNR\n'), ((948, 971), 'megengine.tensor', 'megengine.tensor', (['image'], {}), '(image)\n', (964, 971), False, 'import megengine\n'), ((992, 1015), 'megengine.tensor', 'megengine.tensor', (['label'], {}), '(label)\n', (1008, 1015), False, 'import megengine\n')]
|
import os
import numpy as np
import collections
import megengine.module as M
import megengine.functional as F
import megengine as mge
from megengine.data.dataset import Dataset
from megengine.data import DataLoader
import hparams as hp
from megengine.data import Collator
class AsrDataset(Dataset):
def __init__(self, data_set="train"):
"""
Args:
data_set (string): name of the split; metadata is loaded from "{dataset_root}/{data_set}.txt".
"""
self.metas = self.load_metas(hp.dataset_root, data_set)
def load_metas(self, root, data_set): # fix a bug
metas = []
with open(os.path.join(root, f"{data_set}.txt")) as f:
for line in f.readlines():
info = line.split("|")
metas.append(
{
"mel_path": os.path.join(root, info[0]),
"frames": info[1],
"token_ids_str": info[2],
"speaker": info[3],
}
)
return metas
def __len__(self):
return len(self.metas)
def __getitem__(self, idx):
meta = self.metas[idx]
token_ids = [int(i) for i in meta["token_ids_str"].split(" ")]
text = np.array(token_ids, dtype=np.int32)
mel = np.load(meta["mel_path"])
text_input = text[:-1]
text_output = text[1:]
text_length = text_input.shape[0]
pos_text = np.arange(1, text_length + 1)
pos_mel = np.arange(1, mel.shape[0] + 1)
return {
"text": text,
"text_input": text_input,
"text_output": text_output,
"text_length": text_length,
"mel": mel,
"pos_mel": pos_mel,
"pos_text": pos_text,
}
class AsrCollator(Collator):
def __init__(self, pad_value: float = 0.0):
super().__init__()
self.pad_value = pad_value
def apply(self, batch):
# Puts each data field into a tensor with outer dimension batch size
if isinstance(batch[0], collections.abc.Mapping):  # collections.Mapping was removed in Python 3.10
text = [d["text"] for d in batch]
text_input = [d["text_input"] for d in batch]
text_output = [d["text_output"] for d in batch]
text_length = [d["text_length"] for d in batch]
mel = [d["mel"] for d in batch]
mel_length = [d["mel"].shape[0] for d in batch]
pos_mel = [d["pos_mel"] for d in batch]
pos_text = [d["pos_text"] for d in batch]
text = [
i
for i, _ in sorted(
zip(text, mel_length), key=lambda x: x[1], reverse=True
)
]
text_input = [
i
for i, _ in sorted(
zip(text_input, mel_length), key=lambda x: x[1], reverse=True
)
]
text_output = [
i
for i, _ in sorted(
zip(text_output, mel_length), key=lambda x: x[1], reverse=True
)
]
text_length = [
i
for i, _ in sorted(
zip(text_length, mel_length), key=lambda x: x[1], reverse=True
)
]
mel = [
i
for i, _ in sorted(
zip(mel, mel_length), key=lambda x: x[1], reverse=True
)
]
pos_text = [
i
for i, _ in sorted(
zip(pos_text, mel_length), key=lambda x: x[1], reverse=True
)
]
pos_mel = [
i
for i, _ in sorted(
zip(pos_mel, mel_length), key=lambda x: x[1], reverse=True
)
]
mel_length = sorted(mel_length, reverse=True)
# Pad sequences to the largest length in the batch
text_input = _prepare_data(text_input).astype(np.int32)
text_output = _prepare_data(text_output).astype(np.int32)
mel = _pad_mel(mel)
pos_mel = _prepare_data(pos_mel).astype(np.int32)
pos_text = _prepare_data(pos_text).astype(np.int32)
return (
mge.Tensor(text_input),
mge.Tensor(text_output),
mge.Tensor(mel),
mge.Tensor(pos_text),
mge.Tensor(pos_mel),
mge.Tensor(text_length),
mge.Tensor(mel_length),
)
raise TypeError(
(
"batch must contain tensors, numbers, dicts or lists; found {}".format(
type(batch[0])
)
)
)
def collate_fn_transformer_test(batch):
# Puts each data field into a tensor with outer dimension batch size
# if isinstance(batch[0], collections.Mapping):
text = [batch["text"]] # for d in batch]
text_input = batch["text_input"]
text_output = batch["text_output"]
text_length = batch["text_length"]
mel = [batch["mel"]]
mel_length = [batch["mel"].shape[1]]
pos_mel = batch["pos_mel"]
pos_text = batch["pos_text"]
text = [
i for i, _ in sorted(zip(text, mel_length), key=lambda x: x[1], reverse=True)
]
text_input = [
i
for i, _ in sorted(
zip(text_input, mel_length), key=lambda x: x[1], reverse=True
)
]
text_output = [
i
for i, _ in sorted(
zip(text_output, mel_length), key=lambda x: x[1], reverse=True
)
]
text_length = [
i
for i, _ in sorted(
zip(text_length, mel_length), key=lambda x: x[1], reverse=True
)
]
mel = [i for i, _ in sorted(zip(mel, mel_length), key=lambda x: x[1], reverse=True)]
pos_text = [
i
for i, _ in sorted(zip(pos_text, mel_length), key=lambda x: x[1], reverse=True)
]
pos_mel = [
i for i, _ in sorted(zip(pos_mel, mel_length), key=lambda x: x[1], reverse=True)
]
mel_length = sorted(mel_length, reverse=True)
# Pad sequences to the largest length in the batch
text_input = _prepare_data(text_input).astype(np.int32)
text_output = _prepare_data(text_output).astype(np.int32)
mel = _pad_mel(mel[0])
pos_mel = _prepare_data(pos_mel).astype(np.int32)
pos_text = _prepare_data(pos_text).astype(np.int32)
return (
mge.Tensor(text_input),
mge.Tensor(text_output),
mge.Tensor(mel),
mge.Tensor(pos_text),
mge.Tensor(pos_mel),
mge.Tensor(text_length),
mge.Tensor(mel_length),
)
raise TypeError(
(
"batch must contain tensors, numbers, dicts or lists; found {}".format(
type(batch[0])
)
)
)
############################ Utils ###################################
def _pad_data(x, length):
_pad = 0
return np.pad(x, (0, length - x.shape[0]), mode="constant", constant_values=_pad)
def _prepare_data(inputs):
max_len = max((len(x) for x in inputs))
return np.stack([_pad_data(x, max_len) for x in inputs])
def _pad_mel(inputs):
_pad = 0
def _pad_one(x, max_len):
mel_len = x.shape[0]
return np.pad(
x, [[0, max_len - mel_len], [0, 0]], mode="constant", constant_values=_pad
)
max_len = max((x.shape[0] for x in inputs))
return np.stack([_pad_one(x, max_len) for x in inputs])
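# A minimal sketch (not part of the original code) of the padding helpers on toy
# inputs: _prepare_data right-pads 1-D token/position arrays with zeros, while
# _pad_mel right-pads 2-D (frames, n_mels) spectrograms along the time axis.
# `_padding_example` is a hypothetical helper added only for illustration.
def _padding_example():
    tokens = [np.array([1, 2, 3]), np.array([4, 5])]
    mels = [np.zeros((5, 80)), np.zeros((3, 80))]
    return _prepare_data(tokens).shape, _pad_mel(mels).shape  # (2, 3), (2, 5, 80)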
|
[
"megengine.Tensor"
] |
[((7013, 7087), 'numpy.pad', 'np.pad', (['x', '(0, length - x.shape[0])'], {'mode': '"""constant"""', 'constant_values': '_pad'}), "(x, (0, length - x.shape[0]), mode='constant', constant_values=_pad)\n", (7019, 7087), True, 'import numpy as np\n'), ((1250, 1285), 'numpy.array', 'np.array', (['token_ids'], {'dtype': 'np.int32'}), '(token_ids, dtype=np.int32)\n', (1258, 1285), True, 'import numpy as np\n'), ((1300, 1325), 'numpy.load', 'np.load', (["meta['mel_path']"], {}), "(meta['mel_path'])\n", (1307, 1325), True, 'import numpy as np\n'), ((1449, 1478), 'numpy.arange', 'np.arange', (['(1)', '(text_length + 1)'], {}), '(1, text_length + 1)\n', (1458, 1478), True, 'import numpy as np\n'), ((1497, 1527), 'numpy.arange', 'np.arange', (['(1)', '(mel.shape[0] + 1)'], {}), '(1, mel.shape[0] + 1)\n', (1506, 1527), True, 'import numpy as np\n'), ((6499, 6521), 'megengine.Tensor', 'mge.Tensor', (['text_input'], {}), '(text_input)\n', (6509, 6521), True, 'import megengine as mge\n'), ((6531, 6554), 'megengine.Tensor', 'mge.Tensor', (['text_output'], {}), '(text_output)\n', (6541, 6554), True, 'import megengine as mge\n'), ((6564, 6579), 'megengine.Tensor', 'mge.Tensor', (['mel'], {}), '(mel)\n', (6574, 6579), True, 'import megengine as mge\n'), ((6589, 6609), 'megengine.Tensor', 'mge.Tensor', (['pos_text'], {}), '(pos_text)\n', (6599, 6609), True, 'import megengine as mge\n'), ((6619, 6638), 'megengine.Tensor', 'mge.Tensor', (['pos_mel'], {}), '(pos_mel)\n', (6629, 6638), True, 'import megengine as mge\n'), ((6648, 6671), 'megengine.Tensor', 'mge.Tensor', (['text_length'], {}), '(text_length)\n', (6658, 6671), True, 'import megengine as mge\n'), ((6681, 6703), 'megengine.Tensor', 'mge.Tensor', (['mel_length'], {}), '(mel_length)\n', (6691, 6703), True, 'import megengine as mge\n'), ((7334, 7420), 'numpy.pad', 'np.pad', (['x', '[[0, max_len - mel_len], [0, 0]]'], {'mode': '"""constant"""', 'constant_values': '_pad'}), "(x, [[0, max_len - mel_len], [0, 0]], mode='constant',\n constant_values=_pad)\n", (7340, 7420), True, 'import numpy as np\n'), ((607, 644), 'os.path.join', 'os.path.join', (['root', 'f"""{data_set}.txt"""'], {}), "(root, f'{data_set}.txt')\n", (619, 644), False, 'import os\n'), ((4312, 4334), 'megengine.Tensor', 'mge.Tensor', (['text_input'], {}), '(text_input)\n', (4322, 4334), True, 'import megengine as mge\n'), ((4352, 4375), 'megengine.Tensor', 'mge.Tensor', (['text_output'], {}), '(text_output)\n', (4362, 4375), True, 'import megengine as mge\n'), ((4393, 4408), 'megengine.Tensor', 'mge.Tensor', (['mel'], {}), '(mel)\n', (4403, 4408), True, 'import megengine as mge\n'), ((4426, 4446), 'megengine.Tensor', 'mge.Tensor', (['pos_text'], {}), '(pos_text)\n', (4436, 4446), True, 'import megengine as mge\n'), ((4464, 4483), 'megengine.Tensor', 'mge.Tensor', (['pos_mel'], {}), '(pos_mel)\n', (4474, 4483), True, 'import megengine as mge\n'), ((4501, 4524), 'megengine.Tensor', 'mge.Tensor', (['text_length'], {}), '(text_length)\n', (4511, 4524), True, 'import megengine as mge\n'), ((4542, 4564), 'megengine.Tensor', 'mge.Tensor', (['mel_length'], {}), '(mel_length)\n', (4552, 4564), True, 'import megengine as mge\n'), ((818, 845), 'os.path.join', 'os.path.join', (['root', 'info[0]'], {}), '(root, info[0])\n', (830, 845), False, 'import os\n')]
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import bisect
import multiprocessing
import os
import time
# pylint: disable=import-error
import model as resnet_model
import megengine
import megengine.autodiff as autodiff
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.optimizer as optim
logging = megengine.logger.get_logger()
def main():
parser = argparse.ArgumentParser(description="MegEngine ImageNet Training")
parser.add_argument("-d", "--data", metavar="DIR", help="path to imagenet dataset")
parser.add_argument(
"-a",
"--arch",
default="resnet50",
help="model architecture (default: resnet50)",
)
parser.add_argument(
"-n",
"--ngpus",
default=None,
type=int,
help="number of GPUs per node (default: None, use all available GPUs)",
)
parser.add_argument(
"--save",
metavar="DIR",
default="output",
help="path to save checkpoint and log",
)
parser.add_argument(
"--epochs",
default=10,
type=int,
help="number of total epochs to run (default: 10)",
)
parser.add_argument(
"-b",
"--batch-size",
metavar="SIZE",
default=64,
type=int,
help="batch size for single GPU (default: 64)",
)
parser.add_argument(
"--lr",
"--learning-rate",
metavar="LR",
default=0.025,
type=float,
help="learning rate for single GPU (default: 0.025)",
)
parser.add_argument(
"--momentum", default=0.9, type=float, help="momentum (default: 0.9)"
)
parser.add_argument(
"--weight-decay", default=1e-4, type=float, help="weight decay (default: 1e-4)"
)
parser.add_argument("-j", "--workers", default=2, type=int)
parser.add_argument(
"-p",
"--print-freq",
default=20,
type=int,
metavar="N",
help="print frequency (default: 20)",
)
parser.add_argument("--dist-addr", default="localhost")
parser.add_argument("--dist-port", default=23456, type=int)
parser.add_argument("--world-size", default=1, type=int)
parser.add_argument("--rank", default=0, type=int)
parser.add_argument(
"--enable-dtr",
dest="enable_dtr",
action="store_true",
help="Enable DTR")
args = parser.parse_args()
# create server if is master
if args.rank <= 0:
server = dist.Server(port=args.dist_port) # pylint: disable=unused-variable # noqa: F841
# get device count
with multiprocessing.Pool(1) as pool:
ngpus_per_node, _ = pool.map(megengine.get_device_count, ["gpu", "cpu"])
if args.ngpus:
ngpus_per_node = args.ngpus
# launch processes
procs = []
for local_rank in range(ngpus_per_node):
p = multiprocessing.Process(
target=worker,
kwargs=dict(
rank=args.rank * ngpus_per_node + local_rank,
world_size=args.world_size * ngpus_per_node,
ngpus_per_node=ngpus_per_node,
args=args,
),
)
p.start()
procs.append(p)
# join processes
for p in procs:
p.join()
def worker(rank, world_size, ngpus_per_node, args):
# pylint: disable=too-many-statements
# enable DTR
if args.enable_dtr:
from megengine.utils.dtr import DTR
ds = DTR(memory_budget=5*1024**3)
if rank == 0:
os.makedirs(os.path.join(args.save, args.arch), exist_ok=True)
megengine.logger.set_log_file(os.path.join(args.save, args.arch, "log.txt"))
# init process group
if world_size > 1:
dist.init_process_group(
master_ip=args.dist_addr,
port=args.dist_port,
world_size=world_size,
rank=rank,
device=rank % ngpus_per_node,
backend="nccl",
)
logging.info(
"init process group rank %d / %d", dist.get_rank(), dist.get_world_size()
)
# build dataset
train_dataloader, valid_dataloader = build_dataset(args)
train_queue = iter(train_dataloader) # infinite
steps_per_epoch = 1280000 // (world_size * args.batch_size)
# build model
model = resnet_model.__dict__[args.arch]()
# Sync parameters
if world_size > 1:
dist.bcast_list_(model.parameters(), dist.WORLD)
# Autodiff gradient manager
gm = autodiff.GradManager().attach(
model.parameters(),
callbacks=dist.make_allreduce_cb("SUM") if world_size > 1 else None,
)
# Optimizer
opt = optim.SGD(
model.parameters(),
lr=args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay * world_size, # scale weight decay in "SUM" mode
)
# train and valid func
def train_step(image, label):
with gm:
logits = model(image)
loss = F.nn.cross_entropy(logits, label)
acc1, acc5 = F.topk_accuracy(logits, label, topk=(1, 5))
gm.backward(loss)
opt.step().clear_grad()
return loss, acc1, acc5
def valid_step(image, label):
logits = model(image)
loss = F.nn.cross_entropy(logits, label)
acc1, acc5 = F.topk_accuracy(logits, label, topk=(1, 5))
# calculate mean values
if world_size > 1:
loss = F.distributed.all_reduce_sum(loss) / world_size
acc1 = F.distributed.all_reduce_sum(acc1) / world_size
acc5 = F.distributed.all_reduce_sum(acc5) / world_size
return loss, acc1, acc5
# multi-step learning rate scheduler with warmup
def adjust_learning_rate(step):
lr = args.lr * 0.1 ** bisect.bisect_right(
[30 * steps_per_epoch, 60 * steps_per_epoch, 80 * steps_per_epoch], step
)
if step < 5 * steps_per_epoch: # warmup
lr = args.lr * (step / (5 * steps_per_epoch))
for param_group in opt.param_groups:
param_group["lr"] = lr
return lr
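# Worked example of the schedule above (not in the original script), assuming the
# defaults lr=0.025 and, say, steps_per_epoch=5000: the rate ramps linearly from 0
# to 0.025 over the first 5 epochs, stays at 0.025 until epoch 30, then drops to
# 0.0025 for epochs 30-59, 0.00025 for epochs 60-79, and 0.000025 from epoch 80 on.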
# start training
objs = AverageMeter("Loss")
top1 = AverageMeter("Acc@1")
top5 = AverageMeter("Acc@5")
clck = AverageMeter("Time")
for step in range(0, args.epochs * steps_per_epoch):
lr = adjust_learning_rate(step)
t = time.time()
image, label = next(train_queue)
image = megengine.tensor(image, dtype="float32")
label = megengine.tensor(label, dtype="int32")
loss, acc1, acc5 = train_step(image, label)
objs.update(loss.item())
top1.update(100 * acc1.item())
top5.update(100 * acc5.item())
clck.update(time.time() - t)
if step % args.print_freq == 0 and dist.get_rank() == 0:
logging.info(
"Epoch %d Step %d, LR %.4f, %s %s %s %s",
step // steps_per_epoch,
step,
lr,
objs,
top1,
top5,
clck,
)
objs.reset()
top1.reset()
top5.reset()
clck.reset()
if (step + 1) % steps_per_epoch == 0:
model.eval()
_, valid_acc1, valid_acc5 = valid(valid_step, valid_dataloader, args)
model.train()
logging.info(
"Epoch %d Test Acc@1 %.3f, Acc@5 %.3f",
(step + 1) // steps_per_epoch,
valid_acc1,
valid_acc5,
)
megengine.save(
{
"epoch": (step + 1) // steps_per_epoch,
"state_dict": model.state_dict(),
},
os.path.join(args.save, args.arch, "checkpoint.pkl"),
)
def valid(func, data_queue, args):
objs = AverageMeter("Loss")
top1 = AverageMeter("Acc@1")
top5 = AverageMeter("Acc@5")
clck = AverageMeter("Time")
t = time.time()
for step, (image, label) in enumerate(data_queue):
image = megengine.tensor(image, dtype="float32")
label = megengine.tensor(label, dtype="int32")
n = image.shape[0]
loss, acc1, acc5 = func(image, label)
objs.update(loss.item(), n)
top1.update(100 * acc1.item(), n)
top5.update(100 * acc5.item(), n)
clck.update(time.time() - t, n)
t = time.time()
if step % args.print_freq == 0 and dist.get_rank() == 0:
logging.info("Test step %d, %s %s %s %s", step, objs, top1, top5, clck)
return objs.avg, top1.avg, top5.avg
def build_dataset(args):
train_dataset = data.dataset.ImageNet(args.data, train=True)
train_sampler = data.Infinite(
data.RandomSampler(train_dataset, batch_size=args.batch_size, drop_last=True)
)
train_dataloader = data.DataLoader(
train_dataset,
sampler=train_sampler,
transform=T.Compose(
[ # Baseline Augmentation for small models
T.RandomResizedCrop(224),
T.RandomHorizontalFlip(),
T.Normalize(
mean=[103.530, 116.280, 123.675], std=[57.375, 57.120, 58.395]
), # BGR
T.ToMode("CHW"),
]
)
if args.arch in ("resnet18", "resnet34")
else T.Compose(
[ # Facebook Augmentation for large models
T.RandomResizedCrop(224),
T.RandomHorizontalFlip(),
T.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4),
T.Normalize(
mean=[103.530, 116.280, 123.675], std=[57.375, 57.120, 58.395]
), # BGR
T.ToMode("CHW"),
]
),
num_workers=args.workers,
)
valid_dataset = data.dataset.ImageNet(args.data, train=False)
valid_sampler = data.SequentialSampler(
valid_dataset, batch_size=100, drop_last=False
)
valid_dataloader = data.DataLoader(
valid_dataset,
sampler=valid_sampler,
transform=T.Compose(
[
T.Resize(256),
T.CenterCrop(224),
T.Normalize(
mean=[103.530, 116.280, 123.675], std=[57.375, 57.120, 58.395]
), # BGR
T.ToMode("CHW"),
]
),
num_workers=args.workers,
)
return train_dataloader, valid_dataloader
class AverageMeter:
"""Computes and stores the average and current value"""
def __init__(self, name, fmt=":.3f"):
self.name = name
self.fmt = fmt
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __str__(self):
fmtstr = "{name} {val" + self.fmt + "} ({avg" + self.fmt + "})"
return fmtstr.format(**self.__dict__)
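# A minimal usage sketch (not part of the original script): AverageMeter keeps a
# running sum and count, so update(value, n) can weight a whole batch of size n.
# `_average_meter_example` is a hypothetical helper added only for illustration.
def _average_meter_example():
    meter = AverageMeter("Loss")
    meter.update(2.0, n=4)  # e.g. mean loss 2.0 over a batch of 4 samples
    meter.update(1.0, n=4)
    return str(meter)  # "Loss 1.000 (1.500)"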
if __name__ == "__main__":
main()
|
[
"megengine.functional.topk_accuracy",
"megengine.data.transform.RandomHorizontalFlip",
"megengine.distributed.init_process_group",
"megengine.utils.dtr.DTR",
"megengine.distributed.get_rank",
"megengine.functional.distributed.all_reduce_sum",
"megengine.data.transform.CenterCrop",
"megengine.distributed.get_world_size",
"megengine.tensor",
"megengine.data.transform.Resize",
"megengine.functional.nn.cross_entropy",
"megengine.data.SequentialSampler",
"megengine.distributed.Server",
"megengine.data.transform.Normalize",
"megengine.logger.get_logger",
"megengine.data.transform.ToMode",
"megengine.distributed.make_allreduce_cb",
"megengine.data.RandomSampler",
"megengine.data.transform.RandomResizedCrop",
"megengine.data.transform.ColorJitter",
"megengine.data.dataset.ImageNet",
"megengine.autodiff.GradManager"
] |
[((753, 782), 'megengine.logger.get_logger', 'megengine.logger.get_logger', ([], {}), '()\n', (780, 782), False, 'import megengine\n'), ((809, 875), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""MegEngine ImageNet Training"""'}), "(description='MegEngine ImageNet Training')\n", (832, 875), False, 'import argparse\n'), ((8402, 8413), 'time.time', 'time.time', ([], {}), '()\n', (8411, 8413), False, 'import time\n'), ((9079, 9123), 'megengine.data.dataset.ImageNet', 'data.dataset.ImageNet', (['args.data'], {'train': '(True)'}), '(args.data, train=True)\n', (9100, 9123), True, 'import megengine.data as data\n'), ((10255, 10300), 'megengine.data.dataset.ImageNet', 'data.dataset.ImageNet', (['args.data'], {'train': '(False)'}), '(args.data, train=False)\n', (10276, 10300), True, 'import megengine.data as data\n'), ((10321, 10391), 'megengine.data.SequentialSampler', 'data.SequentialSampler', (['valid_dataset'], {'batch_size': '(100)', 'drop_last': '(False)'}), '(valid_dataset, batch_size=100, drop_last=False)\n', (10343, 10391), True, 'import megengine.data as data\n'), ((2923, 2955), 'megengine.distributed.Server', 'dist.Server', ([], {'port': 'args.dist_port'}), '(port=args.dist_port)\n', (2934, 2955), True, 'import megengine.distributed as dist\n'), ((3038, 3061), 'multiprocessing.Pool', 'multiprocessing.Pool', (['(1)'], {}), '(1)\n', (3058, 3061), False, 'import multiprocessing\n'), ((3897, 3929), 'megengine.utils.dtr.DTR', 'DTR', ([], {'memory_budget': '(5 * 1024 ** 3)'}), '(memory_budget=5 * 1024 ** 3)\n', (3900, 3929), False, 'from megengine.utils.dtr import DTR\n'), ((4156, 4315), 'megengine.distributed.init_process_group', 'dist.init_process_group', ([], {'master_ip': 'args.dist_addr', 'port': 'args.dist_port', 'world_size': 'world_size', 'rank': 'rank', 'device': '(rank % ngpus_per_node)', 'backend': '"""nccl"""'}), "(master_ip=args.dist_addr, port=args.dist_port,\n world_size=world_size, rank=rank, device=rank % ngpus_per_node, backend\n ='nccl')\n", (4179, 4315), True, 'import megengine.distributed as dist\n'), ((5686, 5719), 'megengine.functional.nn.cross_entropy', 'F.nn.cross_entropy', (['logits', 'label'], {}), '(logits, label)\n', (5704, 5719), True, 'import megengine.functional as F\n'), ((5741, 5784), 'megengine.functional.topk_accuracy', 'F.topk_accuracy', (['logits', 'label'], {'topk': '(1, 5)'}), '(logits, label, topk=(1, 5))\n', (5756, 5784), True, 'import megengine.functional as F\n'), ((6781, 6792), 'time.time', 'time.time', ([], {}), '()\n', (6790, 6792), False, 'import time\n'), ((6851, 6891), 'megengine.tensor', 'megengine.tensor', (['image'], {'dtype': '"""float32"""'}), "(image, dtype='float32')\n", (6867, 6891), False, 'import megengine\n'), ((6908, 6946), 'megengine.tensor', 'megengine.tensor', (['label'], {'dtype': '"""int32"""'}), "(label, dtype='int32')\n", (6924, 6946), False, 'import megengine\n'), ((8485, 8525), 'megengine.tensor', 'megengine.tensor', (['image'], {'dtype': '"""float32"""'}), "(image, dtype='float32')\n", (8501, 8525), False, 'import megengine\n'), ((8542, 8580), 'megengine.tensor', 'megengine.tensor', (['label'], {'dtype': '"""int32"""'}), "(label, dtype='int32')\n", (8558, 8580), False, 'import megengine\n'), ((8829, 8840), 'time.time', 'time.time', ([], {}), '()\n', (8838, 8840), False, 'import time\n'), ((9167, 9244), 'megengine.data.RandomSampler', 'data.RandomSampler', (['train_dataset'], {'batch_size': 'args.batch_size', 'drop_last': '(True)'}), '(train_dataset, batch_size=args.batch_size, 
drop_last=True)\n', (9185, 9244), True, 'import megengine.data as data\n'), ((3964, 3998), 'os.path.join', 'os.path.join', (['args.save', 'args.arch'], {}), '(args.save, args.arch)\n', (3976, 3998), False, 'import os\n'), ((4053, 4098), 'os.path.join', 'os.path.join', (['args.save', 'args.arch', '"""log.txt"""'], {}), "(args.save, args.arch, 'log.txt')\n", (4065, 4098), False, 'import os\n'), ((4459, 4474), 'megengine.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (4472, 4474), True, 'import megengine.distributed as dist\n'), ((4476, 4497), 'megengine.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (4495, 4497), True, 'import megengine.distributed as dist\n'), ((4918, 4940), 'megengine.autodiff.GradManager', 'autodiff.GradManager', ([], {}), '()\n', (4938, 4940), True, 'import megengine.autodiff as autodiff\n'), ((5405, 5438), 'megengine.functional.nn.cross_entropy', 'F.nn.cross_entropy', (['logits', 'label'], {}), '(logits, label)\n', (5423, 5438), True, 'import megengine.functional as F\n'), ((5464, 5507), 'megengine.functional.topk_accuracy', 'F.topk_accuracy', (['logits', 'label'], {'topk': '(1, 5)'}), '(logits, label, topk=(1, 5))\n', (5479, 5507), True, 'import megengine.functional as F\n'), ((4995, 5024), 'megengine.distributed.make_allreduce_cb', 'dist.make_allreduce_cb', (['"""SUM"""'], {}), "('SUM')\n", (5017, 5024), True, 'import megengine.distributed as dist\n'), ((5863, 5897), 'megengine.functional.distributed.all_reduce_sum', 'F.distributed.all_reduce_sum', (['loss'], {}), '(loss)\n', (5891, 5897), True, 'import megengine.functional as F\n'), ((5930, 5964), 'megengine.functional.distributed.all_reduce_sum', 'F.distributed.all_reduce_sum', (['acc1'], {}), '(acc1)\n', (5958, 5964), True, 'import megengine.functional as F\n'), ((5997, 6031), 'megengine.functional.distributed.all_reduce_sum', 'F.distributed.all_reduce_sum', (['acc5'], {}), '(acc5)\n', (6025, 6031), True, 'import megengine.functional as F\n'), ((6197, 6294), 'bisect.bisect_right', 'bisect.bisect_right', (['[30 * steps_per_epoch, 60 * steps_per_epoch, 80 * steps_per_epoch]', 'step'], {}), '([30 * steps_per_epoch, 60 * steps_per_epoch, 80 *\n steps_per_epoch], step)\n', (6216, 6294), False, 'import bisect\n'), ((7132, 7143), 'time.time', 'time.time', ([], {}), '()\n', (7141, 7143), False, 'import time\n'), ((7193, 7208), 'megengine.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (7206, 7208), True, 'import megengine.distributed as dist\n'), ((8158, 8210), 'os.path.join', 'os.path.join', (['args.save', 'args.arch', '"""checkpoint.pkl"""'], {}), "(args.save, args.arch, 'checkpoint.pkl')\n", (8170, 8210), False, 'import os\n'), ((8797, 8808), 'time.time', 'time.time', ([], {}), '()\n', (8806, 8808), False, 'import time\n'), ((8885, 8900), 'megengine.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (8898, 8900), True, 'import megengine.distributed as dist\n'), ((10559, 10572), 'megengine.data.transform.Resize', 'T.Resize', (['(256)'], {}), '(256)\n', (10567, 10572), True, 'import megengine.data.transform as T\n'), ((10590, 10607), 'megengine.data.transform.CenterCrop', 'T.CenterCrop', (['(224)'], {}), '(224)\n', (10602, 10607), True, 'import megengine.data.transform as T\n'), ((10625, 10697), 'megengine.data.transform.Normalize', 'T.Normalize', ([], {'mean': '[103.53, 116.28, 123.675]', 'std': '[57.375, 57.12, 58.395]'}), '(mean=[103.53, 116.28, 123.675], std=[57.375, 57.12, 58.395])\n', (10636, 10697), True, 'import megengine.data.transform as T\n'), ((10763, 10778), 
'megengine.data.transform.ToMode', 'T.ToMode', (['"""CHW"""'], {}), "('CHW')\n", (10771, 10778), True, 'import megengine.data.transform as T\n'), ((9446, 9470), 'megengine.data.transform.RandomResizedCrop', 'T.RandomResizedCrop', (['(224)'], {}), '(224)\n', (9465, 9470), True, 'import megengine.data.transform as T\n'), ((9488, 9512), 'megengine.data.transform.RandomHorizontalFlip', 'T.RandomHorizontalFlip', ([], {}), '()\n', (9510, 9512), True, 'import megengine.data.transform as T\n'), ((9530, 9602), 'megengine.data.transform.Normalize', 'T.Normalize', ([], {'mean': '[103.53, 116.28, 123.675]', 'std': '[57.375, 57.12, 58.395]'}), '(mean=[103.53, 116.28, 123.675], std=[57.375, 57.12, 58.395])\n', (9541, 9602), True, 'import megengine.data.transform as T\n'), ((9668, 9683), 'megengine.data.transform.ToMode', 'T.ToMode', (['"""CHW"""'], {}), "('CHW')\n", (9676, 9683), True, 'import megengine.data.transform as T\n'), ((9854, 9878), 'megengine.data.transform.RandomResizedCrop', 'T.RandomResizedCrop', (['(224)'], {}), '(224)\n', (9873, 9878), True, 'import megengine.data.transform as T\n'), ((9896, 9920), 'megengine.data.transform.RandomHorizontalFlip', 'T.RandomHorizontalFlip', ([], {}), '()\n', (9918, 9920), True, 'import megengine.data.transform as T\n'), ((9938, 9997), 'megengine.data.transform.ColorJitter', 'T.ColorJitter', ([], {'brightness': '(0.4)', 'contrast': '(0.4)', 'saturation': '(0.4)'}), '(brightness=0.4, contrast=0.4, saturation=0.4)\n', (9951, 9997), True, 'import megengine.data.transform as T\n'), ((10015, 10087), 'megengine.data.transform.Normalize', 'T.Normalize', ([], {'mean': '[103.53, 116.28, 123.675]', 'std': '[57.375, 57.12, 58.395]'}), '(mean=[103.53, 116.28, 123.675], std=[57.375, 57.12, 58.395])\n', (10026, 10087), True, 'import megengine.data.transform as T\n'), ((10153, 10168), 'megengine.data.transform.ToMode', 'T.ToMode', (['"""CHW"""'], {}), "('CHW')\n", (10161, 10168), True, 'import megengine.data.transform as T\n')]
|
# -*- coding: utf-8 -*-
import megengine as mge
import megengine.random as rand
import megengine.functional as F
import numpy as np
from config import config
from det_opr.bbox_opr import box_overlap_opr, bbox_transform_opr, box_overlap_ignore_opr
import pdb
def fpn_roi_target(rpn_rois, im_info, gt_boxes, fg_threshold = config.fg_threshold, top_k=1):
return_rois, return_labels = [], []
return_bbox_targets = []
# get per image proposals and gt_boxes
batch_per_gpu = im_info.shape[0]
sampling = True
# is_sample = True if top_k < 2 else False
for bid in range(batch_per_gpu):
gt_boxes_perimg = gt_boxes[bid, :im_info[bid, 5].astype(np.int32), :]
dummy_gt = F.ones([1, gt_boxes_perimg.shape[1]])
batch_inds = F.ones((gt_boxes_perimg.shape[0], 1)) * bid
#if config.proposal_append_gt:
gt_rois = F.concat([batch_inds, gt_boxes_perimg[:, :4]], axis=1)
batch_rois_mask = F.equal(rpn_rois[:, 0], bid) > 0
_, batch_rois_index = F.cond_take(batch_rois_mask, batch_rois_mask)
# batch_roi_mask = rpn_rois[:, 0] == bid
# batch_roi_inds = mask_to_inds(batch_roi_mask)
        all_rois = F.concat([rpn_rois[batch_rois_index], gt_rois], axis=0) if sampling \
else rpn_rois[batch_rois_index]
# all_rois = F.concat([rpn_rois.ai[batch_roi_inds], gt_rois], axis=0)
        gt_boxes_perimg = F.concat([gt_boxes_perimg, dummy_gt], axis=0)
overlaps_normal, overlaps_ignore = box_overlap_ignore_opr(
all_rois[:, 1:5], gt_boxes_perimg)
# overlaps_normal, overlaps_normal_indices = F.argsort(overlaps_normal, descending=True)
# overlaps_ignore, overlaps_ignore_indices = F.argsort(overlaps_ignore, descending=True)
overlaps_normal_indices = F.argsort(overlaps_normal, descending=True)
overlaps_normal = F.gather(overlaps_normal, 1, overlaps_normal_indices)
# overlaps_normal = F.nn.indexing_one_hot(overlaps_normal, overlaps_normal_indices, 1)
        overlaps_ignore_indices = F.argsort(overlaps_ignore, descending=True)
overlaps_ignore = F.gather(overlaps_ignore, 1, overlaps_ignore_indices)
# overlaps_ignore = F.nn.indexing_one_hot(overlaps_ignore, overlaps_ignore_indices, 1)
# gt max and indices, ignore max and indices
max_overlaps_normal = overlaps_normal[:, :top_k].flatten()
gt_assignment_normal = overlaps_normal_indices[:, :top_k].flatten()
max_overlaps_ignore = overlaps_ignore[:, :top_k].flatten()
gt_assignment_ignore = overlaps_ignore_indices[:, :top_k].flatten()
        # construct assignment masks
ignore_assign_mask = (max_overlaps_normal < fg_threshold).astype(np.float32) * (
max_overlaps_ignore > max_overlaps_normal).astype(np.float32)
max_overlaps = max_overlaps_normal * (1 - ignore_assign_mask).astype(np.float32) + \
max_overlaps_ignore * ignore_assign_mask
        gt_assignment = gt_assignment_normal * (1 - ignore_assign_mask) + \
gt_assignment_ignore * ignore_assign_mask
gt_assignment = gt_assignment.astype(np.int32)
labels = gt_boxes_perimg[gt_assignment, 4]
fg_mask = (max_overlaps >= fg_threshold).astype(np.float32) * (1 - F.equal(labels, config.ignore_label))
bg_mask = (max_overlaps < config.bg_threshold_high).astype(np.float32) * (
max_overlaps >= config.bg_threshold_low).astype(np.float32)
fg_mask = fg_mask.reshape(-1, top_k)
bg_mask = bg_mask.reshape(-1, top_k)
pos_max = config.num_rois * config.fg_ratio
fg_inds_mask = _bernoulli_sample_masks(fg_mask[:, 0], pos_max, 1) if sampling else F.equal(fg_mask[:, 0], 0)
neg_max = config.num_rois - fg_inds_mask.sum()
bg_inds_mask = _bernoulli_sample_masks(bg_mask[:, 0], neg_max, 1) if sampling else F.equal(bg_mask[:, 0], 0)
labels = labels * fg_mask.reshape(-1)
keep_mask = fg_inds_mask + bg_inds_mask
keep_mask = keep_mask + F.equal(keep_mask.sum(), 0)
# keep_inds = mask_to_inds(keep_mask)
_, keep_inds = F.cond_take(keep_mask > 0, keep_mask)
#keep_inds = keep_inds[:F.minimum(config.num_rois, keep_inds.shapeof()[0])]
# labels
labels = labels.reshape(-1, top_k)[keep_inds]
gt_assignment = gt_assignment.reshape(-1, top_k)[keep_inds].reshape(-1).astype(np.int32)
target_boxes = gt_boxes_perimg[gt_assignment, :4]
# rois = all_rois.ai[keep_inds]
rois = all_rois[keep_inds]
# target_shape = (rois.shapeof()[0], top_k, rois.shapeof()[-1])
n, c = rois.shape[0], rois.shape[1]
target_rois = F.broadcast_to(F.expand_dims(rois, 1), (n, top_k, c)).reshape(-1, c)
# target_rois = F.add_axis(rois, 1).broadcast(target_shape).reshape(-1, rois.shapeof()[-1])
bbox_targets = bbox_transform_opr(target_rois[:, 1:5], target_boxes[:, :4])
if config.rcnn_bbox_normalize_targets:
std_opr = mge.tensor(config.bbox_normalize_stds[None, :]).to(rois.device)
mean_opr = mge.tensor(config.bbox_normalize_means[None, :]).to(rois.device)
minus_opr = mean_opr / std_opr
bbox_targets = bbox_targets / std_opr - minus_opr
bbox_targets = bbox_targets.reshape(-1, top_k * 4)
return_rois.append(rois)
return_labels.append(labels)
return_bbox_targets.append(bbox_targets)
if config.batch_per_gpu == 1:
rois, labels, bbox_targets = rois.detach(), labels.detach(), bbox_targets.detach()
return rois, labels, bbox_targets
# return F.zero_grad(rois), F.zero_grad(labels), F.zero_grad(bbox_targets)
else:
return_rois = F.concat(return_rois, axis=0)
return_labels = F.concat(return_labels, axis=0)
return_bbox_targets = F.concat(return_bbox_targets, axis=0)
return_rois = return_rois.detach()
return_labels = return_labels.detach()
return_bbox_targets = return_bbox_targets.detach()
return return_rois, return_labels, return_bbox_targets
# rois, labels, bbox_targets = return_rois.detach(), return_labels.detach(), return_bbox_targets.detach()
# return rois, labels, bbox_targets
# return F.zero_grad(return_rois), F.zero_grad(return_labels), F.zero_grad(return_bbox_targets)
def _bernoulli_sample_masks(masks, num_samples, sample_value):
""" Using the bernoulli sampling method"""
sample_mask = F.equal(masks, sample_value)
num_mask = sample_mask.sum()
num_final_samples = F.minimum(num_mask, num_samples)
# here, we use the bernoulli probability to sample the anchors
sample_prob = num_final_samples / num_mask
# uniform_rng = rand.uniform(sample_mask.shapeof()[0])
uniform_rng = rand.uniform(0, 1, sample_mask.shape)
after_sampled_mask = (uniform_rng <= sample_prob) * sample_mask
return after_sampled_mask
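# Illustrative usage sketch added for exposition; not part of the original
# file. The mask values and sample budget below are made up.
def _example_bernoulli_sampling():
    """Sample at most two foreground entries from a toy binary mask."""
    fg_mask = mge.tensor(np.array([1.0, 0.0, 1.0, 1.0], dtype=np.float32))
    # each positive entry is kept with probability num_samples / num_positives
    kept = _bernoulli_sample_masks(fg_mask, num_samples=2, sample_value=1)
    return kept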
|
[
"megengine.functional.gather",
"megengine.functional.minimum",
"megengine.functional.argsort",
"megengine.tensor",
"megengine.functional.cond_take",
"megengine.random.uniform",
"megengine.functional.equal",
"megengine.functional.expand_dims",
"megengine.functional.concat",
"megengine.functional.ones"
] |
[((6507, 6535), 'megengine.functional.equal', 'F.equal', (['masks', 'sample_value'], {}), '(masks, sample_value)\n', (6514, 6535), True, 'import megengine.functional as F\n'), ((6593, 6625), 'megengine.functional.minimum', 'F.minimum', (['num_mask', 'num_samples'], {}), '(num_mask, num_samples)\n', (6602, 6625), True, 'import megengine.functional as F\n'), ((6817, 6854), 'megengine.random.uniform', 'rand.uniform', (['(0)', '(1)', 'sample_mask.shape'], {}), '(0, 1, sample_mask.shape)\n', (6829, 6854), True, 'import megengine.random as rand\n'), ((706, 743), 'megengine.functional.ones', 'F.ones', (['[1, gt_boxes_perimg.shape[1]]'], {}), '([1, gt_boxes_perimg.shape[1]])\n', (712, 743), True, 'import megengine.functional as F\n'), ((867, 921), 'megengine.functional.concat', 'F.concat', (['[batch_inds, gt_boxes_perimg[:, :4]]'], {'axis': '(1)'}), '([batch_inds, gt_boxes_perimg[:, :4]], axis=1)\n', (875, 921), True, 'import megengine.functional as F\n'), ((1011, 1056), 'megengine.functional.cond_take', 'F.cond_take', (['batch_rois_mask', 'batch_rois_mask'], {}), '(batch_rois_mask, batch_rois_mask)\n', (1022, 1056), True, 'import megengine.functional as F\n'), ((1408, 1453), 'megengine.functional.concat', 'F.concat', (['[gt_boxes_perimg, dummy_gt]'], {'axis': '(0)'}), '([gt_boxes_perimg, dummy_gt], axis=0)\n', (1416, 1453), True, 'import megengine.functional as F\n'), ((1496, 1553), 'det_opr.bbox_opr.box_overlap_ignore_opr', 'box_overlap_ignore_opr', (['all_rois[:, 1:5]', 'gt_boxes_perimg'], {}), '(all_rois[:, 1:5], gt_boxes_perimg)\n', (1518, 1553), False, 'from det_opr.bbox_opr import box_overlap_opr, bbox_transform_opr, box_overlap_ignore_opr\n'), ((1800, 1843), 'megengine.functional.argsort', 'F.argsort', (['overlaps_normal'], {'descending': '(True)'}), '(overlaps_normal, descending=True)\n', (1809, 1843), True, 'import megengine.functional as F\n'), ((1870, 1923), 'megengine.functional.gather', 'F.gather', (['overlaps_normal', '(1)', 'overlaps_normal_indices'], {}), '(overlaps_normal, 1, overlaps_normal_indices)\n', (1878, 1923), True, 'import megengine.functional as F\n'), ((2053, 2096), 'megengine.functional.argsort', 'F.argsort', (['overlaps_ignore'], {'descending': '(True)'}), '(overlaps_ignore, descending=True)\n', (2062, 2096), True, 'import megengine.functional as F\n'), ((2125, 2178), 'megengine.functional.gather', 'F.gather', (['overlaps_ignore', '(1)', 'overlaps_ignore_indices'], {}), '(overlaps_ignore, 1, overlaps_ignore_indices)\n', (2133, 2178), True, 'import megengine.functional as F\n'), ((4149, 4186), 'megengine.functional.cond_take', 'F.cond_take', (['(keep_mask > 0)', 'keep_mask'], {}), '(keep_mask > 0, keep_mask)\n', (4160, 4186), True, 'import megengine.functional as F\n'), ((4902, 4962), 'det_opr.bbox_opr.bbox_transform_opr', 'bbox_transform_opr', (['target_rois[:, 1:5]', 'target_boxes[:, :4]'], {}), '(target_rois[:, 1:5], target_boxes[:, :4])\n', (4920, 4962), False, 'from det_opr.bbox_opr import box_overlap_opr, bbox_transform_opr, box_overlap_ignore_opr\n'), ((5749, 5778), 'megengine.functional.concat', 'F.concat', (['return_rois'], {'axis': '(0)'}), '(return_rois, axis=0)\n', (5757, 5778), True, 'import megengine.functional as F\n'), ((5803, 5834), 'megengine.functional.concat', 'F.concat', (['return_labels'], {'axis': '(0)'}), '(return_labels, axis=0)\n', (5811, 5834), True, 'import megengine.functional as F\n'), ((5865, 5902), 'megengine.functional.concat', 'F.concat', (['return_bbox_targets'], {'axis': '(0)'}), '(return_bbox_targets, axis=0)\n', (5873, 5902), 
True, 'import megengine.functional as F\n'), ((766, 803), 'megengine.functional.ones', 'F.ones', (['(gt_boxes_perimg.shape[0], 1)'], {}), '((gt_boxes_perimg.shape[0], 1))\n', (772, 803), True, 'import megengine.functional as F\n'), ((948, 976), 'megengine.functional.equal', 'F.equal', (['rpn_rois[:, 0]', 'bid'], {}), '(rpn_rois[:, 0], bid)\n', (955, 976), True, 'import megengine.functional as F\n'), ((1189, 1244), 'megengine.functional.concat', 'F.concat', (['[rpn_rois[batch_rois_index], gt_rois]'], {'axis': '(0)'}), '([rpn_rois[batch_rois_index], gt_rois], axis=0)\n', (1197, 1244), True, 'import megengine.functional as F\n'), ((3727, 3752), 'megengine.functional.equal', 'F.equal', (['fg_mask[:, 0]', '(0)'], {}), '(fg_mask[:, 0], 0)\n', (3734, 3752), True, 'import megengine.functional as F\n'), ((3899, 3924), 'megengine.functional.equal', 'F.equal', (['bg_mask[:, 0]', '(0)'], {}), '(bg_mask[:, 0], 0)\n', (3906, 3924), True, 'import megengine.functional as F\n'), ((3296, 3332), 'megengine.functional.equal', 'F.equal', (['labels', 'config.ignore_label'], {}), '(labels, config.ignore_label)\n', (3303, 3332), True, 'import megengine.functional as F\n'), ((4725, 4747), 'megengine.functional.expand_dims', 'F.expand_dims', (['rois', '(1)'], {}), '(rois, 1)\n', (4738, 4747), True, 'import megengine.functional as F\n'), ((5032, 5079), 'megengine.tensor', 'mge.tensor', (['config.bbox_normalize_stds[None, :]'], {}), '(config.bbox_normalize_stds[None, :])\n', (5042, 5079), True, 'import megengine as mge\n'), ((5119, 5167), 'megengine.tensor', 'mge.tensor', (['config.bbox_normalize_means[None, :]'], {}), '(config.bbox_normalize_means[None, :])\n', (5129, 5167), True, 'import megengine as mge\n')]
|
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
import copy
from typing import Optional, Sequence
import cv2
import megengine.data as data
import megengine.data.transform as T
import numpy as np
from basecore.config import ConfigDict
from loguru import logger
from basecls.utils import registers
from .augment import WARP_PARAMS, TorchAutoAugment, TorchRandAugment
from .const import CV2_INTERP, PIL_INTERP
from .mixup import MixupCutmixCollator
from .rand_erase import RandomErasing
__all__ = [
"build_transform",
"AutoAugment",
"SimpleAugment",
"ColorAugment",
"RandAugment",
"build_mixup",
]
def build_transform(
cfg: ConfigDict, train: bool = True, augments: T.Transform = None
) -> T.Transform:
"""Build function for MegEngine transform.
Args:
cfg: config for building transform.
train: train set or test set. Default: ``True``
augments: augments for building transform.
Returns:
A transform.
"""
if train:
assert augments is not None
bgr_mean = copy.deepcopy(cfg.preprocess.img_mean)
bgr_std = copy.deepcopy(cfg.preprocess.img_std)
if cfg.preprocess.img_color_space == "RGB":
bgr_mean = bgr_mean[::-1]
bgr_std = bgr_std[::-1]
WARP_PARAMS["fillcolor"] = tuple(round(v) for v in bgr_mean[::-1]) # need RGB
WARP_PARAMS["resample"] = PIL_INTERP[cfg.augments.resize.interpolation]
transforms = [
T.RandomResizedCrop(
cfg.preprocess.img_size,
cfg.augments.resize.scale_range,
cfg.augments.resize.ratio_range,
CV2_INTERP[cfg.augments.resize.interpolation],
),
T.RandomHorizontalFlip(),
augments,
RandomErasing(
**cfg.augments.rand_erase.to_dict(),
pad_mean=bgr_mean, # need BGR
pad_std=bgr_std, # need BGR
),
ToColorSpace(cfg.preprocess.img_color_space),
T.ToMode(),
]
else:
assert augments is None
transforms = [
T.Resize(
int(cfg.test.img_size / cfg.test.crop_pct / 2 + 0.5) * 2, # make it even
CV2_INTERP[cfg.augments.resize.interpolation],
),
T.CenterCrop(cfg.test.img_size),
ToColorSpace(cfg.preprocess.img_color_space),
T.ToMode(),
]
return T.Compose(transforms=transforms, order=["image", "image_category"])
class ToColorSpace(T.VisionTransform):
"""Transform to transfer color space.
Args:
color_space: color space, supports ``"BGR"``, ``"RGB"`` and ``"GRAY"``.
"""
def __init__(self, color_space: str, *, order: Sequence = None):
super().__init__(order)
if color_space not in ("BGR", "RGB", "GRAY"):
raise ValueError(f"Color space '{color_space}' not supported")
self.color_space = color_space
def _apply_image(self, image: np.ndarray) -> np.ndarray:
if self.color_space == "BGR":
return image
elif self.color_space == "RGB":
return cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
elif self.color_space == "GRAY":
return cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)[..., np.newaxis]
else:
raise ValueError(f"Color space '{self.color_space}' not supported")
@registers.augments.register()
class SimpleAugment:
"""Simple augmentation."""
@classmethod
def build(cls, cfg: ConfigDict) -> T.Transform:
return T.PseudoTransform()
@registers.augments.register()
class ColorAugment:
"""Color augmentation."""
@classmethod
def build(cls, cfg: ConfigDict) -> T.Transform:
aug_args = cfg.augments.color_aug.to_dict()
lighting_scale = aug_args.pop("lighting")
return T.Compose([T.ColorJitter(**aug_args), T.Lighting(lighting_scale)])
@registers.augments.register()
class AutoAugment:
"""AutoAugment."""
@classmethod
def build(cls, cfg: ConfigDict) -> T.Transform:
return T.TorchTransformCompose([TorchAutoAugment()])
@registers.augments.register()
class RandAugment:
"""Random augmentation."""
@classmethod
def build(cls, cfg: ConfigDict) -> T.Transform:
return T.TorchTransformCompose([TorchRandAugment(**cfg.augments.rand_aug.to_dict())])
def build_mixup(cfg: ConfigDict, train: bool = True) -> Optional[data.Collator]:
"""Build (optionally) Mixup/CutMix augment.
Args:
cfg: config for building Mixup/CutMix collator.
train: train set or test set. Default: ``True``
Returns:
:py:class:`~basecls.data.mixup.MixupCutmixCollator` or ``None``
"""
mixup_cfg = cfg.augments.mixup
if train and (
mixup_cfg.mixup_alpha > 0.0
or mixup_cfg.cutmix_alpha > 0.0
or mixup_cfg.cutmix_minmax is not None
):
mixup_collator = MixupCutmixCollator(**mixup_cfg.to_dict(), num_classes=cfg.num_classes)
logger.info(f"Using mixup with configuration:\n{mixup_cfg}")
else:
mixup_collator = None
return mixup_collator
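# Illustrative sketch added for exposition; not part of the original file.
# It hand-builds the test-time pipeline that ``build_transform`` would return
# for a hypothetical config: 224-pixel crop, 0.875 crop_pct, bilinear resize,
# RGB color space.
def _example_test_transform() -> T.Transform:
    return T.Compose(
        transforms=[
            T.Resize(int(224 / 0.875 / 2 + 0.5) * 2, cv2.INTER_LINEAR),  # make it even
            T.CenterCrop(224),
            ToColorSpace("RGB"),
            T.ToMode(),
        ],
        order=["image", "image_category"],
    )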
|
[
"megengine.data.transform.Lighting",
"megengine.data.transform.PseudoTransform",
"megengine.data.transform.RandomResizedCrop",
"megengine.data.transform.RandomHorizontalFlip",
"megengine.data.transform.ColorJitter",
"megengine.data.transform.Compose",
"megengine.data.transform.ToMode",
"megengine.data.transform.CenterCrop"
] |
[((3450, 3479), 'basecls.utils.registers.augments.register', 'registers.augments.register', ([], {}), '()\n', (3477, 3479), False, 'from basecls.utils import registers\n'), ((3640, 3669), 'basecls.utils.registers.augments.register', 'registers.augments.register', ([], {}), '()\n', (3667, 3669), False, 'from basecls.utils import registers\n'), ((3977, 4006), 'basecls.utils.registers.augments.register', 'registers.augments.register', ([], {}), '()\n', (4004, 4006), False, 'from basecls.utils import registers\n'), ((4183, 4212), 'basecls.utils.registers.augments.register', 'registers.augments.register', ([], {}), '()\n', (4210, 4212), False, 'from basecls.utils import registers\n'), ((2493, 2560), 'megengine.data.transform.Compose', 'T.Compose', ([], {'transforms': 'transforms', 'order': "['image', 'image_category']"}), "(transforms=transforms, order=['image', 'image_category'])\n", (2502, 2560), True, 'import megengine.data.transform as T\n'), ((1089, 1127), 'copy.deepcopy', 'copy.deepcopy', (['cfg.preprocess.img_mean'], {}), '(cfg.preprocess.img_mean)\n', (1102, 1127), False, 'import copy\n'), ((1146, 1183), 'copy.deepcopy', 'copy.deepcopy', (['cfg.preprocess.img_std'], {}), '(cfg.preprocess.img_std)\n', (1159, 1183), False, 'import copy\n'), ((3617, 3636), 'megengine.data.transform.PseudoTransform', 'T.PseudoTransform', ([], {}), '()\n', (3634, 3636), True, 'import megengine.data.transform as T\n'), ((5064, 5127), 'loguru.logger.info', 'logger.info', (['f"""Using mixup with configuration:\n{mixup_cfg}"""'], {}), '(f"""Using mixup with configuration:\n{mixup_cfg}""")\n', (5075, 5127), False, 'from loguru import logger\n'), ((1513, 1680), 'megengine.data.transform.RandomResizedCrop', 'T.RandomResizedCrop', (['cfg.preprocess.img_size', 'cfg.augments.resize.scale_range', 'cfg.augments.resize.ratio_range', 'CV2_INTERP[cfg.augments.resize.interpolation]'], {}), '(cfg.preprocess.img_size, cfg.augments.resize.\n scale_range, cfg.augments.resize.ratio_range, CV2_INTERP[cfg.augments.\n resize.interpolation])\n', (1532, 1680), True, 'import megengine.data.transform as T\n'), ((1763, 1787), 'megengine.data.transform.RandomHorizontalFlip', 'T.RandomHorizontalFlip', ([], {}), '()\n', (1785, 1787), True, 'import megengine.data.transform as T\n'), ((2068, 2078), 'megengine.data.transform.ToMode', 'T.ToMode', ([], {}), '()\n', (2076, 2078), True, 'import megengine.data.transform as T\n'), ((2357, 2388), 'megengine.data.transform.CenterCrop', 'T.CenterCrop', (['cfg.test.img_size'], {}), '(cfg.test.img_size)\n', (2369, 2388), True, 'import megengine.data.transform as T\n'), ((2460, 2470), 'megengine.data.transform.ToMode', 'T.ToMode', ([], {}), '()\n', (2468, 2470), True, 'import megengine.data.transform as T\n'), ((3197, 3235), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (3209, 3235), False, 'import cv2\n'), ((3918, 3943), 'megengine.data.transform.ColorJitter', 'T.ColorJitter', ([], {}), '(**aug_args)\n', (3931, 3943), True, 'import megengine.data.transform as T\n'), ((3945, 3971), 'megengine.data.transform.Lighting', 'T.Lighting', (['lighting_scale'], {}), '(lighting_scale)\n', (3955, 3971), True, 'import megengine.data.transform as T\n'), ((3296, 3335), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (3308, 3335), False, 'import cv2\n')]
|
# Copyright (c) 2020 <NAME>
# This code is licensed under MIT license
# (https://github.com/kwotsin/mimicry/blob/master/LICENSE)
# ------------------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2019 Megvii Inc. All rights reserved.
# ------------------------------------------------------------------------------
"""
Implementation of Base GAN models.
"""
import megengine
import megengine.functional as F
import megengine.module as M
import megengine.random as R
import numpy as np
from . import losses
from .basemodel import BaseModel
class BaseGenerator(BaseModel):
r"""
Base class for a generic unconditional generator model.
Attributes:
nz (int): Noise dimension for upsampling.
ngf (int): Variable controlling generator feature map sizes.
bottom_width (int): Starting width for upsampling generator output to an image.
loss_type (str): Name of loss to use for GAN loss.
"""
def __init__(self, nz, ngf, bottom_width, loss_type, **kwargs):
super().__init__(**kwargs)
self.nz = nz
self.ngf = ngf
self.bottom_width = bottom_width
self.loss_type = loss_type
def _train_step_implementation(
self,
real_batch,
netD=None,
optG=None):
# Produce fake images
fake_images = self._infer_step_implementation(real_batch)
# Compute output logit of D thinking image real
output = netD(fake_images)
# Compute loss
errG = self.compute_gan_loss(output=output)
optG.zero_grad()
optG.backward(errG)
optG.step()
return errG
def _infer_step_implementation(self, batch):
# Get only batch size from real batch
batch_size = batch.shape[0]
noise = R.gaussian(shape=[batch_size, self.nz])
fake_images = self.forward(noise)
return fake_images
def compute_gan_loss(self, output):
if self.loss_type == "ns":
errG = losses.ns_loss_gen(output)
elif self.loss_type == "wasserstein":
errG = losses.wasserstein_loss_gen(output)
else:
raise ValueError("Invalid loss_type {} selected.".format(
self.loss_type))
return errG
def generate_images(self, num_images):
"""Generate images of shape [`num_images`, C, H, W].
        Depending on the final activation function, pixel values are NOT guaranteed
to be within [0, 1].
"""
return self.infer_step(np.empty(num_images, dtype="float32"))
class BaseDiscriminator(BaseModel):
r"""
Base class for a generic unconditional discriminator model.
Attributes:
ndf (int): Variable controlling discriminator feature map sizes.
loss_type (str): Name of loss to use for GAN loss.
"""
def __init__(self, ndf, loss_type, **kwargs):
super().__init__(**kwargs)
self.ndf = ndf
self.loss_type = loss_type
def _train_step_implementation(
self,
real_batch,
netG=None,
optD=None):
# Produce logits for real images
output_real = self._infer_step_implementation(real_batch)
# Produce fake images
fake_images = netG._infer_step_implementation(real_batch)
fake_images = F.zero_grad(fake_images)
# Produce logits for fake images
output_fake = self._infer_step_implementation(fake_images)
# Compute loss for D
errD = self.compute_gan_loss(output_real=output_real,
output_fake=output_fake)
D_x, D_Gz = self.compute_probs(output_real=output_real,
output_fake=output_fake)
# Backprop and update gradients
optD.zero_grad()
optD.backward(errD)
optD.step()
return errD, D_x, D_Gz
def _infer_step_implementation(self, batch):
return self.forward(batch)
def compute_gan_loss(self, output_real, output_fake):
r"""
Computes GAN loss for discriminator.
Args:
output_real (Tensor): A batch of output logits of shape (N, 1) from real images.
output_fake (Tensor): A batch of output logits of shape (N, 1) from fake images.
Returns:
errD (Tensor): A batch of GAN losses for the discriminator.
"""
# Compute loss for D
if self.loss_type == "gan" or self.loss_type == "ns":
errD = losses.minimax_loss_dis(output_fake=output_fake,
output_real=output_real)
elif self.loss_type == "wasserstein":
errD = losses.wasserstein_loss_dis(output_fake=output_fake,
output_real=output_real)
else:
raise ValueError("Invalid loss_type selected.")
return errD
def compute_probs(self, output_real, output_fake):
r"""
Computes probabilities from real/fake images logits.
Args:
output_real (Tensor): A batch of output logits of shape (N, 1) from real images.
output_fake (Tensor): A batch of output logits of shape (N, 1) from fake images.
Returns:
tuple: Average probabilities of real/fake image considered as real for the batch.
"""
D_x = F.sigmoid(output_real).mean()
D_Gz = F.sigmoid(output_fake).mean()
return D_x, D_Gz
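# Illustrative sketch added for exposition; not part of the original file.
# A concrete generator only needs to provide ``forward``; the GAN loss and the
# train/infer plumbing above are inherited. The layer sizes are hypothetical,
# and ``BaseModel`` is assumed to need no extra constructor arguments.
class _ToyGenerator(BaseGenerator):
    def __init__(self):
        super().__init__(nz=16, ngf=8, bottom_width=4, loss_type="ns")
        self.fc = M.Linear(self.nz, 3 * 32 * 32)
    def forward(self, x):
        # map (N, nz) noise to (N, 3, 32, 32) images in [-1, 1]
        return F.tanh(self.fc(x)).reshape(-1, 3, 32, 32)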
|
[
"megengine.random.gaussian",
"megengine.functional.zero_grad",
"megengine.functional.sigmoid"
] |
[((2259, 2298), 'megengine.random.gaussian', 'R.gaussian', ([], {'shape': '[batch_size, self.nz]'}), '(shape=[batch_size, self.nz])\n', (2269, 2298), True, 'import megengine.random as R\n'), ((3780, 3804), 'megengine.functional.zero_grad', 'F.zero_grad', (['fake_images'], {}), '(fake_images)\n', (3791, 3804), True, 'import megengine.functional as F\n'), ((2994, 3031), 'numpy.empty', 'np.empty', (['num_images'], {'dtype': '"""float32"""'}), "(num_images, dtype='float32')\n", (3002, 3031), True, 'import numpy as np\n'), ((5829, 5851), 'megengine.functional.sigmoid', 'F.sigmoid', (['output_real'], {}), '(output_real)\n', (5838, 5851), True, 'import megengine.functional as F\n'), ((5874, 5896), 'megengine.functional.sigmoid', 'F.sigmoid', (['output_fake'], {}), '(output_fake)\n', (5883, 5896), True, 'import megengine.functional as F\n')]
|
import numpy as np
import megengine
import megengine.module as M
import megengine.functional as F
import math
from . import default_init_weights
class ShuffleV2Block(M.Module):
def __init__(self, inp, oup, mid_channels, *, ksize, stride):
super().__init__()
self.stride = stride
assert stride in [1, 2]
self.mid_channels = mid_channels
self.ksize = ksize
pad = ksize // 2
self.pad = pad
self.inp = inp
outputs = oup - inp
branch_main = [
# pw
M.Conv2d(inp, mid_channels, 1, 1, 0, bias=False),
M.ReLU(),
# dw
M.Conv2d(
mid_channels, mid_channels, ksize, stride, pad,
groups=mid_channels, bias=False,
),
# pw-linear
M.Conv2d(mid_channels, outputs, 1, 1, 0, bias=False),
M.ReLU(),
]
self.branch_main = M.Sequential(*branch_main)
if stride == 2:
branch_proj = [
# dw
M.Conv2d(inp, inp, ksize, stride, pad, groups=inp, bias=False),
M.BatchNorm2d(inp),
# pw-linear
M.Conv2d(inp, inp, 1, 1, 0, bias=False),
M.BatchNorm2d(inp),
M.ReLU(),
]
self.branch_proj = M.Sequential(*branch_proj)
else:
self.branch_proj = None
self.init_weights()
def forward(self, old_x):
if self.stride == 1:
x_proj, x = self.channel_shuffle(old_x)
return F.concat((x_proj, self.branch_main(x)), 1)
elif self.stride == 2:
x_proj = old_x
x = old_x
return F.concat((self.branch_proj(x_proj), self.branch_main(x)), 1)
else:
raise ValueError("use stride 1 or 2, current stride {}".format(self.stride))
def channel_shuffle(self, x):
batchsize, num_channels, height, width = x.shape
# assert (num_channels % 4 == 0)
x = x.reshape(batchsize * num_channels // 2, 2, height * width)
x = F.transpose(x, (1, 0, 2))
x = x.reshape(2, -1, num_channels // 2, height, width)
return x[0], x[1]
def init_weights(self):
default_init_weights(self, scale=0.2)
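# Illustrative shape check added for exposition; not part of the original
# file. The channel counts and spatial size below are hypothetical.
def _example_shuffle_block():
    # a stride-2 block halves the spatial size: branch_proj keeps the 24 input
    # channels and branch_main contributes oup - inp = 24 more, giving 48 total
    block = ShuffleV2Block(inp=24, oup=48, mid_channels=24, ksize=3, stride=2)
    x = megengine.tensor(np.random.randn(1, 24, 32, 32).astype("float32"))
    y = block(x)
    return y.shape  # expected: (1, 48, 16, 16)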
|
[
"megengine.module.ReLU",
"megengine.module.BatchNorm2d",
"megengine.functional.transpose",
"megengine.module.Sequential",
"megengine.module.Conv2d"
] |
[((943, 969), 'megengine.module.Sequential', 'M.Sequential', (['*branch_main'], {}), '(*branch_main)\n', (955, 969), True, 'import megengine.module as M\n'), ((2111, 2136), 'megengine.functional.transpose', 'F.transpose', (['x', '(1, 0, 2)'], {}), '(x, (1, 0, 2))\n', (2122, 2136), True, 'import megengine.functional as F\n'), ((555, 603), 'megengine.module.Conv2d', 'M.Conv2d', (['inp', 'mid_channels', '(1)', '(1)', '(0)'], {'bias': '(False)'}), '(inp, mid_channels, 1, 1, 0, bias=False)\n', (563, 603), True, 'import megengine.module as M\n'), ((617, 625), 'megengine.module.ReLU', 'M.ReLU', ([], {}), '()\n', (623, 625), True, 'import megengine.module as M\n'), ((656, 750), 'megengine.module.Conv2d', 'M.Conv2d', (['mid_channels', 'mid_channels', 'ksize', 'stride', 'pad'], {'groups': 'mid_channels', 'bias': '(False)'}), '(mid_channels, mid_channels, ksize, stride, pad, groups=\n mid_channels, bias=False)\n', (664, 750), True, 'import megengine.module as M\n'), ((830, 882), 'megengine.module.Conv2d', 'M.Conv2d', (['mid_channels', 'outputs', '(1)', '(1)', '(0)'], {'bias': '(False)'}), '(mid_channels, outputs, 1, 1, 0, bias=False)\n', (838, 882), True, 'import megengine.module as M\n'), ((896, 904), 'megengine.module.ReLU', 'M.ReLU', ([], {}), '()\n', (902, 904), True, 'import megengine.module as M\n'), ((1352, 1378), 'megengine.module.Sequential', 'M.Sequential', (['*branch_proj'], {}), '(*branch_proj)\n', (1364, 1378), True, 'import megengine.module as M\n'), ((1060, 1122), 'megengine.module.Conv2d', 'M.Conv2d', (['inp', 'inp', 'ksize', 'stride', 'pad'], {'groups': 'inp', 'bias': '(False)'}), '(inp, inp, ksize, stride, pad, groups=inp, bias=False)\n', (1068, 1122), True, 'import megengine.module as M\n'), ((1140, 1158), 'megengine.module.BatchNorm2d', 'M.BatchNorm2d', (['inp'], {}), '(inp)\n', (1153, 1158), True, 'import megengine.module as M\n'), ((1204, 1243), 'megengine.module.Conv2d', 'M.Conv2d', (['inp', 'inp', '(1)', '(1)', '(0)'], {'bias': '(False)'}), '(inp, inp, 1, 1, 0, bias=False)\n', (1212, 1243), True, 'import megengine.module as M\n'), ((1261, 1279), 'megengine.module.BatchNorm2d', 'M.BatchNorm2d', (['inp'], {}), '(inp)\n', (1274, 1279), True, 'import megengine.module as M\n'), ((1297, 1305), 'megengine.module.ReLU', 'M.ReLU', ([], {}), '()\n', (1303, 1305), True, 'import megengine.module as M\n')]
|
# -*- coding: utf-8 -*-
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
# ---------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2021 Megvii Inc. All rights reserved.
# ----------------------------------------------------------------------
"""Megengine BERT model."""
import copy
import json
import math
import os
import urllib
import urllib.request
from io import open
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.hub as hub
from megengine import Parameter
from megengine.functional.loss import cross_entropy
from megengine.module import Dropout, Embedding, Linear, Module, Sequential
from megengine.module.activation import Softmax
def transpose(inp, a, b):
cur_shape = list(range(0, inp.ndim))
cur_shape[a], cur_shape[b] = cur_shape[b], cur_shape[a]
return inp.transpose(cur_shape)
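# Note (added for exposition): ``transpose(x, -1, -2)`` swaps the last two
# axes, e.g. a (B, H, L, D) tensor becomes (B, H, D, L); it is used below to
# form Q @ K^T when computing the attention scores.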
def gelu(x):
"""Implementation of the gelu activation function.
    This implements the tanh approximation of GELU (the form used in OpenAI GPT),
    which gives slightly different results from the exact erf-based version:
    x * 0.5 * (1.0 + F.tanh((F.sqrt(2 / math.pi) * (x + 0.044715 * (x ** 3)))))
Also see https://arxiv.org/abs/1606.08415
"""
return x * 0.5 * (1.0 + F.tanh(F.sqrt(2 / math.pi) * (x + 0.044715 * (x ** 3))))
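# Quick numeric sanity check added for exposition; not part of the original
# file. The input values are arbitrary.
def _example_gelu():
    x = mge.tensor(np.array([-2.0, 0.0, 2.0], dtype=np.float32))
    return gelu(x)  # approximately [-0.045, 0.0, 1.955]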
ACT2FN = {"gelu": gelu, "relu": F.relu}
class BertConfig:
"""Configuration class to store the configuration of a `BertModel`.
"""
def __init__(
self,
vocab_size_or_config_json_file,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02,
):
"""Constructs BertConfig.
Args:
            vocab_size_or_config_json_file: Vocabulary size of `input_ids` in `BertModel`.
hidden_size: Size of the encoder layers and the pooler layer.
num_hidden_layers: Number of hidden layers in the Transformer encoder.
num_attention_heads: Number of attention heads for each attention layer in
the Transformer encoder.
intermediate_size: The size of the "intermediate" (i.e., feed-forward)
layer in the Transformer encoder.
hidden_act: The non-linear activation function (function or string) in the
encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
            hidden_dropout_prob: The dropout probability for all fully connected
layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob: The dropout ratio for the attention
probabilities.
max_position_embeddings: The maximum sequence length that this model might
ever be used with. Typically set this to something large just in case
(e.g., 512 or 1024 or 2048).
type_vocab_size: The vocabulary size of the `token_type_ids` passed into
`BertModel`.
            initializer_range: The stddev of the truncated_normal_initializer for
initializing all weight matrices.
"""
if isinstance(vocab_size_or_config_json_file, str):
with open(vocab_size_or_config_json_file, "r", encoding="utf-8") as reader:
json_config = json.loads(reader.read())
for key, value in json_config.items():
self.__dict__[key] = value
elif isinstance(vocab_size_or_config_json_file, int):
self.vocab_size = vocab_size_or_config_json_file
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
else:
raise ValueError(
"First argument must be either a vocabulary size (int)"
"or the path to a pretrained model config file (str)"
)
@classmethod
def from_dict(cls, json_object):
"""Constructs a `BertConfig` from a Python dictionary of parameters."""
config = BertConfig(vocab_size_or_config_json_file=-1)
for key, value in json_object.items():
config.__dict__[key] = value
return config
@classmethod
def from_json_file(cls, json_file):
"""Constructs a `BertConfig` from a json file of parameters."""
with open(json_file, "r", encoding="utf-8") as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
def __repr__(self):
return str(self.to_json_string())
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
def to_json_file(self, json_file_path):
""" Save this instance to a json file."""
with open(json_file_path, "w", encoding="utf-8") as writer:
writer.write(self.to_json_string())
class BertLayerNorm(Module):
"""Construct a layernorm module in the TF style (epsilon inside the square root).
"""
def __init__(self, hidden_size, eps=1e-12):
super().__init__()
self.weight = Parameter(np.ones(hidden_size).astype(np.float32))
self.bias = Parameter(np.zeros(hidden_size).astype(np.float32))
self.variance_epsilon = eps
def forward(self, x):
u = F.mean(x, len(x.shape) - 1, True)
s = F.mean((x - u) ** 2, len(x.shape) - 1, True)
x = (x - u) / ((s + self.variance_epsilon) ** 0.5)
return self.weight * x + self.bias
class BertEmbeddings(Module):
"""Construct the embeddings from word, position and token_type embeddings.
"""
def __init__(self, config):
super().__init__()
self.word_embeddings = Embedding(config.vocab_size, config.hidden_size)
self.position_embeddings = Embedding(
config.max_position_embeddings, config.hidden_size
)
self.token_type_embeddings = Embedding(
config.type_vocab_size, config.hidden_size
)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name
# and be able to load any TensorFlow checkpoint file
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = Dropout(config.hidden_dropout_prob)
def forward(self, input_ids, token_type_ids=None):
seq_length = input_ids.shape[1]
if token_type_ids is None:
token_type_ids = F.zeros_like(input_ids)
position_ids = F.linspace(0, seq_length - 1, seq_length).astype(np.int32)
position_ids = F.broadcast_to(F.expand_dims(position_ids, 0), input_ids.shape)
words_embeddings = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = words_embeddings + position_embeddings + token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class BertSelfAttention(Module):
def __init__(self, config):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads)
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = Linear(config.hidden_size, self.all_head_size)
self.key = Linear(config.hidden_size, self.all_head_size)
self.value = Linear(config.hidden_size, self.all_head_size)
self.dropout = Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
# using symbolic shapes to make trace happy
x_shape = mge.tensor(x.shape)
new_x_shape = F.concat(
[x_shape[:-1], (self.num_attention_heads, self.attention_head_size)]
)
x = x.reshape(new_x_shape)
return x.transpose(0, 2, 1, 3)
def forward(self, hidden_states, attention_mask):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = F.matmul(query_layer, transpose(key_layer, -1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
# Apply the attention mask is (precomputed for all layers in BertModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = Softmax(len(attention_scores.shape) - 1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
context_layer = F.matmul(attention_probs, value_layer)
context_layer = context_layer.transpose(0, 2, 1, 3)
# using symbolic shapes to make trace happy
context_shape = mge.tensor(context_layer.shape)
new_context_layer_shape = F.concat([context_shape[:-2], self.all_head_size])
context_layer = context_layer.reshape(new_context_layer_shape)
return context_layer
class BertSelfOutput(Module):
def __init__(self, config):
super().__init__()
self.dense = Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertAttention(Module):
def __init__(self, config):
super().__init__()
self.self = BertSelfAttention(config)
self.output = BertSelfOutput(config)
def forward(self, input_tensor, attention_mask):
self_output = self.self(input_tensor, attention_mask)
attention_output = self.output(self_output, input_tensor)
return attention_output
class BertIntermediate(Module):
def __init__(self, config):
super().__init__()
self.dense = Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class BertOutput(Module):
def __init__(self, config):
super().__init__()
self.dense = Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertLayer(Module):
def __init__(self, config):
super().__init__()
self.attention = BertAttention(config)
self.intermediate = BertIntermediate(config)
self.output = BertOutput(config)
def forward(self, hidden_states, attention_mask):
attention_output = self.attention(hidden_states, attention_mask)
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
class BertEncoder(Module):
def __init__(self, config):
super().__init__()
self.layer = Sequential(
*[BertLayer(config) for _ in range(config.num_hidden_layers)]
)
# self.layer = ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True):
all_encoder_layers = []
for layer_module in self.layer:
hidden_states = layer_module(hidden_states, attention_mask)
if output_all_encoded_layers:
all_encoder_layers.append(hidden_states)
if not output_all_encoded_layers:
all_encoder_layers.append(hidden_states)
return all_encoder_layers
class BertPooler(Module):
def __init__(self, config):
super().__init__()
self.dense = Linear(config.hidden_size, config.hidden_size)
self.activation = F.tanh
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class BertModel(Module):
"""BERT model ("Bidirectional Embedding Representations from a Transformer").
Params:
config: a BertConfig class instance with the configuration to build a new model
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
with the word token indices in the vocabulary
(see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape
[batch_size, sequence_length] with the token types indices selected in [0, 1].
Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length]
with indices selected in [0, 1]. It's a mask to be used if the input sequence length
is smaller than the max input sequence length in the current batch.
It's the mask that we typically use for attention when
a batch has varying length sentences.
`output_all_encoded_layers`: boolean which controls the content of the `encoded_layers`
output as described below. Default: `True`.
Outputs: Tuple of (encoded_layers, pooled_output)
        `encoded_layers`: controlled by `output_all_encoded_layers` argument:
- `output_all_encoded_layers=True`: outputs a list of the full sequences of
encoded-hidden-states at the end of each attention block
(i.e. 12 full sequences for BERT-base, 24 for BERT-large), each
encoded-hidden-state is a torch.FloatTensor of size
[batch_size, sequence_length, hidden_size],
- `output_all_encoded_layers=False`: outputs only the full sequence of
hidden-states corresponding to the last attention block of shape
[batch_size, sequence_length, hidden_size],
`pooled_output`: a torch.FloatTensor of size [batch_size, hidden_size]
            which is the output of a classifier pretrained on top of the hidden state
associated to the first character of the
input (`CLS`) to train on the Next-Sentence task (see BERT's paper).
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = modeling.BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model = modeling.BertModel(config=config)
all_encoder_layers, pooled_output = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config):
super().__init__()
self.embeddings = BertEmbeddings(config)
self.encoder = BertEncoder(config)
self.pooler = BertPooler(config)
def forward(
self,
input_ids,
token_type_ids=None,
attention_mask=None,
output_all_encoded_layers=True,
):
if attention_mask is None:
attention_mask = F.ones_like(input_ids)
if token_type_ids is None:
token_type_ids = F.zeros_like(input_ids)
# print('input_ids', input_ids.sum())
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is more simple than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
# print('attention_mask', attention_mask.sum())
extended_attention_mask = F.expand_dims(attention_mask, (1, 2))
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.astype(
next(self.parameters()).dtype
) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
embedding_output = self.embeddings(input_ids, token_type_ids)
encoded_layers = self.encoder(
embedding_output,
extended_attention_mask,
output_all_encoded_layers=output_all_encoded_layers,
)
sequence_output = encoded_layers[-1]
pooled_output = self.pooler(sequence_output)
if not output_all_encoded_layers:
encoded_layers = encoded_layers[-1]
return encoded_layers, pooled_output
class BertForSequenceClassification(Module):
"""BERT model for classification.
This module is composed of the BERT model with a linear layer on top of
the pooled output.
Params:
`config`: a BertConfig class instance with the configuration to build a new model.
`num_labels`: the number of classes for the classifier. Default = 2.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
with the word token indices in the vocabulary.
Items in the batch should begin with the special "CLS" token.
(see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length]
with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A`
and type 1 corresponds to a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length]
with indices selected in [0, 1]. It's a mask to be used if the input sequence length
is smaller than the max input sequence length in the current batch. It's the mask
that we typically use for attention when a batch has varying length sentences.
`labels`: labels for the classification output: torch.LongTensor of shape [batch_size]
with indices selected in [0, ..., num_labels].
Outputs:
if `labels` is not `None`:
Outputs the CrossEntropy classification loss of the output with the labels.
if `labels` is `None`:
Outputs the classification logits of shape [batch_size, num_labels].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
num_labels = 2
model = BertForSequenceClassification(config, num_labels)
logits = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config, num_labels, bert=None):
super().__init__()
if bert is None:
self.bert = BertModel(config)
else:
self.bert = bert
self.num_labels = num_labels
self.dropout = Dropout(config.hidden_dropout_prob)
self.classifier = Linear(config.hidden_size, num_labels)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None):
_, pooled_output = self.bert(
input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False
)
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
if labels is not None:
loss = cross_entropy(
logits.reshape(-1, self.num_labels), labels.reshape(-1)
)
return logits, loss
else:
return logits, None
DATA_URL = "https://data.megengine.org.cn/models/weights/bert"
CONFIG_NAME = "bert_config.json"
VOCAB_NAME = "vocab.txt"
MODEL_NAME = {
"wwm_cased_L-24_H-1024_A-16": "wwm_cased_L_24_H_1024_A_16",
"wwm_uncased_L-24_H-1024_A-16": "wwm_uncased_L_24_H_1024_A_16",
"cased_L-12_H-768_A-12": "cased_L_12_H_768_A_12",
"cased_L-24_H-1024_A-16": "cased_L_24_H_1024_A_16",
"uncased_L-12_H-768_A-12": "uncased_L_12_H_768_A_12",
"uncased_L-24_H-1024_A-16": "uncased_L_24_H_1024_A_16",
"chinese_L-12_H-768_A-12": "chinese_L_12_H_768_A_12",
"multi_cased_L-12_H-768_A-12": "multi_cased_L_12_H_768_A_12",
}
def download_file(url, filename):
# urllib.URLopener().retrieve(url, filename)
urllib.request.urlretrieve(url, filename)
def create_hub_bert(model_name, pretrained):
assert model_name in MODEL_NAME, "{} not in the valid models {}".format(
model_name, MODEL_NAME
)
data_dir = "./{}".format(model_name)
if not os.path.exists(data_dir):
os.makedirs(data_dir)
vocab_url = "{}/{}/{}".format(DATA_URL, model_name, VOCAB_NAME)
config_url = "{}/{}/{}".format(DATA_URL, model_name, CONFIG_NAME)
vocab_file = "./{}/{}".format(model_name, VOCAB_NAME)
config_file = "./{}/{}".format(model_name, CONFIG_NAME)
download_file(vocab_url, vocab_file)
download_file(config_url, config_file)
config = BertConfig(config_file)
model = hub.load("megengine/models", MODEL_NAME[model_name], pretrained=pretrained)
return model, config, vocab_file
@hub.pretrained(
"https://data.megengine.org.cn/models/weights/bert/"
"uncased_L-12_H-768_A-12/bert_4f2157f7_uncased_L-12_H-768_A-12.pkl"
)
def uncased_L_12_H_768_A_12():
config_dict = {
"attention_probs_dropout_prob": 0.1,
"hidden_act": "gelu",
"hidden_dropout_prob": 0.1,
"hidden_size": 768,
"initializer_range": 0.02,
"intermediate_size": 3072,
"max_position_embeddings": 512,
"num_attention_heads": 12,
"num_hidden_layers": 12,
"type_vocab_size": 2,
"vocab_size": 30522,
}
config = BertConfig.from_dict(config_dict)
return BertModel(config)
@hub.pretrained(
"https://data.megengine.org.cn/models/weights/bert/"
"cased_L-12_H-768_A-12/bert_b9727c2f_cased_L-12_H-768_A-12.pkl"
)
def cased_L_12_H_768_A_12():
config_dict = {
"attention_probs_dropout_prob": 0.1,
"hidden_act": "gelu",
"hidden_dropout_prob": 0.1,
"hidden_size": 768,
"initializer_range": 0.02,
"intermediate_size": 3072,
"max_position_embeddings": 512,
"num_attention_heads": 12,
"num_hidden_layers": 12,
"type_vocab_size": 2,
"vocab_size": 28996,
}
config = BertConfig.from_dict(config_dict)
return BertModel(config)
@hub.pretrained(
"https://data.megengine.org.cn/models/weights/bert/"
"uncased_L-24_H-1024_A-16/bert_222f5012_uncased_L-24_H-1024_A-16.pkl"
)
def uncased_L_24_H_1024_A_16():
config_dict = {
"attention_probs_dropout_prob": 0.1,
"hidden_act": "gelu",
"hidden_dropout_prob": 0.1,
"hidden_size": 1024,
"initializer_range": 0.02,
"intermediate_size": 4096,
"max_position_embeddings": 512,
"num_attention_heads": 16,
"num_hidden_layers": 24,
"type_vocab_size": 2,
"vocab_size": 30522,
}
config = BertConfig.from_dict(config_dict)
return BertModel(config)
@hub.pretrained(
"https://data.megengine.org.cn/models/weights/bert/"
"cased_L-24_H-1024_A-16/bert_01f2a65f_cased_L-24_H-1024_A-16.pkl"
)
def cased_L_24_H_1024_A_16():
config_dict = {
"attention_probs_dropout_prob": 0.1,
"directionality": "bidi",
"hidden_act": "gelu",
"hidden_dropout_prob": 0.1,
"hidden_size": 1024,
"initializer_range": 0.02,
"intermediate_size": 4096,
"max_position_embeddings": 512,
"num_attention_heads": 16,
"num_hidden_layers": 24,
"pooler_fc_size": 768,
"pooler_num_attention_heads": 12,
"pooler_num_fc_layers": 3,
"pooler_size_per_head": 128,
"pooler_type": "first_token_transform",
"type_vocab_size": 2,
"vocab_size": 28996,
}
config = BertConfig.from_dict(config_dict)
return BertModel(config)
@hub.pretrained(
"https://data.megengine.org.cn/models/weights/bert/"
"chinese_L-12_H-768_A-12/bert_ee91be1a_chinese_L-12_H-768_A-12.pkl"
)
def chinese_L_12_H_768_A_12():
config_dict = {
"attention_probs_dropout_prob": 0.1,
"directionality": "bidi",
"hidden_act": "gelu",
"hidden_dropout_prob": 0.1,
"hidden_size": 768,
"initializer_range": 0.02,
"intermediate_size": 3072,
"max_position_embeddings": 512,
"num_attention_heads": 12,
"num_hidden_layers": 12,
"pooler_fc_size": 768,
"pooler_num_attention_heads": 12,
"pooler_num_fc_layers": 3,
"pooler_size_per_head": 128,
"pooler_type": "first_token_transform",
"type_vocab_size": 2,
"vocab_size": 21128,
}
config = BertConfig.from_dict(config_dict)
return BertModel(config)
@hub.pretrained(
"https://data.megengine.org.cn/models/weights/bert/"
"multi_cased_L-12_H-768_A-12/bert_283ceec5_multi_cased_L-12_H-768_A-12.pkl"
)
def multi_cased_L_12_H_768_A_12():
config_dict = {
"attention_probs_dropout_prob": 0.1,
"directionality": "bidi",
"hidden_act": "gelu",
"hidden_dropout_prob": 0.1,
"hidden_size": 768,
"initializer_range": 0.02,
"intermediate_size": 3072,
"max_position_embeddings": 512,
"num_attention_heads": 12,
"num_hidden_layers": 12,
"pooler_fc_size": 768,
"pooler_num_attention_heads": 12,
"pooler_num_fc_layers": 3,
"pooler_size_per_head": 128,
"pooler_type": "first_token_transform",
"type_vocab_size": 2,
"vocab_size": 119547,
}
config = BertConfig.from_dict(config_dict)
return BertModel(config)
@hub.pretrained(
"https://data.megengine.org.cn/models/weights/bert/"
"wwm_uncased_L-24_H-1024_A-16/bert_e2780a6a_wwm_uncased_L-24_H-1024_A-16.pkl"
)
def wwm_uncased_L_24_H_1024_A_16():
config_dict = {
"attention_probs_dropout_prob": 0.1,
"hidden_act": "gelu",
"hidden_dropout_prob": 0.1,
"hidden_size": 1024,
"initializer_range": 0.02,
"intermediate_size": 4096,
"max_position_embeddings": 512,
"num_attention_heads": 16,
"num_hidden_layers": 24,
"type_vocab_size": 2,
"vocab_size": 30522,
}
config = BertConfig.from_dict(config_dict)
return BertModel(config)
@hub.pretrained(
"https://data.megengine.org.cn/models/weights/bert/"
"wwm_cased_L-24_H-1024_A-16/bert_0a8f1389_wwm_cased_L-24_H-1024_A-16.pkl"
)
def wwm_cased_L_24_H_1024_A_16():
config_dict = {
"attention_probs_dropout_prob": 0.1,
"directionality": "bidi",
"hidden_act": "gelu",
"hidden_dropout_prob": 0.1,
"hidden_size": 1024,
"initializer_range": 0.02,
"intermediate_size": 4096,
"max_position_embeddings": 512,
"num_attention_heads": 16,
"num_hidden_layers": 24,
"pooler_fc_size": 768,
"pooler_num_attention_heads": 12,
"pooler_num_fc_layers": 3,
"pooler_size_per_head": 128,
"pooler_type": "first_token_transform",
"type_vocab_size": 2,
"vocab_size": 28996,
}
config = BertConfig.from_dict(config_dict)
return BertModel(config)
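# Illustrative usage sketch added for exposition; not part of the original
# file. The vocabulary size, sequence length and token ids are made up, and
# the model runs with randomly initialized weights (no checkpoint is loaded).
def _example_sequence_classification():
    config = BertConfig(vocab_size_or_config_json_file=30522)
    model = BertForSequenceClassification(config, num_labels=2)
    input_ids = mge.tensor(np.array([[101, 7592, 2088, 102]], dtype=np.int32))
    token_type_ids = mge.tensor(np.zeros((1, 4), dtype=np.int32))
    attention_mask = mge.tensor(np.ones((1, 4), dtype=np.int32))
    logits, _ = model(input_ids, token_type_ids, attention_mask)
    return logits  # shape: (1, 2)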
|
[
"megengine.hub.pretrained",
"megengine.module.Embedding",
"megengine.hub.load",
"megengine.tensor",
"megengine.functional.concat",
"megengine.functional.matmul",
"megengine.functional.sqrt",
"megengine.functional.ones_like",
"megengine.functional.zeros_like",
"megengine.functional.linspace",
"megengine.module.Dropout",
"megengine.module.Linear",
"megengine.functional.expand_dims"
] |
[((24900, 25043), 'megengine.hub.pretrained', 'hub.pretrained', (['"""https://data.megengine.org.cn/models/weights/bert/uncased_L-12_H-768_A-12/bert_4f2157f7_uncased_L-12_H-768_A-12.pkl"""'], {}), "(\n 'https://data.megengine.org.cn/models/weights/bert/uncased_L-12_H-768_A-12/bert_4f2157f7_uncased_L-12_H-768_A-12.pkl'\n )\n", (24914, 25043), True, 'import megengine.hub as hub\n'), ((25559, 25698), 'megengine.hub.pretrained', 'hub.pretrained', (['"""https://data.megengine.org.cn/models/weights/bert/cased_L-12_H-768_A-12/bert_b9727c2f_cased_L-12_H-768_A-12.pkl"""'], {}), "(\n 'https://data.megengine.org.cn/models/weights/bert/cased_L-12_H-768_A-12/bert_b9727c2f_cased_L-12_H-768_A-12.pkl'\n )\n", (25573, 25698), True, 'import megengine.hub as hub\n'), ((26212, 26357), 'megengine.hub.pretrained', 'hub.pretrained', (['"""https://data.megengine.org.cn/models/weights/bert/uncased_L-24_H-1024_A-16/bert_222f5012_uncased_L-24_H-1024_A-16.pkl"""'], {}), "(\n 'https://data.megengine.org.cn/models/weights/bert/uncased_L-24_H-1024_A-16/bert_222f5012_uncased_L-24_H-1024_A-16.pkl'\n )\n", (26226, 26357), True, 'import megengine.hub as hub\n'), ((26876, 27017), 'megengine.hub.pretrained', 'hub.pretrained', (['"""https://data.megengine.org.cn/models/weights/bert/cased_L-24_H-1024_A-16/bert_01f2a65f_cased_L-24_H-1024_A-16.pkl"""'], {}), "(\n 'https://data.megengine.org.cn/models/weights/bert/cased_L-24_H-1024_A-16/bert_01f2a65f_cased_L-24_H-1024_A-16.pkl'\n )\n", (26890, 27017), True, 'import megengine.hub as hub\n'), ((27761, 27904), 'megengine.hub.pretrained', 'hub.pretrained', (['"""https://data.megengine.org.cn/models/weights/bert/chinese_L-12_H-768_A-12/bert_ee91be1a_chinese_L-12_H-768_A-12.pkl"""'], {}), "(\n 'https://data.megengine.org.cn/models/weights/bert/chinese_L-12_H-768_A-12/bert_ee91be1a_chinese_L-12_H-768_A-12.pkl'\n )\n", (27775, 27904), True, 'import megengine.hub as hub\n'), ((28647, 28798), 'megengine.hub.pretrained', 'hub.pretrained', (['"""https://data.megengine.org.cn/models/weights/bert/multi_cased_L-12_H-768_A-12/bert_283ceec5_multi_cased_L-12_H-768_A-12.pkl"""'], {}), "(\n 'https://data.megengine.org.cn/models/weights/bert/multi_cased_L-12_H-768_A-12/bert_283ceec5_multi_cased_L-12_H-768_A-12.pkl'\n )\n", (28661, 28798), True, 'import megengine.hub as hub\n'), ((29547, 29700), 'megengine.hub.pretrained', 'hub.pretrained', (['"""https://data.megengine.org.cn/models/weights/bert/wwm_uncased_L-24_H-1024_A-16/bert_e2780a6a_wwm_uncased_L-24_H-1024_A-16.pkl"""'], {}), "(\n 'https://data.megengine.org.cn/models/weights/bert/wwm_uncased_L-24_H-1024_A-16/bert_e2780a6a_wwm_uncased_L-24_H-1024_A-16.pkl'\n )\n", (29561, 29700), True, 'import megengine.hub as hub\n'), ((30222, 30371), 'megengine.hub.pretrained', 'hub.pretrained', (['"""https://data.megengine.org.cn/models/weights/bert/wwm_cased_L-24_H-1024_A-16/bert_0a8f1389_wwm_cased_L-24_H-1024_A-16.pkl"""'], {}), "(\n 'https://data.megengine.org.cn/models/weights/bert/wwm_cased_L-24_H-1024_A-16/bert_0a8f1389_wwm_cased_L-24_H-1024_A-16.pkl'\n )\n", (30236, 30371), True, 'import megengine.hub as hub\n'), ((24078, 24119), 'urllib.request.urlretrieve', 'urllib.request.urlretrieve', (['url', 'filename'], {}), '(url, filename)\n', (24104, 24119), False, 'import urllib\n'), ((24783, 24858), 'megengine.hub.load', 'hub.load', (['"""megengine/models"""', 'MODEL_NAME[model_name]'], {'pretrained': 'pretrained'}), "('megengine/models', MODEL_NAME[model_name], pretrained=pretrained)\n", (24791, 24858), True, 'import megengine.hub as hub\n'), ((5786, 5814), 
'copy.deepcopy', 'copy.deepcopy', (['self.__dict__'], {}), '(self.__dict__)\n', (5799, 5814), False, 'import copy\n'), ((7035, 7083), 'megengine.module.Embedding', 'Embedding', (['config.vocab_size', 'config.hidden_size'], {}), '(config.vocab_size, config.hidden_size)\n', (7044, 7083), False, 'from megengine.module import Dropout, Embedding, Linear, Module, Sequential\n'), ((7119, 7180), 'megengine.module.Embedding', 'Embedding', (['config.max_position_embeddings', 'config.hidden_size'], {}), '(config.max_position_embeddings, config.hidden_size)\n', (7128, 7180), False, 'from megengine.module import Dropout, Embedding, Linear, Module, Sequential\n'), ((7240, 7293), 'megengine.module.Embedding', 'Embedding', (['config.type_vocab_size', 'config.hidden_size'], {}), '(config.type_vocab_size, config.hidden_size)\n', (7249, 7293), False, 'from megengine.module import Dropout, Embedding, Linear, Module, Sequential\n'), ((7560, 7595), 'megengine.module.Dropout', 'Dropout', (['config.hidden_dropout_prob'], {}), '(config.hidden_dropout_prob)\n', (7567, 7595), False, 'from megengine.module import Dropout, Embedding, Linear, Module, Sequential\n'), ((8981, 9027), 'megengine.module.Linear', 'Linear', (['config.hidden_size', 'self.all_head_size'], {}), '(config.hidden_size, self.all_head_size)\n', (8987, 9027), False, 'from megengine.module import Dropout, Embedding, Linear, Module, Sequential\n'), ((9047, 9093), 'megengine.module.Linear', 'Linear', (['config.hidden_size', 'self.all_head_size'], {}), '(config.hidden_size, self.all_head_size)\n', (9053, 9093), False, 'from megengine.module import Dropout, Embedding, Linear, Module, Sequential\n'), ((9115, 9161), 'megengine.module.Linear', 'Linear', (['config.hidden_size', 'self.all_head_size'], {}), '(config.hidden_size, self.all_head_size)\n', (9121, 9161), False, 'from megengine.module import Dropout, Embedding, Linear, Module, Sequential\n'), ((9186, 9230), 'megengine.module.Dropout', 'Dropout', (['config.attention_probs_dropout_prob'], {}), '(config.attention_probs_dropout_prob)\n', (9193, 9230), False, 'from megengine.module import Dropout, Embedding, Linear, Module, Sequential\n'), ((9341, 9360), 'megengine.tensor', 'mge.tensor', (['x.shape'], {}), '(x.shape)\n', (9351, 9360), True, 'import megengine as mge\n'), ((9383, 9461), 'megengine.functional.concat', 'F.concat', (['[x_shape[:-1], (self.num_attention_heads, self.attention_head_size)]'], {}), '([x_shape[:-1], (self.num_attention_heads, self.attention_head_size)])\n', (9391, 9461), True, 'import megengine.functional as F\n'), ((10768, 10806), 'megengine.functional.matmul', 'F.matmul', (['attention_probs', 'value_layer'], {}), '(attention_probs, value_layer)\n', (10776, 10806), True, 'import megengine.functional as F\n'), ((10943, 10974), 'megengine.tensor', 'mge.tensor', (['context_layer.shape'], {}), '(context_layer.shape)\n', (10953, 10974), True, 'import megengine as mge\n'), ((11009, 11059), 'megengine.functional.concat', 'F.concat', (['[context_shape[:-2], self.all_head_size]'], {}), '([context_shape[:-2], self.all_head_size])\n', (11017, 11059), True, 'import megengine.functional as F\n'), ((11272, 11318), 'megengine.module.Linear', 'Linear', (['config.hidden_size', 'config.hidden_size'], {}), '(config.hidden_size, config.hidden_size)\n', (11278, 11318), False, 'from megengine.module import Dropout, Embedding, Linear, Module, Sequential\n'), ((11412, 11447), 'megengine.module.Dropout', 'Dropout', (['config.hidden_dropout_prob'], {}), '(config.hidden_dropout_prob)\n', (11419, 11447), 
False, 'from megengine.module import Dropout, Embedding, Linear, Module, Sequential\n'), ((12210, 12262), 'megengine.module.Linear', 'Linear', (['config.hidden_size', 'config.intermediate_size'], {}), '(config.hidden_size, config.intermediate_size)\n', (12216, 12262), False, 'from megengine.module import Dropout, Embedding, Linear, Module, Sequential\n'), ((12736, 12788), 'megengine.module.Linear', 'Linear', (['config.intermediate_size', 'config.hidden_size'], {}), '(config.intermediate_size, config.hidden_size)\n', (12742, 12788), False, 'from megengine.module import Dropout, Embedding, Linear, Module, Sequential\n'), ((12882, 12917), 'megengine.module.Dropout', 'Dropout', (['config.hidden_dropout_prob'], {}), '(config.hidden_dropout_prob)\n', (12889, 12917), False, 'from megengine.module import Dropout, Embedding, Linear, Module, Sequential\n'), ((14562, 14608), 'megengine.module.Linear', 'Linear', (['config.hidden_size', 'config.hidden_size'], {}), '(config.hidden_size, config.hidden_size)\n', (14568, 14608), False, 'from megengine.module import Dropout, Embedding, Linear, Module, Sequential\n'), ((18962, 18999), 'megengine.functional.expand_dims', 'F.expand_dims', (['attention_mask', '(1, 2)'], {}), '(attention_mask, (1, 2))\n', (18975, 18999), True, 'import megengine.functional as F\n'), ((22709, 22744), 'megengine.module.Dropout', 'Dropout', (['config.hidden_dropout_prob'], {}), '(config.hidden_dropout_prob)\n', (22716, 22744), False, 'from megengine.module import Dropout, Embedding, Linear, Module, Sequential\n'), ((22771, 22809), 'megengine.module.Linear', 'Linear', (['config.hidden_size', 'num_labels'], {}), '(config.hidden_size, num_labels)\n', (22777, 22809), False, 'from megengine.module import Dropout, Embedding, Linear, Module, Sequential\n'), ((24333, 24357), 'os.path.exists', 'os.path.exists', (['data_dir'], {}), '(data_dir)\n', (24347, 24357), False, 'import os\n'), ((24367, 24388), 'os.makedirs', 'os.makedirs', (['data_dir'], {}), '(data_dir)\n', (24378, 24388), False, 'import os\n'), ((5485, 5523), 'io.open', 'open', (['json_file', '"""r"""'], {'encoding': '"""utf-8"""'}), "(json_file, 'r', encoding='utf-8')\n", (5489, 5523), False, 'from io import open\n'), ((5597, 5613), 'json.loads', 'json.loads', (['text'], {}), '(text)\n', (5607, 5613), False, 'import json\n'), ((6108, 6151), 'io.open', 'open', (['json_file_path', '"""w"""'], {'encoding': '"""utf-8"""'}), "(json_file_path, 'w', encoding='utf-8')\n", (6112, 6151), False, 'from io import open\n'), ((7757, 7780), 'megengine.functional.zeros_like', 'F.zeros_like', (['input_ids'], {}), '(input_ids)\n', (7769, 7780), True, 'import megengine.functional as F\n'), ((7902, 7932), 'megengine.functional.expand_dims', 'F.expand_dims', (['position_ids', '(0)'], {}), '(position_ids, 0)\n', (7915, 7932), True, 'import megengine.functional as F\n'), ((10185, 10220), 'math.sqrt', 'math.sqrt', (['self.attention_head_size'], {}), '(self.attention_head_size)\n', (10194, 10220), False, 'import math\n'), ((18332, 18354), 'megengine.functional.ones_like', 'F.ones_like', (['input_ids'], {}), '(input_ids)\n', (18343, 18354), True, 'import megengine.functional as F\n'), ((18419, 18442), 'megengine.functional.zeros_like', 'F.zeros_like', (['input_ids'], {}), '(input_ids)\n', (18431, 18442), True, 'import megengine.functional as F\n'), ((3928, 3987), 'io.open', 'open', (['vocab_size_or_config_json_file', '"""r"""'], {'encoding': '"""utf-8"""'}), "(vocab_size_or_config_json_file, 'r', encoding='utf-8')\n", (3932, 3987), False, 'from io import 
open\n'), ((7805, 7846), 'megengine.functional.linspace', 'F.linspace', (['(0)', '(seq_length - 1)', 'seq_length'], {}), '(0, seq_length - 1, seq_length)\n', (7815, 7846), True, 'import megengine.functional as F\n'), ((1797, 1816), 'megengine.functional.sqrt', 'F.sqrt', (['(2 / math.pi)'], {}), '(2 / math.pi)\n', (1803, 1816), True, 'import megengine.functional as F\n'), ((6444, 6464), 'numpy.ones', 'np.ones', (['hidden_size'], {}), '(hidden_size)\n', (6451, 6464), True, 'import numpy as np\n'), ((6515, 6536), 'numpy.zeros', 'np.zeros', (['hidden_size'], {}), '(hidden_size)\n', (6523, 6536), True, 'import numpy as np\n')]
|
#!/usr/bin/env mdl
# This file wraps the nms opr in a cleaner way than calling lib_nms directly
import ctypes
import os
import struct
import numpy as np
import megengine as mge
import megengine.functional as F
from megengine._internal.craniotome import CraniotomeBase
from megengine.core.tensor import wrap_io_tensor
_current_path = os.path.dirname(os.path.abspath(__file__))
_so_path = os.path.join(_current_path, "lib_nms.so")
try:
_so_lib = ctypes.CDLL(_so_path)
except Exception:
import subprocess
mge_path = os.path.join(os.path.dirname(mge.__file__), "_internal", "include")
assert os.path.exists(mge_path), "{} file not found".format(mge_path)
src_file = os.path.join(_current_path, "gpu_nms", "nms.cu")
assert os.path.exists(src_file), "{} file not found".format(src_file)
cmd = (
"nvcc -I {} -shared -o {} -Xcompiler '-fno-strict-aliasing -fPIC' {}".format(
mge_path, _so_path, src_file
)
)
subprocess.check_call(cmd, shell=True)
_so_lib = ctypes.CDLL(_so_path)
_TYPE_POINTER = ctypes.c_void_p
_TYPE_INT = ctypes.c_int32
_TYPE_FLOAT = ctypes.c_float
_so_lib.NMSForwardGpu.argtypes = [
_TYPE_POINTER,
_TYPE_POINTER,
_TYPE_POINTER,
_TYPE_POINTER,
_TYPE_FLOAT,
_TYPE_INT,
_TYPE_POINTER,
]
_so_lib.NMSForwardGpu.restype = _TYPE_INT
_so_lib.CreateHostDevice.restype = _TYPE_POINTER
class NMSCran(CraniotomeBase):
__nr_inputs__ = 1
__nr_outputs__ = 3
def setup(self, iou_threshold, max_output):
self._iou_threshold = iou_threshold
self._max_output = max_output
# Load the necessary host device
self._host_device = _so_lib.CreateHostDevice()
def execute(self, inputs, outputs):
box_tensor_ptr = inputs[0].pubapi_dev_tensor_ptr
output_tensor_ptr = outputs[0].pubapi_dev_tensor_ptr
output_num_tensor_ptr = outputs[1].pubapi_dev_tensor_ptr
mask_tensor_ptr = outputs[2].pubapi_dev_tensor_ptr
_so_lib.NMSForwardGpu(
box_tensor_ptr,
mask_tensor_ptr,
output_tensor_ptr,
output_num_tensor_ptr,
self._iou_threshold,
self._max_output,
self._host_device,
)
def grad(self, wrt_idx, inputs, outputs, out_grad):
return 0
def init_output_dtype(self, input_dtypes):
return [np.int32, np.int32, np.int32]
def get_serialize_params(self):
return ("nms", struct.pack("fi", self._iou_threshold, self._max_output))
def infer_shape(self, inp_shapes):
nr_box = inp_shapes[0][0]
threadsPerBlock = 64
output_size = nr_box
# here we compute the number of int32 used in mask_outputs.
# In original version, we compute the bytes only.
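        # Worked example (illustrative, added): with nr_box = 100 and threadsPerBlock = 64,
        # mask_size = 100 * (100 // 64 + 1) * 8 / 4 = 400 int32 values.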
mask_size = int(
nr_box
* (nr_box // threadsPerBlock + int((nr_box % threadsPerBlock) > 0))
* 8
/ 4
)
return [[output_size], [1], [mask_size]]
@wrap_io_tensor
def gpu_nms(box, iou_threshold, max_output):
keep, num, _ = NMSCran.make(box, iou_threshold=iou_threshold, max_output=max_output)
return keep[:num]
def batched_nms(boxes, scores, idxs, iou_threshold, num_keep, use_offset=False):
if use_offset:
boxes_offset = (
mge.tensor([0, 0, 1, 1], device=boxes.device)
.reshape(1, 4)
.broadcast(boxes.shapeof(0), 4)
)
boxes = boxes - boxes_offset
max_coordinate = boxes.max()
offsets = idxs * (max_coordinate + 1)
boxes_for_nms = boxes + offsets.reshape(-1, 1).broadcast(boxes.shapeof(0), 4)
boxes_with_scores = F.concat([boxes_for_nms, scores.reshape(-1, 1)], axis=1)
keep_inds = gpu_nms(boxes_with_scores, iou_threshold, num_keep)
return keep_inds
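# Usage sketch (illustrative; assumes a CUDA device and a successfully built lib_nms.so):
#   boxes: (N, 4) float tensor, scores: (N,), idxs: (N,) per-box class indices.
#   keep_inds = batched_nms(boxes, scores, idxs, iou_threshold=0.5, num_keep=100)
#   keep_inds then holds the indices of at most num_keep boxes kept by class-aware NMS.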
|
[
"megengine.tensor"
] |
[((380, 421), 'os.path.join', 'os.path.join', (['_current_path', '"""lib_nms.so"""'], {}), "(_current_path, 'lib_nms.so')\n", (392, 421), False, 'import os\n'), ((342, 367), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (357, 367), False, 'import os\n'), ((441, 462), 'ctypes.CDLL', 'ctypes.CDLL', (['_so_path'], {}), '(_so_path)\n', (452, 462), False, 'import ctypes\n'), ((597, 621), 'os.path.exists', 'os.path.exists', (['mge_path'], {}), '(mge_path)\n', (611, 621), False, 'import os\n'), ((675, 723), 'os.path.join', 'os.path.join', (['_current_path', '"""gpu_nms"""', '"""nms.cu"""'], {}), "(_current_path, 'gpu_nms', 'nms.cu')\n", (687, 723), False, 'import os\n'), ((735, 759), 'os.path.exists', 'os.path.exists', (['src_file'], {}), '(src_file)\n', (749, 759), False, 'import os\n'), ((958, 996), 'subprocess.check_call', 'subprocess.check_call', (['cmd'], {'shell': '(True)'}), '(cmd, shell=True)\n', (979, 996), False, 'import subprocess\n'), ((1011, 1032), 'ctypes.CDLL', 'ctypes.CDLL', (['_so_path'], {}), '(_so_path)\n', (1022, 1032), False, 'import ctypes\n'), ((531, 560), 'os.path.dirname', 'os.path.dirname', (['mge.__file__'], {}), '(mge.__file__)\n', (546, 560), False, 'import os\n'), ((2486, 2542), 'struct.pack', 'struct.pack', (['"""fi"""', 'self._iou_threshold', 'self._max_output'], {}), "('fi', self._iou_threshold, self._max_output)\n", (2497, 2542), False, 'import struct\n'), ((3330, 3375), 'megengine.tensor', 'mge.tensor', (['[0, 0, 1, 1]'], {'device': 'boxes.device'}), '([0, 0, 1, 1], device=boxes.device)\n', (3340, 3375), True, 'import megengine as mge\n')]
|
# -*- coding: utf-8 -*-
# Copyright 2019 - present, Facebook, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ---------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2020 Megvii Inc. All rights reserved.
# ---------------------------------------------------------------------
import numpy as np
import megengine.functional as F
import megengine.module as M
from megengine import Parameter
class FrozenBatchNorm2d(M.Module):
"""
    BatchNorm2d whose weight, bias, running_mean and running_var
    are immutable.
"""
def __init__(self, num_features, eps=1e-5):
super().__init__()
self.num_features = num_features
self.eps = eps
self.weight = Parameter(np.ones(num_features, dtype=np.float32))
self.bias = Parameter(np.zeros(num_features, dtype=np.float32))
self.running_mean = Parameter(np.zeros((1, num_features, 1, 1), dtype=np.float32))
self.running_var = Parameter(np.ones((1, num_features, 1, 1), dtype=np.float32))
def forward(self, x):
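        # Explanatory note (added): with frozen statistics, batch norm reduces to a fixed
        # per-channel affine map y = x * scale + shift, where scale = weight / sqrt(running_var + eps)
        # and shift = bias - running_mean * scale.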
scale = self.weight.reshape(1, -1, 1, 1) * (
1.0 / F.sqrt(self.running_var + self.eps)
)
bias = self.bias.reshape(1, -1, 1, 1) - self.running_mean * scale
return x * scale.detach() + bias.detach()
class GroupNorm(M.Module):
def __init__(self, num_groups, num_channels, eps=1e-5, affine=True):
super().__init__()
self.num_groups = num_groups
self.num_channels = num_channels
self.eps = eps
self.affine = affine
if self.affine:
self.weight = Parameter(np.ones(num_channels, dtype=np.float32))
self.bias = Parameter(np.zeros(num_channels, dtype=np.float32))
else:
self.weight = None
self.bias = None
self.reset_parameters()
def reset_parameters(self):
if self.affine:
M.init.ones_(self.weight)
M.init.zeros_(self.bias)
def forward(self, x):
output = x.reshape(x.shape[0], self.num_groups, -1)
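        # Shape note (illustrative, added): x of shape (N, C, H, W) becomes
        # (N, num_groups, (C // num_groups) * H * W), assuming C is divisible by num_groups,
        # so the statistics below are computed independently per channel group.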
mean = F.mean(output, axis=2, keepdims=True)
mean2 = F.mean(output ** 2, axis=2, keepdims=True)
var = mean2 - mean * mean
output = (output - mean) / F.sqrt(var + self.eps)
output = output.reshape(x.shape)
if self.affine:
output = self.weight.reshape(1, -1, 1, 1) * output + \
self.bias.reshape(1, -1, 1, 1)
return output
def get_norm(norm):
"""
Args:
        norm (str): currently supports "BN", "SyncBN", "FrozenBN" and "GN"
Returns:
M.Module or None: the normalization layer
"""
if norm is None:
return None
norm = {
"BN": M.BatchNorm2d,
"SyncBN": M.SyncBatchNorm,
"FrozenBN": FrozenBatchNorm2d,
"GN": GroupNorm,
}[norm]
return norm
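# Usage sketch (illustrative): get_norm("FrozenBN") returns the FrozenBatchNorm2d class, so a layer
# is built as get_norm("FrozenBN")(num_features); "GN" maps to GroupNorm, which also needs the
# group count, e.g. get_norm("GN")(32, 256); get_norm(None) returns None.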
|
[
"megengine.functional.mean",
"megengine.functional.sqrt",
"megengine.module.init.zeros_",
"megengine.module.init.ones_"
] |
[((3029, 3066), 'megengine.functional.mean', 'F.mean', (['output'], {'axis': '(2)', 'keepdims': '(True)'}), '(output, axis=2, keepdims=True)\n', (3035, 3066), True, 'import megengine.functional as F\n'), ((3083, 3125), 'megengine.functional.mean', 'F.mean', (['(output ** 2)'], {'axis': '(2)', 'keepdims': '(True)'}), '(output ** 2, axis=2, keepdims=True)\n', (3089, 3125), True, 'import megengine.functional as F\n'), ((1691, 1730), 'numpy.ones', 'np.ones', (['num_features'], {'dtype': 'np.float32'}), '(num_features, dtype=np.float32)\n', (1698, 1730), True, 'import numpy as np\n'), ((1762, 1802), 'numpy.zeros', 'np.zeros', (['num_features'], {'dtype': 'np.float32'}), '(num_features, dtype=np.float32)\n', (1770, 1802), True, 'import numpy as np\n'), ((1843, 1894), 'numpy.zeros', 'np.zeros', (['(1, num_features, 1, 1)'], {'dtype': 'np.float32'}), '((1, num_features, 1, 1), dtype=np.float32)\n', (1851, 1894), True, 'import numpy as np\n'), ((1933, 1983), 'numpy.ones', 'np.ones', (['(1, num_features, 1, 1)'], {'dtype': 'np.float32'}), '((1, num_features, 1, 1), dtype=np.float32)\n', (1940, 1983), True, 'import numpy as np\n'), ((2864, 2889), 'megengine.module.init.ones_', 'M.init.ones_', (['self.weight'], {}), '(self.weight)\n', (2876, 2889), True, 'import megengine.module as M\n'), ((2902, 2926), 'megengine.module.init.zeros_', 'M.init.zeros_', (['self.bias'], {}), '(self.bias)\n', (2915, 2926), True, 'import megengine.module as M\n'), ((3196, 3218), 'megengine.functional.sqrt', 'F.sqrt', (['(var + self.eps)'], {}), '(var + self.eps)\n', (3202, 3218), True, 'import megengine.functional as F\n'), ((2083, 2118), 'megengine.functional.sqrt', 'F.sqrt', (['(self.running_var + self.eps)'], {}), '(self.running_var + self.eps)\n', (2089, 2118), True, 'import megengine.functional as F\n'), ((2572, 2611), 'numpy.ones', 'np.ones', (['num_channels'], {'dtype': 'np.float32'}), '(num_channels, dtype=np.float32)\n', (2579, 2611), True, 'import numpy as np\n'), ((2647, 2687), 'numpy.zeros', 'np.zeros', (['num_channels'], {'dtype': 'np.float32'}), '(num_channels, dtype=np.float32)\n', (2655, 2687), True, 'import numpy as np\n')]
|
# BSD 3-Clause License
# Copyright (c) <NAME> 2016,
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# ------------------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2019 Megvii Inc. All rights reserved.
# ------------------------------------------------------------------------------
import megengine.functional as F
import megengine.module as M
__all__ = ['MobileNetV2', 'mobilenet_v2']
def _make_divisible(v, divisor, min_value=None):
"""
This function is taken from the original tf repo.
It ensures that all layers have a channel number that is divisible by 8
It can be seen here:
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
:param v:
:param divisor:
:param min_value:
:return:
"""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
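# Worked examples (illustrative, added): _make_divisible(37, 8) rounds to 40 and keeps it;
# _make_divisible(18, 8) first rounds to 16, but 16 < 0.9 * 18, so the result is bumped to 24.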
class InvertedResidual(M.Module):
def __init__(self, inp, oup, stride, expand_ratio):
super(InvertedResidual, self).__init__()
self.stride = stride
assert stride in [1, 2]
hidden_dim = int(round(inp * expand_ratio))
self.use_res_connect = self.stride == 1 and inp == oup
layers = []
if expand_ratio != 1:
# pw
layers.append(M.ConvBnRelu2d(inp, hidden_dim, kernel_size=1, bias=False))
layers.extend([
# dw
M.ConvBnRelu2d(hidden_dim, hidden_dim, kernel_size=3, padding=1,
stride=stride, groups=hidden_dim, bias=False),
# pw-linear
M.ConvBn2d(hidden_dim, oup, kernel_size=1, bias=False)
])
self.conv = M.Sequential(*layers)
self.add = M.Elemwise("ADD")
def forward(self, x):
if self.use_res_connect:
return self.add(x, self.conv(x))
else:
return self.conv(x)
class MobileNetV2(M.Module):
def __init__(self, num_classes=1000, width_mult=1.0, inverted_residual_setting=None, round_nearest=8):
"""
MobileNet V2 main class
Args:
num_classes (int): Number of classes
width_mult (float): Width multiplier - adjusts number of channels in each layer by this amount
inverted_residual_setting: Network structure
round_nearest (int): Round the number of channels in each layer to be a multiple of this number
Set to 1 to turn off rounding
"""
super(MobileNetV2, self).__init__()
block = InvertedResidual
input_channel = 32
last_channel = 1280
if inverted_residual_setting is None:
inverted_residual_setting = [
# t, c, n, s
[1, 16, 1, 1],
[6, 24, 2, 2],
[6, 32, 3, 2],
[6, 64, 4, 2],
[6, 96, 3, 1],
[6, 160, 3, 2],
[6, 320, 1, 1],
]
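        # Explanatory note (added): in each row, t is the expansion ratio, c the base output
        # channel count (scaled by width_mult), n the number of repeated blocks, and s the
        # stride of the first block in the group (the remaining blocks use stride 1).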
# only check the first element, assuming user knows t,c,n,s are required
if len(inverted_residual_setting) == 0 or len(inverted_residual_setting[0]) != 4:
raise ValueError("inverted_residual_setting should be non-empty "
"or a 4-element list, got {}".format(inverted_residual_setting))
# building first layer
input_channel = _make_divisible(input_channel * width_mult, round_nearest)
self.last_channel = _make_divisible(last_channel * max(1.0, width_mult), round_nearest)
features = [M.ConvBnRelu2d(3, input_channel, kernel_size=3, padding=1, stride=2, bias=False)]
# building inverted residual blocks
for t, c, n, s in inverted_residual_setting:
output_channel = _make_divisible(c * width_mult, round_nearest)
for i in range(n):
stride = s if i == 0 else 1
features.append(block(input_channel, output_channel, stride, expand_ratio=t))
input_channel = output_channel
# building last several layers
features.append(M.ConvBnRelu2d(input_channel, self.last_channel, kernel_size=1, bias=False))
# make it M.Sequential
self.features = M.Sequential(*features)
# building classifier
self.classifier = M.Sequential(
M.Dropout(0.2),
M.Linear(self.last_channel, num_classes),
)
self.quant = M.QuantStub()
self.dequant = M.DequantStub()
# weight initialization
for m in self.modules():
if isinstance(m, M.Conv2d):
M.init.msra_normal_(m.weight, mode='fan_out')
if m.bias is not None:
M.init.zeros_(m.bias)
elif isinstance(m, M.BatchNorm2d):
M.init.ones_(m.weight)
M.init.zeros_(m.bias)
elif isinstance(m, M.Linear):
M.init.normal_(m.weight, 0, 0.01)
M.init.zeros_(m.bias)
def forward(self, x):
x = self.quant(x)
x = self.features(x)
x = F.avg_pool2d(x, 7)
x = F.flatten(x, 1)
x = self.dequant(x)
x = self.classifier(x)
return x
def mobilenet_v2(**kwargs):
"""
Constructs a MobileNetV2 architecture from
`"MobileNetV2: Inverted Residuals and Linear Bottlenecks" <https://arxiv.org/abs/1801.04381>`_.
"""
model = MobileNetV2(**kwargs)
return model
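# Usage sketch (illustrative; mge and np are not imported in this file, shown only as a hint):
#   model = mobilenet_v2(num_classes=1000)
#   logits = model(mge.tensor(np.random.random((1, 3, 224, 224)).astype("float32")))
# 224x224 inputs are expected, since the backbone reduces them to 7x7 before the fixed 7x7 pool.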
|
[
"megengine.module.Elemwise",
"megengine.module.ConvBnRelu2d",
"megengine.module.Dropout",
"megengine.functional.flatten",
"megengine.module.DequantStub",
"megengine.module.Linear",
"megengine.module.ConvBn2d",
"megengine.module.init.msra_normal_",
"megengine.module.init.zeros_",
"megengine.module.init.normal_",
"megengine.module.init.ones_",
"megengine.functional.avg_pool2d",
"megengine.module.Sequential",
"megengine.module.QuantStub"
] |
[((3759, 3780), 'megengine.module.Sequential', 'M.Sequential', (['*layers'], {}), '(*layers)\n', (3771, 3780), True, 'import megengine.module as M\n'), ((3800, 3817), 'megengine.module.Elemwise', 'M.Elemwise', (['"""ADD"""'], {}), "('ADD')\n", (3810, 3817), True, 'import megengine.module as M\n'), ((6265, 6288), 'megengine.module.Sequential', 'M.Sequential', (['*features'], {}), '(*features)\n', (6277, 6288), True, 'import megengine.module as M\n'), ((6474, 6487), 'megengine.module.QuantStub', 'M.QuantStub', ([], {}), '()\n', (6485, 6487), True, 'import megengine.module as M\n'), ((6511, 6526), 'megengine.module.DequantStub', 'M.DequantStub', ([], {}), '()\n', (6524, 6526), True, 'import megengine.module as M\n'), ((7124, 7142), 'megengine.functional.avg_pool2d', 'F.avg_pool2d', (['x', '(7)'], {}), '(x, 7)\n', (7136, 7142), True, 'import megengine.functional as F\n'), ((7155, 7170), 'megengine.functional.flatten', 'F.flatten', (['x', '(1)'], {}), '(x, 1)\n', (7164, 7170), True, 'import megengine.functional as F\n'), ((5599, 5684), 'megengine.module.ConvBnRelu2d', 'M.ConvBnRelu2d', (['(3)', 'input_channel'], {'kernel_size': '(3)', 'padding': '(1)', 'stride': '(2)', 'bias': '(False)'}), '(3, input_channel, kernel_size=3, padding=1, stride=2, bias=False\n )\n', (5613, 5684), True, 'import megengine.module as M\n'), ((6133, 6208), 'megengine.module.ConvBnRelu2d', 'M.ConvBnRelu2d', (['input_channel', 'self.last_channel'], {'kernel_size': '(1)', 'bias': '(False)'}), '(input_channel, self.last_channel, kernel_size=1, bias=False)\n', (6147, 6208), True, 'import megengine.module as M\n'), ((6372, 6386), 'megengine.module.Dropout', 'M.Dropout', (['(0.2)'], {}), '(0.2)\n', (6381, 6386), True, 'import megengine.module as M\n'), ((6400, 6440), 'megengine.module.Linear', 'M.Linear', (['self.last_channel', 'num_classes'], {}), '(self.last_channel, num_classes)\n', (6408, 6440), True, 'import megengine.module as M\n'), ((3385, 3443), 'megengine.module.ConvBnRelu2d', 'M.ConvBnRelu2d', (['inp', 'hidden_dim'], {'kernel_size': '(1)', 'bias': '(False)'}), '(inp, hidden_dim, kernel_size=1, bias=False)\n', (3399, 3443), True, 'import megengine.module as M\n'), ((3498, 3613), 'megengine.module.ConvBnRelu2d', 'M.ConvBnRelu2d', (['hidden_dim', 'hidden_dim'], {'kernel_size': '(3)', 'padding': '(1)', 'stride': 'stride', 'groups': 'hidden_dim', 'bias': '(False)'}), '(hidden_dim, hidden_dim, kernel_size=3, padding=1, stride=\n stride, groups=hidden_dim, bias=False)\n', (3512, 3613), True, 'import megengine.module as M\n'), ((3673, 3727), 'megengine.module.ConvBn2d', 'M.ConvBn2d', (['hidden_dim', 'oup'], {'kernel_size': '(1)', 'bias': '(False)'}), '(hidden_dim, oup, kernel_size=1, bias=False)\n', (3683, 3727), True, 'import megengine.module as M\n'), ((6649, 6694), 'megengine.module.init.msra_normal_', 'M.init.msra_normal_', (['m.weight'], {'mode': '"""fan_out"""'}), "(m.weight, mode='fan_out')\n", (6668, 6694), True, 'import megengine.module as M\n'), ((6754, 6775), 'megengine.module.init.zeros_', 'M.init.zeros_', (['m.bias'], {}), '(m.bias)\n', (6767, 6775), True, 'import megengine.module as M\n'), ((6839, 6861), 'megengine.module.init.ones_', 'M.init.ones_', (['m.weight'], {}), '(m.weight)\n', (6851, 6861), True, 'import megengine.module as M\n'), ((6878, 6899), 'megengine.module.init.zeros_', 'M.init.zeros_', (['m.bias'], {}), '(m.bias)\n', (6891, 6899), True, 'import megengine.module as M\n'), ((6958, 6991), 'megengine.module.init.normal_', 'M.init.normal_', (['m.weight', '(0)', '(0.01)'], {}), '(m.weight, 0, 
0.01)\n', (6972, 6991), True, 'import megengine.module as M\n'), ((7008, 7029), 'megengine.module.init.zeros_', 'M.init.zeros_', (['m.bias'], {}), '(m.bias)\n', (7021, 7029), True, 'import megengine.module as M\n')]
|
# -*- coding: utf-8 -*-
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine.functional as F
class Matcher:
def __init__(self, thresholds, labels, allow_low_quality_matches=False):
assert len(thresholds) + 1 == len(labels), "thresholds and labels are not matched"
assert all(low <= high for (low, high) in zip(thresholds[:-1], thresholds[1:]))
thresholds.append(float("inf"))
thresholds.insert(0, -float("inf"))
self.thresholds = thresholds
self.labels = labels
self.allow_low_quality_matches = allow_low_quality_matches
def __call__(self, matrix):
"""
matrix(tensor): A two dim tensor with shape of (N, M). N is number of GT-boxes,
while M is the number of anchors in detection.
"""
assert len(matrix.shape) == 2
max_scores = matrix.max(axis=0)
match_indices = F.argmax(matrix, axis=0)
# default ignore label: -1
labels = F.full_like(match_indices, -1)
for label, low, high in zip(self.labels, self.thresholds[:-1], self.thresholds[1:]):
mask = (max_scores >= low) & (max_scores < high)
labels[mask] = label
if self.allow_low_quality_matches:
mask = (matrix == F.max(matrix, axis=1, keepdims=True)).sum(axis=0) > 0
labels[mask] = 1
return match_indices, labels
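# Worked example (illustrative, added): with thresholds=[0.3, 0.7] and labels=[0, -1, 1], an anchor
# whose best IoU over all GT boxes is below 0.3 gets label 0 (background), one in [0.3, 0.7) gets
# -1 (ignored), and one at or above 0.7 gets 1 (foreground). With allow_low_quality_matches, the
# best-matching anchor of every GT box is additionally labeled 1, even if its IoU is below 0.7.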
|
[
"megengine.functional.max",
"megengine.functional.argmax",
"megengine.functional.full_like"
] |
[((1208, 1232), 'megengine.functional.argmax', 'F.argmax', (['matrix'], {'axis': '(0)'}), '(matrix, axis=0)\n', (1216, 1232), True, 'import megengine.functional as F\n'), ((1286, 1316), 'megengine.functional.full_like', 'F.full_like', (['match_indices', '(-1)'], {}), '(match_indices, -1)\n', (1297, 1316), True, 'import megengine.functional as F\n'), ((1579, 1615), 'megengine.functional.max', 'F.max', (['matrix'], {'axis': '(1)', 'keepdims': '(True)'}), '(matrix, axis=1, keepdims=True)\n', (1584, 1615), True, 'import megengine.functional as F\n')]
|