code (string, lengths 110 to 64.5k) | apis (list) | extract_api (string, lengths 123 to 69.9k) |
---|---|---|
import argparse
import megengine.core.tensor.megbrain_graph as G
import megengine.utils.comp_graph_tools as cgtools
from megengine.core._imperative_rt import make_h2d
def change_batch_and_dump(inp_file, oup_file):
cg, _, outputs = G.load_graph(inp_file)
inputs = cgtools.get_dep_vars(outputs[0], "Host2DeviceCopy")
replace_dict = {}
for var in inputs:
n_shape = list(var.shape)
n_shape[0] = 1
new_input = make_h2d(cg, "xpux", var.dtype, n_shape, var.name)
replace_dict[var] = new_input
new_outputs = cgtools.replace_vars(outputs, replace_dict)
dump_content, _ = G.dump_graph(map(G.VarNode, new_outputs), keep_var_name=2)
with open(oup_file, "wb") as file:
file.write(dump_content)
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"-i", "--input", required=True, type=str, help="Input megengine dump model file"
)
parser.add_argument(
"-o", "--output", required=True, type=str, help="Output batch1 model file"
)
args = parser.parse_args()
change_batch_and_dump(args.input, args.output)
if __name__ == "__main__":
main()
|
[
"megengine.core._imperative_rt.make_h2d",
"megengine.utils.comp_graph_tools.replace_vars",
"megengine.utils.comp_graph_tools.get_dep_vars",
"megengine.core.tensor.megbrain_graph.load_graph"
] |
[((238, 260), 'megengine.core.tensor.megbrain_graph.load_graph', 'G.load_graph', (['inp_file'], {}), '(inp_file)\n', (250, 260), True, 'import megengine.core.tensor.megbrain_graph as G\n'), ((274, 325), 'megengine.utils.comp_graph_tools.get_dep_vars', 'cgtools.get_dep_vars', (['outputs[0]', '"""Host2DeviceCopy"""'], {}), "(outputs[0], 'Host2DeviceCopy')\n", (294, 325), True, 'import megengine.utils.comp_graph_tools as cgtools\n'), ((556, 599), 'megengine.utils.comp_graph_tools.replace_vars', 'cgtools.replace_vars', (['outputs', 'replace_dict'], {}), '(outputs, replace_dict)\n', (576, 599), True, 'import megengine.utils.comp_graph_tools as cgtools\n'), ((780, 805), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (803, 805), False, 'import argparse\n'), ((448, 498), 'megengine.core._imperative_rt.make_h2d', 'make_h2d', (['cg', '"""xpux"""', 'var.dtype', 'n_shape', 'var.name'], {}), "(cg, 'xpux', var.dtype, n_shape, var.name)\n", (456, 498), False, 'from megengine.core._imperative_rt import make_h2d\n')]
|
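Each row above pairs a code sample ("code") with the fully qualified library calls it makes ("apis") and per-call extraction records with character offsets ("extract_api"). Below is a minimal sketch, assuming Python 3.8+ and only the standard ast module, of how comparable call records could be collected from a sample; it illustrates the idea rather than the pipeline that produced the column, and list_calls is a hypothetical helper name.

import ast

def list_calls(source: str):
    """Collect (callee_text, line_number) pairs for every call in a code sample."""
    records = []
    for node in ast.walk(ast.parse(source)):
        if isinstance(node, ast.Call):
            # Recover the callee as written, e.g. "G.load_graph" or "make_h2d".
            callee = ast.get_source_segment(source, node.func)
            records.append((callee, node.lineno))
    return records

# For the first sample this would report calls such as ("G.load_graph", ...)
# and ("cgtools.get_dep_vars", ...), keyed by line rather than byte offset.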
import megengine.module as M
import megengine.functional as F
from megengine import amp
from .update import BasicUpdateBlock
from .extractor import BasicEncoder
from .corr import AGCL
from .attention import PositionEncodingSine, LocalFeatureTransformer
class CREStereo(M.Module):
def __init__(self, max_disp=192, mixed_precision=False, test_mode=False):
super(CREStereo, self).__init__()
self.max_flow = max_disp
self.mixed_precision = mixed_precision
self.test_mode = test_mode
self.hidden_dim = 128
self.context_dim = 128
self.dropout = 0
# feature network and update block
self.fnet = BasicEncoder(
output_dim=256, norm_fn="instance", dropout=self.dropout
)
self.update_block = BasicUpdateBlock(
hidden_dim=self.hidden_dim, cor_planes=4 * 9, mask_size=4
)
# loftr
self.self_att_fn = LocalFeatureTransformer(
d_model=256, nhead=8, layer_names=["self"] * 1, attention="linear"
)
self.cross_att_fn = LocalFeatureTransformer(
d_model=256, nhead=8, layer_names=["cross"] * 1, attention="linear"
)
# adaptive search
self.search_num = 9
self.conv_offset_16 = M.Conv2d(
256, self.search_num * 2, kernel_size=3, stride=1, padding=1
)
self.conv_offset_8 = M.Conv2d(
256, self.search_num * 2, kernel_size=3, stride=1, padding=1
)
self.range_16 = 1
self.range_8 = 1
def freeze_bn(self):
for m in self.modules():
if isinstance(m, M.BatchNorm2d):
m.eval()
def unfold(self, x, kernel_size, dilation=1, padding=0, stride=1):
n, c, h, w = x.shape
if isinstance(kernel_size, tuple) or isinstance(kernel_size, list):
assert len(kernel_size) == 2
k1, k2 = kernel_size
else:
assert isinstance(kernel_size, int)
k1 = k2 = kernel_size
x = F.sliding_window(
x,
kernel_size=kernel_size,
dilation=dilation,
padding=padding,
stride=stride,
)
x = F.reshape(x, (n, c, -1, k1 * k2))
x = F.transpose(x, (0, 1, 3, 2))
x = F.reshape(x, (n, c * k1 * k2, -1))
return x
def convex_upsample(self, flow, mask, rate=4):
"""[H/rate, W/rate, 2] -> [H, W, 2]"""
N, _, H, W = flow.shape
mask = F.reshape(mask, (N, 1, 9, rate, rate, H, W))
mask = F.softmax(mask, axis=2)
up_flow = self.unfold(rate * flow, [3, 3], padding=1)
up_flow = F.reshape(up_flow, (N, 2, 9, 1, 1, H, W))
up_flow = F.sum(mask * up_flow, axis=2)
up_flow = F.transpose(up_flow, (0, 1, 4, 2, 5, 3))
return F.reshape(up_flow, (N, 2, rate * H, rate * W))
def zero_init(self, fmap):
N, C, H, W = fmap.shape
_x = F.zeros([N, 1, H, W], dtype="float32")
_y = F.zeros([N, 1, H, W], dtype="float32")
zero_flow = F.concat([_x, _y], axis=1).to(fmap.device)
return zero_flow
def forward(self, image1, image2, iters=10, flow_init=None):
image1 = 2 * (image1 / 255.0) - 1.0
image2 = 2 * (image2 / 255.0) - 1.0
hdim = self.hidden_dim
cdim = self.context_dim
# feature network
with amp.autocast(enabled=self.mixed_precision):
fmap1, fmap2 = self.fnet([image1, image2])
fmap1 = fmap1.astype("float32")
fmap2 = fmap2.astype("float32")
with amp.autocast(enabled=self.mixed_precision):
# 1/4 -> 1/8
# feature
fmap1_dw8 = F.avg_pool2d(fmap1, 2, stride=2)
fmap2_dw8 = F.avg_pool2d(fmap2, 2, stride=2)
# offset
offset_dw8 = self.conv_offset_8(fmap1_dw8)
offset_dw8 = self.range_8 * (F.sigmoid(offset_dw8) - 0.5) * 2.0
# context
net, inp = F.split(fmap1, [hdim], axis=1)
net = F.tanh(net)
inp = F.relu(inp)
net_dw8 = F.avg_pool2d(net, 2, stride=2)
inp_dw8 = F.avg_pool2d(inp, 2, stride=2)
# 1/4 -> 1/16
# feature
fmap1_dw16 = F.avg_pool2d(fmap1, 4, stride=4)
fmap2_dw16 = F.avg_pool2d(fmap2, 4, stride=4)
offset_dw16 = self.conv_offset_16(fmap1_dw16)
offset_dw16 = self.range_16 * (F.sigmoid(offset_dw16) - 0.5) * 2.0
# context
net_dw16 = F.avg_pool2d(net, 4, stride=4)
inp_dw16 = F.avg_pool2d(inp, 4, stride=4)
# positional encoding and self-attention
pos_encoding_fn_small = PositionEncodingSine(
d_model=256, max_shape=(image1.shape[2] // 16, image1.shape[3] // 16)
)
# 'n c h w -> n (h w) c'
x_tmp = pos_encoding_fn_small(fmap1_dw16)
fmap1_dw16 = F.reshape(
F.transpose(x_tmp, (0, 2, 3, 1)),
(x_tmp.shape[0], x_tmp.shape[2] * x_tmp.shape[3], x_tmp.shape[1]),
)
# 'n c h w -> n (h w) c'
x_tmp = pos_encoding_fn_small(fmap2_dw16)
fmap2_dw16 = F.reshape(
F.transpose(x_tmp, (0, 2, 3, 1)),
(x_tmp.shape[0], x_tmp.shape[2] * x_tmp.shape[3], x_tmp.shape[1]),
)
fmap1_dw16, fmap2_dw16 = self.self_att_fn(fmap1_dw16, fmap2_dw16)
fmap1_dw16, fmap2_dw16 = [
F.transpose(
F.reshape(x, (x.shape[0], image1.shape[2] // 16, -1, x.shape[2])),
(0, 3, 1, 2),
)
for x in [fmap1_dw16, fmap2_dw16]
]
corr_fn = AGCL(fmap1, fmap2)
corr_fn_dw8 = AGCL(fmap1_dw8, fmap2_dw8)
corr_fn_att_dw16 = AGCL(fmap1_dw16, fmap2_dw16, att=self.cross_att_fn)
# Cascaded refinement (1/16 + 1/8 + 1/4)
predictions = []
flow = None
flow_up = None
if flow_init is not None:
scale = fmap1.shape[2] / flow_init.shape[2]
flow = -scale * F.nn.interpolate(
flow_init,
size=(fmap1.shape[2], fmap1.shape[3]),
mode="bilinear",
align_corners=True,
)
else:
# zero initialization
flow_dw16 = self.zero_init(fmap1_dw16)
# Recurrent Update Module
# RUM: 1/16
for itr in range(iters // 2):
if itr % 2 == 0:
small_patch = False
else:
small_patch = True
flow_dw16 = flow_dw16.detach()
out_corrs = corr_fn_att_dw16(
flow_dw16, offset_dw16, small_patch=small_patch
)
with amp.autocast(enabled=self.mixed_precision):
net_dw16, up_mask, delta_flow = self.update_block(
net_dw16, inp_dw16, out_corrs, flow_dw16
)
flow_dw16 = flow_dw16 + delta_flow
flow = self.convex_upsample(flow_dw16, up_mask, rate=4)
flow_up = -4 * F.nn.interpolate(
flow,
size=(4 * flow.shape[2], 4 * flow.shape[3]),
mode="bilinear",
align_corners=True,
)
predictions.append(flow_up)
scale = fmap1_dw8.shape[2] / flow.shape[2]
flow_dw8 = -scale * F.nn.interpolate(
flow,
size=(fmap1_dw8.shape[2], fmap1_dw8.shape[3]),
mode="bilinear",
align_corners=True,
)
# RUM: 1/8
for itr in range(iters // 2):
if itr % 2 == 0:
small_patch = False
else:
small_patch = True
flow_dw8 = flow_dw8.detach()
out_corrs = corr_fn_dw8(flow_dw8, offset_dw8, small_patch=small_patch)
with amp.autocast(enabled=self.mixed_precision):
net_dw8, up_mask, delta_flow = self.update_block(
net_dw8, inp_dw8, out_corrs, flow_dw8
)
flow_dw8 = flow_dw8 + delta_flow
flow = self.convex_upsample(flow_dw8, up_mask, rate=4)
flow_up = -2 * F.nn.interpolate(
flow,
size=(2 * flow.shape[2], 2 * flow.shape[3]),
mode="bilinear",
align_corners=True,
)
predictions.append(flow_up)
scale = fmap1.shape[2] / flow.shape[2]
flow = -scale * F.nn.interpolate(
flow,
size=(fmap1.shape[2], fmap1.shape[3]),
mode="bilinear",
align_corners=True,
)
# RUM: 1/4
for itr in range(iters):
if itr % 2 == 0:
small_patch = False
else:
small_patch = True
flow = flow.detach()
out_corrs = corr_fn(flow, None, small_patch=small_patch, iter_mode=True)
with amp.autocast(enabled=self.mixed_precision):
net, up_mask, delta_flow = self.update_block(net, inp, out_corrs, flow)
flow = flow + delta_flow
flow_up = -self.convex_upsample(flow, up_mask, rate=4)
predictions.append(flow_up)
if self.test_mode:
return flow_up
return predictions
|
[
"megengine.functional.split",
"megengine.functional.sigmoid",
"megengine.functional.nn.interpolate",
"megengine.functional.softmax",
"megengine.functional.transpose",
"megengine.amp.autocast",
"megengine.functional.avg_pool2d",
"megengine.functional.relu",
"megengine.functional.sum",
"megengine.functional.zeros",
"megengine.functional.concat",
"megengine.module.Conv2d",
"megengine.functional.reshape",
"megengine.functional.sliding_window",
"megengine.functional.tanh"
] |
[((1276, 1346), 'megengine.module.Conv2d', 'M.Conv2d', (['(256)', '(self.search_num * 2)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(256, self.search_num * 2, kernel_size=3, stride=1, padding=1)\n', (1284, 1346), True, 'import megengine.module as M\n'), ((1398, 1468), 'megengine.module.Conv2d', 'M.Conv2d', (['(256)', '(self.search_num * 2)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(256, self.search_num * 2, kernel_size=3, stride=1, padding=1)\n', (1406, 1468), True, 'import megengine.module as M\n'), ((2030, 2130), 'megengine.functional.sliding_window', 'F.sliding_window', (['x'], {'kernel_size': 'kernel_size', 'dilation': 'dilation', 'padding': 'padding', 'stride': 'stride'}), '(x, kernel_size=kernel_size, dilation=dilation, padding=\n padding, stride=stride)\n', (2046, 2130), True, 'import megengine.functional as F\n'), ((2209, 2242), 'megengine.functional.reshape', 'F.reshape', (['x', '(n, c, -1, k1 * k2)'], {}), '(x, (n, c, -1, k1 * k2))\n', (2218, 2242), True, 'import megengine.functional as F\n'), ((2255, 2283), 'megengine.functional.transpose', 'F.transpose', (['x', '(0, 1, 3, 2)'], {}), '(x, (0, 1, 3, 2))\n', (2266, 2283), True, 'import megengine.functional as F\n'), ((2296, 2330), 'megengine.functional.reshape', 'F.reshape', (['x', '(n, c * k1 * k2, -1)'], {}), '(x, (n, c * k1 * k2, -1))\n', (2305, 2330), True, 'import megengine.functional as F\n'), ((2494, 2538), 'megengine.functional.reshape', 'F.reshape', (['mask', '(N, 1, 9, rate, rate, H, W)'], {}), '(mask, (N, 1, 9, rate, rate, H, W))\n', (2503, 2538), True, 'import megengine.functional as F\n'), ((2554, 2577), 'megengine.functional.softmax', 'F.softmax', (['mask'], {'axis': '(2)'}), '(mask, axis=2)\n', (2563, 2577), True, 'import megengine.functional as F\n'), ((2659, 2700), 'megengine.functional.reshape', 'F.reshape', (['up_flow', '(N, 2, 9, 1, 1, H, W)'], {}), '(up_flow, (N, 2, 9, 1, 1, H, W))\n', (2668, 2700), True, 'import megengine.functional as F\n'), ((2720, 2749), 'megengine.functional.sum', 'F.sum', (['(mask * up_flow)'], {'axis': '(2)'}), '(mask * up_flow, axis=2)\n', (2725, 2749), True, 'import megengine.functional as F\n'), ((2768, 2808), 'megengine.functional.transpose', 'F.transpose', (['up_flow', '(0, 1, 4, 2, 5, 3)'], {}), '(up_flow, (0, 1, 4, 2, 5, 3))\n', (2779, 2808), True, 'import megengine.functional as F\n'), ((2824, 2870), 'megengine.functional.reshape', 'F.reshape', (['up_flow', '(N, 2, rate * H, rate * W)'], {}), '(up_flow, (N, 2, rate * H, rate * W))\n', (2833, 2870), True, 'import megengine.functional as F\n'), ((2948, 2986), 'megengine.functional.zeros', 'F.zeros', (['[N, 1, H, W]'], {'dtype': '"""float32"""'}), "([N, 1, H, W], dtype='float32')\n", (2955, 2986), True, 'import megengine.functional as F\n'), ((3000, 3038), 'megengine.functional.zeros', 'F.zeros', (['[N, 1, H, W]'], {'dtype': '"""float32"""'}), "([N, 1, H, W], dtype='float32')\n", (3007, 3038), True, 'import megengine.functional as F\n'), ((3386, 3428), 'megengine.amp.autocast', 'amp.autocast', ([], {'enabled': 'self.mixed_precision'}), '(enabled=self.mixed_precision)\n', (3398, 3428), False, 'from megengine import amp\n'), ((3580, 3622), 'megengine.amp.autocast', 'amp.autocast', ([], {'enabled': 'self.mixed_precision'}), '(enabled=self.mixed_precision)\n', (3592, 3622), False, 'from megengine import amp\n'), ((3696, 3728), 'megengine.functional.avg_pool2d', 'F.avg_pool2d', (['fmap1', '(2)'], {'stride': '(2)'}), '(fmap1, 2, stride=2)\n', (3708, 3728), True, 'import megengine.functional as 
F\n'), ((3753, 3785), 'megengine.functional.avg_pool2d', 'F.avg_pool2d', (['fmap2', '(2)'], {'stride': '(2)'}), '(fmap2, 2, stride=2)\n', (3765, 3785), True, 'import megengine.functional as F\n'), ((3985, 4015), 'megengine.functional.split', 'F.split', (['fmap1', '[hdim]'], {'axis': '(1)'}), '(fmap1, [hdim], axis=1)\n', (3992, 4015), True, 'import megengine.functional as F\n'), ((4034, 4045), 'megengine.functional.tanh', 'F.tanh', (['net'], {}), '(net)\n', (4040, 4045), True, 'import megengine.functional as F\n'), ((4064, 4075), 'megengine.functional.relu', 'F.relu', (['inp'], {}), '(inp)\n', (4070, 4075), True, 'import megengine.functional as F\n'), ((4098, 4128), 'megengine.functional.avg_pool2d', 'F.avg_pool2d', (['net', '(2)'], {'stride': '(2)'}), '(net, 2, stride=2)\n', (4110, 4128), True, 'import megengine.functional as F\n'), ((4151, 4181), 'megengine.functional.avg_pool2d', 'F.avg_pool2d', (['inp', '(2)'], {'stride': '(2)'}), '(inp, 2, stride=2)\n', (4163, 4181), True, 'import megengine.functional as F\n'), ((4256, 4288), 'megengine.functional.avg_pool2d', 'F.avg_pool2d', (['fmap1', '(4)'], {'stride': '(4)'}), '(fmap1, 4, stride=4)\n', (4268, 4288), True, 'import megengine.functional as F\n'), ((4314, 4346), 'megengine.functional.avg_pool2d', 'F.avg_pool2d', (['fmap2', '(4)'], {'stride': '(4)'}), '(fmap2, 4, stride=4)\n', (4326, 4346), True, 'import megengine.functional as F\n'), ((4530, 4560), 'megengine.functional.avg_pool2d', 'F.avg_pool2d', (['net', '(4)'], {'stride': '(4)'}), '(net, 4, stride=4)\n', (4542, 4560), True, 'import megengine.functional as F\n'), ((4584, 4614), 'megengine.functional.avg_pool2d', 'F.avg_pool2d', (['inp', '(4)'], {'stride': '(4)'}), '(inp, 4, stride=4)\n', (4596, 4614), True, 'import megengine.functional as F\n'), ((3059, 3085), 'megengine.functional.concat', 'F.concat', (['[_x, _y]'], {'axis': '(1)'}), '([_x, _y], axis=1)\n', (3067, 3085), True, 'import megengine.functional as F\n'), ((4970, 5002), 'megengine.functional.transpose', 'F.transpose', (['x_tmp', '(0, 2, 3, 1)'], {}), '(x_tmp, (0, 2, 3, 1))\n', (4981, 5002), True, 'import megengine.functional as F\n'), ((5244, 5276), 'megengine.functional.transpose', 'F.transpose', (['x_tmp', '(0, 2, 3, 1)'], {}), '(x_tmp, (0, 2, 3, 1))\n', (5255, 5276), True, 'import megengine.functional as F\n'), ((6127, 6235), 'megengine.functional.nn.interpolate', 'F.nn.interpolate', (['flow_init'], {'size': '(fmap1.shape[2], fmap1.shape[3])', 'mode': '"""bilinear"""', 'align_corners': '(True)'}), "(flow_init, size=(fmap1.shape[2], fmap1.shape[3]), mode=\n 'bilinear', align_corners=True)\n", (6143, 6235), True, 'import megengine.functional as F\n'), ((7543, 7654), 'megengine.functional.nn.interpolate', 'F.nn.interpolate', (['flow'], {'size': '(fmap1_dw8.shape[2], fmap1_dw8.shape[3])', 'mode': '"""bilinear"""', 'align_corners': '(True)'}), "(flow, size=(fmap1_dw8.shape[2], fmap1_dw8.shape[3]), mode=\n 'bilinear', align_corners=True)\n", (7559, 7654), True, 'import megengine.functional as F\n'), ((8762, 8865), 'megengine.functional.nn.interpolate', 'F.nn.interpolate', (['flow'], {'size': '(fmap1.shape[2], fmap1.shape[3])', 'mode': '"""bilinear"""', 'align_corners': '(True)'}), "(flow, size=(fmap1.shape[2], fmap1.shape[3]), mode=\n 'bilinear', align_corners=True)\n", (8778, 8865), True, 'import megengine.functional as F\n'), ((9248, 9290), 'megengine.amp.autocast', 'amp.autocast', ([], {'enabled': 'self.mixed_precision'}), '(enabled=self.mixed_precision)\n', (9260, 9290), False, 'from megengine import amp\n'), ((5542, 
5607), 'megengine.functional.reshape', 'F.reshape', (['x', '(x.shape[0], image1.shape[2] // 16, -1, x.shape[2])'], {}), '(x, (x.shape[0], image1.shape[2] // 16, -1, x.shape[2]))\n', (5551, 5607), True, 'import megengine.functional as F\n'), ((6850, 6892), 'megengine.amp.autocast', 'amp.autocast', ([], {'enabled': 'self.mixed_precision'}), '(enabled=self.mixed_precision)\n', (6862, 6892), False, 'from megengine import amp\n'), ((7207, 7316), 'megengine.functional.nn.interpolate', 'F.nn.interpolate', (['flow'], {'size': '(4 * flow.shape[2], 4 * flow.shape[3])', 'mode': '"""bilinear"""', 'align_corners': '(True)'}), "(flow, size=(4 * flow.shape[2], 4 * flow.shape[3]), mode=\n 'bilinear', align_corners=True)\n", (7223, 7316), True, 'import megengine.functional as F\n'), ((8084, 8126), 'megengine.amp.autocast', 'amp.autocast', ([], {'enabled': 'self.mixed_precision'}), '(enabled=self.mixed_precision)\n', (8096, 8126), False, 'from megengine import amp\n'), ((8434, 8543), 'megengine.functional.nn.interpolate', 'F.nn.interpolate', (['flow'], {'size': '(2 * flow.shape[2], 2 * flow.shape[3])', 'mode': '"""bilinear"""', 'align_corners': '(True)'}), "(flow, size=(2 * flow.shape[2], 2 * flow.shape[3]), mode=\n 'bilinear', align_corners=True)\n", (8450, 8543), True, 'import megengine.functional as F\n'), ((3904, 3925), 'megengine.functional.sigmoid', 'F.sigmoid', (['offset_dw8'], {}), '(offset_dw8)\n', (3913, 3925), True, 'import megengine.functional as F\n'), ((4448, 4470), 'megengine.functional.sigmoid', 'F.sigmoid', (['offset_dw16'], {}), '(offset_dw16)\n', (4457, 4470), True, 'import megengine.functional as F\n')]
|
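The CREStereo module above consumes a stereo pair as NCHW tensors whose height and width are divisible by 16 (the coarsest 1/16 branch). A minimal forward-pass sketch, assuming the package's relative imports (.update, .extractor, .corr, .attention) resolve and using made-up shapes and random data:

import numpy as np
import megengine as mge

model = CREStereo(max_disp=192, mixed_precision=False, test_mode=True)
# Dummy left/right images: (N, C, H, W) with H and W divisible by 16.
left = mge.tensor(np.random.uniform(0, 255, (1, 3, 256, 512)).astype("float32"))
right = mge.tensor(np.random.uniform(0, 255, (1, 3, 256, 512)).astype("float32"))
flow = model(left, right, iters=10)  # with test_mode=True only the final flow is returned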
from sfepy.base.testing import TestCommon, assert_
##
# 28.08.2007, c
class Test( TestCommon ):
##
# 28.08.2007, c
def from_conf( conf, options ):
return Test( conf = conf, options = options )
from_conf = staticmethod( from_conf )
##
# 28.08.2007, c
def test_struct_add( self ):
from sfepy.base.base import Struct
from copy import deepcopy
a = Struct( f1 = 0,
f2 = [1, 2, 3],
f3 = Struct( ff = 'abc' ),
f4 = 3.14 )
a0 = deepcopy( a )
b = Struct( f1 = 5,
f2 = [1],
f3 = Struct( ff = '', gg = 123 ),
f5 = 'new one' )
c = a + b
assert_( c.f1 == 0 )
assert_( c.f2 == [1, 2, 3] )
assert_( c.f3.ff == 'abc' )
assert_( c.f3.gg == 123 )
assert_( c.f4 == 3.14 )
assert_( c.f5 == 'new one' )
assert_( a.f1 == a0.f1 )
assert_( a.f2 == a0.f2 )
assert_( a.f3.ff == a0.f3.ff )
assert_( a.f4 == a0.f4 )
return True
##
# 28.08.2007, c
def test_struct_i_add( self ):
from sfepy.base.base import Struct
a = Struct( f1 = 0,
f2 = [1, 2, 3],
f3 = Struct( ff = 'abc' ) )
b = Struct( f1 = 5,
f2 = [1],
f3 = Struct( ff = '', gg = 123 ),
f4 = 'new one' )
a += b
assert_( a.f1 == 0 )
assert_( a.f2 == [1, 2, 3] )
assert_( a.f3.ff == 'abc' )
assert_( a.f3.gg == 123 )
assert_( a.f4 == 'new one' )
return True
|
[
"sfepy.base.testing.assert_",
"sfepy.base.base.Struct"
] |
[((552, 563), 'copy.deepcopy', 'deepcopy', (['a'], {}), '(a)\n', (560, 563), False, 'from copy import deepcopy\n'), ((742, 760), 'sfepy.base.testing.assert_', 'assert_', (['(c.f1 == 0)'], {}), '(c.f1 == 0)\n', (749, 760), False, 'from sfepy.base.testing import TestCommon, assert_\n'), ((771, 797), 'sfepy.base.testing.assert_', 'assert_', (['(c.f2 == [1, 2, 3])'], {}), '(c.f2 == [1, 2, 3])\n', (778, 797), False, 'from sfepy.base.testing import TestCommon, assert_\n'), ((808, 833), 'sfepy.base.testing.assert_', 'assert_', (["(c.f3.ff == 'abc')"], {}), "(c.f3.ff == 'abc')\n", (815, 833), False, 'from sfepy.base.testing import TestCommon, assert_\n'), ((844, 867), 'sfepy.base.testing.assert_', 'assert_', (['(c.f3.gg == 123)'], {}), '(c.f3.gg == 123)\n', (851, 867), False, 'from sfepy.base.testing import TestCommon, assert_\n'), ((878, 899), 'sfepy.base.testing.assert_', 'assert_', (['(c.f4 == 3.14)'], {}), '(c.f4 == 3.14)\n', (885, 899), False, 'from sfepy.base.testing import TestCommon, assert_\n'), ((910, 936), 'sfepy.base.testing.assert_', 'assert_', (["(c.f5 == 'new one')"], {}), "(c.f5 == 'new one')\n", (917, 936), False, 'from sfepy.base.testing import TestCommon, assert_\n'), ((948, 970), 'sfepy.base.testing.assert_', 'assert_', (['(a.f1 == a0.f1)'], {}), '(a.f1 == a0.f1)\n', (955, 970), False, 'from sfepy.base.testing import TestCommon, assert_\n'), ((981, 1003), 'sfepy.base.testing.assert_', 'assert_', (['(a.f2 == a0.f2)'], {}), '(a.f2 == a0.f2)\n', (988, 1003), False, 'from sfepy.base.testing import TestCommon, assert_\n'), ((1014, 1042), 'sfepy.base.testing.assert_', 'assert_', (['(a.f3.ff == a0.f3.ff)'], {}), '(a.f3.ff == a0.f3.ff)\n', (1021, 1042), False, 'from sfepy.base.testing import TestCommon, assert_\n'), ((1053, 1075), 'sfepy.base.testing.assert_', 'assert_', (['(a.f4 == a0.f4)'], {}), '(a.f4 == a0.f4)\n', (1060, 1075), False, 'from sfepy.base.testing import TestCommon, assert_\n'), ((1491, 1509), 'sfepy.base.testing.assert_', 'assert_', (['(a.f1 == 0)'], {}), '(a.f1 == 0)\n', (1498, 1509), False, 'from sfepy.base.testing import TestCommon, assert_\n'), ((1520, 1546), 'sfepy.base.testing.assert_', 'assert_', (['(a.f2 == [1, 2, 3])'], {}), '(a.f2 == [1, 2, 3])\n', (1527, 1546), False, 'from sfepy.base.testing import TestCommon, assert_\n'), ((1557, 1582), 'sfepy.base.testing.assert_', 'assert_', (["(a.f3.ff == 'abc')"], {}), "(a.f3.ff == 'abc')\n", (1564, 1582), False, 'from sfepy.base.testing import TestCommon, assert_\n'), ((1593, 1616), 'sfepy.base.testing.assert_', 'assert_', (['(a.f3.gg == 123)'], {}), '(a.f3.gg == 123)\n', (1600, 1616), False, 'from sfepy.base.testing import TestCommon, assert_\n'), ((1627, 1653), 'sfepy.base.testing.assert_', 'assert_', (["(a.f4 == 'new one')"], {}), "(a.f4 == 'new one')\n", (1634, 1653), False, 'from sfepy.base.testing import TestCommon, assert_\n'), ((485, 501), 'sfepy.base.base.Struct', 'Struct', ([], {'ff': '"""abc"""'}), "(ff='abc')\n", (491, 501), False, 'from sfepy.base.base import Struct\n'), ((649, 670), 'sfepy.base.base.Struct', 'Struct', ([], {'ff': '""""""', 'gg': '(123)'}), "(ff='', gg=123)\n", (655, 670), False, 'from sfepy.base.base import Struct\n'), ((1295, 1311), 'sfepy.base.base.Struct', 'Struct', ([], {'ff': '"""abc"""'}), "(ff='abc')\n", (1301, 1311), False, 'from sfepy.base.base import Struct\n'), ((1401, 1422), 'sfepy.base.base.Struct', 'Struct', ([], {'ff': '""""""', 'gg': '(123)'}), "(ff='', gg=123)\n", (1407, 1422), False, 'from sfepy.base.base import Struct\n')]
|
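The assertions above pin down the Struct addition semantics: "+" keeps the left operand's value for attributes present on both sides and merges in attributes only the right operand has, recursing into nested Structs. A small sketch restating that behaviour with the same field names; the print line is illustrative only.

from sfepy.base.base import Struct

a = Struct(f1=0, f2=[1, 2, 3], f3=Struct(ff="abc"), f4=3.14)
b = Struct(f1=5, f2=[1], f3=Struct(ff="", gg=123), f5="new one")
c = a + b
# Shared fields keep a's values; b only contributes the fields a lacks.
print(c.f1, c.f2, c.f3.ff, c.f3.gg, c.f4, c.f5)  # 0 [1, 2, 3] abc 123 3.14 new one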
import logging
from sqlmodel import SQLModel, create_engine
import json
from dataclasses import dataclass
from etl.stores import run as etl_stores
from etl.sales import run as etl_sales
from etl.products import run as etl_products
# config to define variables
@dataclass
class Config:
stores_url: str
stores_filepath: str
db_conn_uri: str
log_level: str
# TODO convert this to a DAG or a pipeline
if __name__ == '__main__':
# load the config
config_dict = {}
with open('config.json', 'r') as f:
config_dict = json.load(f)
if config_dict['log_level'] == 'DEBUG':
config_dict['log_level'] = logging.DEBUG
else:
config_dict['log_level'] = logging.INFO
# create the config object
config = Config(**config_dict)
# setup logging
logging.basicConfig()
logging.getLogger().setLevel(config.log_level)
# create the database engine
engine = create_engine(config.db_conn_uri)
# create the sqlmodel metadata for the engine
SQLModel.metadata.create_all(engine)
# handle stores
etl_stores(config.stores_filepath, config.stores_url, engine)
# handle products
etl_products(engine)
# handle sales data
etl_sales(engine)
|
[
"sqlmodel.create_engine",
"sqlmodel.SQLModel.metadata.create_all"
] |
[((807, 828), 'logging.basicConfig', 'logging.basicConfig', ([], {}), '()\n', (826, 828), False, 'import logging\n'), ((927, 960), 'sqlmodel.create_engine', 'create_engine', (['config.db_conn_uri'], {}), '(config.db_conn_uri)\n', (940, 960), False, 'from sqlmodel import SQLModel, create_engine\n'), ((1016, 1052), 'sqlmodel.SQLModel.metadata.create_all', 'SQLModel.metadata.create_all', (['engine'], {}), '(engine)\n', (1044, 1052), False, 'from sqlmodel import SQLModel, create_engine\n'), ((1078, 1139), 'etl.stores.run', 'etl_stores', (['config.stores_filepath', 'config.stores_url', 'engine'], {}), '(config.stores_filepath, config.stores_url, engine)\n', (1088, 1139), True, 'from etl.stores import run as etl_stores\n'), ((1167, 1187), 'etl.products.run', 'etl_products', (['engine'], {}), '(engine)\n', (1179, 1187), True, 'from etl.products import run as etl_products\n'), ((1220, 1237), 'etl.sales.run', 'etl_sales', (['engine'], {}), '(engine)\n', (1229, 1237), True, 'from etl.sales import run as etl_sales\n'), ((547, 559), 'json.load', 'json.load', (['f'], {}), '(f)\n', (556, 559), False, 'import json\n'), ((833, 852), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (850, 852), False, 'import logging\n')]
|
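The ETL entry point above loads a config.json whose keys must match the Config dataclass (stores_url, stores_filepath, db_conn_uri, log_level). A minimal sketch of producing such a file; every value is a placeholder, not taken from the original project.

import json

config = {
    "stores_url": "https://example.com/stores.json",   # remote stores source (placeholder)
    "stores_filepath": "data/stores.json",             # local staging path (placeholder)
    "db_conn_uri": "sqlite:///etl.db",                  # any SQLAlchemy-style connection URI
    "log_level": "DEBUG",                               # anything other than "DEBUG" is treated as INFO
}
with open("config.json", "w") as f:
    json.dump(config, f, indent=2)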
# Some code is modified from pytorch
# pytorch is licensed under BSD
# From PyTorch:
# Copyright (c) 2016- Facebook, Inc (<NAME>)
# Copyright (c) 2014- Facebook, Inc (<NAME>)
# Copyright (c) 2011-2014 Idiap Research Institute (<NAME>)
# Copyright (c) 2012-2014 Deepmind Technologies (Koray Kavukcuoglu)
# Copyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu)
# Copyright (c) 2011-2013 NYU (<NAME>)
# Copyright (c) 2006-2010 NEC Laboratories America (<NAME>, <NAME>, <NAME>, <NAME>)
# Copyright (c) 2006 Idiap Research Institute (<NAME>)
# Copyright (c) 2001-2004 Idiap Research Institute (<NAME>, <NAME>, <NAME>)
# From Caffe2:
# Copyright (c) 2016-present, Facebook Inc. All rights reserved.
# All contributions by Facebook:
# Copyright (c) 2016 Facebook Inc.
# All contributions by Google:
# Copyright (c) 2015 Google Inc.
# All rights reserved.
# All contributions by Yang<NAME>:
# Copyright (c) 2015 Yang<NAME>
# All rights reserved.
# All contributions by Kakao Brain:
# Copyright 2019-2020 Kakao Brain
# All contributions from Caffe:
# Copyright(c) 2013, 2014, 2015, the respective contributors
# All rights reserved.
# All other contributions:
# Copyright(c) 2015, 2016 the respective contributors
# All rights reserved.
# Caffe2 uses a copyright model similar to Caffe: each contributor holds
# copyright over their contributions to Caffe2. The project versioning records
# all such contribution and copyright details. If a contributor wants to further
# mark their specific copyright on a particular contribution, they should
# indicate their copyright solely in the commit message of the change when it is
# committed.
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the names of Facebook, Deepmind Technologies, NYU, NEC Laboratories America
# and IDIAP Research Institute nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import megengine as mge
import megengine.module as M
from megengine import functional as F
from megengine import Parameter
from megengine.module.init import xavier_normal_, xavier_uniform_, zeros_
from typing import List, Tuple, Dict, Optional
import warnings
import numpy as np
from .utility import safe_masked_fill, has_nan_or_inf
def multi_head_attention_forward(
query: mge.Tensor,
key: mge.Tensor,
value: mge.Tensor,
embed_dim_to_check: int,
num_heads: int,
in_proj_weight: mge.Tensor,
in_proj_bias: Optional[mge.Tensor],
bias_k: Optional[mge.Tensor],
bias_v: Optional[mge.Tensor],
add_zero_attn: bool,
dropout_p: float,
out_proj_weight: mge.Tensor,
out_proj_bias: Optional[mge.Tensor],
training: bool = True,
key_padding_mask: Optional[mge.Tensor] = None,
need_weights: bool = True,
attn_mask: Optional[mge.Tensor] = None,
use_separate_proj_weight: bool = False,
q_proj_weight: Optional[mge.Tensor] = None,
k_proj_weight: Optional[mge.Tensor] = None,
v_proj_weight: Optional[mge.Tensor] = None,
static_k: Optional[mge.Tensor] = None,
static_v: Optional[mge.Tensor] = None,
proj_only: bool = False
) -> Tuple[mge.Tensor, Optional[mge.Tensor]]:
r"""
Args:
query, key, value: map a query and a set of key-value pairs to an output.
See "Attention Is All You Need" for more details.
embed_dim_to_check: total dimension of the model.
num_heads: parallel attention heads.
in_proj_weight, in_proj_bias: input projection weight and bias.
bias_k, bias_v: bias of the key and value sequences to be added at dim=0.
add_zero_attn: add a new batch of zeros to the key and
value sequences at dim=1.
dropout_p: probability of an element to be zeroed.
out_proj_weight, out_proj_bias: the output projection weight and bias.
training: apply dropout if is ``True``.
key_padding_mask: if provided, specified padding elements in the key will
be ignored by the attention. This is a binary mask. When the value is True,
the corresponding value on the attention layer will be filled with -inf.
need_weights: output attn_output_weights.
attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all
the batches while a 3D mask allows to specify a different mask for the entries of each batch.
use_separate_proj_weight: the function accept the proj. weights for query, key,
and value in different forms. If false, in_proj_weight will be used, which is
a combination of q_proj_weight, k_proj_weight, v_proj_weight.
q_proj_weight, k_proj_weight, v_proj_weight, in_proj_bias: input projection weight and bias.
static_k, static_v: static key and value used for attention operators.
Shape:
Inputs:
- query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
the embedding dimension.
- key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length.
If a ByteTensor is provided, the non-zero positions will be ignored while the zero positions
will be unchanged. If a BoolTensor is provided, the positions with the
value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged.
- attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length.
3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length,
S is the source sequence length. attn_mask ensures that position i is allowed to attend the unmasked
positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend
while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True``
are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor
is provided, it will be added to the attention weight.
- static_k: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
- static_v: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
Outputs:
- attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
E is the embedding dimension.
- attn_output_weights: :math:`(N, L, S)` where N is the batch size,
L is the target sequence length, S is the source sequence length.
"""
tgt_len, bsz, embed_dim = query.shape
assert embed_dim == embed_dim_to_check
# allow MHA to have different sizes for the feature dimension
assert key.shape[0] == value.shape[0] and key.shape[1] == value.shape[1]
if isinstance(embed_dim, mge.Tensor):
# embed_dim can be a tensor when JIT tracing
#head_dim = embed_dim.div(num_heads, rounding_mode='trunc')
#NOTE: when positive number, floor_div is equivalent to trunc_div (in megengine only floor_div is available)
head_dim = F.floor_div(embed_dim, num_heads)
else:
head_dim = embed_dim // num_heads
assert head_dim * num_heads == embed_dim, "embed_dim must be divisible by num_heads"
scaling = float(head_dim) ** -0.5
assert not use_separate_proj_weight
assert need_weights
assert attn_mask is None
assert bias_k is None and bias_v is None
assert not add_zero_attn
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = 0
_end = embed_dim
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
q = F.linear(query, _w, _b)
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim
_end = embed_dim * 2
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
k = F.linear(key, _w, _b)
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim * 2
_end = None
_w = in_proj_weight[_start:, :]
if _b is not None:
_b = _b[_start:]
v = F.linear(value, _w, _b)
q = q * scaling
raw_v = v
raw_v = raw_v.reshape(-1, bsz, num_heads, head_dim)
if proj_only:
return query, None, raw_v
# convert ByteTensor key_padding_mask to bool
if key_padding_mask is not None and key_padding_mask.dtype == np.uint8:
warnings.warn(
"Byte tensor for key_padding_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead."
)
key_padding_mask = key_padding_mask.astype(bool)
#def _pad_last_dim_right_only(tensor):
# '''
# To replace with torch.nn.functional.pad(tensor, (0, 1))
# '''
# return F.concat([tensor, F.expand_dims(F.zeros(tensor.shape[:-1]), axis=-1)], axis=-1)
#q = q.contiguous().view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1)
q = q.reshape(tgt_len, bsz * num_heads, head_dim).transpose(1, 0, 2)
if k is not None:
#k = k.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
k = k.reshape(-1, bsz * num_heads, head_dim).transpose(1, 0, 2)
if v is not None:
#v = v.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
v = v.reshape(-1, bsz * num_heads, head_dim).transpose(1, 0, 2)
if static_k is not None:
assert static_k.shape[0] == bsz * num_heads
assert static_k.shape[2] == head_dim
k = static_k
if static_v is not None:
assert static_v.shape[0] == bsz * num_heads
assert static_v.shape[2] == head_dim
v = static_v
src_len = k.shape[1]
if key_padding_mask is not None:
assert key_padding_mask.shape[1] == src_len
#attn_output_weights = torch.bmm(q, k.transpose(1, 2))
attn_output_weights = F.matmul(q, k.transpose(0, 2, 1))
assert list(attn_output_weights.shape) == [bsz * num_heads, tgt_len, src_len]
if key_padding_mask is not None:
attn_output_weights = attn_output_weights.reshape(bsz, num_heads, tgt_len, src_len)
key_padding_mask = F.expand_dims(F.expand_dims(key_padding_mask, axis=1), axis=2)
attn_output_weights = safe_masked_fill(attn_output_weights, key_padding_mask, float("-inf"))
attn_output_weights = attn_output_weights.reshape(bsz * num_heads, tgt_len, src_len)
attn_output_weights_no_softmax = attn_output_weights
attn_output_weights = F.softmax(attn_output_weights, axis=-1)
attn_output_weights = F.dropout(attn_output_weights, dropout_p, training=training)
attn_output = F.matmul(attn_output_weights, v)
assert attn_output.shape == (bsz * num_heads, tgt_len, head_dim)
attn_output = F.transpose(attn_output, (1, 0, 2)).reshape(tgt_len, bsz, embed_dim)
attn_output = F.nn.linear(attn_output, out_proj_weight, out_proj_bias)
# average attention weights over heads
attn_output_weights = attn_output_weights_no_softmax.reshape(bsz, num_heads, tgt_len, src_len)
return attn_output, attn_output_weights, raw_v
class MultiheadAttention(M.Module):
r"""Allows the model to jointly attend to information
from different representation subspaces.
See `Attention Is All You Need <https://arxiv.org/abs/1706.03762>`_
.. math::
\text{MultiHead}(Q, K, V) = \text{Concat}(head_1,\dots,head_h)W^O
where :math:`head_i = \text{Attention}(QW_i^Q, KW_i^K, VW_i^V)`.
Args:
embed_dim: total dimension of the model.
num_heads: parallel attention heads.
dropout: a Dropout layer on attn_output_weights. Default: 0.0.
bias: add bias as module parameter. Default: True.
add_bias_kv: add bias to the key and value sequences at dim=0.
add_zero_attn: add a new batch of zeros to the key and
value sequences at dim=1.
kdim: total number of features in key. Default: None.
vdim: total number of features in value. Default: None.
batch_first: If ``True``, then the input and output tensors are provided
as (batch, seq, feature). Default: ``False`` (seq, batch, feature).
Note that if :attr:`kdim` and :attr:`vdim` are None, they will be set
to :attr:`embed_dim` such that query, key, and value have the same
number of features.
Examples::
>>> multihead_attn = nn.MultiheadAttention(embed_dim, num_heads)
>>> attn_output, attn_output_weights = multihead_attn(query, key, value)
"""
__constants__ = ['batch_first']
bias_k: Optional[mge.Tensor]
bias_v: Optional[mge.Tensor]
def __init__(self, embed_dim, num_heads, dropout=0., bias=True, add_bias_kv=False, add_zero_attn=False,
kdim=None, vdim=None, batch_first=False):
super(MultiheadAttention, self).__init__()
self.embed_dim = embed_dim
self.kdim = kdim if kdim is not None else embed_dim
self.vdim = vdim if vdim is not None else embed_dim
# True By default
self._qkv_same_embed_dim = self.kdim == embed_dim and self.vdim == embed_dim
self.num_heads = num_heads
self.dropout = dropout
# False By default
self.batch_first = batch_first
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
self.in_proj_weight = Parameter(F.zeros((3 * embed_dim, embed_dim)))
if bias:
self.in_proj_bias = Parameter(F.zeros((3 * embed_dim,)))
else:
self.in_proj_bias = None
self.out_proj = M.Linear(embed_dim, embed_dim, bias=bias)
if add_bias_kv:
self.bias_k = Parameter(F.zeros((1, 1, embed_dim)))
self.bias_v = Parameter(F.zeros((1, 1, embed_dim)))
else:
self.bias_k = self.bias_v = None
self.add_zero_attn = add_zero_attn
self._reset_parameters()
def _reset_parameters(self):
xavier_uniform_(self.in_proj_weight)
if self.in_proj_bias is not None:
zeros_(self.in_proj_bias)
zeros_(self.out_proj.bias)
if self.bias_k is not None:
xavier_normal_(self.bias_k)
if self.bias_v is not None:
xavier_normal_(self.bias_v)
def __setstate__(self, state):
# Support loading old MultiheadAttention checkpoints generated by v1.1.0
if '_qkv_same_embed_dim' not in state:
state['_qkv_same_embed_dim'] = True
super(MultiheadAttention, self).__setstate__(state)
def forward(self, query: mge.Tensor, key: mge.Tensor, value: mge.Tensor, key_padding_mask: Optional[mge.Tensor] = None,
need_weights: bool = True, attn_mask: Optional[mge.Tensor] = None, proj_only=False) -> Tuple[mge.Tensor, Optional[mge.Tensor]]:
r"""
Args:
query, key, value: map a query and a set of key-value pairs to an output.
See "Attention Is All You Need" for more details.
key_padding_mask: if provided, specified padding elements in the key will
be ignored by the attention. When given a binary mask and a value is True,
the corresponding value on the attention layer will be ignored. When given
a byte mask and a value is non-zero, the corresponding value on the attention
layer will be ignored
need_weights: output attn_output_weights.
attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all
the batches while a 3D mask allows to specify a different mask for the entries of each batch.
Shapes for inputs:
- query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
the embedding dimension. :math:`(N, L, E)` if ``batch_first`` is ``True``.
- key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
the embedding dimension. :math:`(N, S, E)` if ``batch_first`` is ``True``.
- value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
the embedding dimension. :math:`(N, S, E)` if ``batch_first`` is ``True``.
- key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length.
If a ByteTensor is provided, the non-zero positions will be ignored while the position
with the zero positions will be unchanged. If a BoolTensor is provided, the positions with the
value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged.
- attn_mask: if a 2D mask: :math:`(L, S)` where L is the target sequence length, S is the
source sequence length.
If a 3D mask: :math:`(N\cdot\text{num\_heads}, L, S)` where N is the batch size, L is the target sequence
length, S is the source sequence length. ``attn_mask`` ensure that position i is allowed to attend
the unmasked positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend
while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True``
is not allowed to attend while ``False`` values will be unchanged. If a FloatTensor
is provided, it will be added to the attention weight.
Shapes for outputs:
- attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
E is the embedding dimension. :math:`(N, L, E)` if ``batch_first`` is ``True``.
- attn_output_weights: :math:`(N, L, S)` where N is the batch size,
L is the target sequence length, S is the source sequence length.
"""
attn_output, attn_output_weights, values = multi_head_attention_forward(
query, key, value, self.embed_dim, self.num_heads,
self.in_proj_weight, self.in_proj_bias,
self.bias_k, self.bias_v, self.add_zero_attn,
self.dropout, self.out_proj.weight, self.out_proj.bias,
training=self.training,
key_padding_mask=key_padding_mask, need_weights=need_weights, #need_weights by default is True
attn_mask=attn_mask, proj_only=proj_only)
return attn_output, attn_output_weights, values
|
[
"megengine.functional.nn.linear",
"megengine.functional.floor_div",
"megengine.functional.zeros",
"megengine.module.init.zeros_",
"megengine.functional.matmul",
"megengine.functional.softmax",
"megengine.functional.expand_dims",
"megengine.functional.transpose",
"megengine.functional.dropout",
"megengine.module.Linear",
"megengine.functional.linear",
"megengine.module.init.xavier_uniform_"
] |
[((9573, 9596), 'megengine.functional.linear', 'F.linear', (['query', '_w', '_b'], {}), '(query, _w, _b)\n', (9581, 9596), True, 'from megengine import functional as F\n'), ((9843, 9864), 'megengine.functional.linear', 'F.linear', (['key', '_w', '_b'], {}), '(key, _w, _b)\n', (9851, 9864), True, 'from megengine import functional as F\n'), ((10098, 10121), 'megengine.functional.linear', 'F.linear', (['value', '_w', '_b'], {}), '(value, _w, _b)\n', (10106, 10121), True, 'from megengine import functional as F\n'), ((12452, 12491), 'megengine.functional.softmax', 'F.softmax', (['attn_output_weights'], {'axis': '(-1)'}), '(attn_output_weights, axis=-1)\n', (12461, 12491), True, 'from megengine import functional as F\n'), ((12518, 12578), 'megengine.functional.dropout', 'F.dropout', (['attn_output_weights', 'dropout_p'], {'training': 'training'}), '(attn_output_weights, dropout_p, training=training)\n', (12527, 12578), True, 'from megengine import functional as F\n'), ((12598, 12630), 'megengine.functional.matmul', 'F.matmul', (['attn_output_weights', 'v'], {}), '(attn_output_weights, v)\n', (12606, 12630), True, 'from megengine import functional as F\n'), ((12805, 12861), 'megengine.functional.nn.linear', 'F.nn.linear', (['attn_output', 'out_proj_weight', 'out_proj_bias'], {}), '(attn_output, out_proj_weight, out_proj_bias)\n', (12816, 12861), True, 'from megengine import functional as F\n'), ((8958, 8991), 'megengine.functional.floor_div', 'F.floor_div', (['embed_dim', 'num_heads'], {}), '(embed_dim, num_heads)\n', (8969, 8991), True, 'from megengine import functional as F\n'), ((15589, 15630), 'megengine.module.Linear', 'M.Linear', (['embed_dim', 'embed_dim'], {'bias': 'bias'}), '(embed_dim, embed_dim, bias=bias)\n', (15597, 15630), True, 'import megengine.module as M\n'), ((15963, 15999), 'megengine.module.init.xavier_uniform_', 'xavier_uniform_', (['self.in_proj_weight'], {}), '(self.in_proj_weight)\n', (15978, 15999), False, 'from megengine.module.init import xavier_uniform_, zeros_\n'), ((12125, 12164), 'megengine.functional.expand_dims', 'F.expand_dims', (['key_padding_mask'], {'axis': '(1)'}), '(key_padding_mask, axis=1)\n', (12138, 12164), True, 'from megengine import functional as F\n'), ((12718, 12753), 'megengine.functional.transpose', 'F.transpose', (['attn_output', '(1, 0, 2)'], {}), '(attn_output, (1, 0, 2))\n', (12729, 12753), True, 'from megengine import functional as F\n'), ((15390, 15425), 'megengine.functional.zeros', 'F.zeros', (['(3 * embed_dim, embed_dim)'], {}), '((3 * embed_dim, embed_dim))\n', (15397, 15425), True, 'from megengine import functional as F\n'), ((16055, 16080), 'megengine.module.init.zeros_', 'zeros_', (['self.in_proj_bias'], {}), '(self.in_proj_bias)\n', (16061, 16080), False, 'from megengine.module.init import xavier_uniform_, zeros_\n'), ((16093, 16119), 'megengine.module.init.zeros_', 'zeros_', (['self.out_proj.bias'], {}), '(self.out_proj.bias)\n', (16099, 16119), False, 'from megengine.module.init import xavier_uniform_, zeros_\n'), ((15487, 15512), 'megengine.functional.zeros', 'F.zeros', (['(3 * embed_dim,)'], {}), '((3 * embed_dim,))\n', (15494, 15512), True, 'from megengine import functional as F\n'), ((15692, 15718), 'megengine.functional.zeros', 'F.zeros', (['(1, 1, embed_dim)'], {}), '((1, 1, embed_dim))\n', (15699, 15718), True, 'from megengine import functional as F\n'), ((15756, 15782), 'megengine.functional.zeros', 'F.zeros', (['(1, 1, embed_dim)'], {}), '((1, 1, embed_dim))\n', (15763, 15782), True, 'from megengine import functional as 
F\n')]
|
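A minimal usage sketch for the MultiheadAttention module above, keeping the docstring's (L, N, E) layout since batch_first defaults to False; the shapes and random inputs are assumptions, and note that this variant's forward returns three values (output, attention weights, projected values).

import numpy as np
import megengine as mge

embed_dim, num_heads = 256, 8
attn = MultiheadAttention(embed_dim, num_heads)
# Queries are (L, N, E); keys/values are (S, N, E): target length 10, batch 2, source length 12.
q = mge.tensor(np.random.rand(10, 2, embed_dim).astype("float32"))
k = mge.tensor(np.random.rand(12, 2, embed_dim).astype("float32"))
v = mge.tensor(np.random.rand(12, 2, embed_dim).astype("float32"))
attn_output, attn_weights, raw_v = attn(q, k, v)
print(attn_output.shape)  # (10, 2, 256)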
import os
import jwt
import time
import requests
from flask import abort
from flask import Blueprint
from flask import request
from flask import jsonify
from flask import redirect
from flask import current_app
from flask import session
from urllib.parse import unquote
from base64 import b64encode
from datetime import datetime
from sqlmodel import select
from sqlmodel import Session as SQLSession
from app.models.user import User
bp = Blueprint("auth", __name__)
@bp.route("/login")
def login():
CLIENT_ID = current_app.config["CLIENT_ID"]
REDIRECT_URI = current_app.config["REDIRECT_URI"]
USER_STATE = b64encode(os.urandom(64)).decode("utf-8")
UAA_AUTHORIZE_URI = current_app.config["UAA_AUTHORIZE_URI"]
session["USER_STATE"] = USER_STATE
UAA_LOGIN = f"{UAA_AUTHORIZE_URI}?client_id={CLIENT_ID}&response_type=code&redirect_uri={REDIRECT_URI}&state={USER_STATE}"
return redirect(UAA_LOGIN)
@bp.route("/logout")
def logout():
CLIENT_ID = current_app.config["CLIENT_ID"]
REDIRECT_URI = current_app.config["REDIRECT_URI"]
UAA_LOGOUT_URI = current_app.config["UAA_LOGOUT_URI"]
UAA_LGOUT = f"{UAA_LOGOUT_URI}?client_id={CLIENT_ID}&redirect={REDIRECT_URI}"
session.clear()
requests.post(UAA_LGOUT)
return redirect("/")
@bp.route("/callback")
def callback():
# @url_param {string} code
# @url_param {string} status
code = request.args.get("code")
state = request.args.get("state")
if not code or not state:
abort(400)
UAA_TOKEN_URI = current_app.config["UAA_TOKEN_URI"]
data = {
"code": code,
"grant_type": "authorization_code",
"response_type": "token",
"client_id": current_app.config["CLIENT_ID"],
"client_secret": current_app.config["CLIENT_SECRET"],
"redirect_uri": current_app.config["REDIRECT_URI"],
}
response = requests.post(UAA_TOKEN_URI, data=data)
if response.status_code != 200:
abort(response.status_code)
response = response.json()
token = response["access_token"]
header = jwt.get_unverified_header(token)
session["claims"] = jwt.decode(
token, header["alg"], options={"verify_signature": False}
)
session["expiry"] = time.time() + (response["expires_in"] * 1000)
session["refresh_token"] = response["refresh_token"]
session["authenticated"] = True
with SQLSession(current_app.engine) as s:
query = select(User).where(User.email == session["claims"]["email"])
user = s.exec(query).first()
if user:
# Account exists
user.last_logon = datetime.now()
s.add(user)
s.commit()
else:
# Account does not exist
new_user = User(
user_name=session["claims"]["user_name"],
email=session["claims"]["email"],
last_logon=datetime.now(),
)
s.add(new_user)
s.commit()
user = s.exec(query).first()
session["user"] = user.dict()
return redirect("/")
|
[
"sqlmodel.select",
"sqlmodel.Session"
] |
[((438, 465), 'flask.Blueprint', 'Blueprint', (['"""auth"""', '__name__'], {}), "('auth', __name__)\n", (447, 465), False, 'from flask import Blueprint\n'), ((906, 925), 'flask.redirect', 'redirect', (['UAA_LOGIN'], {}), '(UAA_LOGIN)\n', (914, 925), False, 'from flask import redirect\n'), ((1210, 1225), 'flask.session.clear', 'session.clear', ([], {}), '()\n', (1223, 1225), False, 'from flask import session\n'), ((1230, 1254), 'requests.post', 'requests.post', (['UAA_LGOUT'], {}), '(UAA_LGOUT)\n', (1243, 1254), False, 'import requests\n'), ((1267, 1280), 'flask.redirect', 'redirect', (['"""/"""'], {}), "('/')\n", (1275, 1280), False, 'from flask import redirect\n'), ((1397, 1421), 'flask.request.args.get', 'request.args.get', (['"""code"""'], {}), "('code')\n", (1413, 1421), False, 'from flask import request\n'), ((1434, 1459), 'flask.request.args.get', 'request.args.get', (['"""state"""'], {}), "('state')\n", (1450, 1459), False, 'from flask import request\n'), ((1879, 1918), 'requests.post', 'requests.post', (['UAA_TOKEN_URI'], {'data': 'data'}), '(UAA_TOKEN_URI, data=data)\n', (1892, 1918), False, 'import requests\n'), ((2075, 2107), 'jwt.get_unverified_header', 'jwt.get_unverified_header', (['token'], {}), '(token)\n', (2100, 2107), False, 'import jwt\n'), ((2133, 2202), 'jwt.decode', 'jwt.decode', (['token', "header['alg']"], {'options': "{'verify_signature': False}"}), "(token, header['alg'], options={'verify_signature': False})\n", (2143, 2202), False, 'import jwt\n'), ((3065, 3078), 'flask.redirect', 'redirect', (['"""/"""'], {}), "('/')\n", (3073, 3078), False, 'from flask import redirect\n'), ((1499, 1509), 'flask.abort', 'abort', (['(400)'], {}), '(400)\n', (1504, 1509), False, 'from flask import abort\n'), ((1964, 1991), 'flask.abort', 'abort', (['response.status_code'], {}), '(response.status_code)\n', (1969, 1991), False, 'from flask import abort\n'), ((2241, 2252), 'time.time', 'time.time', ([], {}), '()\n', (2250, 2252), False, 'import time\n'), ((2390, 2420), 'sqlmodel.Session', 'SQLSession', (['current_app.engine'], {}), '(current_app.engine)\n', (2400, 2420), True, 'from sqlmodel import Session as SQLSession\n'), ((2618, 2632), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2630, 2632), False, 'from datetime import datetime\n'), ((630, 644), 'os.urandom', 'os.urandom', (['(64)'], {}), '(64)\n', (640, 644), False, 'import os\n'), ((2443, 2455), 'sqlmodel.select', 'select', (['User'], {}), '(User)\n', (2449, 2455), False, 'from sqlmodel import select\n'), ((2896, 2910), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2908, 2910), False, 'from datetime import datetime\n')]
|
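A sketch of wiring the auth blueprint above into a Flask application. The secret key, OAuth endpoints, client credentials, and database URI are placeholders; attaching the engine as app.engine mirrors the callback handler's use of current_app.engine.

from flask import Flask
from sqlmodel import create_engine

app = Flask(__name__)
app.secret_key = "change-me"  # required because the blueprint stores state in session
app.config.update(
    CLIENT_ID="my-client-id",
    CLIENT_SECRET="my-client-secret",
    REDIRECT_URI="https://localhost:5000/callback",
    UAA_AUTHORIZE_URI="https://uaa.example.com/oauth/authorize",
    UAA_TOKEN_URI="https://uaa.example.com/oauth/token",
    UAA_LOGOUT_URI="https://uaa.example.com/logout.do",
)
app.engine = create_engine("sqlite:///app.db")  # callback() reads current_app.engine
app.register_blueprint(bp)  # bp is the Blueprint defined in the sample above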
import logging
from megengine.distributed.group import get_rank
from megengine.distributed import is_distributed
logger_initialized = {}
def get_logger(name, log_file=None, log_level=logging.INFO):
"""Initialize and get a logger by name.
If the logger has not been initialized, this method will initialize the
logger by adding one or two handlers, otherwise the initialized logger will
be directly returned. During initialization, a StreamHandler will always be
added. If `log_file` is specified and the process rank is 0, a FileHandler
will also be added.
Args:
name (str): Logger name.
log_file (str | None): The log filename. If specified, a FileHandler
will be added to the logger.
log_level (int): The logger level. Note that only the process of
rank 0 is affected, and other processes will set the level to
"Error" thus be silent most of the time.
Returns:
logging.Logger: The expected logger.
"""
logger = logging.getLogger(name)
if name in logger_initialized:
return logger
# handle hierarchical names
# e.g., logger "a" is initialized, then logger "a.b" will skip the
# initialization since it is a child of "a".
for logger_name in logger_initialized:
if name.startswith(logger_name): # child
return logger
# fix stream twice bug
# while logger.handlers:
# logger.handlers.pop()
stream_handler = logging.StreamHandler()
handlers = [stream_handler]
if is_distributed():
rank = get_rank()
else:
rank = 0
# only rank 0 will add a FileHandler
if rank == 0 and log_file is not None:
file_handler = logging.FileHandler(log_file, 'w')
handlers.append(file_handler)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
for handler in handlers:
handler.setFormatter(formatter)
handler.setLevel(log_level)
logger.addHandler(handler)
if rank == 0:
logger.setLevel(log_level)
else:
logger.setLevel(logging.ERROR)
logger_initialized[name] = True
return logger
def get_root_logger(log_file=None, log_level=logging.INFO):
"""Get the root logger.
The logger will be initialized if it has not been initialized.
By default a StreamHandler will be added.
If `log_file` is specified, a FileHandler will also be added.
The name of the root logger is the top-level package name, e.g., "edit".
Args:
log_file (str | None): The log filename. If specified, a FileHandler will be added to the root logger.
log_level (int): The root logger level. Note that only the process of
rank 0 is affected, while other processes will set the level to
"Error" and be silent most of the time.
Returns:
logging.Logger: The root logger.
"""
root_name = __name__.split('.')[0] # edit.utils.logger
if is_distributed():
rank = get_rank()
root_name = "rank" + str(rank) + "_" + root_name
logger = get_logger(root_name, log_file, log_level)
return logger
|
[
"megengine.distributed.is_distributed",
"megengine.distributed.group.get_rank"
] |
[((1026, 1049), 'logging.getLogger', 'logging.getLogger', (['name'], {}), '(name)\n', (1043, 1049), False, 'import logging\n'), ((1494, 1517), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (1515, 1517), False, 'import logging\n'), ((1558, 1574), 'megengine.distributed.is_distributed', 'is_distributed', ([], {}), '()\n', (1572, 1574), False, 'from megengine.distributed import is_distributed\n'), ((1827, 1900), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s - %(name)s - %(levelname)s - %(message)s"""'], {}), "('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n", (1844, 1900), False, 'import logging\n'), ((3007, 3023), 'megengine.distributed.is_distributed', 'is_distributed', ([], {}), '()\n', (3021, 3023), False, 'from megengine.distributed import is_distributed\n'), ((1591, 1601), 'megengine.distributed.group.get_rank', 'get_rank', ([], {}), '()\n', (1599, 1601), False, 'from megengine.distributed.group import get_rank\n'), ((1737, 1771), 'logging.FileHandler', 'logging.FileHandler', (['log_file', '"""w"""'], {}), "(log_file, 'w')\n", (1756, 1771), False, 'import logging\n'), ((3040, 3050), 'megengine.distributed.group.get_rank', 'get_rank', ([], {}), '()\n', (3048, 3050), False, 'from megengine.distributed.group import get_rank\n')]
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import platform
import time
import numpy as np
import pytest
from megengine.data.collator import Collator
from megengine.data.dataloader import DataLoader
from megengine.data.dataset import ArrayDataset, StreamDataset
from megengine.data.sampler import RandomSampler, SequentialSampler, StreamSampler
from megengine.data.transform import (
Compose,
Normalize,
PseudoTransform,
ToMode,
Transform,
)
def init_dataset():
sample_num = 100
rand_data = np.random.randint(0, 255, size=(sample_num, 1, 32, 32), dtype=np.uint8)
label = np.random.randint(0, 10, size=(sample_num,), dtype=int)
dataset = ArrayDataset(rand_data, label)
return dataset
def test_dataloader_init():
dataset = init_dataset()
with pytest.raises(ValueError):
dataloader = DataLoader(dataset, num_workers=2, divide=True)
with pytest.raises(ValueError):
dataloader = DataLoader(dataset, num_workers=-1)
with pytest.raises(ValueError):
dataloader = DataLoader(dataset, timeout=-1)
with pytest.raises(ValueError):
dataloader = DataLoader(dataset, num_workers=0, divide=True)
dataloader = DataLoader(dataset)
assert isinstance(dataloader.sampler, SequentialSampler)
assert isinstance(dataloader.transform, PseudoTransform)
assert isinstance(dataloader.collator, Collator)
dataloader = DataLoader(
dataset, sampler=RandomSampler(dataset, batch_size=6, drop_last=False)
)
assert len(dataloader) == 17
dataloader = DataLoader(
dataset, sampler=RandomSampler(dataset, batch_size=6, drop_last=True)
)
assert len(dataloader) == 16
class MyStream(StreamDataset):
    def __init__(self, number, batch=False, error_format=False, block=False):
self.number = number
self.batch = batch
        self.error_format = error_format
self.block = block
def __iter__(self):
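        # Each yielded item is a tuple (is_batched, payload): when is_batched is
        # True the payload already holds a pre-batched chunk of samples, otherwise
        # it holds a single (data, label) pair.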
for cnt in range(self.number):
if self.block:
for _ in range(10):
time.sleep(1)
if self.batch:
data = np.random.randint(0, 256, (2, 2, 2, 3), dtype="uint8")
yield (True, (data, [cnt, cnt - self.number]))
else:
data = np.random.randint(0, 256, (2, 2, 3), dtype="uint8")
if self.error_format:
yield (data, cnt)
else:
yield (False, (data, cnt))
        return  # the generator simply ends; raising StopIteration here surfaces as RuntimeError under PEP 479
@pytest.mark.parametrize("batch", [True, False])
@pytest.mark.parametrize("num_workers", [0, 2])
def test_stream_dataloader(batch, num_workers):
dataset = MyStream(100, batch=batch)
sampler = StreamSampler(batch_size=4)
dataloader = DataLoader(
dataset,
sampler,
Compose([Normalize(mean=(103, 116, 123), std=(57, 57, 58)), ToMode("CHW")]),
num_workers=num_workers,
)
check_set = set()
for step, data in enumerate(dataloader):
if step == 10:
break
assert data[0].shape == (4, 3, 2, 2)
assert data[1].shape == (4,)
for i in data[1]:
assert i not in check_set
check_set.add(i)
def test_stream_dataloader_error():
    dataset = MyStream(100, error_format=True)
sampler = StreamSampler(batch_size=4)
dataloader = DataLoader(dataset, sampler)
with pytest.raises(AssertionError, match=r".*tuple.*"):
data_iter = iter(dataloader)
next(data_iter)
@pytest.mark.parametrize("num_workers", [0, 2])
def test_stream_dataloader_timeout(num_workers):
dataset = MyStream(100, False, block=True)
sampler = StreamSampler(batch_size=4)
dataloader = DataLoader(dataset, sampler, num_workers=num_workers, timeout=2)
with pytest.raises(RuntimeError, match=r".*timeout.*"):
data_iter = iter(dataloader)
next(data_iter)
def test_dataloader_serial():
dataset = init_dataset()
dataloader = DataLoader(
dataset, sampler=RandomSampler(dataset, batch_size=4, drop_last=False)
)
for (data, label) in dataloader:
assert data.shape == (4, 1, 32, 32)
assert label.shape == (4,)
def test_dataloader_parallel():
# set max shared memory to 100M
os.environ["MGE_PLASMA_MEMORY"] = "100000000"
dataset = init_dataset()
dataloader = DataLoader(
dataset,
sampler=RandomSampler(dataset, batch_size=4, drop_last=False),
num_workers=2,
divide=False,
)
for (data, label) in dataloader:
assert data.shape == (4, 1, 32, 32)
assert label.shape == (4,)
dataloader = DataLoader(
dataset,
sampler=RandomSampler(dataset, batch_size=4, drop_last=False),
num_workers=2,
divide=True,
)
for (data, label) in dataloader:
assert data.shape == (4, 1, 32, 32)
assert label.shape == (4,)
@pytest.mark.skipif(
platform.system() == "Windows",
reason="dataloader do not support parallel on windows",
)
def test_dataloader_parallel_timeout():
dataset = init_dataset()
class TimeoutTransform(Transform):
def __init__(self):
pass
def apply(self, input):
time.sleep(10)
return input
dataloader = DataLoader(
dataset,
sampler=RandomSampler(dataset, batch_size=4, drop_last=False),
transform=TimeoutTransform(),
num_workers=2,
timeout=2,
)
with pytest.raises(RuntimeError, match=r".*timeout.*"):
data_iter = iter(dataloader)
batch_data = next(data_iter)
@pytest.mark.skipif(
platform.system() == "Windows",
reason="dataloader do not support parallel on windows",
)
def test_dataloader_parallel_worker_exception():
dataset = init_dataset()
class FakeErrorTransform(Transform):
def __init__(self):
pass
def apply(self, input):
raise RuntimeError("test raise error")
return input
dataloader = DataLoader(
dataset,
sampler=RandomSampler(dataset, batch_size=4, drop_last=False),
transform=FakeErrorTransform(),
num_workers=2,
)
with pytest.raises(RuntimeError, match=r"worker.*died"):
data_iter = iter(dataloader)
batch_data = next(data_iter)
def _multi_instances_parallel_dataloader_worker():
dataset = init_dataset()
for divide_flag in [True, False]:
train_dataloader = DataLoader(
dataset,
sampler=RandomSampler(dataset, batch_size=4, drop_last=False),
num_workers=2,
divide=divide_flag,
)
val_dataloader = DataLoader(
dataset,
sampler=RandomSampler(dataset, batch_size=10, drop_last=False),
num_workers=2,
divide=divide_flag,
)
for idx, (data, label) in enumerate(train_dataloader):
assert data.shape == (4, 1, 32, 32)
assert label.shape == (4,)
if idx % 5 == 0:
for val_data, val_label in val_dataloader:
assert val_data.shape == (10, 1, 32, 32)
assert val_label.shape == (10,)
def test_dataloader_parallel_multi_instances():
# set max shared memory to 100M
os.environ["MGE_PLASMA_MEMORY"] = "100000000"
_multi_instances_parallel_dataloader_worker()
@pytest.mark.isolated_distributed
def test_dataloader_parallel_multi_instances_multiprocessing():
# set max shared memory to 100M
os.environ["MGE_PLASMA_MEMORY"] = "100000000"
import multiprocessing as mp
# mp.set_start_method("spawn")
processes = []
for i in range(4):
p = mp.Process(target=_multi_instances_parallel_dataloader_worker)
p.start()
processes.append(p)
for p in processes:
p.join()
assert p.exitcode == 0
@pytest.mark.parametrize("num_workers", [0, 2])
def test_timeout_event(num_workers):
def cb():
return (True, (np.zeros(shape=(2, 2, 2, 3)), np.ones(shape=(2,))))
dataset = MyStream(100, block=True)
sampler = StreamSampler(batch_size=4)
dataloader = DataLoader(
dataset, sampler, num_workers=num_workers, timeout=2, timeout_event=cb
)
for _, data in enumerate(dataloader):
np.testing.assert_equal(data[0], np.zeros(shape=(4, 2, 2, 3)))
np.testing.assert_equal(data[1], np.ones(shape=(4,)))
break
|
[
"megengine.data.sampler.RandomSampler",
"megengine.data.dataset.ArrayDataset",
"megengine.data.dataloader.DataLoader",
"megengine.data.sampler.StreamSampler",
"megengine.data.transform.Normalize",
"megengine.data.transform.ToMode"
] |
[((2861, 2908), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""batch"""', '[True, False]'], {}), "('batch', [True, False])\n", (2884, 2908), False, 'import pytest\n'), ((2910, 2956), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""num_workers"""', '[0, 2]'], {}), "('num_workers', [0, 2])\n", (2933, 2956), False, 'import pytest\n'), ((3857, 3903), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""num_workers"""', '[0, 2]'], {}), "('num_workers', [0, 2])\n", (3880, 3903), False, 'import pytest\n'), ((8231, 8277), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""num_workers"""', '[0, 2]'], {}), "('num_workers', [0, 2])\n", (8254, 8277), False, 'import pytest\n'), ((866, 937), 'numpy.random.randint', 'np.random.randint', (['(0)', '(255)'], {'size': '(sample_num, 1, 32, 32)', 'dtype': 'np.uint8'}), '(0, 255, size=(sample_num, 1, 32, 32), dtype=np.uint8)\n', (883, 937), True, 'import numpy as np\n'), ((950, 1005), 'numpy.random.randint', 'np.random.randint', (['(0)', '(10)'], {'size': '(sample_num,)', 'dtype': 'int'}), '(0, 10, size=(sample_num,), dtype=int)\n', (967, 1005), True, 'import numpy as np\n'), ((1020, 1050), 'megengine.data.dataset.ArrayDataset', 'ArrayDataset', (['rand_data', 'label'], {}), '(rand_data, label)\n', (1032, 1050), False, 'from megengine.data.dataset import ArrayDataset, StreamDataset\n'), ((1539, 1558), 'megengine.data.dataloader.DataLoader', 'DataLoader', (['dataset'], {}), '(dataset)\n', (1549, 1558), False, 'from megengine.data.dataloader import DataLoader\n'), ((3060, 3087), 'megengine.data.sampler.StreamSampler', 'StreamSampler', ([], {'batch_size': '(4)'}), '(batch_size=4)\n', (3073, 3087), False, 'from megengine.data.sampler import RandomSampler, SequentialSampler, StreamSampler\n'), ((3659, 3686), 'megengine.data.sampler.StreamSampler', 'StreamSampler', ([], {'batch_size': '(4)'}), '(batch_size=4)\n', (3672, 3686), False, 'from megengine.data.sampler import RandomSampler, SequentialSampler, StreamSampler\n'), ((3704, 3732), 'megengine.data.dataloader.DataLoader', 'DataLoader', (['dataset', 'sampler'], {}), '(dataset, sampler)\n', (3714, 3732), False, 'from megengine.data.dataloader import DataLoader\n'), ((4014, 4041), 'megengine.data.sampler.StreamSampler', 'StreamSampler', ([], {'batch_size': '(4)'}), '(batch_size=4)\n', (4027, 4041), False, 'from megengine.data.sampler import RandomSampler, SequentialSampler, StreamSampler\n'), ((4060, 4124), 'megengine.data.dataloader.DataLoader', 'DataLoader', (['dataset', 'sampler'], {'num_workers': 'num_workers', 'timeout': '(2)'}), '(dataset, sampler, num_workers=num_workers, timeout=2)\n', (4070, 4124), False, 'from megengine.data.dataloader import DataLoader\n'), ((8459, 8486), 'megengine.data.sampler.StreamSampler', 'StreamSampler', ([], {'batch_size': '(4)'}), '(batch_size=4)\n', (8472, 8486), False, 'from megengine.data.sampler import RandomSampler, SequentialSampler, StreamSampler\n'), ((8505, 8591), 'megengine.data.dataloader.DataLoader', 'DataLoader', (['dataset', 'sampler'], {'num_workers': 'num_workers', 'timeout': '(2)', 'timeout_event': 'cb'}), '(dataset, sampler, num_workers=num_workers, timeout=2,\n timeout_event=cb)\n', (8515, 8591), False, 'from megengine.data.dataloader import DataLoader\n'), ((1138, 1163), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1151, 1163), False, 'import pytest\n'), ((1186, 1233), 'megengine.data.dataloader.DataLoader', 'DataLoader', (['dataset'], {'num_workers': '(2)', 'divide': '(True)'}), '(dataset, 
num_workers=2, divide=True)\n', (1196, 1233), False, 'from megengine.data.dataloader import DataLoader\n'), ((1243, 1268), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1256, 1268), False, 'import pytest\n'), ((1291, 1326), 'megengine.data.dataloader.DataLoader', 'DataLoader', (['dataset'], {'num_workers': '(-1)'}), '(dataset, num_workers=-1)\n', (1301, 1326), False, 'from megengine.data.dataloader import DataLoader\n'), ((1336, 1361), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1349, 1361), False, 'import pytest\n'), ((1384, 1415), 'megengine.data.dataloader.DataLoader', 'DataLoader', (['dataset'], {'timeout': '(-1)'}), '(dataset, timeout=-1)\n', (1394, 1415), False, 'from megengine.data.dataloader import DataLoader\n'), ((1425, 1450), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1438, 1450), False, 'import pytest\n'), ((1473, 1520), 'megengine.data.dataloader.DataLoader', 'DataLoader', (['dataset'], {'num_workers': '(0)', 'divide': '(True)'}), '(dataset, num_workers=0, divide=True)\n', (1483, 1520), False, 'from megengine.data.dataloader import DataLoader\n'), ((3742, 3790), 'pytest.raises', 'pytest.raises', (['AssertionError'], {'match': '""".*tuple.*"""'}), "(AssertionError, match='.*tuple.*')\n", (3755, 3790), False, 'import pytest\n'), ((4134, 4182), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {'match': '""".*timeout.*"""'}), "(RuntimeError, match='.*timeout.*')\n", (4147, 4182), False, 'import pytest\n'), ((5828, 5876), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {'match': '""".*timeout.*"""'}), "(RuntimeError, match='.*timeout.*')\n", (5841, 5876), False, 'import pytest\n'), ((5282, 5299), 'platform.system', 'platform.system', ([], {}), '()\n', (5297, 5299), False, 'import platform\n'), ((6544, 6593), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {'match': '"""worker.*died"""'}), "(RuntimeError, match='worker.*died')\n", (6557, 6593), False, 'import pytest\n'), ((5980, 5997), 'platform.system', 'platform.system', ([], {}), '()\n', (5995, 5997), False, 'import platform\n'), ((8046, 8108), 'multiprocessing.Process', 'mp.Process', ([], {'target': '_multi_instances_parallel_dataloader_worker'}), '(target=_multi_instances_parallel_dataloader_worker)\n', (8056, 8108), True, 'import multiprocessing as mp\n'), ((1789, 1842), 'megengine.data.sampler.RandomSampler', 'RandomSampler', (['dataset'], {'batch_size': '(6)', 'drop_last': '(False)'}), '(dataset, batch_size=6, drop_last=False)\n', (1802, 1842), False, 'from megengine.data.sampler import RandomSampler, SequentialSampler, StreamSampler\n'), ((1936, 1988), 'megengine.data.sampler.RandomSampler', 'RandomSampler', (['dataset'], {'batch_size': '(6)', 'drop_last': '(True)'}), '(dataset, batch_size=6, drop_last=True)\n', (1949, 1988), False, 'from megengine.data.sampler import RandomSampler, SequentialSampler, StreamSampler\n'), ((4361, 4414), 'megengine.data.sampler.RandomSampler', 'RandomSampler', (['dataset'], {'batch_size': '(4)', 'drop_last': '(False)'}), '(dataset, batch_size=4, drop_last=False)\n', (4374, 4414), False, 'from megengine.data.sampler import RandomSampler, SequentialSampler, StreamSampler\n'), ((4749, 4802), 'megengine.data.sampler.RandomSampler', 'RandomSampler', (['dataset'], {'batch_size': '(4)', 'drop_last': '(False)'}), '(dataset, batch_size=4, drop_last=False)\n', (4762, 4802), False, 'from megengine.data.sampler import RandomSampler, SequentialSampler, StreamSampler\n'), ((5034, 5087), 
'megengine.data.sampler.RandomSampler', 'RandomSampler', (['dataset'], {'batch_size': '(4)', 'drop_last': '(False)'}), '(dataset, batch_size=4, drop_last=False)\n', (5047, 5087), False, 'from megengine.data.sampler import RandomSampler, SequentialSampler, StreamSampler\n'), ((5575, 5589), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (5585, 5589), False, 'import time\n'), ((5678, 5731), 'megengine.data.sampler.RandomSampler', 'RandomSampler', (['dataset'], {'batch_size': '(4)', 'drop_last': '(False)'}), '(dataset, batch_size=4, drop_last=False)\n', (5691, 5731), False, 'from megengine.data.sampler import RandomSampler, SequentialSampler, StreamSampler\n'), ((6411, 6464), 'megengine.data.sampler.RandomSampler', 'RandomSampler', (['dataset'], {'batch_size': '(4)', 'drop_last': '(False)'}), '(dataset, batch_size=4, drop_last=False)\n', (6424, 6464), False, 'from megengine.data.sampler import RandomSampler, SequentialSampler, StreamSampler\n'), ((8685, 8713), 'numpy.zeros', 'np.zeros', ([], {'shape': '(4, 2, 2, 3)'}), '(shape=(4, 2, 2, 3))\n', (8693, 8713), True, 'import numpy as np\n'), ((8756, 8775), 'numpy.ones', 'np.ones', ([], {'shape': '(4,)'}), '(shape=(4,))\n', (8763, 8775), True, 'import numpy as np\n'), ((2474, 2528), 'numpy.random.randint', 'np.random.randint', (['(0)', '(256)', '(2, 2, 2, 3)'], {'dtype': '"""uint8"""'}), "(0, 256, (2, 2, 2, 3), dtype='uint8')\n", (2491, 2528), True, 'import numpy as np\n'), ((2633, 2684), 'numpy.random.randint', 'np.random.randint', (['(0)', '(256)', '(2, 2, 3)'], {'dtype': '"""uint8"""'}), "(0, 256, (2, 2, 3), dtype='uint8')\n", (2650, 2684), True, 'import numpy as np\n'), ((3168, 3217), 'megengine.data.transform.Normalize', 'Normalize', ([], {'mean': '(103, 116, 123)', 'std': '(57, 57, 58)'}), '(mean=(103, 116, 123), std=(57, 57, 58))\n', (3177, 3217), False, 'from megengine.data.transform import Compose, Normalize, PseudoTransform, ToMode, Transform\n'), ((3219, 3232), 'megengine.data.transform.ToMode', 'ToMode', (['"""CHW"""'], {}), "('CHW')\n", (3225, 3232), False, 'from megengine.data.transform import Compose, Normalize, PseudoTransform, ToMode, Transform\n'), ((6871, 6924), 'megengine.data.sampler.RandomSampler', 'RandomSampler', (['dataset'], {'batch_size': '(4)', 'drop_last': '(False)'}), '(dataset, batch_size=4, drop_last=False)\n', (6884, 6924), False, 'from megengine.data.sampler import RandomSampler, SequentialSampler, StreamSampler\n'), ((7073, 7127), 'megengine.data.sampler.RandomSampler', 'RandomSampler', (['dataset'], {'batch_size': '(10)', 'drop_last': '(False)'}), '(dataset, batch_size=10, drop_last=False)\n', (7086, 7127), False, 'from megengine.data.sampler import RandomSampler, SequentialSampler, StreamSampler\n'), ((8352, 8380), 'numpy.zeros', 'np.zeros', ([], {'shape': '(2, 2, 2, 3)'}), '(shape=(2, 2, 2, 3))\n', (8360, 8380), True, 'import numpy as np\n'), ((8382, 8401), 'numpy.ones', 'np.ones', ([], {'shape': '(2,)'}), '(shape=(2,))\n', (8389, 8401), True, 'import numpy as np\n'), ((2410, 2423), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (2420, 2423), False, 'import time\n')]
|
import asyncio
import strawberry
from sqlmodel import Session, select
from strawberry.types import Info
from fastapi_server.models.user import User
from fastapi_server.routes.graph_ql.broadcaster import Broadcast
broadcast = Broadcast()
@strawberry.type
class UserSystemQuery:
@strawberry.field
def user_login(self, info: Info, email: str, password: str) -> str:
# TODO Replace with actual password hash function
session: Session = info.context['session']
statement = select(User).where(User.email == email, User.password_hashed == password)
user = session.exec(statement).first()
if user is None:
raise FileNotFoundError('Email and password do not match')
return f'Login successful for {email}'
@strawberry.type
class UserSystemMutation:
@strawberry.mutation
def user_register(self, info: Info, username: str, email: str, password: str, password_repeated: str) -> bool:
if password != password_repeated:
            raise KeyError('Passwords do not match')
# TODO Replace with actual password hash function
password_hashed = hash(password)
session: Session = info.context['session']
username_taken = session.exec(select(User).where(User.username == username)).first()
if username_taken is not None:
raise KeyError('username taken')
email_taken = session.exec(select(User).where(User.email == email)).first()
if email_taken is not None:
raise KeyError('email taken')
session.add(
User(
username=username,
email=email,
password_hashed=password_hashed,
is_admin=False,
is_disabled=False,
is_verified=False,
)
)
session.commit()
return True
@strawberry.mutation
def user_send_password_reset_email(self, info: Info, email: str) -> bool:
# Check if email exists in db, send password reset with token
pass
@strawberry.mutation
def user_reset_password(self, info: Info, token: str) -> bool:
        # Decipher the email from the token; if the token is valid, reset the password and send a generated password via email
pass
@strawberry.mutation
def user_check_logged_in(self, info: Info) -> bool:
# Read request cookies to check if user is logged in. Also check if the token in the cookie is valid
pass
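# Hedged sketch (not part of the original file): the query and mutation types
# above would typically be wired into a schema like this; how the "session"
# ends up in info.context is assumed to be handled elsewhere in the app.
schema = strawberry.Schema(query=UserSystemQuery, mutation=UserSystemMutation)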
async def main():
pass
if __name__ == '__main__':
asyncio.run(main())
|
[
"sqlmodel.select"
] |
[((228, 239), 'fastapi_server.routes.graph_ql.broadcaster.Broadcast', 'Broadcast', ([], {}), '()\n', (237, 239), False, 'from fastapi_server.routes.graph_ql.broadcaster import Broadcast\n'), ((1560, 1687), 'fastapi_server.models.user.User', 'User', ([], {'username': 'username', 'email': 'email', 'password_hashed': 'password_hashed', 'is_admin': '(False)', 'is_disabled': '(False)', 'is_verified': '(False)'}), '(username=username, email=email, password_hashed=password_hashed,\n is_admin=False, is_disabled=False, is_verified=False)\n', (1564, 1687), False, 'from fastapi_server.models.user import User\n'), ((505, 517), 'sqlmodel.select', 'select', (['User'], {}), '(User)\n', (511, 517), False, 'from sqlmodel import Session, select\n'), ((1226, 1238), 'sqlmodel.select', 'select', (['User'], {}), '(User)\n', (1232, 1238), False, 'from sqlmodel import Session, select\n'), ((1400, 1412), 'sqlmodel.select', 'select', (['User'], {}), '(User)\n', (1406, 1412), False, 'from sqlmodel import Session, select\n')]
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import time
import numpy as np
import pytest
from megengine.data.collator import Collator
from megengine.data.dataloader import DataLoader
from megengine.data.dataset import ArrayDataset
from megengine.data.sampler import RandomSampler, SequentialSampler
from megengine.data.transform import PseudoTransform, Transform
def init_dataset():
sample_num = 100
rand_data = np.random.randint(0, 255, size=(sample_num, 1, 32, 32), dtype=np.uint8)
label = np.random.randint(0, 10, size=(sample_num,), dtype=int)
dataset = ArrayDataset(rand_data, label)
return dataset
def test_dataloader_init():
dataset = init_dataset()
with pytest.raises(ValueError):
dataloader = DataLoader(dataset, num_workers=2, divide=True)
with pytest.raises(ValueError):
dataloader = DataLoader(dataset, num_workers=-1)
with pytest.raises(ValueError):
dataloader = DataLoader(dataset, timeout=-1)
with pytest.raises(ValueError):
dataloader = DataLoader(dataset, num_workers=0, divide=True)
dataloader = DataLoader(dataset)
assert isinstance(dataloader.sampler, SequentialSampler)
assert isinstance(dataloader.transform, PseudoTransform)
assert isinstance(dataloader.collator, Collator)
dataloader = DataLoader(
dataset, sampler=RandomSampler(dataset, batch_size=6, drop_last=False)
)
assert len(dataloader) == 17
dataloader = DataLoader(
dataset, sampler=RandomSampler(dataset, batch_size=6, drop_last=True)
)
assert len(dataloader) == 16
def test_dataloader_serial():
dataset = init_dataset()
dataloader = DataLoader(
dataset, sampler=RandomSampler(dataset, batch_size=4, drop_last=False)
)
for (data, label) in dataloader:
assert data.shape == (4, 1, 32, 32)
assert label.shape == (4,)
def test_dataloader_parallel():
# set max shared memory to 100M
os.environ["MGE_PLASMA_MEMORY"] = "100000000"
dataset = init_dataset()
dataloader = DataLoader(
dataset,
sampler=RandomSampler(dataset, batch_size=4, drop_last=False),
num_workers=2,
divide=False,
)
for (data, label) in dataloader:
assert data.shape == (4, 1, 32, 32)
assert label.shape == (4,)
dataloader = DataLoader(
dataset,
sampler=RandomSampler(dataset, batch_size=4, drop_last=False),
num_workers=2,
divide=True,
)
for (data, label) in dataloader:
assert data.shape == (4, 1, 32, 32)
assert label.shape == (4,)
def test_dataloader_parallel_timeout():
dataset = init_dataset()
class TimeoutTransform(Transform):
def __init__(self):
pass
def apply(self, input):
time.sleep(10)
return input
dataloader = DataLoader(
dataset,
sampler=RandomSampler(dataset, batch_size=4, drop_last=False),
transform=TimeoutTransform(),
num_workers=2,
timeout=2,
)
with pytest.raises(RuntimeError, match=r".*timeout.*"):
data_iter = iter(dataloader)
batch_data = next(data_iter)
def test_dataloader_parallel_worker_exception():
dataset = init_dataset()
class FakeErrorTransform(Transform):
def __init__(self):
pass
def apply(self, input):
            y = x + 1  # "x" is intentionally undefined, so the worker process raises an error
return input
dataloader = DataLoader(
dataset,
sampler=RandomSampler(dataset, batch_size=4, drop_last=False),
transform=FakeErrorTransform(),
num_workers=2,
)
with pytest.raises(RuntimeError, match=r"worker.*died"):
data_iter = iter(dataloader)
batch_data = next(data_iter)
|
[
"megengine.data.sampler.RandomSampler",
"megengine.data.dataloader.DataLoader",
"megengine.data.dataset.ArrayDataset"
] |
[((767, 838), 'numpy.random.randint', 'np.random.randint', (['(0)', '(255)'], {'size': '(sample_num, 1, 32, 32)', 'dtype': 'np.uint8'}), '(0, 255, size=(sample_num, 1, 32, 32), dtype=np.uint8)\n', (784, 838), True, 'import numpy as np\n'), ((851, 906), 'numpy.random.randint', 'np.random.randint', (['(0)', '(10)'], {'size': '(sample_num,)', 'dtype': 'int'}), '(0, 10, size=(sample_num,), dtype=int)\n', (868, 906), True, 'import numpy as np\n'), ((921, 951), 'megengine.data.dataset.ArrayDataset', 'ArrayDataset', (['rand_data', 'label'], {}), '(rand_data, label)\n', (933, 951), False, 'from megengine.data.dataset import ArrayDataset\n'), ((1440, 1459), 'megengine.data.dataloader.DataLoader', 'DataLoader', (['dataset'], {}), '(dataset)\n', (1450, 1459), False, 'from megengine.data.dataloader import DataLoader\n'), ((1039, 1064), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1052, 1064), False, 'import pytest\n'), ((1087, 1134), 'megengine.data.dataloader.DataLoader', 'DataLoader', (['dataset'], {'num_workers': '(2)', 'divide': '(True)'}), '(dataset, num_workers=2, divide=True)\n', (1097, 1134), False, 'from megengine.data.dataloader import DataLoader\n'), ((1144, 1169), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1157, 1169), False, 'import pytest\n'), ((1192, 1227), 'megengine.data.dataloader.DataLoader', 'DataLoader', (['dataset'], {'num_workers': '(-1)'}), '(dataset, num_workers=-1)\n', (1202, 1227), False, 'from megengine.data.dataloader import DataLoader\n'), ((1237, 1262), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1250, 1262), False, 'import pytest\n'), ((1285, 1316), 'megengine.data.dataloader.DataLoader', 'DataLoader', (['dataset'], {'timeout': '(-1)'}), '(dataset, timeout=-1)\n', (1295, 1316), False, 'from megengine.data.dataloader import DataLoader\n'), ((1326, 1351), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1339, 1351), False, 'import pytest\n'), ((1374, 1421), 'megengine.data.dataloader.DataLoader', 'DataLoader', (['dataset'], {'num_workers': '(0)', 'divide': '(True)'}), '(dataset, num_workers=0, divide=True)\n', (1384, 1421), False, 'from megengine.data.dataloader import DataLoader\n'), ((3392, 3440), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {'match': '""".*timeout.*"""'}), "(RuntimeError, match='.*timeout.*')\n", (3405, 3440), False, 'import pytest\n'), ((3960, 4009), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {'match': '"""worker.*died"""'}), "(RuntimeError, match='worker.*died')\n", (3973, 4009), False, 'import pytest\n'), ((1690, 1743), 'megengine.data.sampler.RandomSampler', 'RandomSampler', (['dataset'], {'batch_size': '(6)', 'drop_last': '(False)'}), '(dataset, batch_size=6, drop_last=False)\n', (1703, 1743), False, 'from megengine.data.sampler import RandomSampler, SequentialSampler\n'), ((1837, 1889), 'megengine.data.sampler.RandomSampler', 'RandomSampler', (['dataset'], {'batch_size': '(6)', 'drop_last': '(True)'}), '(dataset, batch_size=6, drop_last=True)\n', (1850, 1889), False, 'from megengine.data.sampler import RandomSampler, SequentialSampler\n'), ((2044, 2097), 'megengine.data.sampler.RandomSampler', 'RandomSampler', (['dataset'], {'batch_size': '(4)', 'drop_last': '(False)'}), '(dataset, batch_size=4, drop_last=False)\n', (2057, 2097), False, 'from megengine.data.sampler import RandomSampler, SequentialSampler\n'), ((2432, 2485), 'megengine.data.sampler.RandomSampler', 'RandomSampler', (['dataset'], {'batch_size': 
'(4)', 'drop_last': '(False)'}), '(dataset, batch_size=4, drop_last=False)\n', (2445, 2485), False, 'from megengine.data.sampler import RandomSampler, SequentialSampler\n'), ((2717, 2770), 'megengine.data.sampler.RandomSampler', 'RandomSampler', (['dataset'], {'batch_size': '(4)', 'drop_last': '(False)'}), '(dataset, batch_size=4, drop_last=False)\n', (2730, 2770), False, 'from megengine.data.sampler import RandomSampler, SequentialSampler\n'), ((3139, 3153), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (3149, 3153), False, 'import time\n'), ((3242, 3295), 'megengine.data.sampler.RandomSampler', 'RandomSampler', (['dataset'], {'batch_size': '(4)', 'drop_last': '(False)'}), '(dataset, batch_size=4, drop_last=False)\n', (3255, 3295), False, 'from megengine.data.sampler import RandomSampler, SequentialSampler\n'), ((3827, 3880), 'megengine.data.sampler.RandomSampler', 'RandomSampler', (['dataset'], {'batch_size': '(4)', 'drop_last': '(False)'}), '(dataset, batch_size=4, drop_last=False)\n', (3840, 3880), False, 'from megengine.data.sampler import RandomSampler, SequentialSampler\n')]
|
from sfepy.terms.extmods import terms
from sfepy.terms.cache import DataCache
from sfepy.base.base import nm, pause, debug
class ExpHistoryDataCache(DataCache):
"""History for exponential decay convolution kernels.
The decay argument is F(\Delta t), F(t_0=0) is assumed to be 1.0.
"""
name = 'exp_history'
arg_types = ('decay', 'values')
def __init__(self, name, arg_names, history_sizes=None):
DataCache.__init__(self, name, arg_names,
['history', 'increment', 'decay'], history_sizes)
def init_data(self, key, ckey, term, **kwargs):
decay, values = self.get_args(**kwargs)
shape = values.shape
self.shapes = {
'history' : shape,
'increment' : shape,
'decay' : decay.shape,
}
DataCache.init_datas(self, ckey, self.shapes, zero=True)
def update(self, key, term, ih, **kwargs):
decay, values = self.get_args(**kwargs)
ckey = self.get_key(term)
self.data['increment'][ckey][ih] = values
self.data['decay'][ckey][ih] = decay
self.valid['history'][ckey] = True
self.valid['increment'][ckey] = True
self.valid['decay'][ckey] = True
def custom_advance(self, key, ckey, step):
if key == 'history':
history = self.data['history'][ckey][0]
increment = self.data['increment'][ckey][0]
decay = self.data['decay'][ckey][0]
self.data['history'][ckey][0][:] = decay * (history + increment)
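# Hedged illustration (not part of sfepy): custom_advance() above applies the
# recurrence history_n = decay * (history_{n-1} + increment_{n-1}), i.e. a
# discrete convolution with an exponential kernel. A standalone numpy check:
if __name__ == '__main__':
    decay, history = 0.5, nm.zeros(3)
    for increment in (nm.ones(3), 2.0 * nm.ones(3)):
        history = decay * (history + increment)
    print(history)  # 0.5 * (0.5 * (0 + 1) + 2) = 1.25 per component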
|
[
"sfepy.terms.cache.DataCache.init_datas",
"sfepy.terms.cache.DataCache.__init__"
] |
[((430, 525), 'sfepy.terms.cache.DataCache.__init__', 'DataCache.__init__', (['self', 'name', 'arg_names', "['history', 'increment', 'decay']", 'history_sizes'], {}), "(self, name, arg_names, ['history', 'increment', 'decay'],\n history_sizes)\n", (448, 525), False, 'from sfepy.terms.cache import DataCache\n'), ((820, 876), 'sfepy.terms.cache.DataCache.init_datas', 'DataCache.init_datas', (['self', 'ckey', 'self.shapes'], {'zero': '(True)'}), '(self, ckey, self.shapes, zero=True)\n', (840, 876), False, 'from sfepy.terms.cache import DataCache\n')]
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import random
import megengine as mge
import megengine.functional as F
import megengine.module as M
import numpy as np
from megengine.jit import trace
def dump_mge_model(net, data, fpath="test_model", optimize_for_inference=False):
if mge.__version__ <= "0.6.0":
@trace(symbolic=True)
def inference(data, *, net):
net.eval()
output = net(data)
return output
inference.trace(data, net=net)
mge_result = inference(data, net=net).numpy()
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result
else:
mge_result = net(mge.tensor(data))
net.eval()
mge_result = net(mge.tensor(data))
@trace(symbolic=True, capture_as_const=True)
def inference(data):
net.eval()
output = net(data)
return output
inference(mge.tensor(data))
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result.numpy()
class ConvOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((1, 3, 224, 224)).astype(np.float32)
self.normal_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1)
)
self.group_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1), groups=3
)
self.normal_conv.bias = mge.Parameter(
np.random.random(self.normal_conv.bias.shape).astype(np.float32)
)
self.group_conv.bias = mge.Parameter(
np.random.random(self.group_conv.bias.shape).astype(np.float32)
)
self.transpose_conv = M.Sequential(
M.ConvTranspose2d(
3, 5, (3, 4), dilation=(2, 2), stride=(3, 2), padding=(2, 3), groups=1
),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv = M.Sequential(
M.ConvTranspose2d(3, 5, (3, 4), stride=(3, 2), groups=1),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.tflite_transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
def forward(self, x):
return getattr(self, self.mode + "_conv")(x)
class LinearOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((10, 100)).astype(np.float32)
self.linear = M.Linear(100, 200, bias=False)
self.linear_bias = M.Linear(200, 200, bias=True)
self.linear_bias.bias = mge.Parameter(
np.random.random(self.linear_bias.bias.shape).astype(np.float32)
)
def forward(self, x):
x = self.linear(x)
x = self.linear_bias(x)
x = F.relu(x)
return x
class PoolOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((30, 3, 224, 224)).astype(np.float32)
self.maxpool = M.pooling.MaxPool2d(kernel_size=3, stride=2, padding=2)
self.avgpool = M.pooling.AvgPool2d(kernel_size=3, stride=2, padding=2)
def forward(self, x):
return getattr(self, self.mode + "pool")(x)
class BnOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data1 = np.random.random((1, 32, 32)).astype(np.float32)
self.data2 = np.random.random((20, 3, 24, 24)).astype(np.float32)
self.bn1d = M.BatchNorm1d(32)
self.bn2d = M.BatchNorm2d(3)
def forward(self, x):
return getattr(self, self.mode)(x)
class SubtensorOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
self.fix_batch = fix_batch
self.data = np.random.random((10, 10, 10, 10)).astype(np.float32)
def forward(self, x):
if self.fix_batch:
x = x[:, 4:8, :, 4:9]
x = x[:, :, 2:7, 3]
else:
x = x[1:3, 4:8, :, 4:9]
x = x[:, :, :, 3]
x = x[1, 1:]
return x
class TransposeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.perm = [0, 2, 3, 1]
def forward(self, x):
return F.transpose(x, self.perm)
class ConcatOpr(M.Module):
def __init__(self):
super().__init__()
self.concat_idx = random.randint(0, 3)
self.data = np.random.random((1, 2, 4, 5)).astype(np.float32)
def forward(self, a):
return F.concat([a, a], self.concat_idx)
class SoftmaxOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1000)).astype(np.float32)
def forward(self, a):
return F.softmax(a)
class SqueezeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1, 1000)).astype(np.float32)
def forward(self, a):
if mge.__version__ <= "0.6.0":
return F.remove_axis(a, 0) # pylint: disable=no-member
else:
return F.squeeze(a, 0)
class ReshapeOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
if fix_batch:
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.out_shape = (1, 2 * 3, 4)
self.out_shape1 = (1, 2 * 3 * 4)
self.out_shape2 = (1, 2, 3 * 4)
else:
self.data = np.random.random((1, 2, 3, 4, 5)).astype(np.float32)
self.out_shape = [1, 2, 3 * 4, 5]
self.out_shape1 = [1 * 2, 3 * 4 * 5]
self.out_shape2 = [1 * 2 * 3, 4 * 5]
def forward(self, x):
x = F.reshape(x, self.out_shape)
x = F.reshape(x, self.out_shape1)
x = F.reshape(x, self.out_shape2)
return x
class ElemwiseOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.data = np.ones((2, 3, 224, 224)).astype(np.float32)
self.data1 = np.random.random((1, 3, 1, 1)).astype(np.float32)
self.data2 = np.random.random((2, 3, 224, 224)).astype(np.float32) - 0.8
self.mode = mode
def forward(self, a):
# add
if self.mode == "add":
x = a + mge.tensor(np.float32(10))
y = a + mge.tensor(self.data1)
z = x + y
# sub
elif self.mode == "sub":
x = a - mge.tensor(np.float32(10))
y = a - mge.tensor(self.data1)
z = x - y
# mul
elif self.mode == "mul":
x = a * mge.tensor(np.float32(10))
y = mge.tensor(self.data1) * a
z = x * y
        # max / min
elif self.mode == "max":
x = a + mge.tensor(self.data)
y = a + mge.tensor(self.data2)
z = F.maximum(x, y)
elif self.mode == "min":
x = a + mge.tensor(self.data)
y = a + mge.tensor(self.data2)
z = F.minimum(x, y)
elif self.mode == "pow":
z = a ** 2
elif self.mode == "ceil":
z = F.ceil(a)
elif self.mode == "floor":
z = F.floor(a)
elif self.mode == "div":
y = mge.tensor(self.data1) / a
x = a / mge.tensor(np.float32(2))
z = y / x
# cycle_div
elif self.mode == "cycle_div":
z = a / mge.tensor(self.data1)
# abs
elif self.mode == "abs":
z = F.abs(a)
# exp
elif self.mode == "exp":
z = F.exp(a)
# log
elif self.mode == "log":
z = F.log(a)
elif self.mode == "fuse_add_relu":
y = a + mge.tensor(self.data2)
z = F.relu(y)
elif self.mode == "fuse_mul_add3":
y = a * mge.tensor(self.data1)
z = y + mge.tensor(self.data2)
elif self.mode == "fuse_add_sigmoid":
y = a + mge.tensor(self.data2)
z = F.sigmoid(y)
else:
raise NotImplementedError('no such elemwise mode "%s"' % self.mode)
return z
class ReduceOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((1, 3, 1000)).astype(np.float32)
def forward(self, a):
if self.mode == "sum":
return F.sum(a, axis=2)
elif self.mode == "mean":
return F.mean(a, axis=2)
else:
return F.max(a, axis=2)
class ResizeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.out_shape = [8, 8]
self.out_shape2 = [3, 4]
def forward(self, x):
x = F.vision.interpolate(x, size=self.out_shape, mode="bilinear")
x = F.vision.interpolate(x, size=self.out_shape2, mode="bilinear")
return x
class ActiveOpr(M.Module):
str2fun = {
"relu": F.relu,
"tanh": F.tanh,
"sigmoid": F.sigmoid,
"leaky_relu": F.leaky_relu,
"softmax": F.softmax,
"relu6": lambda x: F.maximum(F.minimum(x, 6), 0),
}
def __init__(self, mode, fused=False):
super().__init__()
self.mode = mode
self.fused = fused
self.data = (np.random.random((1, 2, 3, 4)).astype(np.float32) - 0.5) * 8.0
def forward(self, x):
if self.fused:
return ActiveOpr.str2fun[self.mode](x + x)
else:
return ActiveOpr.str2fun[self.mode](x)
class BroadcastOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.array([1], dtype=np.float16)
def forward(self, x):
return F.broadcast_to(x, (3, 5))
class TypeCvtOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.array([[2, 2, 2, 2], [3, 3, 3, 3]], dtype=np.int32)
def forward(self, x):
x = x + 1
x = x.astype(np.float32)
return x
class XORNet(M.Module):
def __init__(self, converter="normal"):
self.converter = converter
self.mid_dim = 14
self.num_class = 2
super().__init__()
self.fc0 = M.Linear(self.num_class, self.mid_dim, bias=True)
self.bn0 = M.BatchNorm1d(self.mid_dim)
self.fc1 = M.Linear(self.mid_dim, self.mid_dim, bias=True)
self.bn1 = M.BatchNorm1d(self.mid_dim)
self.fc2 = M.Linear(self.mid_dim, self.num_class, bias=True)
self.data = np.arange(24).reshape(12, 2).astype(np.float32)
def forward(self, x):
x = self.fc0(x)
x = self.bn0(x)
x = F.softmax(x) if self.converter == "tflite" else F.tanh(x)
x = self.fc1(x)
x = self.bn1(x)
x = F.softmax(x) if self.converter == "tflite" else F.tanh(x)
x = self.fc2(x)
return x
class XORNet_LeakyRelu(M.Module):
def __init__(self):
self.mid_dim = 14
self.num_class = 2
super().__init__()
self.fc0 = M.Linear(self.num_class, self.mid_dim, bias=True)
self.bn0 = M.BatchNorm1d(self.mid_dim)
self.fc1 = M.Linear(self.mid_dim, self.mid_dim, bias=True)
self.bn1 = M.BatchNorm1d(self.mid_dim)
self.fc2 = M.Linear(self.mid_dim, self.num_class, bias=True)
self.data = np.random.random((12, 2)).astype(np.float32)
def forward(self, x):
x = self.fc0(x)
x = self.bn0(x)
x = F.leaky_relu(x)
x = F.leaky_relu(x)
x = F.tanh(x)
x = self.fc1(x)
x = F.leaky_relu(x)
x = self.bn1(x)
x = F.tanh(x)
x = self.fc2(x)
x = F.leaky_relu(x)
return x
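# Hedged usage sketch (not part of the original helpers): dump one of the toy
# modules above to a .mge file; the output path "linear_test_model" is arbitrary
# and only runs when this file is executed directly.
if __name__ == "__main__":
    net = LinearOpr()
    result = dump_mge_model(net, net.data, fpath="linear_test_model")
    print(result.shape)  # expected (10, 200)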
|
[
"megengine.functional.transpose",
"megengine.functional.sum",
"megengine.module.Conv2d",
"megengine.module.pooling.MaxPool2d",
"megengine.functional.remove_axis",
"megengine.functional.squeeze",
"megengine.functional.floor",
"megengine.module.BatchNorm2d",
"megengine.functional.log",
"megengine.functional.leaky_relu",
"megengine.functional.broadcast_to",
"megengine.jit.trace",
"megengine.tensor",
"megengine.functional.concat",
"megengine.module.ConvTranspose2d",
"megengine.module.BatchNorm1d",
"megengine.functional.exp",
"megengine.functional.ceil",
"megengine.functional.vision.interpolate",
"megengine.functional.minimum",
"megengine.module.Linear",
"megengine.functional.sigmoid",
"megengine.module.pooling.AvgPool2d",
"megengine.functional.maximum",
"megengine.functional.softmax",
"megengine.functional.relu",
"megengine.functional.mean",
"megengine.functional.abs",
"megengine.functional.max",
"megengine.functional.reshape",
"megengine.functional.tanh"
] |
[((657, 677), 'megengine.jit.trace', 'trace', ([], {'symbolic': '(True)'}), '(symbolic=True)\n', (662, 677), False, 'from megengine.jit import trace\n'), ((1193, 1236), 'megengine.jit.trace', 'trace', ([], {'symbolic': '(True)', 'capture_as_const': '(True)'}), '(symbolic=True, capture_as_const=True)\n', (1198, 1236), False, 'from megengine.jit import trace\n'), ((1780, 1846), 'megengine.module.Conv2d', 'M.Conv2d', (['(3)', '(30)', '(3)'], {'stride': '(2, 3)', 'dilation': '(2, 2)', 'padding': '(3, 1)'}), '(3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1))\n', (1788, 1846), True, 'import megengine.module as M\n'), ((1895, 1971), 'megengine.module.Conv2d', 'M.Conv2d', (['(3)', '(30)', '(3)'], {'stride': '(2, 3)', 'dilation': '(2, 2)', 'padding': '(3, 1)', 'groups': '(3)'}), '(3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1), groups=3)\n', (1903, 1971), True, 'import megengine.module as M\n'), ((3518, 3548), 'megengine.module.Linear', 'M.Linear', (['(100)', '(200)'], {'bias': '(False)'}), '(100, 200, bias=False)\n', (3526, 3548), True, 'import megengine.module as M\n'), ((3576, 3605), 'megengine.module.Linear', 'M.Linear', (['(200)', '(200)'], {'bias': '(True)'}), '(200, 200, bias=True)\n', (3584, 3605), True, 'import megengine.module as M\n'), ((3838, 3847), 'megengine.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (3844, 3847), True, 'import megengine.functional as F\n'), ((4072, 4127), 'megengine.module.pooling.MaxPool2d', 'M.pooling.MaxPool2d', ([], {'kernel_size': '(3)', 'stride': '(2)', 'padding': '(2)'}), '(kernel_size=3, stride=2, padding=2)\n', (4091, 4127), True, 'import megengine.module as M\n'), ((4151, 4206), 'megengine.module.pooling.AvgPool2d', 'M.pooling.AvgPool2d', ([], {'kernel_size': '(3)', 'stride': '(2)', 'padding': '(2)'}), '(kernel_size=3, stride=2, padding=2)\n', (4170, 4206), True, 'import megengine.module as M\n'), ((4557, 4574), 'megengine.module.BatchNorm1d', 'M.BatchNorm1d', (['(32)'], {}), '(32)\n', (4570, 4574), True, 'import megengine.module as M\n'), ((4595, 4611), 'megengine.module.BatchNorm2d', 'M.BatchNorm2d', (['(3)'], {}), '(3)\n', (4608, 4611), True, 'import megengine.module as M\n'), ((5361, 5386), 'megengine.functional.transpose', 'F.transpose', (['x', 'self.perm'], {}), '(x, self.perm)\n', (5372, 5386), True, 'import megengine.functional as F\n'), ((5493, 5513), 'random.randint', 'random.randint', (['(0)', '(3)'], {}), '(0, 3)\n', (5507, 5513), False, 'import random\n'), ((5626, 5659), 'megengine.functional.concat', 'F.concat', (['[a, a]', 'self.concat_idx'], {}), '([a, a], self.concat_idx)\n', (5634, 5659), True, 'import megengine.functional as F\n'), ((5850, 5862), 'megengine.functional.softmax', 'F.softmax', (['a'], {}), '(a)\n', (5859, 5862), True, 'import megengine.functional as F\n'), ((6797, 6825), 'megengine.functional.reshape', 'F.reshape', (['x', 'self.out_shape'], {}), '(x, self.out_shape)\n', (6806, 6825), True, 'import megengine.functional as F\n'), ((6838, 6867), 'megengine.functional.reshape', 'F.reshape', (['x', 'self.out_shape1'], {}), '(x, self.out_shape1)\n', (6847, 6867), True, 'import megengine.functional as F\n'), ((6880, 6909), 'megengine.functional.reshape', 'F.reshape', (['x', 'self.out_shape2'], {}), '(x, self.out_shape2)\n', (6889, 6909), True, 'import megengine.functional as F\n'), ((9837, 9898), 'megengine.functional.vision.interpolate', 'F.vision.interpolate', (['x'], {'size': 'self.out_shape', 'mode': '"""bilinear"""'}), "(x, size=self.out_shape, mode='bilinear')\n", (9857, 9898), True, 'import 
megengine.functional as F\n'), ((9911, 9973), 'megengine.functional.vision.interpolate', 'F.vision.interpolate', (['x'], {'size': 'self.out_shape2', 'mode': '"""bilinear"""'}), "(x, size=self.out_shape2, mode='bilinear')\n", (9931, 9973), True, 'import megengine.functional as F\n'), ((10724, 10755), 'numpy.array', 'np.array', (['[1]'], {'dtype': 'np.float16'}), '([1], dtype=np.float16)\n', (10732, 10755), True, 'import numpy as np\n'), ((10798, 10823), 'megengine.functional.broadcast_to', 'F.broadcast_to', (['x', '(3, 5)'], {}), '(x, (3, 5))\n', (10812, 10823), True, 'import megengine.functional as F\n'), ((10925, 10979), 'numpy.array', 'np.array', (['[[2, 2, 2, 2], [3, 3, 3, 3]]'], {'dtype': 'np.int32'}), '([[2, 2, 2, 2], [3, 3, 3, 3]], dtype=np.int32)\n', (10933, 10979), True, 'import numpy as np\n'), ((11279, 11328), 'megengine.module.Linear', 'M.Linear', (['self.num_class', 'self.mid_dim'], {'bias': '(True)'}), '(self.num_class, self.mid_dim, bias=True)\n', (11287, 11328), True, 'import megengine.module as M\n'), ((11348, 11375), 'megengine.module.BatchNorm1d', 'M.BatchNorm1d', (['self.mid_dim'], {}), '(self.mid_dim)\n', (11361, 11375), True, 'import megengine.module as M\n'), ((11395, 11442), 'megengine.module.Linear', 'M.Linear', (['self.mid_dim', 'self.mid_dim'], {'bias': '(True)'}), '(self.mid_dim, self.mid_dim, bias=True)\n', (11403, 11442), True, 'import megengine.module as M\n'), ((11462, 11489), 'megengine.module.BatchNorm1d', 'M.BatchNorm1d', (['self.mid_dim'], {}), '(self.mid_dim)\n', (11475, 11489), True, 'import megengine.module as M\n'), ((11509, 11558), 'megengine.module.Linear', 'M.Linear', (['self.mid_dim', 'self.num_class'], {'bias': '(True)'}), '(self.mid_dim, self.num_class, bias=True)\n', (11517, 11558), True, 'import megengine.module as M\n'), ((12090, 12139), 'megengine.module.Linear', 'M.Linear', (['self.num_class', 'self.mid_dim'], {'bias': '(True)'}), '(self.num_class, self.mid_dim, bias=True)\n', (12098, 12139), True, 'import megengine.module as M\n'), ((12159, 12186), 'megengine.module.BatchNorm1d', 'M.BatchNorm1d', (['self.mid_dim'], {}), '(self.mid_dim)\n', (12172, 12186), True, 'import megengine.module as M\n'), ((12206, 12253), 'megengine.module.Linear', 'M.Linear', (['self.mid_dim', 'self.mid_dim'], {'bias': '(True)'}), '(self.mid_dim, self.mid_dim, bias=True)\n', (12214, 12253), True, 'import megengine.module as M\n'), ((12273, 12300), 'megengine.module.BatchNorm1d', 'M.BatchNorm1d', (['self.mid_dim'], {}), '(self.mid_dim)\n', (12286, 12300), True, 'import megengine.module as M\n'), ((12320, 12369), 'megengine.module.Linear', 'M.Linear', (['self.mid_dim', 'self.num_class'], {'bias': '(True)'}), '(self.mid_dim, self.num_class, bias=True)\n', (12328, 12369), True, 'import megengine.module as M\n'), ((12522, 12537), 'megengine.functional.leaky_relu', 'F.leaky_relu', (['x'], {}), '(x)\n', (12534, 12537), True, 'import megengine.functional as F\n'), ((12550, 12565), 'megengine.functional.leaky_relu', 'F.leaky_relu', (['x'], {}), '(x)\n', (12562, 12565), True, 'import megengine.functional as F\n'), ((12578, 12587), 'megengine.functional.tanh', 'F.tanh', (['x'], {}), '(x)\n', (12584, 12587), True, 'import megengine.functional as F\n'), ((12624, 12639), 'megengine.functional.leaky_relu', 'F.leaky_relu', (['x'], {}), '(x)\n', (12636, 12639), True, 'import megengine.functional as F\n'), ((12676, 12685), 'megengine.functional.tanh', 'F.tanh', (['x'], {}), '(x)\n', (12682, 12685), True, 'import megengine.functional as F\n'), ((12722, 12737), 
'megengine.functional.leaky_relu', 'F.leaky_relu', (['x'], {}), '(x)\n', (12734, 12737), True, 'import megengine.functional as F\n'), ((1103, 1119), 'megengine.tensor', 'mge.tensor', (['data'], {}), '(data)\n', (1113, 1119), True, 'import megengine as mge\n'), ((1165, 1181), 'megengine.tensor', 'mge.tensor', (['data'], {}), '(data)\n', (1175, 1181), True, 'import megengine as mge\n'), ((1365, 1381), 'megengine.tensor', 'mge.tensor', (['data'], {}), '(data)\n', (1375, 1381), True, 'import megengine as mge\n'), ((2318, 2411), 'megengine.module.ConvTranspose2d', 'M.ConvTranspose2d', (['(3)', '(5)', '(3, 4)'], {'dilation': '(2, 2)', 'stride': '(3, 2)', 'padding': '(2, 3)', 'groups': '(1)'}), '(3, 5, (3, 4), dilation=(2, 2), stride=(3, 2), padding=(2,\n 3), groups=1)\n', (2335, 2411), True, 'import megengine.module as M\n'), ((2451, 2482), 'megengine.module.ConvTranspose2d', 'M.ConvTranspose2d', (['(5)', '(3)', '(3, 3)'], {}), '(5, 3, (3, 3))\n', (2468, 2482), True, 'import megengine.module as M\n'), ((2850, 2906), 'megengine.module.ConvTranspose2d', 'M.ConvTranspose2d', (['(3)', '(5)', '(3, 4)'], {'stride': '(3, 2)', 'groups': '(1)'}), '(3, 5, (3, 4), stride=(3, 2), groups=1)\n', (2867, 2906), True, 'import megengine.module as M\n'), ((2920, 2951), 'megengine.module.ConvTranspose2d', 'M.ConvTranspose2d', (['(5)', '(3)', '(3, 3)'], {}), '(5, 3, (3, 3))\n', (2937, 2951), True, 'import megengine.module as M\n'), ((6099, 6118), 'megengine.functional.remove_axis', 'F.remove_axis', (['a', '(0)'], {}), '(a, 0)\n', (6112, 6118), True, 'import megengine.functional as F\n'), ((6181, 6196), 'megengine.functional.squeeze', 'F.squeeze', (['a', '(0)'], {}), '(a, 0)\n', (6190, 6196), True, 'import megengine.functional as F\n'), ((9445, 9461), 'megengine.functional.sum', 'F.sum', (['a'], {'axis': '(2)'}), '(a, axis=2)\n', (9450, 9461), True, 'import megengine.functional as F\n'), ((11714, 11726), 'megengine.functional.softmax', 'F.softmax', (['x'], {}), '(x)\n', (11723, 11726), True, 'import megengine.functional as F\n'), ((11762, 11771), 'megengine.functional.tanh', 'F.tanh', (['x'], {}), '(x)\n', (11768, 11771), True, 'import megengine.functional as F\n'), ((11832, 11844), 'megengine.functional.softmax', 'F.softmax', (['x'], {}), '(x)\n', (11841, 11844), True, 'import megengine.functional as F\n'), ((11880, 11889), 'megengine.functional.tanh', 'F.tanh', (['x'], {}), '(x)\n', (11886, 11889), True, 'import megengine.functional as F\n'), ((1699, 1733), 'numpy.random.random', 'np.random.random', (['(1, 3, 224, 224)'], {}), '((1, 3, 224, 224))\n', (1715, 1733), True, 'import numpy as np\n'), ((3449, 3476), 'numpy.random.random', 'np.random.random', (['(10, 100)'], {}), '((10, 100))\n', (3465, 3476), True, 'import numpy as np\n'), ((3994, 4029), 'numpy.random.random', 'np.random.random', (['(30, 3, 224, 224)'], {}), '((30, 3, 224, 224))\n', (4010, 4029), True, 'import numpy as np\n'), ((4414, 4443), 'numpy.random.random', 'np.random.random', (['(1, 32, 32)'], {}), '((1, 32, 32))\n', (4430, 4443), True, 'import numpy as np\n'), ((4484, 4517), 'numpy.random.random', 'np.random.random', (['(20, 3, 24, 24)'], {}), '((20, 3, 24, 24))\n', (4500, 4517), True, 'import numpy as np\n'), ((4837, 4871), 'numpy.random.random', 'np.random.random', (['(10, 10, 10, 10)'], {}), '((10, 10, 10, 10))\n', (4853, 4871), True, 'import numpy as np\n'), ((5236, 5266), 'numpy.random.random', 'np.random.random', (['(1, 2, 3, 4)'], {}), '((1, 2, 3, 4))\n', (5252, 5266), True, 'import numpy as np\n'), ((5534, 5564), 'numpy.random.random', 
'np.random.random', (['(1, 2, 4, 5)'], {}), '((1, 2, 4, 5))\n', (5550, 5564), True, 'import numpy as np\n'), ((5761, 5788), 'numpy.random.random', 'np.random.random', (['(1, 1000)'], {}), '((1, 1000))\n', (5777, 5788), True, 'import numpy as np\n'), ((5964, 5994), 'numpy.random.random', 'np.random.random', (['(1, 1, 1000)'], {}), '((1, 1, 1000))\n', (5980, 5994), True, 'import numpy as np\n'), ((7035, 7060), 'numpy.ones', 'np.ones', (['(2, 3, 224, 224)'], {}), '((2, 3, 224, 224))\n', (7042, 7060), True, 'import numpy as np\n'), ((7101, 7131), 'numpy.random.random', 'np.random.random', (['(1, 3, 1, 1)'], {}), '((1, 3, 1, 1))\n', (7117, 7131), True, 'import numpy as np\n'), ((7396, 7418), 'megengine.tensor', 'mge.tensor', (['self.data1'], {}), '(self.data1)\n', (7406, 7418), True, 'import megengine as mge\n'), ((9318, 9348), 'numpy.random.random', 'np.random.random', (['(1, 3, 1000)'], {}), '((1, 3, 1000))\n', (9334, 9348), True, 'import numpy as np\n'), ((9515, 9532), 'megengine.functional.mean', 'F.mean', (['a'], {'axis': '(2)'}), '(a, axis=2)\n', (9521, 9532), True, 'import megengine.functional as F\n'), ((9566, 9582), 'megengine.functional.max', 'F.max', (['a'], {'axis': '(2)'}), '(a, axis=2)\n', (9571, 9582), True, 'import megengine.functional as F\n'), ((9683, 9713), 'numpy.random.random', 'np.random.random', (['(1, 2, 3, 4)'], {}), '((1, 2, 3, 4))\n', (9699, 9713), True, 'import numpy as np\n'), ((10217, 10232), 'megengine.functional.minimum', 'F.minimum', (['x', '(6)'], {}), '(x, 6)\n', (10226, 10232), True, 'import megengine.functional as F\n'), ((12390, 12415), 'numpy.random.random', 'np.random.random', (['(12, 2)'], {}), '((12, 2))\n', (12406, 12415), True, 'import numpy as np\n'), ((2054, 2099), 'numpy.random.random', 'np.random.random', (['self.normal_conv.bias.shape'], {}), '(self.normal_conv.bias.shape)\n', (2070, 2099), True, 'import numpy as np\n'), ((2187, 2231), 'numpy.random.random', 'np.random.random', (['self.group_conv.bias.shape'], {}), '(self.group_conv.bias.shape)\n', (2203, 2231), True, 'import numpy as np\n'), ((2559, 2610), 'numpy.random.random', 'np.random.random', (['self.transpose_conv[0].bias.shape'], {}), '(self.transpose_conv[0].bias.shape)\n', (2575, 2610), True, 'import numpy as np\n'), ((2705, 2756), 'numpy.random.random', 'np.random.random', (['self.transpose_conv[1].bias.shape'], {}), '(self.transpose_conv[1].bias.shape)\n', (2721, 2756), True, 'import numpy as np\n'), ((3035, 3086), 'numpy.random.random', 'np.random.random', (['self.transpose_conv[0].bias.shape'], {}), '(self.transpose_conv[0].bias.shape)\n', (3051, 3086), True, 'import numpy as np\n'), ((3188, 3239), 'numpy.random.random', 'np.random.random', (['self.transpose_conv[1].bias.shape'], {}), '(self.transpose_conv[1].bias.shape)\n', (3204, 3239), True, 'import numpy as np\n'), ((3665, 3710), 'numpy.random.random', 'np.random.random', (['self.linear_bias.bias.shape'], {}), '(self.linear_bias.bias.shape)\n', (3681, 3710), True, 'import numpy as np\n'), ((6341, 6371), 'numpy.random.random', 'np.random.random', (['(1, 2, 3, 4)'], {}), '((1, 2, 3, 4))\n', (6357, 6371), True, 'import numpy as np\n'), ((6561, 6594), 'numpy.random.random', 'np.random.random', (['(1, 2, 3, 4, 5)'], {}), '((1, 2, 3, 4, 5))\n', (6577, 6594), True, 'import numpy as np\n'), ((7172, 7206), 'numpy.random.random', 'np.random.random', (['(2, 3, 224, 224)'], {}), '((2, 3, 224, 224))\n', (7188, 7206), True, 'import numpy as np\n'), ((7360, 7374), 'numpy.float32', 'np.float32', (['(10)'], {}), '(10)\n', (7370, 7374), True, 
'import numpy as np\n'), ((7555, 7577), 'megengine.tensor', 'mge.tensor', (['self.data1'], {}), '(self.data1)\n', (7565, 7577), True, 'import megengine as mge\n'), ((7519, 7533), 'numpy.float32', 'np.float32', (['(10)'], {}), '(10)\n', (7529, 7533), True, 'import numpy as np\n'), ((7710, 7732), 'megengine.tensor', 'mge.tensor', (['self.data1'], {}), '(self.data1)\n', (7720, 7732), True, 'import megengine as mge\n'), ((7907, 7922), 'megengine.functional.maximum', 'F.maximum', (['x', 'y'], {}), '(x, y)\n', (7916, 7922), True, 'import megengine.functional as F\n'), ((10388, 10418), 'numpy.random.random', 'np.random.random', (['(1, 2, 3, 4)'], {}), '((1, 2, 3, 4))\n', (10404, 10418), True, 'import numpy as np\n'), ((11579, 11592), 'numpy.arange', 'np.arange', (['(24)'], {}), '(24)\n', (11588, 11592), True, 'import numpy as np\n'), ((7678, 7692), 'numpy.float32', 'np.float32', (['(10)'], {}), '(10)\n', (7688, 7692), True, 'import numpy as np\n'), ((7826, 7847), 'megengine.tensor', 'mge.tensor', (['self.data'], {}), '(self.data)\n', (7836, 7847), True, 'import megengine as mge\n'), ((7868, 7890), 'megengine.tensor', 'mge.tensor', (['self.data2'], {}), '(self.data2)\n', (7878, 7890), True, 'import megengine as mge\n'), ((8057, 8072), 'megengine.functional.minimum', 'F.minimum', (['x', 'y'], {}), '(x, y)\n', (8066, 8072), True, 'import megengine.functional as F\n'), ((7976, 7997), 'megengine.tensor', 'mge.tensor', (['self.data'], {}), '(self.data)\n', (7986, 7997), True, 'import megengine as mge\n'), ((8018, 8040), 'megengine.tensor', 'mge.tensor', (['self.data2'], {}), '(self.data2)\n', (8028, 8040), True, 'import megengine as mge\n'), ((8181, 8190), 'megengine.functional.ceil', 'F.ceil', (['a'], {}), '(a)\n', (8187, 8190), True, 'import megengine.functional as F\n'), ((8243, 8253), 'megengine.functional.floor', 'F.floor', (['a'], {}), '(a)\n', (8250, 8253), True, 'import megengine.functional as F\n'), ((8304, 8326), 'megengine.tensor', 'mge.tensor', (['self.data1'], {}), '(self.data1)\n', (8314, 8326), True, 'import megengine as mge\n'), ((8362, 8375), 'numpy.float32', 'np.float32', (['(2)'], {}), '(2)\n', (8372, 8375), True, 'import numpy as np\n'), ((8478, 8500), 'megengine.tensor', 'mge.tensor', (['self.data1'], {}), '(self.data1)\n', (8488, 8500), True, 'import megengine as mge\n'), ((8564, 8572), 'megengine.functional.abs', 'F.abs', (['a'], {}), '(a)\n', (8569, 8572), True, 'import megengine.functional as F\n'), ((8636, 8644), 'megengine.functional.exp', 'F.exp', (['a'], {}), '(a)\n', (8641, 8644), True, 'import megengine.functional as F\n'), ((8708, 8716), 'megengine.functional.log', 'F.log', (['a'], {}), '(a)\n', (8713, 8716), True, 'import megengine.functional as F\n'), ((8819, 8828), 'megengine.functional.relu', 'F.relu', (['y'], {}), '(y)\n', (8825, 8828), True, 'import megengine.functional as F\n'), ((8780, 8802), 'megengine.tensor', 'mge.tensor', (['self.data2'], {}), '(self.data2)\n', (8790, 8802), True, 'import megengine as mge\n'), ((8892, 8914), 'megengine.tensor', 'mge.tensor', (['self.data1'], {}), '(self.data1)\n', (8902, 8914), True, 'import megengine as mge\n'), ((8935, 8957), 'megengine.tensor', 'mge.tensor', (['self.data2'], {}), '(self.data2)\n', (8945, 8957), True, 'import megengine as mge\n'), ((9063, 9075), 'megengine.functional.sigmoid', 'F.sigmoid', (['y'], {}), '(y)\n', (9072, 9075), True, 'import megengine.functional as F\n'), ((9024, 9046), 'megengine.tensor', 'mge.tensor', (['self.data2'], {}), '(self.data2)\n', (9034, 9046), True, 'import megengine as mge\n')]
|
from sqlmodel import SQLModel, create_engine
from aot_quotes.common.db.quotes import Quotes
engine = create_engine("sqlite:///database.db", echo=True)
def migrate():
SQLModel.metadata.create_all(engine)
if __name__ == "__main__":
migrate()
__all__ = ["Quotes"]
|
[
"sqlmodel.SQLModel.metadata.create_all",
"sqlmodel.create_engine"
] |
[((103, 152), 'sqlmodel.create_engine', 'create_engine', (['"""sqlite:///database.db"""'], {'echo': '(True)'}), "('sqlite:///database.db', echo=True)\n", (116, 152), False, 'from sqlmodel import SQLModel, create_engine\n'), ((174, 210), 'sqlmodel.SQLModel.metadata.create_all', 'SQLModel.metadata.create_all', (['engine'], {}), '(engine)\n', (202, 210), False, 'from sqlmodel import SQLModel, create_engine\n')]
|
import asyncio
import uuid
from typing import List, Optional
import pytest
from fastapi_users import models
from pydantic import UUID4
from sqlmodel import Field, Relationship
from fastapi_users_db_sqlmodel import SQLModelBaseOAuthAccount, SQLModelBaseUserDB
class User(models.BaseUser):
first_name: Optional[str]
class UserCreate(models.BaseUserCreate):
first_name: Optional[str]
class UserUpdate(models.BaseUserUpdate):
pass
class UserDB(SQLModelBaseUserDB, User, table=True):
class Config:
orm_mode = True
class UserOAuth(User):
pass
class UserDBOAuth(SQLModelBaseUserDB, table=True):
__tablename__ = "user_oauth"
oauth_accounts: List["OAuthAccount"] = Relationship(
back_populates="user",
sa_relationship_kwargs={"lazy": "joined", "cascade": "all, delete"},
)
class OAuthAccount(SQLModelBaseOAuthAccount, table=True):
user_id: UUID4 = Field(foreign_key="user_oauth.id")
user: Optional[UserDBOAuth] = Relationship(back_populates="oauth_accounts")
@pytest.fixture(scope="session")
def event_loop():
"""Force the pytest-asyncio loop to be the main one."""
loop = asyncio.get_event_loop()
yield loop
@pytest.fixture
def oauth_account1() -> OAuthAccount:
return OAuthAccount(
id=uuid.UUID("b9089e5d-2642-406d-a7c0-cbc641aca0ec"),
oauth_name="service1",
access_token="TOKEN",
expires_at=1579000751,
account_id="user_oauth1",
account_email="<EMAIL>",
)
@pytest.fixture
def oauth_account2() -> OAuthAccount:
return OAuthAccount(
id=uuid.UUID("c9089e5d-2642-406d-a7c0-cbc641aca0ec"),
oauth_name="service2",
access_token="TOKEN",
expires_at=1579000751,
account_id="user_oauth2",
account_email="<EMAIL>",
)
|
[
"sqlmodel.Relationship",
"sqlmodel.Field"
] |
[((1033, 1064), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (1047, 1064), False, 'import pytest\n'), ((706, 814), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""user"""', 'sa_relationship_kwargs': "{'lazy': 'joined', 'cascade': 'all, delete'}"}), "(back_populates='user', sa_relationship_kwargs={'lazy':\n 'joined', 'cascade': 'all, delete'})\n", (718, 814), False, 'from sqlmodel import Field, Relationship\n'), ((915, 949), 'sqlmodel.Field', 'Field', ([], {'foreign_key': '"""user_oauth.id"""'}), "(foreign_key='user_oauth.id')\n", (920, 949), False, 'from sqlmodel import Field, Relationship\n'), ((984, 1029), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""oauth_accounts"""'}), "(back_populates='oauth_accounts')\n", (996, 1029), False, 'from sqlmodel import Field, Relationship\n'), ((1154, 1178), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (1176, 1178), False, 'import asyncio\n'), ((1286, 1335), 'uuid.UUID', 'uuid.UUID', (['"""b9089e5d-2642-406d-a7c0-cbc641aca0ec"""'], {}), "('b9089e5d-2642-406d-a7c0-cbc641aca0ec')\n", (1295, 1335), False, 'import uuid\n'), ((1594, 1643), 'uuid.UUID', 'uuid.UUID', (['"""c9089e5d-2642-406d-a7c0-cbc641aca0ec"""'], {}), "('c9089e5d-2642-406d-a7c0-cbc641aca0ec')\n", (1603, 1643), False, 'import uuid\n')]
|
from uuid import UUID
from sqlalchemy import event
from sqlalchemy.schema import Column, ForeignKey, UniqueConstraint
from sqlmodel import Field, Relationship
from sqlmodel.sql.sqltypes import GUID
from joj.horse.models.base import DomainURLORMModel, url_pre_save
from joj.horse.models.domain import Domain
from joj.horse.schemas.domain_invitation import DomainInvitationDetail
class DomainInvitation(DomainURLORMModel, DomainInvitationDetail, table=True): # type: ignore[call-arg]
__tablename__ = "domain_invitations"
__table_args__ = (
UniqueConstraint("domain_id", "url"),
UniqueConstraint("domain_id", "code"),
)
domain_id: UUID = Field(
sa_column=Column(
GUID, ForeignKey("domains.id", ondelete="CASCADE"), nullable=False
)
)
domain: "Domain" = Relationship(back_populates="invitations")
event.listen(DomainInvitation, "before_insert", url_pre_save)
event.listen(DomainInvitation, "before_update", url_pre_save)
|
[
"sqlmodel.Relationship"
] |
[((869, 930), 'sqlalchemy.event.listen', 'event.listen', (['DomainInvitation', '"""before_insert"""', 'url_pre_save'], {}), "(DomainInvitation, 'before_insert', url_pre_save)\n", (881, 930), False, 'from sqlalchemy import event\n'), ((931, 992), 'sqlalchemy.event.listen', 'event.listen', (['DomainInvitation', '"""before_update"""', 'url_pre_save'], {}), "(DomainInvitation, 'before_update', url_pre_save)\n", (943, 992), False, 'from sqlalchemy import event\n'), ((824, 866), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""invitations"""'}), "(back_populates='invitations')\n", (836, 866), False, 'from sqlmodel import Field, Relationship\n'), ((559, 595), 'sqlalchemy.schema.UniqueConstraint', 'UniqueConstraint', (['"""domain_id"""', '"""url"""'], {}), "('domain_id', 'url')\n", (575, 595), False, 'from sqlalchemy.schema import Column, ForeignKey, UniqueConstraint\n'), ((605, 642), 'sqlalchemy.schema.UniqueConstraint', 'UniqueConstraint', (['"""domain_id"""', '"""code"""'], {}), "('domain_id', 'code')\n", (621, 642), False, 'from sqlalchemy.schema import Column, ForeignKey, UniqueConstraint\n'), ((724, 768), 'sqlalchemy.schema.ForeignKey', 'ForeignKey', (['"""domains.id"""'], {'ondelete': '"""CASCADE"""'}), "('domains.id', ondelete='CASCADE')\n", (734, 768), False, 'from sqlalchemy.schema import Column, ForeignKey, UniqueConstraint\n')]
|
import numpy as np
import megengine
import megengine.module as M
import megengine.functional as F
import math
from . import default_init_weights
class ShuffleV2Block(M.Module):
def __init__(self, inp, oup, mid_channels, *, ksize, stride):
super().__init__()
self.stride = stride
assert stride in [1, 2]
self.mid_channels = mid_channels
self.ksize = ksize
pad = ksize // 2
self.pad = pad
self.inp = inp
outputs = oup - inp
branch_main = [
# pw
M.Conv2d(inp, mid_channels, 1, 1, 0, bias=False),
M.ReLU(),
# dw
M.Conv2d(
mid_channels, mid_channels, ksize, stride, pad,
groups=mid_channels, bias=False,
),
# pw-linear
M.Conv2d(mid_channels, outputs, 1, 1, 0, bias=False),
M.ReLU(),
]
self.branch_main = M.Sequential(*branch_main)
if stride == 2:
branch_proj = [
# dw
M.Conv2d(inp, inp, ksize, stride, pad, groups=inp, bias=False),
M.BatchNorm2d(inp),
# pw-linear
M.Conv2d(inp, inp, 1, 1, 0, bias=False),
M.BatchNorm2d(inp),
M.ReLU(),
]
self.branch_proj = M.Sequential(*branch_proj)
else:
self.branch_proj = None
self.init_weights()
def forward(self, old_x):
if self.stride == 1:
x_proj, x = self.channel_shuffle(old_x)
return F.concat((x_proj, self.branch_main(x)), 1)
elif self.stride == 2:
x_proj = old_x
x = old_x
return F.concat((self.branch_proj(x_proj), self.branch_main(x)), 1)
else:
raise ValueError("use stride 1 or 2, current stride {}".format(self.stride))
def channel_shuffle(self, x):
batchsize, num_channels, height, width = x.shape
# assert (num_channels % 4 == 0)
x = x.reshape(batchsize * num_channels // 2, 2, height * width)
x = F.transpose(x, (1, 0, 2))
x = x.reshape(2, -1, num_channels // 2, height, width)
return x[0], x[1]
def init_weights(self):
default_init_weights(self, scale=0.2)
|
[
"megengine.module.ReLU",
"megengine.module.BatchNorm2d",
"megengine.functional.transpose",
"megengine.module.Sequential",
"megengine.module.Conv2d"
] |
[((943, 969), 'megengine.module.Sequential', 'M.Sequential', (['*branch_main'], {}), '(*branch_main)\n', (955, 969), True, 'import megengine.module as M\n'), ((2111, 2136), 'megengine.functional.transpose', 'F.transpose', (['x', '(1, 0, 2)'], {}), '(x, (1, 0, 2))\n', (2122, 2136), True, 'import megengine.functional as F\n'), ((555, 603), 'megengine.module.Conv2d', 'M.Conv2d', (['inp', 'mid_channels', '(1)', '(1)', '(0)'], {'bias': '(False)'}), '(inp, mid_channels, 1, 1, 0, bias=False)\n', (563, 603), True, 'import megengine.module as M\n'), ((617, 625), 'megengine.module.ReLU', 'M.ReLU', ([], {}), '()\n', (623, 625), True, 'import megengine.module as M\n'), ((656, 750), 'megengine.module.Conv2d', 'M.Conv2d', (['mid_channels', 'mid_channels', 'ksize', 'stride', 'pad'], {'groups': 'mid_channels', 'bias': '(False)'}), '(mid_channels, mid_channels, ksize, stride, pad, groups=\n mid_channels, bias=False)\n', (664, 750), True, 'import megengine.module as M\n'), ((830, 882), 'megengine.module.Conv2d', 'M.Conv2d', (['mid_channels', 'outputs', '(1)', '(1)', '(0)'], {'bias': '(False)'}), '(mid_channels, outputs, 1, 1, 0, bias=False)\n', (838, 882), True, 'import megengine.module as M\n'), ((896, 904), 'megengine.module.ReLU', 'M.ReLU', ([], {}), '()\n', (902, 904), True, 'import megengine.module as M\n'), ((1352, 1378), 'megengine.module.Sequential', 'M.Sequential', (['*branch_proj'], {}), '(*branch_proj)\n', (1364, 1378), True, 'import megengine.module as M\n'), ((1060, 1122), 'megengine.module.Conv2d', 'M.Conv2d', (['inp', 'inp', 'ksize', 'stride', 'pad'], {'groups': 'inp', 'bias': '(False)'}), '(inp, inp, ksize, stride, pad, groups=inp, bias=False)\n', (1068, 1122), True, 'import megengine.module as M\n'), ((1140, 1158), 'megengine.module.BatchNorm2d', 'M.BatchNorm2d', (['inp'], {}), '(inp)\n', (1153, 1158), True, 'import megengine.module as M\n'), ((1204, 1243), 'megengine.module.Conv2d', 'M.Conv2d', (['inp', 'inp', '(1)', '(1)', '(0)'], {'bias': '(False)'}), '(inp, inp, 1, 1, 0, bias=False)\n', (1212, 1243), True, 'import megengine.module as M\n'), ((1261, 1279), 'megengine.module.BatchNorm2d', 'M.BatchNorm2d', (['inp'], {}), '(inp)\n', (1274, 1279), True, 'import megengine.module as M\n'), ((1297, 1305), 'megengine.module.ReLU', 'M.ReLU', ([], {}), '()\n', (1303, 1305), True, 'import megengine.module as M\n')]
|
import sys
import os
import click
from app import config
from sqlmodel import Session
from sqlmodel import create_engine
from app.models.server import Server
from sqlmodel import select
API_ENVIRONMENT = os.environ.get("API_ENVIRONMENT", "Testing")
settings = getattr(sys.modules[__name__].config, API_ENVIRONMENT)
engine = create_engine(settings.DATABASE_URI)
@click.group()
@click.pass_context
def main(ctx):  # click.pass_context supplies the Context as the first argument
pass
@main.command(name="settings")
def get_settings():
"""
Prints current API settings from $API_ENVIRONMENT.
"""
click.echo(settings)
@main.group(name="import")
def import_group():
pass
@import_group.command(name="catagories")
def import_catagories():
"""
    Import categories from a YAML file and attach matching servers to them.
"""
import yaml
from app.models.server import Catagory, ServerCatagoryLink
print("Looking for catagories.yml")
with Session(engine) as session:
with open("config.yml", "r") as stream:
catagories = yaml.safe_load(stream)
for name, data in catagories.get("catagories").items():
print(data)
_catagory = Catagory(
title = name,
meta_ref = name.lower().replace(" ", "-"),
color = f"#{data['color']}"
)
session.add(_catagory)
session.commit()
session.refresh(_catagory)
_query = select(Server).where(Server.domain_name.like(data['match']))
_result = session.exec(_query).all()
for server in _result:
_link = ServerCatagoryLink(
server_id = server.id,
catagory_id = _catagory.id
)
session.add(_link)
session.commit()
@import_group.command(name="csv")
def csv_file(file):
"""
Commands for importing a database.
"""
import csv
with Session(engine) as session:
with open(file, "r") as stream:
csv_reader = csv.DictReader(stream)
line_count = 0
for row in csv_reader:
if line_count == 0:
line_count += 1
_server = Server(
domain_name=row["Domain Name"],
domain_type=row["Domain Type"],
agency=row["Agency"],
organization=row["Organization"],
)
session.add(_server)
session.commit()
@import_group.command(name="file")
@click.argument("file")
def basic_file(file):
with Session(engine) as session:
with open(file, "r") as stream:
stream = stream.readlines()
servers = []
for row in stream:
servers.append(row.strip().lower())
for row in list(set(servers)):
session.add(Server(domain_name=row))
session.commit()
@main.group()
def tables():
"""
Commands for handling database tables.
"""
pass
@tables.command()
def drop():
"""
Forcefully remove all tables within the database.
"""
import sqlalchemy
from sqlalchemy import create_engine
from sqlalchemy import MetaData
from sqlalchemy import inspect
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
engine = create_engine(settings.DATABASE_URI)
meta = MetaData()
meta.reflect(bind=engine)
meta.drop_all(engine, checkfirst=False)
print("Dropped tables.")
@tables.command()
def create():
"""
Creates all tables within the API.
"""
from sqlmodel import SQLModel
from app.models.user import User, Log
from app.models.server import Server, ServerLog
SQLModel.metadata.create_all(engine)
print("Created tables.")
cli = click.CommandCollection(sources=[main])
if __name__ == "__main__":
cli()
|
[
"sqlmodel.select",
"sqlmodel.Session",
"sqlmodel.SQLModel.metadata.create_all"
] |
[((205, 249), 'os.environ.get', 'os.environ.get', (['"""API_ENVIRONMENT"""', '"""Testing"""'], {}), "('API_ENVIRONMENT', 'Testing')\n", (219, 249), False, 'import os\n'), ((326, 362), 'sqlalchemy.create_engine', 'create_engine', (['settings.DATABASE_URI'], {}), '(settings.DATABASE_URI)\n', (339, 362), False, 'from sqlalchemy import create_engine\n'), ((366, 379), 'click.group', 'click.group', ([], {}), '()\n', (377, 379), False, 'import click\n'), ((2575, 2597), 'click.argument', 'click.argument', (['"""file"""'], {}), "('file')\n", (2589, 2597), False, 'import click\n'), ((3878, 3917), 'click.CommandCollection', 'click.CommandCollection', ([], {'sources': '[main]'}), '(sources=[main])\n', (3901, 3917), False, 'import click\n'), ((549, 569), 'click.echo', 'click.echo', (['settings'], {}), '(settings)\n', (559, 569), False, 'import click\n'), ((3420, 3456), 'sqlalchemy.create_engine', 'create_engine', (['settings.DATABASE_URI'], {}), '(settings.DATABASE_URI)\n', (3433, 3456), False, 'from sqlalchemy import create_engine\n'), ((3468, 3478), 'sqlalchemy.MetaData', 'MetaData', ([], {}), '()\n', (3476, 3478), False, 'from sqlalchemy import MetaData\n'), ((3804, 3840), 'sqlmodel.SQLModel.metadata.create_all', 'SQLModel.metadata.create_all', (['engine'], {}), '(engine)\n', (3832, 3840), False, 'from sqlmodel import SQLModel\n'), ((880, 895), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (887, 895), False, 'from sqlmodel import Session\n'), ((1972, 1987), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (1979, 1987), False, 'from sqlmodel import Session\n'), ((2629, 2644), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (2636, 2644), False, 'from sqlmodel import Session\n'), ((981, 1003), 'yaml.safe_load', 'yaml.safe_load', (['stream'], {}), '(stream)\n', (995, 1003), False, 'import yaml\n'), ((2065, 2087), 'csv.DictReader', 'csv.DictReader', (['stream'], {}), '(stream)\n', (2079, 2087), False, 'import csv\n'), ((2249, 2379), 'app.models.server.Server', 'Server', ([], {'domain_name': "row['Domain Name']", 'domain_type': "row['Domain Type']", 'agency': "row['Agency']", 'organization': "row['Organization']"}), "(domain_name=row['Domain Name'], domain_type=row['Domain Type'],\n agency=row['Agency'], organization=row['Organization'])\n", (2255, 2379), False, 'from app.models.server import Server, ServerLog\n'), ((1465, 1503), 'app.models.server.Server.domain_name.like', 'Server.domain_name.like', (["data['match']"], {}), "(data['match'])\n", (1488, 1503), False, 'from app.models.server import Server, ServerLog\n'), ((1625, 1690), 'app.models.server.ServerCatagoryLink', 'ServerCatagoryLink', ([], {'server_id': 'server.id', 'catagory_id': '_catagory.id'}), '(server_id=server.id, catagory_id=_catagory.id)\n', (1643, 1690), False, 'from app.models.server import Catagory, ServerCatagoryLink\n'), ((2917, 2940), 'app.models.server.Server', 'Server', ([], {'domain_name': 'row'}), '(domain_name=row)\n', (2923, 2940), False, 'from app.models.server import Server, ServerLog\n'), ((1444, 1458), 'sqlmodel.select', 'select', (['Server'], {}), '(Server)\n', (1450, 1458), False, 'from sqlmodel import select\n')]
|
from typing import TYPE_CHECKING
from pydantic import validator
from sqlalchemy import Column, ForeignKey, Integer, String
from sqlmodel import Field, Relationship, SQLModel
if TYPE_CHECKING:
from .event import Event, EventList
from .participant import Participant, ParticipantList
class FeedbackBase(SQLModel):
presentation: int
content: int
interest: int
comments: str = Field(default="", nullable=False)
again: bool = Field(default=True, nullable=False)
@validator("presentation", "content", "interest")
def between_1_and_5(cls, value: int):
if value < 1 or value > 5:
raise ValueError("must be between 1 and 5 inclusive")
return value
class Feedback(FeedbackBase, table=True):
__tablename__ = "feedback"
participant_id: str = Field(
sa_column=Column(
String(),
ForeignKey("participants.id", ondelete="CASCADE"),
nullable=False,
primary_key=True,
)
)
participant: "Participant" = Relationship()
event_id: int = Field(
sa_column=Column(
Integer(),
ForeignKey("events.id", ondelete="CASCADE"),
nullable=False,
primary_key=True,
)
)
event: "Event" = Relationship(back_populates="feedback")
class FeedbackCreate(FeedbackBase):
pass
class FeedbackList(SQLModel):
participant: "ParticipantList"
presentation: int
content: int
interest: int
class FeedbackRead(FeedbackBase):
participant: "ParticipantList"
event: "EventList"
|
[
"sqlmodel.Field",
"sqlmodel.Relationship"
] |
[((402, 435), 'sqlmodel.Field', 'Field', ([], {'default': '""""""', 'nullable': '(False)'}), "(default='', nullable=False)\n", (407, 435), False, 'from sqlmodel import Field, Relationship, SQLModel\n'), ((454, 489), 'sqlmodel.Field', 'Field', ([], {'default': '(True)', 'nullable': '(False)'}), '(default=True, nullable=False)\n', (459, 489), False, 'from sqlmodel import Field, Relationship, SQLModel\n'), ((496, 544), 'pydantic.validator', 'validator', (['"""presentation"""', '"""content"""', '"""interest"""'], {}), "('presentation', 'content', 'interest')\n", (505, 544), False, 'from pydantic import validator\n'), ((1036, 1050), 'sqlmodel.Relationship', 'Relationship', ([], {}), '()\n', (1048, 1050), False, 'from sqlmodel import Field, Relationship, SQLModel\n'), ((1280, 1319), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""feedback"""'}), "(back_populates='feedback')\n", (1292, 1319), False, 'from sqlmodel import Field, Relationship, SQLModel\n'), ((856, 864), 'sqlalchemy.String', 'String', ([], {}), '()\n', (862, 864), False, 'from sqlalchemy import Column, ForeignKey, Integer, String\n'), ((878, 927), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""participants.id"""'], {'ondelete': '"""CASCADE"""'}), "('participants.id', ondelete='CASCADE')\n", (888, 927), False, 'from sqlalchemy import Column, ForeignKey, Integer, String\n'), ((1117, 1126), 'sqlalchemy.Integer', 'Integer', ([], {}), '()\n', (1124, 1126), False, 'from sqlalchemy import Column, ForeignKey, Integer, String\n'), ((1140, 1183), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""events.id"""'], {'ondelete': '"""CASCADE"""'}), "('events.id', ondelete='CASCADE')\n", (1150, 1183), False, 'from sqlalchemy import Column, ForeignKey, Integer, String\n')]
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
import megengine as mge
import megengine.module as M
from models.yolo_fpn import YOLOFPN
from models.yolo_head import YOLOXHead
from models.yolo_pafpn import YOLOPAFPN
from models.yolox import YOLOX
def build_yolox(name="yolox-s"):
num_classes = 80
# value meaning: depth, width
param_dict = {
"yolox-nano": (0.33, 0.25),
"yolox-tiny": (0.33, 0.375),
"yolox-s": (0.33, 0.50),
"yolox-m": (0.67, 0.75),
"yolox-l": (1.0, 1.0),
"yolox-x": (1.33, 1.25),
}
if name == "yolov3":
depth = 1.0
width = 1.0
backbone = YOLOFPN()
head = YOLOXHead(num_classes, width, in_channels=[128, 256, 512], act="lrelu")
model = YOLOX(backbone, head)
else:
assert name in param_dict
kwargs = {}
depth, width = param_dict[name]
if name == "yolox-nano":
kwargs["depthwise"] = True
in_channels = [256, 512, 1024]
backbone = YOLOPAFPN(depth, width, in_channels=in_channels, **kwargs)
head = YOLOXHead(num_classes, width, in_channels=in_channels, **kwargs)
model = YOLOX(backbone, head)
for m in model.modules():
if isinstance(m, M.BatchNorm2d):
m.eps = 1e-3
return model
def build_and_load(weight_file, name="yolox-s"):
model = build_yolox(name)
model_weights = mge.load(weight_file)
model.load_state_dict(model_weights, strict=False)
return model
|
[
"megengine.load"
] |
[((1412, 1433), 'megengine.load', 'mge.load', (['weight_file'], {}), '(weight_file)\n', (1420, 1433), True, 'import megengine as mge\n'), ((650, 659), 'models.yolo_fpn.YOLOFPN', 'YOLOFPN', ([], {}), '()\n', (657, 659), False, 'from models.yolo_fpn import YOLOFPN\n'), ((675, 746), 'models.yolo_head.YOLOXHead', 'YOLOXHead', (['num_classes', 'width'], {'in_channels': '[128, 256, 512]', 'act': '"""lrelu"""'}), "(num_classes, width, in_channels=[128, 256, 512], act='lrelu')\n", (684, 746), False, 'from models.yolo_head import YOLOXHead\n'), ((763, 784), 'models.yolox.YOLOX', 'YOLOX', (['backbone', 'head'], {}), '(backbone, head)\n', (768, 784), False, 'from models.yolox import YOLOX\n'), ((1019, 1077), 'models.yolo_pafpn.YOLOPAFPN', 'YOLOPAFPN', (['depth', 'width'], {'in_channels': 'in_channels'}), '(depth, width, in_channels=in_channels, **kwargs)\n', (1028, 1077), False, 'from models.yolo_pafpn import YOLOPAFPN\n'), ((1093, 1157), 'models.yolo_head.YOLOXHead', 'YOLOXHead', (['num_classes', 'width'], {'in_channels': 'in_channels'}), '(num_classes, width, in_channels=in_channels, **kwargs)\n', (1102, 1157), False, 'from models.yolo_head import YOLOXHead\n'), ((1174, 1195), 'models.yolox.YOLOX', 'YOLOX', (['backbone', 'head'], {}), '(backbone, head)\n', (1179, 1195), False, 'from models.yolox import YOLOX\n')]
|
from sqlmodel import SQLModel, create_engine, Session
from victor_api.config import settings
engine = create_engine(
url=settings.db.url,
echo=settings.db.echo,
connect_args=settings.db.connect_args
)
def get_session():
with Session(engine) as session:
yield session
def init_db():
SQLModel.metadata.create_all(engine)
|
[
"sqlmodel.Session",
"sqlmodel.SQLModel.metadata.create_all",
"sqlmodel.create_engine"
] |
[((104, 205), 'sqlmodel.create_engine', 'create_engine', ([], {'url': 'settings.db.url', 'echo': 'settings.db.echo', 'connect_args': 'settings.db.connect_args'}), '(url=settings.db.url, echo=settings.db.echo, connect_args=\n settings.db.connect_args)\n', (117, 205), False, 'from sqlmodel import SQLModel, create_engine, Session\n'), ((316, 352), 'sqlmodel.SQLModel.metadata.create_all', 'SQLModel.metadata.create_all', (['engine'], {}), '(engine)\n', (344, 352), False, 'from sqlmodel import SQLModel, create_engine, Session\n'), ((245, 260), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (252, 260), False, 'from sqlmodel import SQLModel, create_engine, Session\n')]
|
from fastapi import APIRouter, Depends
from ..utils import engine, get_session
from sqlmodel import Session, select, SQLModel, or_
from sqlalchemy.exc import NoResultFound
from ..models.user import User
from typing import Optional
from datetime import datetime
router = APIRouter(prefix="/api/users", tags=["user"])
session = Session(engine)
@router.post("/")
async def post_user(
user: User,
session: Session = Depends(get_session),
):
"""
Post a new user.
Parameters
----------
user : User
User that is to be added to the database.
session : Session
SQL session that is to be used to add the user.
Defaults to creating a dependency on the running SQL model session.
"""
statement = select(User).where(User.short_name == user.short_name)
try:
result = session.exec(statement).one()
return False
except NoResultFound:
session.add(user)
session.commit()
session.refresh(user)
return user
@router.get("/")
async def get_users(
session: Session = Depends(get_session),
is_active: bool = None,
short_name: str = None,
):
"""
Get list of user(s).
Parameters
----------
session : Session
SQL session that is to be used to get the users.
Defaults to creating a dependency on the running SQL model session.
is_active : bool
Status of users to be pulled.
short_name : str
Short name of user to be pulled.
"""
statement = select(User)
if is_active != None:
statement = (
select(User)
.where(User.is_active == is_active)
.order_by(User.start_date.desc())
)
result = session.exec(statement).all()
return result
@router.put("/{user_id}/")
async def update_user(
user_id: int,
is_active: Optional[bool] = None,
new_short_name: Optional[str] = None,
new_first_name: Optional[str] = None,
new_last_name: Optional[str] = None,
new_email: Optional[str] = None,
new_team_id: Optional[str] = None,
session: Session = Depends(get_session),
):
"""
Update a user.
Parameters
----------
user_id : int
ID of user to be updated.
is_active : Optional[bool]
Updated status of user.
    new_short_name : Optional[str]
        Updated short name of user.
    new_first_name : Optional[str]
        Updated first name of user.
    new_last_name : Optional[str]
        Updated last name of user.
    new_email : Optional[str]
        Updated email of user.
    new_team_id : Optional[str]
        Updated team id.
session : Session
SQL session that is to be used to update the user.
Defaults to creating a dependency on the running SQL model session.
"""
statement = select(User).where(User.id == user_id)
user_to_update = session.exec(statement).one()
if is_active != None:
user_to_update.is_active = is_active
if new_short_name != None:
user_to_update.short_name = new_short_name
if new_first_name != None:
user_to_update.first_name = new_first_name
if new_last_name != None:
user_to_update.last_name = new_last_name
if new_email != None:
user_to_update.email = new_email
if new_team_id != None:
user_to_update.team_id = new_team_id
user_to_update.updated_at = datetime.now()
session.add(user_to_update)
session.commit()
session.refresh(user_to_update)
return user_to_update
|
[
"sqlmodel.select",
"sqlmodel.Session"
] |
[((271, 316), 'fastapi.APIRouter', 'APIRouter', ([], {'prefix': '"""/api/users"""', 'tags': "['user']"}), "(prefix='/api/users', tags=['user'])\n", (280, 316), False, 'from fastapi import APIRouter, Depends\n'), ((327, 342), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (334, 342), False, 'from sqlmodel import Session, select, SQLModel, or_\n'), ((423, 443), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (430, 443), False, 'from fastapi import APIRouter, Depends\n'), ((1074, 1094), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (1081, 1094), False, 'from fastapi import APIRouter, Depends\n'), ((1519, 1531), 'sqlmodel.select', 'select', (['User'], {}), '(User)\n', (1525, 1531), False, 'from sqlmodel import Session, select, SQLModel, or_\n'), ((2102, 2122), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (2109, 2122), False, 'from fastapi import APIRouter, Depends\n'), ((3390, 3404), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3402, 3404), False, 'from datetime import datetime\n'), ((752, 764), 'sqlmodel.select', 'select', (['User'], {}), '(User)\n', (758, 764), False, 'from sqlmodel import Session, select, SQLModel, or_\n'), ((2814, 2826), 'sqlmodel.select', 'select', (['User'], {}), '(User)\n', (2820, 2826), False, 'from sqlmodel import Session, select, SQLModel, or_\n'), ((1592, 1604), 'sqlmodel.select', 'select', (['User'], {}), '(User)\n', (1598, 1604), False, 'from sqlmodel import Session, select, SQLModel, or_\n')]
|
import numpy as nm
from sfepy.terms.extmods import terms
from sfepy.terms.cache import DataCache
from sfepy.base.base import pause, debug
class FiniteStrainTLDataCache( DataCache ):
"""
Stores shared deformation-related data useful for the total Lagrangian
formulation of finite strain elasticity.
arguments:
- state : displacements
supported:
- deformation gradient F
- jacobian J = det( F )
- right Cauchy-Green deformation tensor C = F^T F in vector (symmetric)
storage
- 1st invariant of C : tr( C )
- 2nd invariant of C
- C^{-1} in vector (symmetric) storage
- Green strain E = 1/2 (C - I)
All data are computed together, no matter which one is requested!
"""
name = 'finite_strain_tl'
arg_types = ('state',)
def __init__( self, name, arg_names, history_sizes = None ):
keys = ['F', 'detF', 'C', 'trC', 'in2C', 'invC', 'E']
DataCache.__init__( self, name, arg_names, keys, history_sizes,
terms.dq_finite_strain_tl )
def init_data(self, key, ckey, term, **kwargs):
state, = self.get_args( **kwargs )
n_el, n_qp, dim = state.get_data_shapes(term.integral, ckey[-1])[:3]
sym = dim * (dim + 1) / 2
self.shapes = {
'F' : (n_el, n_qp, dim, dim),
'detF' : (n_el, n_qp, 1, 1),
'C' : (n_el, n_qp, sym, 1),
'trC' : (n_el, n_qp, 1, 1),
'in2C' : (n_el, n_qp, 1, 1),
'invC' : (n_el, n_qp, sym, 1),
'E' : (n_el, n_qp, sym, 1),
}
DataCache.init_datas( self, ckey, self.shapes )
def update(self, key, term, ih, **kwargs):
state, = self.get_args( **kwargs )
ap, vg = term.get_approximation(state)
ckey = self.get_key(term)
self.function( self.data['F'][ckey][ih],
self.data['detF'][ckey][ih],
self.data['C'][ckey][ih],
self.data['trC'][ckey][ih],
self.data['in2C'][ckey][ih],
self.data['invC'][ckey][ih],
self.data['E'][ckey][ih],
state(), 0, vg, ap.econn )
self.valid['F'][ckey] = True
self.valid['detF'][ckey] = True
self.valid['C'][ckey] = True
self.valid['trC'][ckey] = True
self.valid['in2C'][ckey] = True
self.valid['invC'][ckey] = True
self.valid['E'][ckey] = True
class FiniteStrainSurfaceTLDataCache(DataCache):
"""
Stores shared deformation-related data useful for the total Lagrangian
formulation of finite strain elasticity for surface integrals.
arguments:
- state : displacements
supported:
- deformation gradient F
- inverse deformation gradient F^{-1}
- jacobian J = det( F )
All data are computed together, no matter which one is requested!
"""
name = 'finite_strain_surface_tl'
arg_types = ('state', 'data_shape')
def __init__( self, name, arg_names, history_sizes = None ):
keys = ['F', 'detF', 'invF']
DataCache.__init__( self, name, arg_names, keys, history_sizes,
terms.dq_tl_finite_strain_surface )
def init_data(self, key, ckey, term, **kwargs):
state, data_shape = self.get_args( **kwargs )
n_fa, n_qp, dim, n_ep = data_shape
self.shapes = {
'F' : (n_fa, n_qp, dim, dim),
'invF' : (n_fa, n_qp, dim, dim),
'detF' : (n_fa, n_qp, 1, 1),
}
DataCache.init_datas( self, ckey, self.shapes )
def update(self, key, term, ih, **kwargs):
state = self.get_args(**kwargs)[0]
ap, sg = term.get_approximation(state)
sd = ap.surface_data[term.region.name]
ckey = self.get_key(term)
self.function( self.data['F'][ckey][ih],
self.data['detF'][ckey][ih],
self.data['invF'][ckey][ih],
state(), 0, sg, sd.fis, ap.econn )
self.valid['F'][ckey] = True
self.valid['detF'][ckey] = True
self.valid['invF'][ckey] = True
class FiniteStrainULDataCache( DataCache ):
"""
Stores shared deformation-related data useful for the updated Lagrangian
formulation of finite strain elasticity.
arguments:
- state(s) : displacements u(t), u(t-1)
supported:
- relative deformation gradient F
- jacobian J = det( F )
- left Cauchy-Green deformation tensor b = F F^T in vector (symmetric)
storage
- 1st invariant of b : tr( b )
- 2nd invariant of b
??? - Green strain E = 1/2 (C - I)
All data are computed together, no matter which one is requested!
"""
name = 'finite_strain_ul'
arg_types = ('state',)
def __init__( self, name, arg_names, history_sizes = None ):
keys = ['F', 'detF', 'B', 'trB', 'in2B', 'E']
DataCache.__init__( self, name, arg_names, keys, history_sizes,
terms.dq_finite_strain_ul )
def init_data(self, key, ckey, term, **kwargs):
state, = self.get_args( **kwargs )
n_el, n_qp, dim = state.get_data_shapes(term.integral, ckey[-1])[:3]
sym = dim * (dim + 1) / 2
self.shapes = {
'F' : (n_el, n_qp, dim, dim),
'detF' : (n_el, n_qp, 1, 1),
'B' : (n_el, n_qp, sym, 1),
'trB' : (n_el, n_qp, 1, 1),
'in2B' : (n_el, n_qp, 1, 1),
'E' : (n_el, n_qp, sym, 1),
}
DataCache.init_datas( self, ckey, self.shapes )
def update(self, key, term, ih, **kwargs):
state, = self.get_args( **kwargs )
ap, vg = term.get_approximation(state)
ckey = self.get_key(term)
self.function( self.data['F'][ckey][ih],
self.data['detF'][ckey][ih],
self.data['B'][ckey][ih],
self.data['trB'][ckey][ih],
self.data['in2B'][ckey][ih],
self.data['E'][ckey][ih],
state(), state(step = -1),
0, vg, ap.econn )
self.valid['F'][ckey] = True
self.valid['detF'][ckey] = True
self.valid['B'][ckey] = True
self.valid['trB'][ckey] = True
self.valid['in2B'][ckey] = True
self.valid['E'][ckey] = True
|
[
"sfepy.terms.cache.DataCache.__init__",
"sfepy.terms.cache.DataCache.init_datas"
] |
[((947, 1041), 'sfepy.terms.cache.DataCache.__init__', 'DataCache.__init__', (['self', 'name', 'arg_names', 'keys', 'history_sizes', 'terms.dq_finite_strain_tl'], {}), '(self, name, arg_names, keys, history_sizes, terms.\n dq_finite_strain_tl)\n', (965, 1041), False, 'from sfepy.terms.cache import DataCache\n'), ((1606, 1651), 'sfepy.terms.cache.DataCache.init_datas', 'DataCache.init_datas', (['self', 'ckey', 'self.shapes'], {}), '(self, ckey, self.shapes)\n', (1626, 1651), False, 'from sfepy.terms.cache import DataCache\n'), ((3148, 3250), 'sfepy.terms.cache.DataCache.__init__', 'DataCache.__init__', (['self', 'name', 'arg_names', 'keys', 'history_sizes', 'terms.dq_tl_finite_strain_surface'], {}), '(self, name, arg_names, keys, history_sizes, terms.\n dq_tl_finite_strain_surface)\n', (3166, 3250), False, 'from sfepy.terms.cache import DataCache\n'), ((3599, 3644), 'sfepy.terms.cache.DataCache.init_datas', 'DataCache.init_datas', (['self', 'ckey', 'self.shapes'], {}), '(self, ckey, self.shapes)\n', (3619, 3644), False, 'from sfepy.terms.cache import DataCache\n'), ((4991, 5085), 'sfepy.terms.cache.DataCache.__init__', 'DataCache.__init__', (['self', 'name', 'arg_names', 'keys', 'history_sizes', 'terms.dq_finite_strain_ul'], {}), '(self, name, arg_names, keys, history_sizes, terms.\n dq_finite_strain_ul)\n', (5009, 5085), False, 'from sfepy.terms.cache import DataCache\n'), ((5607, 5652), 'sfepy.terms.cache.DataCache.init_datas', 'DataCache.init_datas', (['self', 'ckey', 'self.shapes'], {}), '(self, ckey, self.shapes)\n', (5627, 5652), False, 'from sfepy.terms.cache import DataCache\n')]
|
from typing import List, Optional
from sqlmodel import Field, Relationship, Session, SQLModel, create_engine, select
class Team(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str = Field(index=True)
headquarters: str
heroes: List["Hero"] = Relationship()
class Hero(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str = Field(index=True)
secret_name: str
age: Optional[int] = Field(default=None, index=True)
team_id: Optional[int] = Field(default=None, foreign_key="team.id")
team: Optional[Team] = Relationship()
sqlite_file_name = "database.db"
sqlite_url = f"sqlite:///{sqlite_file_name}"
engine = create_engine(sqlite_url, echo=True)
def create_db_and_tables():
SQLModel.metadata.create_all(engine)
def create_heroes():
with Session(engine) as session:
team_preventers = Team(name="Preventers", headquarters="Sharp Tower")
team_z_force = Team(name="Z-Force", headquarters="Sister Margaret’s Bar")
hero_deadpond = Hero(
name="Deadpond", secret_name="<NAME>", team=team_z_force
)
hero_rusty_man = Hero(
name="Rusty-Man", secret_name="<NAME>", age=48, team=team_preventers
)
hero_spider_boy = Hero(name="Spider-Boy", secret_name="<NAME>")
session.add(hero_deadpond)
session.add(hero_rusty_man)
session.add(hero_spider_boy)
session.commit()
session.refresh(hero_deadpond)
session.refresh(hero_rusty_man)
session.refresh(hero_spider_boy)
print("Created hero:", hero_deadpond)
print("Created hero:", hero_rusty_man)
print("Created hero:", hero_spider_boy)
hero_spider_boy.team = team_preventers
session.add(hero_spider_boy)
session.commit()
session.refresh(hero_spider_boy)
print("Updated hero:", hero_spider_boy)
hero_black_lion = Hero(name="<NAME>", secret_name="<NAME>", age=35)
hero_sure_e = Hero(name="<NAME>", secret_name="Sure-E")
team_wakaland = Team(
name="Wakaland",
headquarters="Wakaland Capital City",
heroes=[hero_black_lion, hero_sure_e],
)
session.add(team_wakaland)
session.commit()
session.refresh(team_wakaland)
print("Team Wakaland:", team_wakaland)
hero_tarantula = Hero(name="Tarantula", secret_name="<NAME>", age=32)
hero_dr_weird = Hero(name="<NAME>", secret_name="<NAME>", age=36)
hero_cap = Hero(
name="Captain North America", secret_name="<NAME>", age=93
)
team_preventers.heroes.append(hero_tarantula)
team_preventers.heroes.append(hero_dr_weird)
team_preventers.heroes.append(hero_cap)
session.add(team_preventers)
session.commit()
session.refresh(hero_tarantula)
session.refresh(hero_dr_weird)
session.refresh(hero_cap)
print("Preventers new hero:", hero_tarantula)
print("Preventers new hero:", hero_dr_weird)
print("Preventers new hero:", hero_cap)
def select_heroes():
with Session(engine) as session:
statement = select(Team).where(Team.name == "Preventers")
result = session.exec(statement)
team_preventers = result.one()
print("Preventers heroes:", team_preventers.heroes)
def update_heroes():
with Session(engine) as session:
hero_spider_boy = session.exec(
select(Hero).where(Hero.name == "Spider-Boy")
).one()
preventers_team = session.exec(
select(Team).where(Team.name == "Preventers")
).one()
print("Hero Spider-Boy:", hero_spider_boy)
print("Preventers Team:", preventers_team)
print("Preventers Team Heroes:", preventers_team.heroes)
hero_spider_boy.team = None
print("Spider-Boy without team:", hero_spider_boy)
print("Preventers Team Heroes again:", preventers_team.heroes)
session.add(hero_spider_boy)
session.commit()
print("After committing")
session.refresh(hero_spider_boy)
print("Spider-Boy after commit:", hero_spider_boy)
print("Preventers Team Heroes after commit:", preventers_team.heroes)
def main():
create_db_and_tables()
create_heroes()
select_heroes()
update_heroes()
if __name__ == "__main__":
main()
|
[
"sqlmodel.Relationship",
"sqlmodel.create_engine",
"sqlmodel.Session",
"sqlmodel.SQLModel.metadata.create_all",
"sqlmodel.Field",
"sqlmodel.select"
] |
[((730, 766), 'sqlmodel.create_engine', 'create_engine', (['sqlite_url'], {'echo': '(True)'}), '(sqlite_url, echo=True)\n', (743, 766), False, 'from sqlmodel import Field, Relationship, Session, SQLModel, create_engine, select\n'), ((178, 215), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (183, 215), False, 'from sqlmodel import Field, Relationship, Session, SQLModel, create_engine, select\n'), ((232, 249), 'sqlmodel.Field', 'Field', ([], {'index': '(True)'}), '(index=True)\n', (237, 249), False, 'from sqlmodel import Field, Relationship, Session, SQLModel, create_engine, select\n'), ((300, 314), 'sqlmodel.Relationship', 'Relationship', ([], {}), '()\n', (312, 314), False, 'from sqlmodel import Field, Relationship, Session, SQLModel, create_engine, select\n'), ((375, 412), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (380, 412), False, 'from sqlmodel import Field, Relationship, Session, SQLModel, create_engine, select\n'), ((429, 446), 'sqlmodel.Field', 'Field', ([], {'index': '(True)'}), '(index=True)\n', (434, 446), False, 'from sqlmodel import Field, Relationship, Session, SQLModel, create_engine, select\n'), ((493, 524), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'index': '(True)'}), '(default=None, index=True)\n', (498, 524), False, 'from sqlmodel import Field, Relationship, Session, SQLModel, create_engine, select\n'), ((555, 597), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'foreign_key': '"""team.id"""'}), "(default=None, foreign_key='team.id')\n", (560, 597), False, 'from sqlmodel import Field, Relationship, Session, SQLModel, create_engine, select\n'), ((625, 639), 'sqlmodel.Relationship', 'Relationship', ([], {}), '()\n', (637, 639), False, 'from sqlmodel import Field, Relationship, Session, SQLModel, create_engine, select\n'), ((801, 837), 'sqlmodel.SQLModel.metadata.create_all', 'SQLModel.metadata.create_all', (['engine'], {}), '(engine)\n', (829, 837), False, 'from sqlmodel import Field, Relationship, Session, SQLModel, create_engine, select\n'), ((870, 885), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (877, 885), False, 'from sqlmodel import Field, Relationship, Session, SQLModel, create_engine, select\n'), ((3191, 3206), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (3198, 3206), False, 'from sqlmodel import Field, Relationship, Session, SQLModel, create_engine, select\n'), ((3458, 3473), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (3465, 3473), False, 'from sqlmodel import Field, Relationship, Session, SQLModel, create_engine, select\n'), ((3239, 3251), 'sqlmodel.select', 'select', (['Team'], {}), '(Team)\n', (3245, 3251), False, 'from sqlmodel import Field, Relationship, Session, SQLModel, create_engine, select\n'), ((3538, 3550), 'sqlmodel.select', 'select', (['Hero'], {}), '(Hero)\n', (3544, 3550), False, 'from sqlmodel import Field, Relationship, Session, SQLModel, create_engine, select\n'), ((3653, 3665), 'sqlmodel.select', 'select', (['Team'], {}), '(Team)\n', (3659, 3665), False, 'from sqlmodel import Field, Relationship, Session, SQLModel, create_engine, select\n')]
|
from __future__ import absolute_import
import os.path as op
import shutil
import numpy as nm
from sfepy.base.base import ordered_iteritems, output, set_defaults, assert_
from sfepy.base.base import Struct
from sfepy.homogenization.engine import HomogenizationEngine
from sfepy.homogenization.homogen_app import HomogenizationApp
from sfepy.homogenization.coefficients import Coefficients
from sfepy.homogenization.coefs_base import CoefDummy
from sfepy.applications import PDESolverApp
from sfepy.base.plotutils import plt
import six
from six.moves import range
def try_set_defaults(obj, attr, defaults, recur=False):
try:
values = getattr(obj, attr)
except:
values = defaults
else:
if recur and isinstance(values, dict):
for key, val in six.iteritems(values):
set_defaults(val, defaults)
else:
set_defaults(values, defaults)
return values
def save_raw_bg_logs(filename, logs):
"""
Save raw band gaps `logs` into the `filename` file.
"""
out = {}
iranges = nm.cumsum([0] + [len(ii) for ii in logs.freqs])
out['iranges'] = iranges
for key, log in ordered_iteritems(logs.to_dict()):
out[key] = nm.concatenate(log, axis=0)
with open(filename, 'w') as fd:
nm.savez(fd, **out)
def transform_plot_data(datas, plot_transform, conf):
if plot_transform is not None:
fun = conf.get_function(plot_transform[0])
dmin, dmax = 1e+10, -1e+10
tdatas = []
for data in datas:
tdata = data.copy()
if plot_transform is not None:
tdata = fun(tdata, *plot_transform[1:])
dmin = min(dmin, nm.nanmin(tdata))
dmax = max(dmax, nm.nanmax(tdata))
tdatas.append(tdata)
dmin, dmax = min(dmax - 1e-8, dmin), max(dmin + 1e-8, dmax)
return (dmin, dmax), tdatas
def plot_eigs(fig_num, plot_rsc, plot_labels, valid, freq_range, plot_range,
show=False, clear=False, new_axes=False):
"""
Plot resonance/eigen-frequencies.
`valid` must correspond to `freq_range`
resonances : red
masked resonances: dotted red
"""
if plt is None: return
assert_(len(valid) == len(freq_range))
fig = plt.figure(fig_num)
if clear:
fig.clf()
if new_axes:
ax = fig.add_subplot(111)
else:
ax = fig.gca()
l0 = l1 = None
for ii, f in enumerate(freq_range):
if valid[ii]:
l0 = ax.plot([f, f], plot_range, **plot_rsc['resonance'])[0]
else:
l1 = ax.plot([f, f], plot_range, **plot_rsc['masked'])[0]
if l0:
l0.set_label(plot_labels['resonance'])
if l1:
l1.set_label(plot_labels['masked'])
if new_axes:
ax.set_xlim([freq_range[0], freq_range[-1]])
ax.set_ylim(plot_range)
if show:
plt.show()
return fig
def plot_logs(fig_num, plot_rsc, plot_labels,
freqs, logs, valid, freq_range, plot_range,
draw_eigs=True, show_legend=True, show=False,
clear=False, new_axes=False):
"""
Plot logs of min/middle/max eigs of a mass matrix.
"""
if plt is None: return
fig = plt.figure(fig_num)
if clear:
fig.clf()
if new_axes:
ax = fig.add_subplot(111)
else:
ax = fig.gca()
if draw_eigs:
plot_eigs(fig_num, plot_rsc, plot_labels, valid, freq_range,
plot_range)
for ii, log in enumerate(logs):
l1 = ax.plot(freqs[ii], log[:, -1], **plot_rsc['eig_max'])
if log.shape[1] >= 2:
l2 = ax.plot(freqs[ii], log[:, 0], **plot_rsc['eig_min'])
else:
l2 = None
if log.shape[1] == 3:
l3 = ax.plot(freqs[ii], log[:, 1], **plot_rsc['eig_mid'])
else:
l3 = None
l1[0].set_label(plot_labels['eig_max'])
if l2:
l2[0].set_label(plot_labels['eig_min'])
if l3:
l3[0].set_label(plot_labels['eig_mid'])
fmin, fmax = freqs[0][0], freqs[-1][-1]
ax.plot([fmin, fmax], [0, 0], **plot_rsc['x_axis'])
ax.set_xlabel(plot_labels['x_axis'])
ax.set_ylabel(plot_labels['y_axis'])
if new_axes:
ax.set_xlim([fmin, fmax])
ax.set_ylim(plot_range)
if show_legend:
ax.legend()
if show:
plt.show()
return fig
def plot_gap(ax, ranges, kind, kind_desc, plot_range, plot_rsc):
"""
Plot single band gap frequency ranges as rectangles.
"""
def draw_rect(ax, x, y, rsc):
ax.fill(nm.asarray(x)[[0,1,1,0]],
nm.asarray(y)[[0,0,1,1]],
**rsc)
# Colors.
strong = plot_rsc['strong_gap']
weak = plot_rsc['weak_gap']
propagation = plot_rsc['propagation']
if kind == 'p':
draw_rect(ax, ranges[0], plot_range, propagation)
elif kind == 'w':
draw_rect(ax, ranges[0], plot_range, weak)
elif kind == 'wp':
draw_rect(ax, ranges[0], plot_range, weak)
draw_rect(ax, ranges[1], plot_range, propagation)
elif kind == 's':
draw_rect(ax, ranges[0], plot_range, strong)
elif kind == 'sw':
draw_rect(ax, ranges[0], plot_range, strong)
draw_rect(ax, ranges[1], plot_range, weak)
elif kind == 'swp':
draw_rect(ax, ranges[0], plot_range, strong)
draw_rect(ax, ranges[1], plot_range, weak)
draw_rect(ax, ranges[2], plot_range, propagation)
elif kind == 'is':
draw_rect(ax, ranges[0], plot_range, strong)
elif kind == 'iw':
draw_rect(ax, ranges[0], plot_range, weak)
else:
raise ValueError('unknown band gap kind! (%s)' % kind)
def plot_gaps(fig_num, plot_rsc, gaps, kinds, gap_ranges, freq_range,
plot_range, show=False, clear=False, new_axes=False):
"""
Plot band gaps as rectangles.
"""
if plt is None: return
fig = plt.figure(fig_num)
if clear:
fig.clf()
if new_axes:
ax = fig.add_subplot(111)
else:
ax = fig.gca()
for ii in range(len(freq_range) - 1):
f0, f1 = freq_range[[ii, ii+1]]
gap = gaps[ii]
ranges = gap_ranges[ii]
if isinstance(gap, list):
for ig, (gmin, gmax) in enumerate(gap):
kind, kind_desc = kinds[ii][ig]
plot_gap(ax, ranges[ig], kind, kind_desc, plot_range, plot_rsc)
output(ii, gmin[0], gmax[0], '%.8f' % f0, '%.8f' % f1)
output(' -> %s\n %s' %(kind_desc, ranges[ig]))
else:
gmin, gmax = gap
kind, kind_desc = kinds[ii]
plot_gap(ax, ranges, kind, kind_desc, plot_range, plot_rsc)
output(ii, gmin[0], gmax[0], '%.8f' % f0, '%.8f' % f1)
output(' -> %s\n %s' %(kind_desc, ranges))
if new_axes:
ax.set_xlim([freq_range[0], freq_range[-1]])
ax.set_ylim(plot_range)
if show:
plt.show()
return fig
def _get_fig_name(output_dir, fig_name, key, common, fig_suffix):
"""
Construct the complete name of figure file.
"""
name = key.replace(common, '')
if name and (not name.startswith('_')):
name = '_' + name
fig_name = fig_name + name + fig_suffix
return op.join(output_dir, fig_name)
class AcousticBandGapsApp(HomogenizationApp):
"""
Application for computing acoustic band gaps.
"""
@staticmethod
def process_options(options):
"""
Application options setup. Sets default values for missing
non-compulsory options.
"""
get = options.get
default_plot_options = {'show' : True,'legend' : False,}
aux = {
'resonance' : 'eigenfrequencies',
'masked' : 'masked eigenfrequencies',
'eig_min' : r'min eig($M^*$)',
'eig_mid' : r'mid eig($M^*$)',
'eig_max' : r'max eig($M^*$)',
'x_axis' : r'$\sqrt{\lambda}$, $\omega$',
'y_axis' : r'eigenvalues of mass matrix $M^*$',
}
plot_labels = try_set_defaults(options, 'plot_labels', aux, recur=True)
aux = {
'resonance' : 'eigenfrequencies',
'masked' : 'masked eigenfrequencies',
'eig_min' : r'$\kappa$(min)',
'eig_mid' : r'$\kappa$(mid)',
'eig_max' : r'$\kappa$(max)',
'x_axis' : r'$\sqrt{\lambda}$, $\omega$',
'y_axis' : 'polarization angles',
}
plot_labels_angle = try_set_defaults(options, 'plot_labels_angle', aux)
aux = {
'resonance' : 'eigenfrequencies',
'masked' : 'masked eigenfrequencies',
'eig_min' : r'wave number (min)',
'eig_mid' : r'wave number (mid)',
'eig_max' : r'wave number (max)',
'x_axis' : r'$\sqrt{\lambda}$, $\omega$',
'y_axis' : 'wave numbers',
}
plot_labels_wave = try_set_defaults(options, 'plot_labels_wave', aux)
plot_rsc = {
'resonance' : {'linewidth' : 0.5, 'color' : 'r', 'linestyle' : '-'},
'masked' : {'linewidth' : 0.5, 'color' : 'r', 'linestyle' : ':'},
'x_axis' : {'linewidth' : 0.5, 'color' : 'k', 'linestyle' : '--'},
'eig_min' : {'linewidth' : 2.0, 'color' : (0.0, 0.0, 1.0),
'linestyle' : ':' },
'eig_mid' : {'linewidth' : 2.0, 'color' : (0.0, 0.0, 0.8),
'linestyle' : '--' },
'eig_max' : {'linewidth' : 2.0, 'color' : (0.0, 0.0, 0.6),
'linestyle' : '-' },
'strong_gap' : {'linewidth' : 0, 'facecolor' : (0.2, 0.4, 0.2)},
'weak_gap' : {'linewidth' : 0, 'facecolor' : (0.6, 0.8, 0.6)},
'propagation' : {'linewidth' : 0, 'facecolor' : (1, 1, 1)},
'params' : {'axes.labelsize': 'x-large',
'font.size': 14,
'legend.fontsize': 'large',
'legend.loc': 'best',
'xtick.labelsize': 'large',
'ytick.labelsize': 'large',
'text.usetex': True},
}
plot_rsc = try_set_defaults(options, 'plot_rsc', plot_rsc)
return Struct(incident_wave_dir=get('incident_wave_dir', None),
plot_transform=get('plot_transform', None),
plot_transform_wave=get('plot_transform_wave', None),
plot_transform_angle=get('plot_transform_angle', None),
plot_options=get('plot_options', default_plot_options),
fig_name=get('fig_name', None),
fig_name_wave=get('fig_name_wave', None),
fig_name_angle=get('fig_name_angle', None),
fig_suffix=get('fig_suffix', '.pdf'),
plot_labels=plot_labels,
plot_labels_angle=plot_labels_angle,
plot_labels_wave=plot_labels_wave,
plot_rsc=plot_rsc)
@staticmethod
def process_options_pv(options):
"""
Application options setup for phase velocity computation. Sets default
values for missing non-compulsory options.
"""
get = options.get
incident_wave_dir=get('incident_wave_dir', None,
'missing "incident_wave_dir" in options!')
return Struct(incident_wave_dir=incident_wave_dir)
def __init__(self, conf, options, output_prefix, **kwargs):
PDESolverApp.__init__(self, conf, options, output_prefix,
init_equations=False)
self.setup_options()
if conf._filename:
output_dir = self.problem.output_dir
shutil.copyfile(conf._filename,
op.join(output_dir, op.basename(conf._filename)))
def setup_options(self):
HomogenizationApp.setup_options(self)
if self.options.phase_velocity:
process_options = AcousticBandGapsApp.process_options_pv
else:
process_options = AcousticBandGapsApp.process_options
self.app_options += process_options(self.conf.options)
def call(self):
"""
        Construct and call the homogenization engine according to options.
"""
options = self.options
opts = self.app_options
conf = self.problem.conf
coefs_name = opts.coefs
coef_info = conf.get(opts.coefs, None,
'missing "%s" in problem description!'
% opts.coefs)
if options.detect_band_gaps:
# Compute band gaps coefficients and data.
keys = [key for key in coef_info if key.startswith('band_gaps')]
elif options.analyze_dispersion or options.phase_velocity:
# Insert incident wave direction to coefficients that need it.
for key, val in six.iteritems(coef_info):
coef_opts = val.get('options', None)
if coef_opts is None: continue
if (('incident_wave_dir' in coef_opts)
and (coef_opts['incident_wave_dir'] is None)):
coef_opts['incident_wave_dir'] = opts.incident_wave_dir
if options.analyze_dispersion:
# Compute dispersion coefficients and data.
keys = [key for key in coef_info
if key.startswith('dispersion')
or key.startswith('polarization_angles')]
else:
# Compute phase velocity and its requirements.
keys = [key for key in coef_info
if key.startswith('phase_velocity')]
else:
# Compute only the eigenvalue problems.
names = [req for req in conf.get(opts.requirements, [''])
if req.startswith('evp')]
coefs = {'dummy' : {'requires' : names,
'class' : CoefDummy,}}
conf.coefs_dummy = coefs
coefs_name = 'coefs_dummy'
keys = ['dummy']
he_options = Struct(coefs=coefs_name, requirements=opts.requirements,
compute_only=keys,
post_process_hook=self.post_process_hook,
multiprocessing=False)
volumes = {}
if hasattr(opts, 'volumes') and (opts.volumes is not None):
volumes.update(opts.volumes)
elif hasattr(opts, 'volume') and (opts.volume is not None):
volumes['total'] = opts.volume
else:
volumes['total'] = 1.0
he = HomogenizationEngine(self.problem, options,
app_options=he_options,
volumes=volumes)
coefs = he()
coefs = Coefficients(**coefs.to_dict())
coefs_filename = op.join(opts.output_dir, opts.coefs_filename)
coefs.to_file_txt(coefs_filename + '.txt',
opts.tex_names,
opts.float_format)
bg_keys = [key for key in coefs.to_dict()
if key.startswith('band_gaps')
or key.startswith('dispersion')]
for ii, key in enumerate(bg_keys):
bg = coefs.get(key)
log_save_name = bg.get('log_save_name', None)
if log_save_name is not None:
filename = op.join(self.problem.output_dir, log_save_name)
bg.save_log(filename, opts.float_format, bg)
raw_log_save_name = bg.get('raw_log_save_name', None)
if raw_log_save_name is not None:
filename = op.join(self.problem.output_dir, raw_log_save_name)
save_raw_bg_logs(filename, bg.logs)
if options.plot:
if options.detect_band_gaps:
self.plot_band_gaps(coefs)
elif options.analyze_dispersion:
self.plot_dispersion(coefs)
elif options.phase_velocity:
keys = [key for key in coefs.to_dict()
if key.startswith('phase_velocity')]
for key in keys:
output('%s:' % key, coefs.get(key))
return coefs
def plot_band_gaps(self, coefs):
opts = self.app_options
bg_keys = [key for key in coefs.to_dict()
if key.startswith('band_gaps')]
plot_opts = opts.plot_options
plot_rsc = opts.plot_rsc
plt.rcParams.update(plot_rsc['params'])
for ii, key in enumerate(bg_keys):
bg = coefs.get(key)
plot_labels = opts.plot_labels.get(key, opts.plot_labels)
plot_range, teigs = transform_plot_data(bg.logs.eigs,
opts.plot_transform,
self.conf)
fig = plot_gaps(ii, plot_rsc, bg.gaps, bg.kinds, bg.gap_ranges,
bg.freq_range_margins, plot_range,
clear=True)
fig = plot_logs(ii, plot_rsc, plot_labels, bg.logs.freqs, teigs,
bg.valid[bg.eig_range],
bg.freq_range_initial,
plot_range,
show_legend=plot_opts['legend'],
new_axes=True)
plt.tight_layout()
if opts.fig_name is not None:
fig_name = _get_fig_name(self.problem.output_dir, opts.fig_name,
key, 'band_gaps', opts.fig_suffix)
fig.savefig(fig_name)
if plot_opts['show']:
plt.show()
def plot_dispersion(self, coefs):
opts = self.app_options
bg_keys = [key for key in coefs.to_dict()
if key.startswith('dispersion')]
plot_rsc = opts.plot_rsc
plot_opts = opts.plot_options
plt.rcParams.update(plot_rsc['params'])
plot_labels = opts.plot_labels_angle
for ii, key in enumerate(bg_keys):
pas_key = key.replace('dispersion', 'polarization_angles')
pas = coefs.get(pas_key)
aux = transform_plot_data(pas,
opts.plot_transform_angle,
self.conf)
plot_range, pas = aux
bg = coefs.get(key)
fig = plot_gaps(1, plot_rsc, bg.gaps, bg.kinds, bg.gap_ranges,
bg.freq_range_margins, plot_range,
clear=True)
fig = plot_logs(1, plot_rsc, plot_labels, bg.logs.freqs, pas,
bg.valid[bg.eig_range],
bg.freq_range_initial,
plot_range,
show_legend=plot_opts['legend'],
new_axes=True)
plt.tight_layout()
fig_name = opts.fig_name_angle
if fig_name is not None:
fig_name = _get_fig_name(self.problem.output_dir, fig_name,
key, 'dispersion', opts.fig_suffix)
fig.savefig(fig_name)
aux = transform_plot_data(bg.logs.eigs,
opts.plot_transform_wave,
self.conf)
plot_range, teigs = aux
plot_labels = opts.plot_labels_wave
fig = plot_gaps(2, plot_rsc, bg.gaps, bg.kinds, bg.gap_ranges,
bg.freq_range_margins, plot_range,
clear=True)
fig = plot_logs(2, plot_rsc, plot_labels, bg.logs.freqs, teigs,
bg.valid[bg.eig_range],
bg.freq_range_initial,
plot_range,
show_legend=plot_opts['legend'],
new_axes=True)
plt.tight_layout()
fig_name = opts.fig_name_wave
if fig_name is not None:
fig_name = _get_fig_name(self.problem.output_dir, fig_name,
key, 'dispersion', opts.fig_suffix)
fig.savefig(fig_name)
if plot_opts['show']:
plt.show()
|
[
"sfepy.base.base.set_defaults",
"sfepy.applications.PDESolverApp.__init__",
"sfepy.base.base.output",
"sfepy.base.base.Struct",
"sfepy.homogenization.homogen_app.HomogenizationApp.setup_options",
"sfepy.base.plotutils.plt.rcParams.update",
"sfepy.homogenization.engine.HomogenizationEngine",
"sfepy.base.plotutils.plt.tight_layout",
"sfepy.base.plotutils.plt.show",
"sfepy.base.plotutils.plt.figure"
] |
[((2230, 2249), 'sfepy.base.plotutils.plt.figure', 'plt.figure', (['fig_num'], {}), '(fig_num)\n', (2240, 2249), False, 'from sfepy.base.plotutils import plt\n'), ((3188, 3207), 'sfepy.base.plotutils.plt.figure', 'plt.figure', (['fig_num'], {}), '(fig_num)\n', (3198, 3207), False, 'from sfepy.base.plotutils import plt\n'), ((5869, 5888), 'sfepy.base.plotutils.plt.figure', 'plt.figure', (['fig_num'], {}), '(fig_num)\n', (5879, 5888), False, 'from sfepy.base.plotutils import plt\n'), ((7219, 7248), 'os.path.join', 'op.join', (['output_dir', 'fig_name'], {}), '(output_dir, fig_name)\n', (7226, 7248), True, 'import os.path as op\n'), ((1225, 1252), 'numpy.concatenate', 'nm.concatenate', (['log'], {'axis': '(0)'}), '(log, axis=0)\n', (1239, 1252), True, 'import numpy as nm\n'), ((1298, 1317), 'numpy.savez', 'nm.savez', (['fd'], {}), '(fd, **out)\n', (1306, 1317), True, 'import numpy as nm\n'), ((2844, 2854), 'sfepy.base.plotutils.plt.show', 'plt.show', ([], {}), '()\n', (2852, 2854), False, 'from sfepy.base.plotutils import plt\n'), ((4314, 4324), 'sfepy.base.plotutils.plt.show', 'plt.show', ([], {}), '()\n', (4322, 4324), False, 'from sfepy.base.plotutils import plt\n'), ((6901, 6911), 'sfepy.base.plotutils.plt.show', 'plt.show', ([], {}), '()\n', (6909, 6911), False, 'from sfepy.base.plotutils import plt\n'), ((11399, 11442), 'sfepy.base.base.Struct', 'Struct', ([], {'incident_wave_dir': 'incident_wave_dir'}), '(incident_wave_dir=incident_wave_dir)\n', (11405, 11442), False, 'from sfepy.base.base import Struct\n'), ((11516, 11595), 'sfepy.applications.PDESolverApp.__init__', 'PDESolverApp.__init__', (['self', 'conf', 'options', 'output_prefix'], {'init_equations': '(False)'}), '(self, conf, options, output_prefix, init_equations=False)\n', (11537, 11595), False, 'from sfepy.applications import PDESolverApp\n'), ((11893, 11930), 'sfepy.homogenization.homogen_app.HomogenizationApp.setup_options', 'HomogenizationApp.setup_options', (['self'], {}), '(self)\n', (11924, 11930), False, 'from sfepy.homogenization.homogen_app import HomogenizationApp\n'), ((14142, 14286), 'sfepy.base.base.Struct', 'Struct', ([], {'coefs': 'coefs_name', 'requirements': 'opts.requirements', 'compute_only': 'keys', 'post_process_hook': 'self.post_process_hook', 'multiprocessing': '(False)'}), '(coefs=coefs_name, requirements=opts.requirements, compute_only=keys,\n post_process_hook=self.post_process_hook, multiprocessing=False)\n', (14148, 14286), False, 'from sfepy.base.base import Struct\n'), ((14672, 14761), 'sfepy.homogenization.engine.HomogenizationEngine', 'HomogenizationEngine', (['self.problem', 'options'], {'app_options': 'he_options', 'volumes': 'volumes'}), '(self.problem, options, app_options=he_options, volumes\n =volumes)\n', (14692, 14761), False, 'from sfepy.homogenization.engine import HomogenizationEngine\n'), ((14921, 14966), 'os.path.join', 'op.join', (['opts.output_dir', 'opts.coefs_filename'], {}), '(opts.output_dir, opts.coefs_filename)\n', (14928, 14966), True, 'import os.path as op\n'), ((16516, 16555), 'sfepy.base.plotutils.plt.rcParams.update', 'plt.rcParams.update', (["plot_rsc['params']"], {}), "(plot_rsc['params'])\n", (16535, 16555), False, 'from sfepy.base.plotutils import plt\n'), ((17988, 18027), 'sfepy.base.plotutils.plt.rcParams.update', 'plt.rcParams.update', (["plot_rsc['params']"], {}), "(plot_rsc['params'])\n", (18007, 18027), False, 'from sfepy.base.plotutils import plt\n'), ((791, 812), 'six.iteritems', 'six.iteritems', (['values'], {}), '(values)\n', (804, 812), False, 'import 
six\n'), ((885, 915), 'sfepy.base.base.set_defaults', 'set_defaults', (['values', 'defaults'], {}), '(values, defaults)\n', (897, 915), False, 'from sfepy.base.base import ordered_iteritems, output, set_defaults, assert_\n'), ((1674, 1690), 'numpy.nanmin', 'nm.nanmin', (['tdata'], {}), '(tdata)\n', (1683, 1690), True, 'import numpy as nm\n'), ((1717, 1733), 'numpy.nanmax', 'nm.nanmax', (['tdata'], {}), '(tdata)\n', (1726, 1733), True, 'import numpy as nm\n'), ((6663, 6717), 'sfepy.base.base.output', 'output', (['ii', 'gmin[0]', 'gmax[0]', "('%.8f' % f0)", "('%.8f' % f1)"], {}), "(ii, gmin[0], gmax[0], '%.8f' % f0, '%.8f' % f1)\n", (6669, 6717), False, 'from sfepy.base.base import ordered_iteritems, output, set_defaults, assert_\n'), ((6730, 6776), 'sfepy.base.base.output', 'output', (["(' -> %s\\n %s' % (kind_desc, ranges))"], {}), "(' -> %s\\n %s' % (kind_desc, ranges))\n", (6736, 6776), False, 'from sfepy.base.base import ordered_iteritems, output, set_defaults, assert_\n'), ((17422, 17440), 'sfepy.base.plotutils.plt.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (17438, 17440), False, 'from sfepy.base.plotutils import plt\n'), ((17722, 17732), 'sfepy.base.plotutils.plt.show', 'plt.show', ([], {}), '()\n', (17730, 17732), False, 'from sfepy.base.plotutils import plt\n'), ((18965, 18983), 'sfepy.base.plotutils.plt.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (18981, 18983), False, 'from sfepy.base.plotutils import plt\n'), ((20022, 20040), 'sfepy.base.plotutils.plt.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (20038, 20040), False, 'from sfepy.base.plotutils import plt\n'), ((20355, 20365), 'sfepy.base.plotutils.plt.show', 'plt.show', ([], {}), '()\n', (20363, 20365), False, 'from sfepy.base.plotutils import plt\n'), ((830, 857), 'sfepy.base.base.set_defaults', 'set_defaults', (['val', 'defaults'], {}), '(val, defaults)\n', (842, 857), False, 'from sfepy.base.base import ordered_iteritems, output, set_defaults, assert_\n'), ((4529, 4542), 'numpy.asarray', 'nm.asarray', (['x'], {}), '(x)\n', (4539, 4542), True, 'import numpy as nm\n'), ((4572, 4585), 'numpy.asarray', 'nm.asarray', (['y'], {}), '(y)\n', (4582, 4585), True, 'import numpy as nm\n'), ((6375, 6429), 'sfepy.base.base.output', 'output', (['ii', 'gmin[0]', 'gmax[0]', "('%.8f' % f0)", "('%.8f' % f1)"], {}), "(ii, gmin[0], gmax[0], '%.8f' % f0, '%.8f' % f1)\n", (6381, 6429), False, 'from sfepy.base.base import ordered_iteritems, output, set_defaults, assert_\n'), ((6446, 6496), 'sfepy.base.base.output', 'output', (["(' -> %s\\n %s' % (kind_desc, ranges[ig]))"], {}), "(' -> %s\\n %s' % (kind_desc, ranges[ig]))\n", (6452, 6496), False, 'from sfepy.base.base import ordered_iteritems, output, set_defaults, assert_\n'), ((12932, 12956), 'six.iteritems', 'six.iteritems', (['coef_info'], {}), '(coef_info)\n', (12945, 12956), False, 'import six\n'), ((15460, 15507), 'os.path.join', 'op.join', (['self.problem.output_dir', 'log_save_name'], {}), '(self.problem.output_dir, log_save_name)\n', (15467, 15507), True, 'import os.path as op\n'), ((15709, 15760), 'os.path.join', 'op.join', (['self.problem.output_dir', 'raw_log_save_name'], {}), '(self.problem.output_dir, raw_log_save_name)\n', (15716, 15760), True, 'import os.path as op\n'), ((11825, 11852), 'os.path.basename', 'op.basename', (['conf._filename'], {}), '(conf._filename)\n', (11836, 11852), True, 'import os.path as op\n')]
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine._internal as mgb
from megengine.core import tensor
from megengine.test import assertTensorClose
def test_recoverable():
a = tensor()
b = tensor()
a_np = np.random.random((4, 3)).astype("float32")
b_np = np.random.random((3, 7)).astype("float32")
a.set_value(a_np)
b.set_value(b_np)
# Do some normal computation.
a2 = a * 2
ab = a @ b
# Raise a computation error.
with pytest.raises(mgb.MegBrainError):
_ = a * b
# Variable a2 and ab should be still usable after error happened.
assertTensorClose(a2.numpy(), a_np * 2)
assertTensorClose(ab.numpy(), a_np @ b_np)
# Should allow computation as well.
ab2 = ab ** 2
assertTensorClose(ab2.numpy(), (a_np @ b_np) ** 2)
|
[
"megengine.core.tensor"
] |
[((582, 590), 'megengine.core.tensor', 'tensor', ([], {}), '()\n', (588, 590), False, 'from megengine.core import tensor\n'), ((599, 607), 'megengine.core.tensor', 'tensor', ([], {}), '()\n', (605, 607), False, 'from megengine.core import tensor\n'), ((868, 900), 'pytest.raises', 'pytest.raises', (['mgb.MegBrainError'], {}), '(mgb.MegBrainError)\n', (881, 900), False, 'import pytest\n'), ((619, 643), 'numpy.random.random', 'np.random.random', (['(4, 3)'], {}), '((4, 3))\n', (635, 643), True, 'import numpy as np\n'), ((673, 697), 'numpy.random.random', 'np.random.random', (['(3, 7)'], {}), '((3, 7))\n', (689, 697), True, 'import numpy as np\n')]
|
import numpy as nm
from sfepy.base.base import output, Struct
from sfepy.base.conf import ProblemConf, get_standard_keywords
from sfepy.homogenization.homogen_app import HomogenizationApp
from sfepy.homogenization.coefficients import Coefficients
import tables as pt
from sfepy.discrete.fem.meshio import HDF5MeshIO
import os.path as op
def get_homog_coefs_linear(ts, coor, mode,
micro_filename=None, regenerate=False,
coefs_filename=None):
oprefix = output.prefix
output.prefix = 'micro:'
required, other = get_standard_keywords()
required.remove( 'equations' )
conf = ProblemConf.from_file(micro_filename, required, other, verbose=False)
if coefs_filename is None:
coefs_filename = conf.options.get('coefs_filename', 'coefs')
coefs_filename = op.join(conf.options.get('output_dir', '.'),
coefs_filename) + '.h5'
if not regenerate:
if op.exists( coefs_filename ):
if not pt.isHDF5File( coefs_filename ):
regenerate = True
else:
regenerate = True
if regenerate:
options = Struct( output_filename_trunk = None )
app = HomogenizationApp( conf, options, 'micro:' )
coefs = app()
if type(coefs) is tuple:
coefs = coefs[0]
coefs.to_file_hdf5( coefs_filename )
else:
coefs = Coefficients.from_file_hdf5( coefs_filename )
out = {}
    if mode is None:
for key, val in coefs.__dict__.iteritems():
out[key] = val
elif mode == 'qp':
for key, val in coefs.__dict__.iteritems():
if type( val ) == nm.ndarray or type(val) == nm.float64:
out[key] = nm.tile( val, (coor.shape[0], 1, 1) )
elif type(val) == dict:
for key2, val2 in val.iteritems():
if type(val2) == nm.ndarray or type(val2) == nm.float64:
out[key+'_'+key2] = \
nm.tile(val2, (coor.shape[0], 1, 1))
else:
out = None
output.prefix = oprefix
return out
def get_correctors_from_file( coefs_filename = 'coefs.h5',
dump_names = None ):
    if dump_names is None:
coefs = Coefficients.from_file_hdf5( coefs_filename )
if hasattr( coefs, 'dump_names' ):
dump_names = coefs.dump_names
else:
raise ValueError( ' "filenames" coefficient must be used!' )
out = {}
for key, val in dump_names.iteritems():
corr_name = op.split( val )[-1]
io = HDF5MeshIO( val+'.h5' )
data = io.read_data( 0 )
dkeys = data.keys()
corr = {}
for dk in dkeys:
corr[dk] = data[dk].data.reshape(data[dk].shape)
out[corr_name] = corr
return out
|
[
"sfepy.base.conf.ProblemConf.from_file",
"sfepy.discrete.fem.meshio.HDF5MeshIO",
"sfepy.base.base.Struct",
"sfepy.homogenization.coefficients.Coefficients.from_file_hdf5",
"sfepy.base.conf.get_standard_keywords",
"sfepy.homogenization.homogen_app.HomogenizationApp"
] |
[((578, 601), 'sfepy.base.conf.get_standard_keywords', 'get_standard_keywords', ([], {}), '()\n', (599, 601), False, 'from sfepy.base.conf import ProblemConf, get_standard_keywords\n'), ((649, 718), 'sfepy.base.conf.ProblemConf.from_file', 'ProblemConf.from_file', (['micro_filename', 'required', 'other'], {'verbose': '(False)'}), '(micro_filename, required, other, verbose=False)\n', (670, 718), False, 'from sfepy.base.conf import ProblemConf, get_standard_keywords\n'), ((981, 1006), 'os.path.exists', 'op.exists', (['coefs_filename'], {}), '(coefs_filename)\n', (990, 1006), True, 'import os.path as op\n'), ((1178, 1212), 'sfepy.base.base.Struct', 'Struct', ([], {'output_filename_trunk': 'None'}), '(output_filename_trunk=None)\n', (1184, 1212), False, 'from sfepy.base.base import output, Struct\n'), ((1232, 1274), 'sfepy.homogenization.homogen_app.HomogenizationApp', 'HomogenizationApp', (['conf', 'options', '"""micro:"""'], {}), "(conf, options, 'micro:')\n", (1249, 1274), False, 'from sfepy.homogenization.homogen_app import HomogenizationApp\n'), ((1433, 1476), 'sfepy.homogenization.coefficients.Coefficients.from_file_hdf5', 'Coefficients.from_file_hdf5', (['coefs_filename'], {}), '(coefs_filename)\n', (1460, 1476), False, 'from sfepy.homogenization.coefficients import Coefficients\n'), ((2322, 2365), 'sfepy.homogenization.coefficients.Coefficients.from_file_hdf5', 'Coefficients.from_file_hdf5', (['coefs_filename'], {}), '(coefs_filename)\n', (2349, 2365), False, 'from sfepy.homogenization.coefficients import Coefficients\n'), ((2652, 2675), 'sfepy.discrete.fem.meshio.HDF5MeshIO', 'HDF5MeshIO', (["(val + '.h5')"], {}), "(val + '.h5')\n", (2662, 2675), False, 'from sfepy.discrete.fem.meshio import HDF5MeshIO\n'), ((2619, 2632), 'os.path.split', 'op.split', (['val'], {}), '(val)\n', (2627, 2632), True, 'import os.path as op\n'), ((1029, 1058), 'tables.isHDF5File', 'pt.isHDF5File', (['coefs_filename'], {}), '(coefs_filename)\n', (1042, 1058), True, 'import tables as pt\n'), ((1765, 1800), 'numpy.tile', 'nm.tile', (['val', '(coor.shape[0], 1, 1)'], {}), '(val, (coor.shape[0], 1, 1))\n', (1772, 1800), True, 'import numpy as nm\n'), ((2055, 2091), 'numpy.tile', 'nm.tile', (['val2', '(coor.shape[0], 1, 1)'], {}), '(val2, (coor.shape[0], 1, 1))\n', (2062, 2091), True, 'import numpy as nm\n')]
|
from glob import glob
from typing import Optional
from sqlmodel import Field, Session, SQLModel, create_engine, select
class Image(SQLModel, table=True):
key: Optional[int] = Field(default=None, primary_key=True)
image_name: str
label: str
image_url: str
engine = create_engine("sqlite:///image.db")
def read_images():
with Session(engine) as session:
statement = select(Image)
images = session.exec(statement).all()
return images
read_images()
|
[
"sqlmodel.select",
"sqlmodel.Session",
"sqlmodel.create_engine",
"sqlmodel.Field"
] |
[((283, 318), 'sqlmodel.create_engine', 'create_engine', (['"""sqlite:///image.db"""'], {}), "('sqlite:///image.db')\n", (296, 318), False, 'from sqlmodel import Field, Session, SQLModel, create_engine, select\n'), ((181, 218), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (186, 218), False, 'from sqlmodel import Field, Session, SQLModel, create_engine, select\n'), ((348, 363), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (355, 363), False, 'from sqlmodel import Field, Session, SQLModel, create_engine, select\n'), ((396, 409), 'sqlmodel.select', 'select', (['Image'], {}), '(Image)\n', (402, 409), False, 'from sqlmodel import Field, Session, SQLModel, create_engine, select\n')]
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Copyright (c) Megvii, Inc. and its affiliates.
import random
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
class DataPrefetcher:
"""
    DataPrefetcher is inspired by the code of the following file:
    https://github.com/NVIDIA/apex/blob/master/examples/imagenet/main_amp.py
    It could speed up your pytorch dataloader. For more information, please check
https://github.com/NVIDIA/apex/issues/304#issuecomment-493562789.
"""
def __init__(self, loader):
self.loader = iter(loader)
def preload(self):
try:
self.next_input, self.next_target, _, _ = next(self.loader)
except StopIteration:
self.next_input = None
self.next_target = None
return
def next(self):
inputs, target, _, _ = next(self.loader)
return inputs.numpy(), target.numpy()
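# Usage sketch (illustrative only): `train_loader` is an assumed iterable yielding
# (inputs, targets, _, _) batches; it is not defined in this file.
#   prefetcher = DataPrefetcher(train_loader)
#   inputs, targets = prefetcher.next()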
def random_resize(data_loader, exp, epoch, rank, is_distributed):
tensor = mge.tensor([1])
if rank == 0:
if epoch > exp.max_epoch - 10:
size = exp.input_size
else:
size = random.randint(*exp.random_size)
size = int(32 * size)
tensor *= size
if is_distributed:
tensor = F.distributed.broadcast(tensor)
dist.group_barrier()
input_size = data_loader.change_input_dim(multiple=tensor.item(), random_range=None)
return input_size
|
[
"megengine.functional.distributed.broadcast",
"megengine.tensor",
"megengine.distributed.group_barrier"
] |
[((1025, 1040), 'megengine.tensor', 'mge.tensor', (['[1]'], {}), '([1])\n', (1035, 1040), True, 'import megengine as mge\n'), ((1297, 1328), 'megengine.functional.distributed.broadcast', 'F.distributed.broadcast', (['tensor'], {}), '(tensor)\n', (1320, 1328), True, 'import megengine.functional as F\n'), ((1337, 1357), 'megengine.distributed.group_barrier', 'dist.group_barrier', ([], {}), '()\n', (1355, 1357), True, 'import megengine.distributed as dist\n'), ((1166, 1198), 'random.randint', 'random.randint', (['*exp.random_size'], {}), '(*exp.random_size)\n', (1180, 1198), False, 'import random\n')]
|
from typing import Optional
from sqlmodel import Field, SQLModel
from alchemical import Alchemical
db = Alchemical('sqlite:///users.sqlite', model_class=SQLModel)
class User(db.Model, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str = Field(max_length=128)
def __repr__(self):
return f'<User {self.name}>'
db.drop_all()
db.create_all()
with db.begin() as session:
for name in ['mary', 'joe', 'susan']:
session.add(User(name=name))
with db.Session() as session:
print(session.scalars(User.select()).all())
|
[
"sqlmodel.Field"
] |
[((106, 164), 'alchemical.Alchemical', 'Alchemical', (['"""sqlite:///users.sqlite"""'], {'model_class': 'SQLModel'}), "('sqlite:///users.sqlite', model_class=SQLModel)\n", (116, 164), False, 'from alchemical import Alchemical\n'), ((225, 262), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (230, 262), False, 'from sqlmodel import Field, SQLModel\n'), ((279, 300), 'sqlmodel.Field', 'Field', ([], {'max_length': '(128)'}), '(max_length=128)\n', (284, 300), False, 'from sqlmodel import Field, SQLModel\n')]
|
import math
import megengine.module as M
import megengine.functional as F
import megengine as mge
class PositionalEncoding(M.Module):
"""Positional encoding.
:param int d_model: embedding dim
:param float dropout_rate: dropout rate
:param int max_len: maximum input length
"""
def __init__(self, d_model, dropout_rate, max_len=5000):
"""Construct an PositionalEncoding object."""
super(PositionalEncoding, self).__init__()
self.d_model = d_model
self.xscale = math.sqrt(self.d_model)
self.dropout = M.dropout.Dropout(dropout_rate)
self.pe = mge.Tensor(0.0)
self.extend_pe(F.tensor.zeros([1, max_len]))
def extend_pe(self, x):
"""Reset the positional encodings."""
if len(self.pe.shape):
if self.pe.shape[1] >= x.shape[1]:
if self.pe.dtype != x.dtype or self.pe.device != x.device:
self.pe = self.pe.to(dtype=x.dtype, device=x.device)
return
pe = F.tensor.zeros([x.shape[1], self.d_model])
position = mge.Tensor(F.arange(0, x.shape[1], dtype="float32")).reshape(
x.shape[1], -1
)
div_term = F.exp(
mge.Tensor(F.arange(0, self.d_model, 2, dtype="float32"))
* -(math.log(10000.0) / self.d_model)
)
pe[:, 0::2] = F.sin(position * div_term)
pe[:, 1::2] = F.cos(position * div_term)
h, w = pe.shape
pe = pe.reshape(-1, h, w)
self.pe[...] = pe.to(device=x.device)
def forward(self, x: mge.Tensor):
"""Add positional encoding.
Args:
x (megengine.Tensor): Input. Its shape is (batch, time, ...)
Returns:
megengine.Tensor: Encoded tensor. Its shape is (batch, time, ...)
"""
self.extend_pe(x)
x = x * self.xscale + self.pe[:, : x.shape[1]]
return self.dropout(x)
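# Usage sketch (illustrative; the sizes below are assumptions, not from the original):
#   pos_enc = PositionalEncoding(d_model=256, dropout_rate=0.1)
#   x = F.tensor.zeros([4, 100, 256])  # (batch, time, d_model)
#   y = pos_enc(x)  # same shape, scaled by sqrt(d_model) with positions added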
|
[
"megengine.functional.arange",
"megengine.module.dropout.Dropout",
"megengine.functional.cos",
"megengine.Tensor",
"megengine.functional.tensor.zeros",
"megengine.functional.sin"
] |
[((521, 544), 'math.sqrt', 'math.sqrt', (['self.d_model'], {}), '(self.d_model)\n', (530, 544), False, 'import math\n'), ((568, 599), 'megengine.module.dropout.Dropout', 'M.dropout.Dropout', (['dropout_rate'], {}), '(dropout_rate)\n', (585, 599), True, 'import megengine.module as M\n'), ((618, 633), 'megengine.Tensor', 'mge.Tensor', (['(0.0)'], {}), '(0.0)\n', (628, 633), True, 'import megengine as mge\n'), ((1024, 1066), 'megengine.functional.tensor.zeros', 'F.tensor.zeros', (['[x.shape[1], self.d_model]'], {}), '([x.shape[1], self.d_model])\n', (1038, 1066), True, 'import megengine.functional as F\n'), ((1363, 1389), 'megengine.functional.sin', 'F.sin', (['(position * div_term)'], {}), '(position * div_term)\n', (1368, 1389), True, 'import megengine.functional as F\n'), ((1412, 1438), 'megengine.functional.cos', 'F.cos', (['(position * div_term)'], {}), '(position * div_term)\n', (1417, 1438), True, 'import megengine.functional as F\n'), ((657, 685), 'megengine.functional.tensor.zeros', 'F.tensor.zeros', (['[1, max_len]'], {}), '([1, max_len])\n', (671, 685), True, 'import megengine.functional as F\n'), ((1097, 1137), 'megengine.functional.arange', 'F.arange', (['(0)', 'x.shape[1]'], {'dtype': '"""float32"""'}), "(0, x.shape[1], dtype='float32')\n", (1105, 1137), True, 'import megengine.functional as F\n'), ((1234, 1279), 'megengine.functional.arange', 'F.arange', (['(0)', 'self.d_model', '(2)'], {'dtype': '"""float32"""'}), "(0, self.d_model, 2, dtype='float32')\n", (1242, 1279), True, 'import megengine.functional as F\n'), ((1297, 1314), 'math.log', 'math.log', (['(10000.0)'], {}), '(10000.0)\n', (1305, 1314), False, 'import math\n')]
|
import megengine.module as M
import megengine.functional as F
class FlowHead(M.Module):
def __init__(self, input_dim=128, hidden_dim=256):
super(FlowHead, self).__init__()
self.conv1 = M.Conv2d(input_dim, hidden_dim, 3, padding=1)
self.conv2 = M.Conv2d(hidden_dim, 2, 3, padding=1)
self.relu = M.ReLU()
def forward(self, x):
return self.conv2(self.relu(self.conv1(x)))
class SepConvGRU(M.Module):
def __init__(self, hidden_dim=128, input_dim=192 + 128):
super(SepConvGRU, self).__init__()
self.convz1 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2)
)
self.convr1 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2)
)
self.convq1 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2)
)
self.convz2 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0)
)
self.convr2 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0)
)
self.convq2 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0)
)
def forward(self, h, x):
# horizontal
hx = F.concat([h, x], axis=1)
z = F.sigmoid(self.convz1(hx))
r = F.sigmoid(self.convr1(hx))
q = F.tanh(self.convq1(F.concat([r * h, x], axis=1)))
h = (1 - z) * h + z * q
# vertical
hx = F.concat([h, x], axis=1)
z = F.sigmoid(self.convz2(hx))
r = F.sigmoid(self.convr2(hx))
q = F.tanh(self.convq2(F.concat([r * h, x], axis=1)))
h = (1 - z) * h + z * q
return h
class BasicMotionEncoder(M.Module):
def __init__(self, cor_planes):
super(BasicMotionEncoder, self).__init__()
self.convc1 = M.Conv2d(cor_planes, 256, 1, padding=0)
self.convc2 = M.Conv2d(256, 192, 3, padding=1)
self.convf1 = M.Conv2d(2, 128, 7, padding=3)
self.convf2 = M.Conv2d(128, 64, 3, padding=1)
self.conv = M.Conv2d(64 + 192, 128 - 2, 3, padding=1)
def forward(self, flow, corr):
cor = F.relu(self.convc1(corr))
cor = F.relu(self.convc2(cor))
flo = F.relu(self.convf1(flow))
flo = F.relu(self.convf2(flo))
cor_flo = F.concat([cor, flo], axis=1)
out = F.relu(self.conv(cor_flo))
return F.concat([out, flow], axis=1)
class BasicUpdateBlock(M.Module):
def __init__(self, hidden_dim, cor_planes, mask_size=8):
super(BasicUpdateBlock, self).__init__()
self.encoder = BasicMotionEncoder(cor_planes)
self.gru = SepConvGRU(hidden_dim=hidden_dim, input_dim=128 + hidden_dim)
self.flow_head = FlowHead(hidden_dim, hidden_dim=256)
self.mask = M.Sequential(
M.Conv2d(128, 256, 3, padding=1),
M.ReLU(),
M.Conv2d(256, mask_size**2 * 9, 1, padding=0),
)
def forward(self, net, inp, corr, flow, upsample=True):
motion_features = self.encoder(flow, corr)
inp = F.concat([inp, motion_features], axis=1)
net = self.gru(net, inp)
delta_flow = self.flow_head(net)
        # scale mask to balance gradients
mask = 0.25 * self.mask(net)
return net, mask, delta_flow
|
[
"megengine.functional.concat",
"megengine.module.ReLU",
"megengine.module.Conv2d"
] |
[((207, 252), 'megengine.module.Conv2d', 'M.Conv2d', (['input_dim', 'hidden_dim', '(3)'], {'padding': '(1)'}), '(input_dim, hidden_dim, 3, padding=1)\n', (215, 252), True, 'import megengine.module as M\n'), ((274, 311), 'megengine.module.Conv2d', 'M.Conv2d', (['hidden_dim', '(2)', '(3)'], {'padding': '(1)'}), '(hidden_dim, 2, 3, padding=1)\n', (282, 311), True, 'import megengine.module as M\n'), ((332, 340), 'megengine.module.ReLU', 'M.ReLU', ([], {}), '()\n', (338, 340), True, 'import megengine.module as M\n'), ((576, 644), 'megengine.module.Conv2d', 'M.Conv2d', (['(hidden_dim + input_dim)', 'hidden_dim', '(1, 5)'], {'padding': '(0, 2)'}), '(hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2))\n', (584, 644), True, 'import megengine.module as M\n'), ((689, 757), 'megengine.module.Conv2d', 'M.Conv2d', (['(hidden_dim + input_dim)', 'hidden_dim', '(1, 5)'], {'padding': '(0, 2)'}), '(hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2))\n', (697, 757), True, 'import megengine.module as M\n'), ((802, 870), 'megengine.module.Conv2d', 'M.Conv2d', (['(hidden_dim + input_dim)', 'hidden_dim', '(1, 5)'], {'padding': '(0, 2)'}), '(hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2))\n', (810, 870), True, 'import megengine.module as M\n'), ((916, 984), 'megengine.module.Conv2d', 'M.Conv2d', (['(hidden_dim + input_dim)', 'hidden_dim', '(5, 1)'], {'padding': '(2, 0)'}), '(hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0))\n', (924, 984), True, 'import megengine.module as M\n'), ((1029, 1097), 'megengine.module.Conv2d', 'M.Conv2d', (['(hidden_dim + input_dim)', 'hidden_dim', '(5, 1)'], {'padding': '(2, 0)'}), '(hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0))\n', (1037, 1097), True, 'import megengine.module as M\n'), ((1142, 1210), 'megengine.module.Conv2d', 'M.Conv2d', (['(hidden_dim + input_dim)', 'hidden_dim', '(5, 1)'], {'padding': '(2, 0)'}), '(hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0))\n', (1150, 1210), True, 'import megengine.module as M\n'), ((1297, 1321), 'megengine.functional.concat', 'F.concat', (['[h, x]'], {'axis': '(1)'}), '([h, x], axis=1)\n', (1305, 1321), True, 'import megengine.functional as F\n'), ((1527, 1551), 'megengine.functional.concat', 'F.concat', (['[h, x]'], {'axis': '(1)'}), '([h, x], axis=1)\n', (1535, 1551), True, 'import megengine.functional as F\n'), ((1890, 1929), 'megengine.module.Conv2d', 'M.Conv2d', (['cor_planes', '(256)', '(1)'], {'padding': '(0)'}), '(cor_planes, 256, 1, padding=0)\n', (1898, 1929), True, 'import megengine.module as M\n'), ((1952, 1984), 'megengine.module.Conv2d', 'M.Conv2d', (['(256)', '(192)', '(3)'], {'padding': '(1)'}), '(256, 192, 3, padding=1)\n', (1960, 1984), True, 'import megengine.module as M\n'), ((2007, 2037), 'megengine.module.Conv2d', 'M.Conv2d', (['(2)', '(128)', '(7)'], {'padding': '(3)'}), '(2, 128, 7, padding=3)\n', (2015, 2037), True, 'import megengine.module as M\n'), ((2060, 2091), 'megengine.module.Conv2d', 'M.Conv2d', (['(128)', '(64)', '(3)'], {'padding': '(1)'}), '(128, 64, 3, padding=1)\n', (2068, 2091), True, 'import megengine.module as M\n'), ((2112, 2153), 'megengine.module.Conv2d', 'M.Conv2d', (['(64 + 192)', '(128 - 2)', '(3)'], {'padding': '(1)'}), '(64 + 192, 128 - 2, 3, padding=1)\n', (2120, 2153), True, 'import megengine.module as M\n'), ((2367, 2395), 'megengine.functional.concat', 'F.concat', (['[cor, flo]'], {'axis': '(1)'}), '([cor, flo], axis=1)\n', (2375, 2395), True, 'import megengine.functional as F\n'), ((2452, 2481), 'megengine.functional.concat', 
'F.concat', (['[out, flow]'], {'axis': '(1)'}), '([out, flow], axis=1)\n', (2460, 2481), True, 'import megengine.functional as F\n'), ((3123, 3163), 'megengine.functional.concat', 'F.concat', (['[inp, motion_features]'], {'axis': '(1)'}), '([inp, motion_features], axis=1)\n', (3131, 3163), True, 'import megengine.functional as F\n'), ((2872, 2904), 'megengine.module.Conv2d', 'M.Conv2d', (['(128)', '(256)', '(3)'], {'padding': '(1)'}), '(128, 256, 3, padding=1)\n', (2880, 2904), True, 'import megengine.module as M\n'), ((2918, 2926), 'megengine.module.ReLU', 'M.ReLU', ([], {}), '()\n', (2924, 2926), True, 'import megengine.module as M\n'), ((2940, 2987), 'megengine.module.Conv2d', 'M.Conv2d', (['(256)', '(mask_size ** 2 * 9)', '(1)'], {'padding': '(0)'}), '(256, mask_size ** 2 * 9, 1, padding=0)\n', (2948, 2987), True, 'import megengine.module as M\n'), ((1431, 1459), 'megengine.functional.concat', 'F.concat', (['[r * h, x]'], {'axis': '(1)'}), '([r * h, x], axis=1)\n', (1439, 1459), True, 'import megengine.functional as F\n'), ((1661, 1689), 'megengine.functional.concat', 'F.concat', (['[r * h, x]'], {'axis': '(1)'}), '([r * h, x], axis=1)\n', (1669, 1689), True, 'import megengine.functional as F\n')]
|
from __future__ import print_function
from __future__ import absolute_import
from argparse import ArgumentParser
import numpy as nm
import sys
sys.path.append('.')
from sfepy.base.base import IndexedStruct
from sfepy.discrete import (FieldVariable, Material, Integral, Function,
Equation, Equations, Problem)
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.terms import Term
from sfepy.discrete.conditions import Conditions, EssentialBC
from sfepy.solvers.ls import ScipyDirect
from sfepy.solvers.nls import Newton
from sfepy.postprocess.viewer import Viewer
from sfepy.mechanics.matcoefs import stiffness_from_lame
import numpy as np
def shift_u_fun(ts, coors, bc=None, problem=None, shift=0.0):
"""
Define a displacement depending on the y coordinate.
"""
val = shift * coors[:,1]**2
return val
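# e.g. with shift=0.01, a node at y = 2.0 would get the prescribed value
# 0.01 * 2.0**2 = 0.04 (illustrative numbers; the shift BC stays commented out below).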
helps = {
'show' : 'show the results figure',
}
# def main():
from sfepy import data_dir
parser = ArgumentParser()
parser.add_argument('--version', action='version', version='%(prog)s')
parser.add_argument('-s', '--show',
action="store_true", dest='show',
default=False, help=helps['show'])
options = parser.parse_args()
# mesh = Mesh.from_file(data_dir + '/meshes/2d/rectangle_tri.mesh')
mesh = Mesh.from_file(data_dir + '/meshes/3d/cube_medium_hexa.mesh')
domain = FEDomain('domain', mesh)
# min_x, max_x = domain.get_mesh_bounding_box()[:,0]
# eps = 1e-8 * (max_x - min_x)
omega = domain.create_region('Omega', 'all')
# gamma1 = domain.create_region('Gamma1',
# 'vertices in x < %.10f' % (min_x + eps),
# 'facet')
Bottom = domain.create_region('Bottom',
'vertices in z < %.10f' % -0.499,
'facet')
# gamma2 = domain.create_region('Gamma2',
# 'vertices in x > %.10f' % (max_x - eps),
# 'facet')
Top = domain.create_region('Top',
'vertices in z > %.10f' % 0.499,
'facet')
field = Field.from_args('fu', nm.float64, 'vector', omega,
approx_order=3)
u = FieldVariable('u', 'unknown', field)
v = FieldVariable('v', 'test', field, primary_var_name='u')
# materials = {
# 'solid' : ({
# 'D': stiffness_from_lame(dim=3, lam=5.769, mu=3.846),
# },),
# 'cs' : ({
# 'f' : [1e5, 1e-2],
# '.c' : [0.0, 0.0, 1.2],
# '.r' : 0.8,
# },),
# }
# defK = materials['cs'][0]
# cs = ContactSphere(csc['.c'], csc['.r'])
m = Material('m', D=stiffness_from_lame(dim=3, lam=5.769, mu=3.846))
# f = Material('f', val=[[0.02], [0.01]])
# csf = Material('csf', val=[1e5, 1e-2])
# csc = Material('csc', val=[0.0, 0.0, 1.2])
# csr = Material('csr', val=0.8)
cs = Material('cs', f=[1e5, 1e-2], c=[0.0, 0.0, 1.2], r=0.8)
integral = Integral('i', order=3)
integral1 = Integral('i', order=2)
t1 = Term.new('dw_lin_elastic(m.D, v, u)',
integral, omega, m=m, v=v, u=u)
t2 = Term.new('dw_contact_sphere(cs.f, cs.c, cs.r, v, u)', integral1, Top, cs=cs, v=v, u=u)
eq = Equation('balance', t1 + t2)
eqs = Equations([eq])
fix_u = EssentialBC('fix_u', Bottom, {'u.all' : 0.0})
# bc_fun = Function('shift_u_fun', shift_u_fun,
# extra_args={'shift' : 0.01})
# shift_u = EssentialBC('shift_u', gamma2, {'u.0' : bc_fun})
ls = ScipyDirect({})
nls_status = IndexedStruct()
nls = Newton({}, lin_solver=ls, status=nls_status)
pb = Problem('elasticity', equations=eqs)
pb.save_regions_as_groups('regions')
pb.set_bcs(ebcs=Conditions([fix_u]))
pb.set_solver(nls)
status = IndexedStruct()
state = pb.solve(status=status)
print('Nonlinear solver status:\n', nls_status)
print('Stationary solver status:\n', status)
pb.save_state('linear_elasticity.vtk', state)
# if options.show:
view = Viewer('linear_elasticity.vtk')
view(vector_mode='warp_norm', rel_scaling=2,
is_scalar_bar=True, is_wireframe=True)
|
[
"sfepy.discrete.conditions.EssentialBC",
"sfepy.discrete.Integral",
"sfepy.postprocess.viewer.Viewer",
"sfepy.solvers.ls.ScipyDirect",
"sfepy.discrete.Equations",
"sfepy.discrete.fem.Field.from_args",
"sfepy.discrete.Equation",
"sfepy.discrete.fem.Mesh.from_file",
"sfepy.discrete.fem.FEDomain",
"sfepy.mechanics.matcoefs.stiffness_from_lame",
"sfepy.terms.Term.new",
"sfepy.discrete.conditions.Conditions",
"sfepy.discrete.FieldVariable",
"sfepy.discrete.Material",
"sfepy.discrete.Problem",
"sfepy.base.base.IndexedStruct",
"sfepy.solvers.nls.Newton"
] |
[((144, 164), 'sys.path.append', 'sys.path.append', (['"""."""'], {}), "('.')\n", (159, 164), False, 'import sys\n'), ((971, 987), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (985, 987), False, 'from argparse import ArgumentParser\n'), ((1310, 1371), 'sfepy.discrete.fem.Mesh.from_file', 'Mesh.from_file', (["(data_dir + '/meshes/3d/cube_medium_hexa.mesh')"], {}), "(data_dir + '/meshes/3d/cube_medium_hexa.mesh')\n", (1324, 1371), False, 'from sfepy.discrete.fem import Mesh, FEDomain, Field\n'), ((1381, 1405), 'sfepy.discrete.fem.FEDomain', 'FEDomain', (['"""domain"""', 'mesh'], {}), "('domain', mesh)\n", (1389, 1405), False, 'from sfepy.discrete.fem import Mesh, FEDomain, Field\n'), ((2137, 2203), 'sfepy.discrete.fem.Field.from_args', 'Field.from_args', (['"""fu"""', 'nm.float64', '"""vector"""', 'omega'], {'approx_order': '(3)'}), "('fu', nm.float64, 'vector', omega, approx_order=3)\n", (2152, 2203), False, 'from sfepy.discrete.fem import Mesh, FEDomain, Field\n'), ((2233, 2269), 'sfepy.discrete.FieldVariable', 'FieldVariable', (['"""u"""', '"""unknown"""', 'field'], {}), "('u', 'unknown', field)\n", (2246, 2269), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem\n'), ((2274, 2329), 'sfepy.discrete.FieldVariable', 'FieldVariable', (['"""v"""', '"""test"""', 'field'], {'primary_var_name': '"""u"""'}), "('v', 'test', field, primary_var_name='u')\n", (2287, 2329), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem\n'), ((2866, 2926), 'sfepy.discrete.Material', 'Material', (['"""cs"""'], {'f': '[100000.0, 0.01]', 'c': '[0.0, 0.0, 1.2]', 'r': '(0.8)'}), "('cs', f=[100000.0, 0.01], c=[0.0, 0.0, 1.2], r=0.8)\n", (2874, 2926), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem\n'), ((2931, 2953), 'sfepy.discrete.Integral', 'Integral', (['"""i"""'], {'order': '(3)'}), "('i', order=3)\n", (2939, 2953), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem\n'), ((2966, 2988), 'sfepy.discrete.Integral', 'Integral', (['"""i"""'], {'order': '(2)'}), "('i', order=2)\n", (2974, 2988), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem\n'), ((2995, 3064), 'sfepy.terms.Term.new', 'Term.new', (['"""dw_lin_elastic(m.D, v, u)"""', 'integral', 'omega'], {'m': 'm', 'v': 'v', 'u': 'u'}), "('dw_lin_elastic(m.D, v, u)', integral, omega, m=m, v=v, u=u)\n", (3003, 3064), False, 'from sfepy.terms import Term\n'), ((3084, 3174), 'sfepy.terms.Term.new', 'Term.new', (['"""dw_contact_sphere(cs.f, cs.c, cs.r, v, u)"""', 'integral1', 'Top'], {'cs': 'cs', 'v': 'v', 'u': 'u'}), "('dw_contact_sphere(cs.f, cs.c, cs.r, v, u)', integral1, Top, cs=cs,\n v=v, u=u)\n", (3092, 3174), False, 'from sfepy.terms import Term\n'), ((3176, 3204), 'sfepy.discrete.Equation', 'Equation', (['"""balance"""', '(t1 + t2)'], {}), "('balance', t1 + t2)\n", (3184, 3204), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem\n'), ((3211, 3226), 'sfepy.discrete.Equations', 'Equations', (['[eq]'], {}), '([eq])\n', (3220, 3226), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem\n'), ((3236, 3280), 'sfepy.discrete.conditions.EssentialBC', 'EssentialBC', (['"""fix_u"""', 'Bottom', "{'u.all': 0.0}"], {}), "('fix_u', Bottom, {'u.all': 0.0})\n", 
(3247, 3280), False, 'from sfepy.discrete.conditions import Conditions, EssentialBC\n'), ((3447, 3462), 'sfepy.solvers.ls.ScipyDirect', 'ScipyDirect', (['{}'], {}), '({})\n', (3458, 3462), False, 'from sfepy.solvers.ls import ScipyDirect\n'), ((3477, 3492), 'sfepy.base.base.IndexedStruct', 'IndexedStruct', ([], {}), '()\n', (3490, 3492), False, 'from sfepy.base.base import IndexedStruct\n'), ((3499, 3543), 'sfepy.solvers.nls.Newton', 'Newton', (['{}'], {'lin_solver': 'ls', 'status': 'nls_status'}), '({}, lin_solver=ls, status=nls_status)\n', (3505, 3543), False, 'from sfepy.solvers.nls import Newton\n'), ((3550, 3586), 'sfepy.discrete.Problem', 'Problem', (['"""elasticity"""'], {'equations': 'eqs'}), "('elasticity', equations=eqs)\n", (3557, 3586), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem\n'), ((3692, 3707), 'sfepy.base.base.IndexedStruct', 'IndexedStruct', ([], {}), '()\n', (3705, 3707), False, 'from sfepy.base.base import IndexedStruct\n'), ((3908, 3939), 'sfepy.postprocess.viewer.Viewer', 'Viewer', (['"""linear_elasticity.vtk"""'], {}), "('linear_elasticity.vtk')\n", (3914, 3939), False, 'from sfepy.postprocess.viewer import Viewer\n'), ((2650, 2697), 'sfepy.mechanics.matcoefs.stiffness_from_lame', 'stiffness_from_lame', ([], {'dim': '(3)', 'lam': '(5.769)', 'mu': '(3.846)'}), '(dim=3, lam=5.769, mu=3.846)\n', (2669, 2697), False, 'from sfepy.mechanics.matcoefs import stiffness_from_lame\n'), ((3641, 3660), 'sfepy.discrete.conditions.Conditions', 'Conditions', (['[fix_u]'], {}), '([fix_u])\n', (3651, 3660), False, 'from sfepy.discrete.conditions import Conditions, EssentialBC\n')]
|
# -*- coding: utf-8 -*-
# MIT License
#
# Copyright (c) 2020 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import math
import collections
import megengine.module as nn
import megengine.functional as F
from model.nn_upsample import NeuralUpsampler, FlowMaskEstimator
from common.utils import flow_warp, upsample2d_flow_as
def conv(inp, out, k=3, s=1, d=1, isReLU=True):
if isReLU:
ret = nn.Sequential(nn.Conv2d(inp, out, k, s, padding=((k - 1) * d) // 2, dilation=d, bias=True), nn.LeakyReLU(0.1))
else:
ret = nn.Sequential(nn.Conv2d(inp, out, k, s, padding=((k - 1) * d) // 2, dilation=d, bias=True))
return ret
class ContextNetwork(nn.Module):
def __init__(self, ch_in):
super(ContextNetwork, self).__init__()
self.convs = nn.Sequential(conv(ch_in, 128, 3, 1, 1), conv(128, 128, 3, 1, 2), conv(128, 128, 3, 1, 4), conv(128, 96, 3, 1, 8),
conv(96, 64, 3, 1, 16), conv(64, 32, 3, 1, 1), conv(32, 2, isReLU=False))
def forward(self, x):
return self.convs(x)
class FlowEstimator(nn.Module):
def __init__(self, ch_in):
super(FlowEstimator, self).__init__()
self.conv1 = conv(ch_in, 128)
self.conv2 = conv(128, 128)
self.conv3 = conv(128 + 128, 96)
self.conv4 = conv(96 + 128, 64)
self.conv5 = conv(96 + 64, 32)
# channels of the second last layer
self.feat_dim = 32
self.predict_flow = conv(64 + 32, 2, isReLU=False)
def forward(self, x):
x1 = self.conv1(x)
x2 = self.conv2(x1)
x3 = self.conv3(F.concat([x1, x2], axis=1))
x4 = self.conv4(F.concat([x2, x3], axis=1))
x5 = self.conv5(F.concat([x3, x4], axis=1))
flow = self.predict_flow(F.concat([x4, x5], axis=1))
return x5, flow
class CostVolume(nn.Module):
def __init__(self, d=4, *args, **kwargs):
super(CostVolume, self).__init__()
self.d = d
self.out_dim = 2 * self.d + 1
self.pad_size = self.d
def forward(self, x1, x2):
_, _, H, W = x1.shape
x2 = F.nn.pad(x2, ((0, 0), (0, 0), (self.pad_size, self.pad_size), (self.pad_size, self.pad_size)))
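        # slide x2 over a (2*d + 1) x (2*d + 1) window around each position of x1 and take
        # the channel-mean correlation at every offset, giving (2*d + 1)**2 cost channels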
cv = []
for i in range(self.out_dim):
for j in range(self.out_dim):
cost = x1 * x2[:, :, i:(i + H), j:(j + W)]
cost = F.mean(cost, 1, keepdims=True)
cv.append(cost)
return F.concat(cv, 1)
class FeaturePyramidExtractor(nn.Module):
def __init__(self, pyr_chans):
super(FeaturePyramidExtractor, self).__init__()
self.pyr_chans = pyr_chans
self.convs = []
for _, (ch_in, ch_out) in enumerate(zip(pyr_chans[:-1], pyr_chans[1:])):
layer = nn.Sequential(conv(ch_in, ch_out, s=2), conv(ch_out, ch_out))
self.convs.append(layer)
def forward(self, x):
feature_pyramid = []
for conv in self.convs:
x = conv(x)
feature_pyramid.append(x)
return feature_pyramid[::-1]
class GyroFlow(nn.Module):
def __init__(self, params):
super(GyroFlow, self).__init__()
self.leakyRELU = nn.LeakyReLU(0.1)
self.upsample = params.upsample
self.with_bk = True
self.pyr_chans = [3, 16, 32, 64, 96, 128, 192]
self.feature_pyramid_extractor = FeaturePyramidExtractor(self.pyr_chans)
# correlation range
self.d = 4
self.output_level = 4
# cost volume
self.cost_volume = CostVolume(d=self.d)
self.cv_dim = (self.d * 2 + 1)**2
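        # e.g. with the default d=4 this gives (2 * 4 + 1)**2 = 81 cost-volume channels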
self.upsampler = NeuralUpsampler()
self.ch_inp = 32 + self.cv_dim + 2
self.flow_estimator = FlowEstimator(self.ch_inp)
self.context_net = ContextNetwork(self.flow_estimator.feat_dim + 2)
self.conv_1x1 = list([
conv(192, 32, k=1, s=1, d=1),
conv(128, 32, k=1, s=1, d=1),
conv(96, 32, k=1, s=1, d=1),
conv(64, 32, k=1, s=1, d=1),
conv(32, 32, k=1, s=1, d=1)
])
self.with_gyro_field = True
self.flow_predictor = FlowMaskEstimator(4, (8, 16, 32, 16, 8), 2)
self.mask_predictor = FlowMaskEstimator(64, (32, 32, 32, 16, 8), 1)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.msra_normal_(m.weight, a=math.sqrt(5))
if m.bias is not None:
fan_in, _ = nn.init.calculate_fan_in_and_fan_out(m.weight)
bound = 1 / math.sqrt(fan_in)
nn.init.uniform_(m.bias, -bound, bound)
def generate_fused_flow(self, x1, x2):
input_feature = F.concat((x1, x2), axis=1)
flow = self.flow_predictor(input_feature)[1]
assert flow.shape[1] == 2
return flow
def generate_map(self, x1, x2):
input_feature = F.concat((x1, x2), axis=1)
out = self.mask_predictor(input_feature)[1]
mask = F.sigmoid(out)
assert mask.shape[1] == 1
return mask
def self_guided_fusion_module(self, flow, gyro_field_rsz, x1, x2_warp, layer):
fuse_flow = self.generate_fused_flow(flow, gyro_field_rsz)
mask = self.generate_map(self.conv_1x1[layer](x1), self.conv_1x1[layer](x2_warp))
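        # blend the learned fused flow with the resized gyro field; the predicted mask
        # acts as a per-pixel convex-combination weight between the two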
flow = fuse_flow * mask + gyro_field_rsz * (1 - mask)
return flow
def normalize_features(self, feature_list, normalize, center, moments_across_channels=True, moments_across_images=True):
# Compute feature statistics.
statistics = collections.defaultdict(list)
axes = [1, 2, 3] if moments_across_channels else [2, 3] # [b, c, h, w]
for feature_image in feature_list:
mean = F.mean(feature_image, axis=axes, keepdims=True) # [b,1,1,1] or [b,c,1,1]
variance = F.var(feature_image, axis=axes, keepdims=True) # [b,1,1,1] or [b,c,1,1]
statistics['mean'].append(mean)
statistics['var'].append(variance)
if moments_across_images:
statistics['mean'] = ([F.mean(F.stack(statistics['mean'], axis=0), axis=(0, ))] * len(feature_list))
statistics['var'] = ([F.var(F.stack(statistics['var'], axis=0), axis=(0, ))] * len(feature_list))
statistics['std'] = [F.sqrt(v + 1e-16) for v in statistics['var']]
# Center and normalize features.
if center:
feature_list = [f - mean for f, mean in zip(feature_list, statistics['mean'])]
if normalize:
feature_list = [f / std for f, std in zip(feature_list, statistics['std'])]
return feature_list
def predict_flow(self, x1_pyrs, x2_pyrs, gyro_field=None):
flow_pyrs = []
batch_size, _, h_x1, w_x1 = x1_pyrs[0].shape
dtype = x1_pyrs[0].dtype
flow = F.zeros((batch_size, 2, h_x1, w_x1), dtype=dtype)
for layer, (x1, x2) in enumerate(zip(x1_pyrs, x2_pyrs)):
if layer == 0:
x2_warp = x2
else:
flow = self.upsampler(flow, self.conv_1x1[layer](x1), self.conv_1x1[layer](x2))
gyro_field_rsz = upsample2d_flow_as(gyro_field, flow, if_rate=True)
x2_warp = flow_warp(x2, gyro_field_rsz)
flow = self.self_guided_fusion_module(flow, gyro_field_rsz, x1, x2_warp, layer)
x2_warp = flow_warp(x2, flow)
# cost volume normalized
x1_normalized, x2_warp_normalized = self.normalize_features([x1, x2_warp],
normalize=True,
center=True,
moments_across_channels=False,
moments_across_images=False)
_cv = self.cost_volume(x1_normalized, x2_warp_normalized)
_cv_relu = self.leakyRELU(_cv)
x1 = self.conv_1x1[layer](x1)
_x_feat, flow_pred = self.flow_estimator(F.concat([_cv_relu, x1, flow], axis=1))
flow += flow_pred
flow_refine = self.context_net(F.concat([_x_feat, flow], axis=1))
flow += flow_refine
flow_pyrs.append(flow)
if layer == self.output_level:
break
if self.upsample:
flows = [F.vision.interpolate(flow * 4, scale_factor=4, mode='bilinear', align_corners=True) for flow in flow_pyrs]
return flows[::-1]
def forward(self, data_batch, with_bk=True):
x = data_batch['imgs']
imgs = [x[:, 3 * i:3 * i + 3] for i in range(2)]
x = [self.feature_pyramid_extractor(img) + [img] for img in imgs]
gyro_field = data_batch["gyro_field"]
res = {}
res['flow_fw'] = self.predict_flow(x[0], x[1], gyro_field)
if with_bk:
res['flow_bw'] = self.predict_flow(x[1], x[0], -1 * gyro_field)
return res
class GyroFlowTestFlops(GyroFlow):
def forward(self, data_batch, with_bk=True):
x = data_batch
imgs = [x[:, 3 * i:3 * i + 3] for i in range(2)]
x = [self.feature_pyramid_extractor(img) + [img] for img in imgs]
gyro_field = F.ones_like(data_batch)[:, :2, ...]
res_fw = self.predict_flow(x[0], x[1], gyro_field)
if with_bk:
res_bw = self.predict_flow(x[1], x[0], -1 * gyro_field)
return res_fw, res_bw
def fetch_net(params):
if params.net_type == "gyroflow":
net = GyroFlow(params)
else:
raise NotImplementedError
return net
|
[
"megengine.module.init.calculate_fan_in_and_fan_out",
"megengine.functional.vision.interpolate",
"megengine.functional.sigmoid",
"megengine.module.Conv2d",
"megengine.functional.nn.pad",
"megengine.functional.var",
"megengine.functional.stack",
"megengine.functional.ones_like",
"megengine.functional.concat",
"megengine.functional.mean",
"megengine.module.init.uniform_",
"megengine.functional.zeros",
"megengine.functional.sqrt",
"megengine.module.LeakyReLU"
] |
[((3076, 3175), 'megengine.functional.nn.pad', 'F.nn.pad', (['x2', '((0, 0), (0, 0), (self.pad_size, self.pad_size), (self.pad_size, self.pad_size)\n )'], {}), '(x2, ((0, 0), (0, 0), (self.pad_size, self.pad_size), (self.\n pad_size, self.pad_size)))\n', (3084, 3175), True, 'import megengine.functional as F\n'), ((3427, 3442), 'megengine.functional.concat', 'F.concat', (['cv', '(1)'], {}), '(cv, 1)\n', (3435, 3442), True, 'import megengine.functional as F\n'), ((4151, 4168), 'megengine.module.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {}), '(0.1)\n', (4163, 4168), True, 'import megengine.module as nn\n'), ((4589, 4606), 'model.nn_upsample.NeuralUpsampler', 'NeuralUpsampler', ([], {}), '()\n', (4604, 4606), False, 'from model.nn_upsample import NeuralUpsampler, FlowMaskEstimator\n'), ((5100, 5143), 'model.nn_upsample.FlowMaskEstimator', 'FlowMaskEstimator', (['(4)', '(8, 16, 32, 16, 8)', '(2)'], {}), '(4, (8, 16, 32, 16, 8), 2)\n', (5117, 5143), False, 'from model.nn_upsample import NeuralUpsampler, FlowMaskEstimator\n'), ((5174, 5219), 'model.nn_upsample.FlowMaskEstimator', 'FlowMaskEstimator', (['(64)', '(32, 32, 32, 16, 8)', '(1)'], {}), '(64, (32, 32, 32, 16, 8), 1)\n', (5191, 5219), False, 'from model.nn_upsample import NeuralUpsampler, FlowMaskEstimator\n'), ((5654, 5680), 'megengine.functional.concat', 'F.concat', (['(x1, x2)'], {'axis': '(1)'}), '((x1, x2), axis=1)\n', (5662, 5680), True, 'import megengine.functional as F\n'), ((5849, 5875), 'megengine.functional.concat', 'F.concat', (['(x1, x2)'], {'axis': '(1)'}), '((x1, x2), axis=1)\n', (5857, 5875), True, 'import megengine.functional as F\n'), ((5943, 5957), 'megengine.functional.sigmoid', 'F.sigmoid', (['out'], {}), '(out)\n', (5952, 5957), True, 'import megengine.functional as F\n'), ((6520, 6549), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (6543, 6549), False, 'import collections\n'), ((7767, 7816), 'megengine.functional.zeros', 'F.zeros', (['(batch_size, 2, h_x1, w_x1)'], {'dtype': 'dtype'}), '((batch_size, 2, h_x1, w_x1), dtype=dtype)\n', (7774, 7816), True, 'import megengine.functional as F\n'), ((1392, 1466), 'megengine.module.Conv2d', 'nn.Conv2d', (['inp', 'out', 'k', 's'], {'padding': '((k - 1) * d // 2)', 'dilation': 'd', 'bias': '(True)'}), '(inp, out, k, s, padding=(k - 1) * d // 2, dilation=d, bias=True)\n', (1401, 1466), True, 'import megengine.module as nn\n'), ((1470, 1487), 'megengine.module.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {}), '(0.1)\n', (1482, 1487), True, 'import megengine.module as nn\n'), ((1527, 1601), 'megengine.module.Conv2d', 'nn.Conv2d', (['inp', 'out', 'k', 's'], {'padding': '((k - 1) * d // 2)', 'dilation': 'd', 'bias': '(True)'}), '(inp, out, k, s, padding=(k - 1) * d // 2, dilation=d, bias=True)\n', (1536, 1601), True, 'import megengine.module as nn\n'), ((2575, 2601), 'megengine.functional.concat', 'F.concat', (['[x1, x2]'], {'axis': '(1)'}), '([x1, x2], axis=1)\n', (2583, 2601), True, 'import megengine.functional as F\n'), ((2627, 2653), 'megengine.functional.concat', 'F.concat', (['[x2, x3]'], {'axis': '(1)'}), '([x2, x3], axis=1)\n', (2635, 2653), True, 'import megengine.functional as F\n'), ((2679, 2705), 'megengine.functional.concat', 'F.concat', (['[x3, x4]'], {'axis': '(1)'}), '([x3, x4], axis=1)\n', (2687, 2705), True, 'import megengine.functional as F\n'), ((2740, 2766), 'megengine.functional.concat', 'F.concat', (['[x4, x5]'], {'axis': '(1)'}), '([x4, x5], axis=1)\n', (2748, 2766), True, 'import megengine.functional as F\n'), ((6692, 6739), 
'megengine.functional.mean', 'F.mean', (['feature_image'], {'axis': 'axes', 'keepdims': '(True)'}), '(feature_image, axis=axes, keepdims=True)\n', (6698, 6739), True, 'import megengine.functional as F\n'), ((6789, 6835), 'megengine.functional.var', 'F.var', (['feature_image'], {'axis': 'axes', 'keepdims': '(True)'}), '(feature_image, axis=axes, keepdims=True)\n', (6794, 6835), True, 'import megengine.functional as F\n'), ((7241, 7258), 'megengine.functional.sqrt', 'F.sqrt', (['(v + 1e-16)'], {}), '(v + 1e-16)\n', (7247, 7258), True, 'import megengine.functional as F\n'), ((10234, 10257), 'megengine.functional.ones_like', 'F.ones_like', (['data_batch'], {}), '(data_batch)\n', (10245, 10257), True, 'import megengine.functional as F\n'), ((3349, 3379), 'megengine.functional.mean', 'F.mean', (['cost', '(1)'], {'keepdims': '(True)'}), '(cost, 1, keepdims=True)\n', (3355, 3379), True, 'import megengine.functional as F\n'), ((8087, 8137), 'common.utils.upsample2d_flow_as', 'upsample2d_flow_as', (['gyro_field', 'flow'], {'if_rate': '(True)'}), '(gyro_field, flow, if_rate=True)\n', (8105, 8137), False, 'from common.utils import flow_warp, upsample2d_flow_as\n'), ((8164, 8193), 'common.utils.flow_warp', 'flow_warp', (['x2', 'gyro_field_rsz'], {}), '(x2, gyro_field_rsz)\n', (8173, 8193), False, 'from common.utils import flow_warp, upsample2d_flow_as\n'), ((8318, 8337), 'common.utils.flow_warp', 'flow_warp', (['x2', 'flow'], {}), '(x2, flow)\n', (8327, 8337), False, 'from common.utils import flow_warp, upsample2d_flow_as\n'), ((9050, 9088), 'megengine.functional.concat', 'F.concat', (['[_cv_relu, x1, flow]'], {'axis': '(1)'}), '([_cv_relu, x1, flow], axis=1)\n', (9058, 9088), True, 'import megengine.functional as F\n'), ((9164, 9197), 'megengine.functional.concat', 'F.concat', (['[_x_feat, flow]'], {'axis': '(1)'}), '([_x_feat, flow], axis=1)\n', (9172, 9197), True, 'import megengine.functional as F\n'), ((9379, 9466), 'megengine.functional.vision.interpolate', 'F.vision.interpolate', (['(flow * 4)'], {'scale_factor': '(4)', 'mode': '"""bilinear"""', 'align_corners': '(True)'}), "(flow * 4, scale_factor=4, mode='bilinear',\n align_corners=True)\n", (9399, 9466), True, 'import megengine.functional as F\n'), ((5429, 5475), 'megengine.module.init.calculate_fan_in_and_fan_out', 'nn.init.calculate_fan_in_and_fan_out', (['m.weight'], {}), '(m.weight)\n', (5465, 5475), True, 'import megengine.module as nn\n'), ((5546, 5585), 'megengine.module.init.uniform_', 'nn.init.uniform_', (['m.bias', '(-bound)', 'bound'], {}), '(m.bias, -bound, bound)\n', (5562, 5585), True, 'import megengine.module as nn\n'), ((5344, 5356), 'math.sqrt', 'math.sqrt', (['(5)'], {}), '(5)\n', (5353, 5356), False, 'import math\n'), ((5508, 5525), 'math.sqrt', 'math.sqrt', (['fan_in'], {}), '(fan_in)\n', (5517, 5525), False, 'import math\n'), ((7030, 7065), 'megengine.functional.stack', 'F.stack', (["statistics['mean']"], {'axis': '(0)'}), "(statistics['mean'], axis=0)\n", (7037, 7065), True, 'import megengine.functional as F\n'), ((7141, 7175), 'megengine.functional.stack', 'F.stack', (["statistics['var']"], {'axis': '(0)'}), "(statistics['var'], axis=0)\n", (7148, 7175), True, 'import megengine.functional as F\n')]
|
from datetime import datetime, timezone
from typing import Generic, Optional, Type, TypeVar
from fastapi_users.authentication.strategy.db import AccessTokenDatabase
from fastapi_users.authentication.strategy.db.models import BaseAccessToken
from pydantic import UUID4
from sqlalchemy import Column, types
from sqlalchemy.ext.asyncio import AsyncSession
from sqlmodel import Field, Session, SQLModel, select
def now_utc():
return datetime.now(timezone.utc)
class SQLModelBaseAccessToken(BaseAccessToken, SQLModel):
__tablename__ = "accesstoken"
token: str = Field(
sa_column=Column("token", types.String(length=43), primary_key=True)
)
created_at: datetime = Field(
default_factory=now_utc,
sa_column=Column(
"created_at", types.DateTime(timezone=True), nullable=False, index=True
),
)
user_id: UUID4 = Field(foreign_key="user.id", nullable=False)
class Config:
orm_mode = True
A = TypeVar("A", bound=SQLModelBaseAccessToken)
class SQLModelAccessTokenDatabase(Generic[A], AccessTokenDatabase[A]):
"""
Access token database adapter for SQLModel.
    :param access_token_model: SQLModel model of a DB representation of an access token.
:param session: SQLAlchemy session.
"""
def __init__(self, access_token_model: Type[A], session: Session):
self.access_token_model = access_token_model
self.session = session
async def get_by_token(
self, token: str, max_age: Optional[datetime] = None
) -> Optional[A]:
statement = select(self.access_token_model).where(
self.access_token_model.token == token
)
if max_age is not None:
statement = statement.where(self.access_token_model.created_at >= max_age)
results = self.session.exec(statement)
return results.first()
async def create(self, access_token: A) -> A:
self.session.add(access_token)
self.session.commit()
self.session.refresh(access_token)
return access_token
async def update(self, access_token: A) -> A:
self.session.add(access_token)
self.session.commit()
self.session.refresh(access_token)
return access_token
async def delete(self, access_token: A) -> None:
self.session.delete(access_token)
self.session.commit()
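# Usage sketch (illustrative; `AccessToken` and `session` are assumed to be an
# access-token SQLModel class and an open Session created elsewhere):
#   token_db = SQLModelAccessTokenDatabase(AccessToken, session)
#   token = await token_db.get_by_token("some-token-value")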
class SQLModelAccessTokenDatabaseAsync(Generic[A], AccessTokenDatabase[A]):
"""
Access token database adapter for SQLModel working purely asynchronously.
    :param access_token_model: SQLModel model of a DB representation of an access token.
:param session: SQLAlchemy async session.
"""
def __init__(self, access_token_model: Type[A], session: AsyncSession):
self.access_token_model = access_token_model
self.session = session
async def get_by_token(
self, token: str, max_age: Optional[datetime] = None
) -> Optional[A]:
statement = select(self.access_token_model).where(
self.access_token_model.token == token
)
if max_age is not None:
statement = statement.where(self.access_token_model.created_at >= max_age)
results = await self.session.execute(statement)
        access_token_row = results.first()
        if access_token_row is None:
            return None
        return access_token_row[0]
async def create(self, access_token: A) -> A:
self.session.add(access_token)
await self.session.commit()
await self.session.refresh(access_token)
return access_token
async def update(self, access_token: A) -> A:
self.session.add(access_token)
await self.session.commit()
await self.session.refresh(access_token)
return access_token
async def delete(self, access_token: A) -> None:
await self.session.delete(access_token)
await self.session.commit()
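# Illustrative usage sketch (assumption, not part of the adapter code above):
# a concrete access-token table plus a dependency-style factory that yields
# the asynchronous adapter. The names below are examples only.
class AccessToken(SQLModelBaseAccessToken, table=True):
    pass
async def get_access_token_db(session: AsyncSession):
    # Typically wired up as a FastAPI dependency that receives an AsyncSession
    # and yields the asynchronous adapter defined above.
    yield SQLModelAccessTokenDatabaseAsync(AccessToken, session)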
|
[
"sqlmodel.Field",
"sqlmodel.select"
] |
[((974, 1017), 'typing.TypeVar', 'TypeVar', (['"""A"""'], {'bound': 'SQLModelBaseAccessToken'}), "('A', bound=SQLModelBaseAccessToken)\n", (981, 1017), False, 'from typing import Generic, Optional, Type, TypeVar\n'), ((436, 462), 'datetime.datetime.now', 'datetime.now', (['timezone.utc'], {}), '(timezone.utc)\n', (448, 462), False, 'from datetime import datetime, timezone\n'), ((880, 924), 'sqlmodel.Field', 'Field', ([], {'foreign_key': '"""user.id"""', 'nullable': '(False)'}), "(foreign_key='user.id', nullable=False)\n", (885, 924), False, 'from sqlmodel import Field, Session, SQLModel, select\n'), ((616, 639), 'sqlalchemy.types.String', 'types.String', ([], {'length': '(43)'}), '(length=43)\n', (628, 639), False, 'from sqlalchemy import Column, types\n'), ((784, 813), 'sqlalchemy.types.DateTime', 'types.DateTime', ([], {'timezone': '(True)'}), '(timezone=True)\n', (798, 813), False, 'from sqlalchemy import Column, types\n'), ((1568, 1599), 'sqlmodel.select', 'select', (['self.access_token_model'], {}), '(self.access_token_model)\n', (1574, 1599), False, 'from sqlmodel import Field, Session, SQLModel, select\n'), ((2970, 3001), 'sqlmodel.select', 'select', (['self.access_token_model'], {}), '(self.access_token_model)\n', (2976, 3001), False, 'from sqlmodel import Field, Session, SQLModel, select\n')]
|
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import copy
import numpy as np
import megengine.autodiff as ad
import megengine.functional as F
import megengine.optimizer as optimizer
from megengine import Parameter
from megengine import Tensor as tensor
from megengine import tensor
from megengine.core.tensor.function import Function
from megengine.module import Module
def test_single_input():
data_shape = (9, 2, 6)
av = np.random.random(data_shape).astype(np.float32)
class MulFunc(Function):
def forward(self, a):
self.a = a
return a * 10
def backward(self, grad_o):
return grad_o * 10
class Simple(Module):
def __init__(self, a):
super().__init__()
self.a = Parameter(a, dtype=np.float32)
self.layer1 = MulFunc()
def forward(self):
x = self.layer1(self.a)
return x
net = Simple(av)
gm = ad.GradManager().attach(net.parameters())
opt = optimizer.SGD(net.parameters(), lr=1.0)
opt.clear_grad()
with gm:
loss = net()
gm.backward(loss.sum())
opt.step()
np.testing.assert_almost_equal(loss.numpy(), (av * 10))
np.testing.assert_almost_equal(net.a.numpy(), (av - 10))
def test_multi_input():
data_shape = (9, 2, 6)
av = np.random.random(data_shape).astype(np.float32)
bv = np.random.random(data_shape).astype(np.float32)
class MulFunc(Function):
def forward(self, a, b):
self.a = a
self.b = b
return a * b
def backward(self, grad_o):
return grad_o * self.b * 2, grad_o * self.a * 3
class Simple(Module):
def __init__(self, a, b):
super().__init__()
self.a = Parameter(a, dtype=np.float32)
self.b = Parameter(b, dtype=np.float32)
self.layer1 = MulFunc()
def forward(self):
x = self.layer1(self.a, self.b)
return x
net = Simple(av, bv)
gm = ad.GradManager().attach(net.parameters())
opt = optimizer.SGD(net.parameters(), lr=1.0)
opt.clear_grad()
with gm:
loss = net()
gm.backward(loss.sum())
opt.step()
np.testing.assert_almost_equal(loss.numpy(), (av * bv))
np.testing.assert_almost_equal(net.a.numpy(), (av - 2 * bv))
np.testing.assert_almost_equal(net.b.numpy(), (bv - 3 * av))
def test_multi_output():
data_shape = (9, 2, 6)
av = np.random.random(data_shape).astype(np.float32)
bv = np.random.random(data_shape).astype(np.float32)
class MulFunc(Function):
def forward(self, a, b):
self.a = a
self.b = b
return a * b, a + b
def backward(self, grad_1, grad_2):
return grad_1 * (self.b + 1), grad_2 * (self.a + 1)
class Simple(Module):
def __init__(self, a, b):
super().__init__()
self.a = Parameter(a, dtype=np.float32)
self.b = Parameter(b, dtype=np.float32)
self.layer1 = MulFunc()
def forward(self):
x, y = self.layer1(self.a, self.b)
return x + y
net = Simple(av, bv)
gm = ad.GradManager().attach(net.parameters())
opt = optimizer.SGD(net.parameters(), lr=1.0)
opt.clear_grad()
with gm:
loss = net()
gm.backward(loss.sum())
opt.step()
np.testing.assert_almost_equal(loss.numpy(), (av * bv + av + bv), decimal=6)
np.testing.assert_almost_equal(net.a.numpy(), (av - bv - 1), decimal=6)
np.testing.assert_almost_equal(net.b.numpy(), (bv - av - 1), decimal=6)
def test_skip_invalid_grad():
data_shape = (1, 9, 2, 6)
av = np.random.random(data_shape).astype(np.float32)
bv = np.random.random(data_shape).astype(np.float32)
c = np.random.random(data_shape).astype(np.float32)
cookie = tensor(c)
class EqWithFakeGrad(Function):
def forward(self, a, b):
return a + b
def backward(self, grad_o):
_ = grad_o
return cookie, cookie
class Simple(Module):
def __init__(self, a, b):
super().__init__()
self.a = Parameter(a, dtype=np.float32)
self.b = Parameter(b, dtype=np.float32)
self.layer1 = EqWithFakeGrad()
def forward(self):
x = self.layer1(self.a, self.b)
return x
net = Simple(av, bv)
optim = optimizer.SGD(net.parameters(), lr=1.0)
gm = ad.GradManager().attach(net.parameters())
optim.clear_grad()
with gm:
loss = net().sum()
gm.backward(loss)
optim.step()
np.testing.assert_almost_equal(net.a.numpy(), av - c)
np.testing.assert_almost_equal(net.b.numpy(), bv - c)
def test_ste():
class STE(Function):
def forward(self, x):
maxv, minv = x.max(), x.min()
scale = F.maximum(maxv, -minv) / 127
return F.round(x / scale) * scale
def backward(self, grad_y):
return grad_y
class Simple(Module):
def __init__(self, a):
super().__init__()
self.a = Parameter(a, dtype=np.float32)
self.layer1 = STE()
def forward(self):
x = self.layer1(self.a)
x = (x * 2.0).sum()
return x
data_shape = (1, 9, 2, 6)
av = np.random.random(data_shape).astype(np.float32)
net = Simple(av)
optim = optimizer.SGD(net.parameters(), lr=1.0)
gm = ad.GradManager().attach(net.parameters())
optim.clear_grad()
with gm:
loss = net()
gm.backward(loss.sum())
optim.step()
np.testing.assert_almost_equal(
net.a.numpy(),
av - np.broadcast_to(np.array([2.0], dtype=np.float32), data_shape),
)
def test_deepcopy():
class Sigmoid(Function):
def __init__(self, param):
super().__init__()
self.param = param
def forward(self, x):
y = 1 / (1 + F.exp(-x))
self.save_for_backward(y)
return y
def backward(self, grad_y):
(y,) = self.saved_tensors
return grad_y * y * (1 - y)
origin = Sigmoid(0)
new = copy.deepcopy(Sigmoid(0))
assert new.param == origin.param
def test_none_in_out_grad():
class Test(Function):
def forward(self, a, b):
return a, b
def backward(self, grad_a, grad_b):
assert grad_b is None
return (grad_a, 0.0)
class Simple(Module):
def __init__(self, a, b):
super().__init__()
self.a = Parameter(a, dtype=np.float32)
self.b = Parameter(b, dtype=np.float32)
self.layer = Test()
def forward(self):
aa, bb = self.layer(self.a, self.b)
return aa, bb
a = tensor(np.array([1.0], dtype=np.float32))
b = tensor(np.array([2.0], dtype=np.float32))
net = Simple(a, b)
optim = optimizer.SGD(net.parameters(), lr=1.0)
gm = ad.GradManager().attach(net.parameters())
optim.clear_grad()
with gm:
loss, _ = net()
gm.backward(loss)
optim.step()
np.testing.assert_almost_equal(
net.a.numpy(), np.array([1.0 - 1.0], dtype=np.float32)
)
np.testing.assert_almost_equal(
net.b.numpy(), np.array([2.0 - 0.0], dtype=np.float32)
)
def test_zero_grad():
class StopGradient(Function):
def forward(self, a):
return a
def backward(self, *_):
return None
class Simple(Module):
def __init__(self, a):
super().__init__()
self.a = Parameter(a, dtype=np.float32)
self.layer = StopGradient()
def forward(self):
b = self.a * 3.0
c = self.a * 4.0
return self.layer(b) + c
a = tensor(np.array([1.0], dtype=np.float32))
net = Simple(a)
optim = optimizer.SGD(net.parameters(), lr=1.0)
gm = ad.GradManager().attach(net.parameters())
optim.clear_grad()
with gm:
loss = net()
gm.backward(loss.sum())
optim.step()
np.testing.assert_almost_equal(
net.a.numpy(), np.array([1.0 - 4.0], dtype=np.float32),
)
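# Minimal recap sketch (not an additional test; assumption: it mirrors the
# pattern exercised by the tests above): a custom Function only needs
# `forward` and `backward`, after which it differentiates like any built-in
# op under ad.GradManager.
class _Scale(Function):
    def forward(self, x):
        return x * 0.5
    def backward(self, grad_output):
        return grad_output * 0.5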
|
[
"megengine.functional.round",
"megengine.functional.maximum",
"megengine.tensor",
"megengine.autodiff.GradManager",
"megengine.Parameter",
"megengine.functional.exp"
] |
[((4180, 4189), 'megengine.tensor', 'tensor', (['c'], {}), '(c)\n', (4186, 4189), False, 'from megengine import tensor\n'), ((7147, 7180), 'numpy.array', 'np.array', (['[1.0]'], {'dtype': 'np.float32'}), '([1.0], dtype=np.float32)\n', (7155, 7180), True, 'import numpy as np\n'), ((7197, 7230), 'numpy.array', 'np.array', (['[2.0]'], {'dtype': 'np.float32'}), '([2.0], dtype=np.float32)\n', (7205, 7230), True, 'import numpy as np\n'), ((7521, 7560), 'numpy.array', 'np.array', (['[1.0 - 1.0]'], {'dtype': 'np.float32'}), '([1.0 - 1.0], dtype=np.float32)\n', (7529, 7560), True, 'import numpy as np\n'), ((7626, 7665), 'numpy.array', 'np.array', (['[2.0 - 0.0]'], {'dtype': 'np.float32'}), '([2.0 - 0.0], dtype=np.float32)\n', (7634, 7665), True, 'import numpy as np\n'), ((8158, 8191), 'numpy.array', 'np.array', (['[1.0]'], {'dtype': 'np.float32'}), '([1.0], dtype=np.float32)\n', (8166, 8191), True, 'import numpy as np\n'), ((8482, 8521), 'numpy.array', 'np.array', (['[1.0 - 4.0]'], {'dtype': 'np.float32'}), '([1.0 - 4.0], dtype=np.float32)\n', (8490, 8521), True, 'import numpy as np\n'), ((742, 770), 'numpy.random.random', 'np.random.random', (['data_shape'], {}), '(data_shape)\n', (758, 770), True, 'import numpy as np\n'), ((1077, 1107), 'megengine.Parameter', 'Parameter', (['a'], {'dtype': 'np.float32'}), '(a, dtype=np.float32)\n', (1086, 1107), False, 'from megengine import Parameter\n'), ((1260, 1276), 'megengine.autodiff.GradManager', 'ad.GradManager', ([], {}), '()\n', (1274, 1276), True, 'import megengine.autodiff as ad\n'), ((1639, 1667), 'numpy.random.random', 'np.random.random', (['data_shape'], {}), '(data_shape)\n', (1655, 1667), True, 'import numpy as np\n'), ((1696, 1724), 'numpy.random.random', 'np.random.random', (['data_shape'], {}), '(data_shape)\n', (1712, 1724), True, 'import numpy as np\n'), ((2088, 2118), 'megengine.Parameter', 'Parameter', (['a'], {'dtype': 'np.float32'}), '(a, dtype=np.float32)\n', (2097, 2118), False, 'from megengine import Parameter\n'), ((2140, 2170), 'megengine.Parameter', 'Parameter', (['b'], {'dtype': 'np.float32'}), '(b, dtype=np.float32)\n', (2149, 2170), False, 'from megengine import Parameter\n'), ((2335, 2351), 'megengine.autodiff.GradManager', 'ad.GradManager', ([], {}), '()\n', (2349, 2351), True, 'import megengine.autodiff as ad\n'), ((2784, 2812), 'numpy.random.random', 'np.random.random', (['data_shape'], {}), '(data_shape)\n', (2800, 2812), True, 'import numpy as np\n'), ((2841, 2869), 'numpy.random.random', 'np.random.random', (['data_shape'], {}), '(data_shape)\n', (2857, 2869), True, 'import numpy as np\n'), ((3252, 3282), 'megengine.Parameter', 'Parameter', (['a'], {'dtype': 'np.float32'}), '(a, dtype=np.float32)\n', (3261, 3282), False, 'from megengine import Parameter\n'), ((3304, 3334), 'megengine.Parameter', 'Parameter', (['b'], {'dtype': 'np.float32'}), '(b, dtype=np.float32)\n', (3313, 3334), False, 'from megengine import Parameter\n'), ((3506, 3522), 'megengine.autodiff.GradManager', 'ad.GradManager', ([], {}), '()\n', (3520, 3522), True, 'import megengine.autodiff as ad\n'), ((4006, 4034), 'numpy.random.random', 'np.random.random', (['data_shape'], {}), '(data_shape)\n', (4022, 4034), True, 'import numpy as np\n'), ((4063, 4091), 'numpy.random.random', 'np.random.random', (['data_shape'], {}), '(data_shape)\n', (4079, 4091), True, 'import numpy as np\n'), ((4119, 4147), 'numpy.random.random', 'np.random.random', (['data_shape'], {}), '(data_shape)\n', (4135, 4147), True, 'import numpy as np\n'), ((4492, 4522), 
'megengine.Parameter', 'Parameter', (['a'], {'dtype': 'np.float32'}), '(a, dtype=np.float32)\n', (4501, 4522), False, 'from megengine import Parameter\n'), ((4544, 4574), 'megengine.Parameter', 'Parameter', (['b'], {'dtype': 'np.float32'}), '(b, dtype=np.float32)\n', (4553, 4574), False, 'from megengine import Parameter\n'), ((4798, 4814), 'megengine.autodiff.GradManager', 'ad.GradManager', ([], {}), '()\n', (4812, 4814), True, 'import megengine.autodiff as ad\n'), ((5445, 5475), 'megengine.Parameter', 'Parameter', (['a'], {'dtype': 'np.float32'}), '(a, dtype=np.float32)\n', (5454, 5475), False, 'from megengine import Parameter\n'), ((5665, 5693), 'numpy.random.random', 'np.random.random', (['data_shape'], {}), '(data_shape)\n', (5681, 5693), True, 'import numpy as np\n'), ((5795, 5811), 'megengine.autodiff.GradManager', 'ad.GradManager', ([], {}), '()\n', (5809, 5811), True, 'import megengine.autodiff as ad\n'), ((6914, 6944), 'megengine.Parameter', 'Parameter', (['a'], {'dtype': 'np.float32'}), '(a, dtype=np.float32)\n', (6923, 6944), False, 'from megengine import Parameter\n'), ((6966, 6996), 'megengine.Parameter', 'Parameter', (['b'], {'dtype': 'np.float32'}), '(b, dtype=np.float32)\n', (6975, 6996), False, 'from megengine import Parameter\n'), ((7316, 7332), 'megengine.autodiff.GradManager', 'ad.GradManager', ([], {}), '()\n', (7330, 7332), True, 'import megengine.autodiff as ad\n'), ((7948, 7978), 'megengine.Parameter', 'Parameter', (['a'], {'dtype': 'np.float32'}), '(a, dtype=np.float32)\n', (7957, 7978), False, 'from megengine import Parameter\n'), ((8274, 8290), 'megengine.autodiff.GradManager', 'ad.GradManager', ([], {}), '()\n', (8288, 8290), True, 'import megengine.autodiff as ad\n'), ((5197, 5219), 'megengine.functional.maximum', 'F.maximum', (['maxv', '(-minv)'], {}), '(maxv, -minv)\n', (5206, 5219), True, 'import megengine.functional as F\n'), ((5245, 5263), 'megengine.functional.round', 'F.round', (['(x / scale)'], {}), '(x / scale)\n', (5252, 5263), True, 'import megengine.functional as F\n'), ((6033, 6066), 'numpy.array', 'np.array', (['[2.0]'], {'dtype': 'np.float32'}), '([2.0], dtype=np.float32)\n', (6041, 6066), True, 'import numpy as np\n'), ((6292, 6301), 'megengine.functional.exp', 'F.exp', (['(-x)'], {}), '(-x)\n', (6297, 6301), True, 'import megengine.functional as F\n')]
|
import io
import numpy as np
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.module as M
import megengine.utils.network_node as N
from megengine.jit.tracing import trace
from megengine.tensor import Tensor
from megengine.utils.comp_graph_tools import GraphInference
from megengine.utils.network import Network as Net
from megengine.utils.network import as_oprnode, set_symbolic_shape
from megengine.utils.network_node import Host2DeviceCopy, VarNode
def test_metadata():
x = Tensor(0)
@trace(symbolic=True, capture_as_const=True)
def fwd(x):
return x * 2
fwd(x)
orig_model = io.BytesIO()
fwd.dump(orig_model, user_info="test", optimize_for_inference=False)
orig_model.seek(0)
graph = Net.load(orig_model)
assert graph.metadata == {
"user_info": "test",
"graph_modified": False, # False: tracing.dump
"optimized_for_inference": False,
}
orig_model.seek(0)
graph.dump(
orig_model,
user_info={"str": "x", "tensor": x, "module": M.Module, "none": None},
optimize_for_inference=True,
enable_nchw4=True,
enable_ioc16=True,
)
orig_model.seek(0)
graph = Net.load(orig_model)
assert graph.metadata == {
"user_info": {"str": "x", "tensor": x, "module": M.Module, "none": None},
"graph_modified": True, # True: Network.dump
"optimized_for_inference": True,
"enable_nchw4": True,
"enable_ioc16": True,
}
orig_model.seek(0)
fwd.dump(orig_model, enable_metadata=False)
orig_model.seek(0)
graph = Net.load(orig_model)
assert graph.metadata is None
def test_replace_var():
a = Tensor([1, 2])
b = Tensor([3, 4])
@trace(symbolic=True, capture_as_const=True)
def fwd(a, b):
return (a + b) * 2
fwd(a, b)
orig_model = io.BytesIO()
fwd.dump(
orig_model, arg_names=["a", "b"], output_names="o", optimize_for_inference=False
)
orig_model.seek(0)
graph = Net.load(orig_model)
vara = graph.var_filter.name("a").as_unique()
varb = graph.var_filter.name("b").as_unique()
out = F.mul(vara, varb)
out = F.relu(out)
opnode = list(graph.opr_filter.has_input(vara))
repl_dict = {opnode[0].outputs[0]: out}
graph.replace_vars(repl_dict)
modified_model = io.BytesIO()
graph.dump(modified_model)
modified_model.seek(0)
load_graph = GraphInference(modified_model)
out = load_graph.run(a, b)
np.testing.assert_equal(out["o"], [6, 16])
def test_replace_opr():
a = Tensor([1, 2])
b = Tensor([3, 4])
@trace(symbolic=True, capture_as_const=True)
def fwd(a, b):
return (a + b) * 2
fwd(a, b)
orig_model = io.BytesIO()
fwd.dump(
orig_model, arg_names=["a", "b"], output_names="o", optimize_for_inference=False
)
orig_model.seek(0)
graph = Net.load(orig_model)
vara = graph.var_filter.name("a").as_unique()
varb = graph.var_filter.name("b").as_unique()
out1 = F.sub(vara, varb)
out1 = F.relu(out1)
out1 = graph.add_dep_oprs(out1)
orig_opr = graph.opr_filter.has_input(vara).as_unique()
repl_dict = {orig_opr: out1[0].owner}
graph.replace_oprs(repl_dict)
modified_model1 = io.BytesIO()
graph.dump(modified_model1)
modified_model1.seek(0)
load_graph = GraphInference(modified_model1)
out = load_graph.run(a, b)
np.testing.assert_equal(out["o"], [0, 0])
def test_splice_network():
x = F.ones((2,))
y = F.ones((2,))
@trace(symbolic=True, capture_as_const=True)
def fun1(a, b):
return (a + b) * 2
@trace(symbolic=True, capture_as_const=True)
def fun2(a):
return a * 2 - 1
model = io.BytesIO()
fun1(x, y)
fun2(x)
fun1.dump(
model,
arg_names=["net1_i0", "net1_i1"],
output_names=["net1_o0"],
optimize_for_inference=False,
)
model.seek(0)
net1 = Net.load(model)
model.seek(0)
fun2.dump(
model,
arg_names=["net2_i0"],
output_names=["net2_o0"],
optimize_for_inference=False,
)
model.seek(0)
net2 = Net.load(model)
net1.add_output(*net2.output_vars)
var = net1.var_filter.name("net1_i0").as_unique()
repl_var = net2.var_filter.name("net2_o0").as_unique()
net1.replace_vars({var: repl_var})
assert "net1_i0" not in [var.name for var in net1.all_vars]
assert "net2_i0" in [var.name for var in net1.all_vars]
model.seek(0)
net1.dump(model, keep_var_name=2, optimize_for_inference=False)
model.seek(0)
net = Net.load(model)
assert "net1_i0" not in [var.name for var in net.all_vars]
assert "net2_i0" in [var.name for var in net.all_vars]
def test_modify_params():
a = Tensor([1, 2])
b = Tensor([3, 4])
@trace(symbolic=True, capture_as_const=True)
def fwd(a, b):
return (a + b) * 2
fwd(a, b)
orig_model = io.BytesIO()
fwd.dump(
orig_model, arg_names=["a", "b"], output_names="o", optimize_for_inference=False
)
orig_model.seek(0)
graph = Net.load(orig_model)
param_const = graph.params_filter.as_unique()
param_const.set_value(3)
modified_model = io.BytesIO()
graph.dump(modified_model)
modified_model.seek(0)
load_graph = GraphInference(modified_model)
out = load_graph.run(a, b)
np.testing.assert_equal(out["o"], [12, 18])
def test_make_const():
a = Tensor([1, 2])
b = Tensor([3, 4])
@trace(symbolic=True, capture_as_const=True)
def fwd(a, b):
return (a + b) * 2
fwd(a, b)
orig_model = io.BytesIO()
fwd.dump(
orig_model, arg_names=["a", "b"], output_names="o", optimize_for_inference=False
)
orig_model.seek(0)
graph = Net.load(orig_model)
const_b = graph.make_const(np.array([0.0, 0.0]), name="b")
varb = graph.var_filter.name("b").as_unique()
repl_dict = {varb: const_b}
graph.replace_vars(repl_dict)
modified_model = io.BytesIO()
graph.dump(modified_model)
modified_model.seek(0)
load_graph = GraphInference(modified_model)
out = load_graph.run(a)
np.testing.assert_equal(out["o"], [2, 4])
def test_add_input():
a = Tensor([1, 2])
b = Tensor([3, 4])
@trace(symbolic=True, capture_as_const=True)
def fwd(a, b):
return (a + b) * 2
fwd(a, b)
orig_model = io.BytesIO()
fwd.dump(
orig_model, arg_names=["a", "b"], output_names="o", optimize_for_inference=False
)
orig_model.seek(0)
graph = Net.load(orig_model)
inp_c = graph.make_input_node((2,), np.int32, name="c")
varo = graph.var_filter.name("o").as_unique()
out = F.add(varo, inp_c)
out.name = "o1"
graph.remove_output(varo)
graph.add_output(out)
modified_model = io.BytesIO()
graph.dump(modified_model)
modified_model.seek(0)
load_graph = GraphInference(modified_model)
out = load_graph.run(a, b, a)
np.testing.assert_equal(out["o1"], ((a + b) * 2 + a).numpy())
def test_add_remove_output():
a = Tensor([1.0, 2.0])
b = Tensor([3.0, 4.0])
@trace(symbolic=True, capture_as_const=True)
def fwd(a, b):
return (a + b) * 2, (a - b)
fwd(a, b)
orig_model = io.BytesIO()
fwd.dump(
orig_model,
arg_names=["a", "b"],
output_names=["o1", "o2"],
optimize_for_inference=False,
)
orig_model.seek(0)
net = Net.load(orig_model)
var_a = net.var_filter.name("a").as_unique()
var_b = net.var_filter.name("b").as_unique()
y1 = (var_a + var_b) * 3
y2 = F.sigmoid(var_a + var_b)
net.remove_output(*net.output_vars)
y1.name = "new_o1"
y2.name = "new_o2"
net.add_output(y1, y2)
modified_model = io.BytesIO()
net.dump(modified_model)
modified_model.seek(0)
g = GraphInference(modified_model)
out = g.run(a.numpy(), b.numpy())
np.testing.assert_equal(out["new_o1"], ((a + b) * 3).numpy())
np.testing.assert_equal(out["new_o2"], (F.sigmoid((a + b))).numpy())
def test_query():
class Model(M.Module):
def __init__(self):
super().__init__()
self.conv1 = M.Conv2d(3, 32, 3)
self.conv2 = M.Conv2d(32, 32, 3)
self.conv3 = M.Conv2d(32, 32, 3)
def forward(self, data):
x = self.conv1(data)
x = self.conv2(x)
x = self.conv3(x)
return x
n = Model()
@trace(symbolic=True, capture_as_const=True)
def fwd(data):
return n(data)
fwd(Tensor(np.random.random((1, 3, 224, 224))))
orig_model = io.BytesIO()
fwd.dump(
orig_model,
arg_names=["data"],
output_names="o",
keep_opr_name=True,
keep_var_name=True,
optimize_for_inference=False,
)
orig_model.seek(0)
graph = Net.load(orig_model)
r = graph.data_providers_filter.as_count()
assert r == 1
opr = graph.get_opr_by_type(Host2DeviceCopy)
assert isinstance(opr, Host2DeviceCopy)
r1 = graph.params_filter.as_count()
assert r1 == 6
r2 = graph.opr_filter.type(N.ConvolutionForward).as_count()
assert r2 == 3
r3 = graph.opr_filter.not_type(N.ConvolutionForward).as_count()
assert r3 == len(graph.all_oprs) - r2
var = graph.var_filter.name("data").as_unique()
r4 = graph.opr_filter.has_input(var).as_count()
assert r4 == 1
r5 = graph.opr_filter.name("data").as_count()
assert r5 == 1
opr = graph.get_opr_by_name("data")
assert isinstance(opr, Host2DeviceCopy)
var = graph.get_var_by_name("data")
assert isinstance(var, VarNode)
r6 = graph.var_filter.name("*bias").as_count()
assert r6 == 3
def test_optimize_for_inference():
@trace(symbolic=True, capture_as_const=True)
def f(x):
return F.exp(x)
orig_model = io.BytesIO()
f(Tensor(5.0))
f.dump(orig_model, optimize_for_inference=False)
orig_model.seek(0)
optimize_model = io.BytesIO()
net = Net.load(orig_model)
net.dump(optimize_model, enable_io16xc32=True)
optimize_model.seek(0)
res = G.load_graph(optimize_model)
computing_input = res.output_vars_list[0].owner.inputs[0]
assert computing_input.dtype == np.float16
def test_reset_batchsize():
@trace(symbolic=True, capture_as_const=True)
def f(x):
return F.exp(x)
orig_model = io.BytesIO()
f(Tensor(np.random.random((3, 3, 224, 224))))
f.dump(orig_model, optimize_for_inference=False)
orig_model.seek(0)
modified_model = io.BytesIO()
net = Net.load(orig_model)
net.reset_batch_size(1)
net.dump(modified_model, optimize_for_inference=False)
modified_model.seek(0)
net1 = Net.load(modified_model)
assert net1.data_providers_filter.as_unique().shape[0] == 1
def test_modify_opr_name():
@trace(symbolic=True, capture_as_const=True)
def f(x):
return F.exp(x)
orig_model = io.BytesIO()
f(Tensor(np.random.random((3, 3, 224, 224))))
f.dump(orig_model, arg_names=["a"], optimize_for_inference=False)
orig_model.seek(0)
modified_model = io.BytesIO()
net = Net.load(orig_model)
net.modify_opr_names("net")
net.modify_opr_names(lambda x: "net1." + x)
net.dump(modified_model, optimize_for_inference=False)
modified_model.seek(0)
net1 = Net.load(modified_model)
assert net1.data_providers_filter.as_unique().name == "net1.net.a"
def test_dump_cond_take():
a = Tensor([1.0, 2.0])
@trace(symbolic=True, capture_as_const=True)
def fwd(a):
return F.cond_take(a > 1, a)
fwd(a)
orig_model = io.BytesIO()
fwd.dump(
orig_model,
arg_names=["a"],
output_names=["o1", "o2"],
optimize_for_inference=False,
)
orig_model.seek(0)
net = Net.load(orig_model)
var_a = net.input_vars[0]
val, idx = F.cond_take(var_a > 1, var_a)
net.remove_output(*net.output_vars)
val.name = "value"
idx.name = "index"
net.add_output(val, idx)
modified_model = io.BytesIO()
net.dump(modified_model)
modified_model.seek(0)
g = GraphInference(modified_model)
out = g.run(a.numpy())
data = a.numpy()
mask = a.numpy() > 1
np.testing.assert_equal(out["index"], np.where(mask.reshape(-1))[0])
np.testing.assert_equal(out["value"], data[mask])
def test_set_symbolic_shape():
a = Tensor([1.0, 2.0])
@trace(symbolic=True, capture_as_const=True)
def fwd(a):
return F.relu(a * 2)
fwd(a)
orig_model = io.BytesIO()
fwd.dump(
orig_model, arg_names=["a"], output_names=["o"], optimize_for_inference=False,
)
orig_model.seek(0)
net = Net.load(orig_model)
var_a = net.input_vars[0]
saved_symbolic_shape = set_symbolic_shape(True)
assert isinstance(var_a.shape, VarNode)
set_symbolic_shape(False)
assert var_a.shape == var_a.partial_shape
set_symbolic_shape(saved_symbolic_shape)
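def _load_edit_run(serialized_model, *inputs):
    # Summary sketch (illustrative only, mirroring the tests above): load a
    # dumped graph with Network, optionally edit it through its var/opr
    # filters, dump it again and execute it with GraphInference.
    net = Net.load(serialized_model)
    buf = io.BytesIO()
    net.dump(buf, optimize_for_inference=False)
    buf.seek(0)
    return GraphInference(buf).run(*inputs)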
|
[
"megengine.utils.comp_graph_tools.GraphInference",
"megengine.functional.ones",
"megengine.functional.sigmoid",
"megengine.module.Conv2d",
"megengine.functional.cond_take",
"megengine.utils.network.Network.load",
"megengine.functional.add",
"megengine.jit.tracing.trace",
"megengine.functional.mul",
"megengine.utils.network.set_symbolic_shape",
"megengine.core.tensor.megbrain_graph.load_graph",
"megengine.functional.exp",
"megengine.functional.relu",
"megengine.tensor.Tensor",
"megengine.functional.sub"
] |
[((534, 543), 'megengine.tensor.Tensor', 'Tensor', (['(0)'], {}), '(0)\n', (540, 543), False, 'from megengine.tensor import Tensor\n'), ((550, 593), 'megengine.jit.tracing.trace', 'trace', ([], {'symbolic': '(True)', 'capture_as_const': '(True)'}), '(symbolic=True, capture_as_const=True)\n', (555, 593), False, 'from megengine.jit.tracing import trace\n'), ((661, 673), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (671, 673), False, 'import io\n'), ((782, 802), 'megengine.utils.network.Network.load', 'Net.load', (['orig_model'], {}), '(orig_model)\n', (790, 802), True, 'from megengine.utils.network import Network as Net\n'), ((1238, 1258), 'megengine.utils.network.Network.load', 'Net.load', (['orig_model'], {}), '(orig_model)\n', (1246, 1258), True, 'from megengine.utils.network import Network as Net\n'), ((1640, 1660), 'megengine.utils.network.Network.load', 'Net.load', (['orig_model'], {}), '(orig_model)\n', (1648, 1660), True, 'from megengine.utils.network import Network as Net\n'), ((1730, 1744), 'megengine.tensor.Tensor', 'Tensor', (['[1, 2]'], {}), '([1, 2])\n', (1736, 1744), False, 'from megengine.tensor import Tensor\n'), ((1753, 1767), 'megengine.tensor.Tensor', 'Tensor', (['[3, 4]'], {}), '([3, 4])\n', (1759, 1767), False, 'from megengine.tensor import Tensor\n'), ((1774, 1817), 'megengine.jit.tracing.trace', 'trace', ([], {'symbolic': '(True)', 'capture_as_const': '(True)'}), '(symbolic=True, capture_as_const=True)\n', (1779, 1817), False, 'from megengine.jit.tracing import trace\n'), ((1896, 1908), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (1906, 1908), False, 'import io\n'), ((2054, 2074), 'megengine.utils.network.Network.load', 'Net.load', (['orig_model'], {}), '(orig_model)\n', (2062, 2074), True, 'from megengine.utils.network import Network as Net\n'), ((2186, 2203), 'megengine.functional.mul', 'F.mul', (['vara', 'varb'], {}), '(vara, varb)\n', (2191, 2203), True, 'import megengine.functional as F\n'), ((2214, 2225), 'megengine.functional.relu', 'F.relu', (['out'], {}), '(out)\n', (2220, 2225), True, 'import megengine.functional as F\n'), ((2379, 2391), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (2389, 2391), False, 'import io\n'), ((2467, 2497), 'megengine.utils.comp_graph_tools.GraphInference', 'GraphInference', (['modified_model'], {}), '(modified_model)\n', (2481, 2497), False, 'from megengine.utils.comp_graph_tools import GraphInference\n'), ((2534, 2576), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (["out['o']", '[6, 16]'], {}), "(out['o'], [6, 16])\n", (2557, 2576), True, 'import numpy as np\n'), ((2612, 2626), 'megengine.tensor.Tensor', 'Tensor', (['[1, 2]'], {}), '([1, 2])\n', (2618, 2626), False, 'from megengine.tensor import Tensor\n'), ((2635, 2649), 'megengine.tensor.Tensor', 'Tensor', (['[3, 4]'], {}), '([3, 4])\n', (2641, 2649), False, 'from megengine.tensor import Tensor\n'), ((2656, 2699), 'megengine.jit.tracing.trace', 'trace', ([], {'symbolic': '(True)', 'capture_as_const': '(True)'}), '(symbolic=True, capture_as_const=True)\n', (2661, 2699), False, 'from megengine.jit.tracing import trace\n'), ((2778, 2790), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (2788, 2790), False, 'import io\n'), ((2936, 2956), 'megengine.utils.network.Network.load', 'Net.load', (['orig_model'], {}), '(orig_model)\n', (2944, 2956), True, 'from megengine.utils.network import Network as Net\n'), ((3069, 3086), 'megengine.functional.sub', 'F.sub', (['vara', 'varb'], {}), '(vara, varb)\n', (3074, 3086), True, 'import megengine.functional as F\n'), ((3098, 3110), 
'megengine.functional.relu', 'F.relu', (['out1'], {}), '(out1)\n', (3104, 3110), True, 'import megengine.functional as F\n'), ((3306, 3318), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (3316, 3318), False, 'import io\n'), ((3397, 3428), 'megengine.utils.comp_graph_tools.GraphInference', 'GraphInference', (['modified_model1'], {}), '(modified_model1)\n', (3411, 3428), False, 'from megengine.utils.comp_graph_tools import GraphInference\n'), ((3464, 3505), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (["out['o']", '[0, 0]'], {}), "(out['o'], [0, 0])\n", (3487, 3505), True, 'import numpy as np\n'), ((3543, 3555), 'megengine.functional.ones', 'F.ones', (['(2,)'], {}), '((2,))\n', (3549, 3555), True, 'import megengine.functional as F\n'), ((3564, 3576), 'megengine.functional.ones', 'F.ones', (['(2,)'], {}), '((2,))\n', (3570, 3576), True, 'import megengine.functional as F\n'), ((3583, 3626), 'megengine.jit.tracing.trace', 'trace', ([], {'symbolic': '(True)', 'capture_as_const': '(True)'}), '(symbolic=True, capture_as_const=True)\n', (3588, 3626), False, 'from megengine.jit.tracing import trace\n'), ((3680, 3723), 'megengine.jit.tracing.trace', 'trace', ([], {'symbolic': '(True)', 'capture_as_const': '(True)'}), '(symbolic=True, capture_as_const=True)\n', (3685, 3723), False, 'from megengine.jit.tracing import trace\n'), ((3779, 3791), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (3789, 3791), False, 'import io\n'), ((3998, 4013), 'megengine.utils.network.Network.load', 'Net.load', (['model'], {}), '(model)\n', (4006, 4013), True, 'from megengine.utils.network import Network as Net\n'), ((4200, 4215), 'megengine.utils.network.Network.load', 'Net.load', (['model'], {}), '(model)\n', (4208, 4215), True, 'from megengine.utils.network import Network as Net\n'), ((4645, 4660), 'megengine.utils.network.Network.load', 'Net.load', (['model'], {}), '(model)\n', (4653, 4660), True, 'from megengine.utils.network import Network as Net\n'), ((4820, 4834), 'megengine.tensor.Tensor', 'Tensor', (['[1, 2]'], {}), '([1, 2])\n', (4826, 4834), False, 'from megengine.tensor import Tensor\n'), ((4843, 4857), 'megengine.tensor.Tensor', 'Tensor', (['[3, 4]'], {}), '([3, 4])\n', (4849, 4857), False, 'from megengine.tensor import Tensor\n'), ((4864, 4907), 'megengine.jit.tracing.trace', 'trace', ([], {'symbolic': '(True)', 'capture_as_const': '(True)'}), '(symbolic=True, capture_as_const=True)\n', (4869, 4907), False, 'from megengine.jit.tracing import trace\n'), ((4986, 4998), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (4996, 4998), False, 'import io\n'), ((5144, 5164), 'megengine.utils.network.Network.load', 'Net.load', (['orig_model'], {}), '(orig_model)\n', (5152, 5164), True, 'from megengine.utils.network import Network as Net\n'), ((5266, 5278), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (5276, 5278), False, 'import io\n'), ((5354, 5384), 'megengine.utils.comp_graph_tools.GraphInference', 'GraphInference', (['modified_model'], {}), '(modified_model)\n', (5368, 5384), False, 'from megengine.utils.comp_graph_tools import GraphInference\n'), ((5421, 5464), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (["out['o']", '[12, 18]'], {}), "(out['o'], [12, 18])\n", (5444, 5464), True, 'import numpy as np\n'), ((5499, 5513), 'megengine.tensor.Tensor', 'Tensor', (['[1, 2]'], {}), '([1, 2])\n', (5505, 5513), False, 'from megengine.tensor import Tensor\n'), ((5522, 5536), 'megengine.tensor.Tensor', 'Tensor', (['[3, 4]'], {}), '([3, 4])\n', (5528, 5536), False, 'from megengine.tensor import 
Tensor\n'), ((5543, 5586), 'megengine.jit.tracing.trace', 'trace', ([], {'symbolic': '(True)', 'capture_as_const': '(True)'}), '(symbolic=True, capture_as_const=True)\n', (5548, 5586), False, 'from megengine.jit.tracing import trace\n'), ((5665, 5677), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (5675, 5677), False, 'import io\n'), ((5823, 5843), 'megengine.utils.network.Network.load', 'Net.load', (['orig_model'], {}), '(orig_model)\n', (5831, 5843), True, 'from megengine.utils.network import Network as Net\n'), ((6046, 6058), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (6056, 6058), False, 'import io\n'), ((6134, 6164), 'megengine.utils.comp_graph_tools.GraphInference', 'GraphInference', (['modified_model'], {}), '(modified_model)\n', (6148, 6164), False, 'from megengine.utils.comp_graph_tools import GraphInference\n'), ((6198, 6239), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (["out['o']", '[2, 4]'], {}), "(out['o'], [2, 4])\n", (6221, 6239), True, 'import numpy as np\n'), ((6273, 6287), 'megengine.tensor.Tensor', 'Tensor', (['[1, 2]'], {}), '([1, 2])\n', (6279, 6287), False, 'from megengine.tensor import Tensor\n'), ((6296, 6310), 'megengine.tensor.Tensor', 'Tensor', (['[3, 4]'], {}), '([3, 4])\n', (6302, 6310), False, 'from megengine.tensor import Tensor\n'), ((6317, 6360), 'megengine.jit.tracing.trace', 'trace', ([], {'symbolic': '(True)', 'capture_as_const': '(True)'}), '(symbolic=True, capture_as_const=True)\n', (6322, 6360), False, 'from megengine.jit.tracing import trace\n'), ((6439, 6451), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (6449, 6451), False, 'import io\n'), ((6597, 6617), 'megengine.utils.network.Network.load', 'Net.load', (['orig_model'], {}), '(orig_model)\n', (6605, 6617), True, 'from megengine.utils.network import Network as Net\n'), ((6739, 6757), 'megengine.functional.add', 'F.add', (['varo', 'inp_c'], {}), '(varo, inp_c)\n', (6744, 6757), True, 'import megengine.functional as F\n'), ((6855, 6867), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (6865, 6867), False, 'import io\n'), ((6944, 6974), 'megengine.utils.comp_graph_tools.GraphInference', 'GraphInference', (['modified_model'], {}), '(modified_model)\n', (6958, 6974), False, 'from megengine.utils.comp_graph_tools import GraphInference\n'), ((7117, 7135), 'megengine.tensor.Tensor', 'Tensor', (['[1.0, 2.0]'], {}), '([1.0, 2.0])\n', (7123, 7135), False, 'from megengine.tensor import Tensor\n'), ((7144, 7162), 'megengine.tensor.Tensor', 'Tensor', (['[3.0, 4.0]'], {}), '([3.0, 4.0])\n', (7150, 7162), False, 'from megengine.tensor import Tensor\n'), ((7169, 7212), 'megengine.jit.tracing.trace', 'trace', ([], {'symbolic': '(True)', 'capture_as_const': '(True)'}), '(symbolic=True, capture_as_const=True)\n', (7174, 7212), False, 'from megengine.jit.tracing import trace\n'), ((7300, 7312), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (7310, 7312), False, 'import io\n'), ((7490, 7510), 'megengine.utils.network.Network.load', 'Net.load', (['orig_model'], {}), '(orig_model)\n', (7498, 7510), True, 'from megengine.utils.network import Network as Net\n'), ((7648, 7672), 'megengine.functional.sigmoid', 'F.sigmoid', (['(var_a + var_b)'], {}), '(var_a + var_b)\n', (7657, 7672), True, 'import megengine.functional as F\n'), ((7809, 7821), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (7819, 7821), False, 'import io\n'), ((7887, 7917), 'megengine.utils.comp_graph_tools.GraphInference', 'GraphInference', (['modified_model'], {}), '(modified_model)\n', (7901, 7917), False, 'from 
megengine.utils.comp_graph_tools import GraphInference\n'), ((8507, 8550), 'megengine.jit.tracing.trace', 'trace', ([], {'symbolic': '(True)', 'capture_as_const': '(True)'}), '(symbolic=True, capture_as_const=True)\n', (8512, 8550), False, 'from megengine.jit.tracing import trace\n'), ((8663, 8675), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (8673, 8675), False, 'import io\n'), ((8900, 8920), 'megengine.utils.network.Network.load', 'Net.load', (['orig_model'], {}), '(orig_model)\n', (8908, 8920), True, 'from megengine.utils.network import Network as Net\n'), ((9805, 9848), 'megengine.jit.tracing.trace', 'trace', ([], {'symbolic': '(True)', 'capture_as_const': '(True)'}), '(symbolic=True, capture_as_const=True)\n', (9810, 9848), False, 'from megengine.jit.tracing import trace\n'), ((9905, 9917), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (9915, 9917), False, 'import io\n'), ((10035, 10047), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (10045, 10047), False, 'import io\n'), ((10058, 10078), 'megengine.utils.network.Network.load', 'Net.load', (['orig_model'], {}), '(orig_model)\n', (10066, 10078), True, 'from megengine.utils.network import Network as Net\n'), ((10168, 10196), 'megengine.core.tensor.megbrain_graph.load_graph', 'G.load_graph', (['optimize_model'], {}), '(optimize_model)\n', (10180, 10196), True, 'import megengine.core.tensor.megbrain_graph as G\n'), ((10341, 10384), 'megengine.jit.tracing.trace', 'trace', ([], {'symbolic': '(True)', 'capture_as_const': '(True)'}), '(symbolic=True, capture_as_const=True)\n', (10346, 10384), False, 'from megengine.jit.tracing import trace\n'), ((10441, 10453), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (10451, 10453), False, 'import io\n'), ((10602, 10614), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (10612, 10614), False, 'import io\n'), ((10625, 10645), 'megengine.utils.network.Network.load', 'Net.load', (['orig_model'], {}), '(orig_model)\n', (10633, 10645), True, 'from megengine.utils.network import Network as Net\n'), ((10772, 10796), 'megengine.utils.network.Network.load', 'Net.load', (['modified_model'], {}), '(modified_model)\n', (10780, 10796), True, 'from megengine.utils.network import Network as Net\n'), ((10896, 10939), 'megengine.jit.tracing.trace', 'trace', ([], {'symbolic': '(True)', 'capture_as_const': '(True)'}), '(symbolic=True, capture_as_const=True)\n', (10901, 10939), False, 'from megengine.jit.tracing import trace\n'), ((10996, 11008), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (11006, 11008), False, 'import io\n'), ((11174, 11186), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (11184, 11186), False, 'import io\n'), ((11197, 11217), 'megengine.utils.network.Network.load', 'Net.load', (['orig_model'], {}), '(orig_model)\n', (11205, 11217), True, 'from megengine.utils.network import Network as Net\n'), ((11396, 11420), 'megengine.utils.network.Network.load', 'Net.load', (['modified_model'], {}), '(modified_model)\n', (11404, 11420), True, 'from megengine.utils.network import Network as Net\n'), ((11530, 11548), 'megengine.tensor.Tensor', 'Tensor', (['[1.0, 2.0]'], {}), '([1.0, 2.0])\n', (11536, 11548), False, 'from megengine.tensor import Tensor\n'), ((11555, 11598), 'megengine.jit.tracing.trace', 'trace', ([], {'symbolic': '(True)', 'capture_as_const': '(True)'}), '(symbolic=True, capture_as_const=True)\n', (11560, 11598), False, 'from megengine.jit.tracing import trace\n'), ((11681, 11693), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (11691, 11693), False, 'import io\n'), ((11866, 11886), 
'megengine.utils.network.Network.load', 'Net.load', (['orig_model'], {}), '(orig_model)\n', (11874, 11886), True, 'from megengine.utils.network import Network as Net\n'), ((11933, 11962), 'megengine.functional.cond_take', 'F.cond_take', (['(var_a > 1)', 'var_a'], {}), '(var_a > 1, var_a)\n', (11944, 11962), True, 'import megengine.functional as F\n'), ((12101, 12113), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (12111, 12113), False, 'import io\n'), ((12179, 12209), 'megengine.utils.comp_graph_tools.GraphInference', 'GraphInference', (['modified_model'], {}), '(modified_model)\n', (12193, 12209), False, 'from megengine.utils.comp_graph_tools import GraphInference\n'), ((12361, 12410), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (["out['value']", 'data[mask]'], {}), "(out['value'], data[mask])\n", (12384, 12410), True, 'import numpy as np\n'), ((12453, 12471), 'megengine.tensor.Tensor', 'Tensor', (['[1.0, 2.0]'], {}), '([1.0, 2.0])\n', (12459, 12471), False, 'from megengine.tensor import Tensor\n'), ((12478, 12521), 'megengine.jit.tracing.trace', 'trace', ([], {'symbolic': '(True)', 'capture_as_const': '(True)'}), '(symbolic=True, capture_as_const=True)\n', (12483, 12521), False, 'from megengine.jit.tracing import trace\n'), ((12596, 12608), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (12606, 12608), False, 'import io\n'), ((12749, 12769), 'megengine.utils.network.Network.load', 'Net.load', (['orig_model'], {}), '(orig_model)\n', (12757, 12769), True, 'from megengine.utils.network import Network as Net\n'), ((12828, 12852), 'megengine.utils.network.set_symbolic_shape', 'set_symbolic_shape', (['(True)'], {}), '(True)\n', (12846, 12852), False, 'from megengine.utils.network import as_oprnode, set_symbolic_shape\n'), ((12901, 12926), 'megengine.utils.network.set_symbolic_shape', 'set_symbolic_shape', (['(False)'], {}), '(False)\n', (12919, 12926), False, 'from megengine.utils.network import as_oprnode, set_symbolic_shape\n'), ((12977, 13017), 'megengine.utils.network.set_symbolic_shape', 'set_symbolic_shape', (['saved_symbolic_shape'], {}), '(saved_symbolic_shape)\n', (12995, 13017), False, 'from megengine.utils.network import as_oprnode, set_symbolic_shape\n'), ((5875, 5895), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (5883, 5895), True, 'import numpy as np\n'), ((9878, 9886), 'megengine.functional.exp', 'F.exp', (['x'], {}), '(x)\n', (9883, 9886), True, 'import megengine.functional as F\n'), ((9924, 9935), 'megengine.tensor.Tensor', 'Tensor', (['(5.0)'], {}), '(5.0)\n', (9930, 9935), False, 'from megengine.tensor import Tensor\n'), ((10414, 10422), 'megengine.functional.exp', 'F.exp', (['x'], {}), '(x)\n', (10419, 10422), True, 'import megengine.functional as F\n'), ((10969, 10977), 'megengine.functional.exp', 'F.exp', (['x'], {}), '(x)\n', (10974, 10977), True, 'import megengine.functional as F\n'), ((11630, 11651), 'megengine.functional.cond_take', 'F.cond_take', (['(a > 1)', 'a'], {}), '(a > 1, a)\n', (11641, 11651), True, 'import megengine.functional as F\n'), ((12553, 12566), 'megengine.functional.relu', 'F.relu', (['(a * 2)'], {}), '(a * 2)\n', (12559, 12566), True, 'import megengine.functional as F\n'), ((8227, 8245), 'megengine.module.Conv2d', 'M.Conv2d', (['(3)', '(32)', '(3)'], {}), '(3, 32, 3)\n', (8235, 8245), True, 'import megengine.module as M\n'), ((8271, 8290), 'megengine.module.Conv2d', 'M.Conv2d', (['(32)', '(32)', '(3)'], {}), '(32, 32, 3)\n', (8279, 8290), True, 'import megengine.module as M\n'), ((8316, 8335), 
'megengine.module.Conv2d', 'M.Conv2d', (['(32)', '(32)', '(3)'], {}), '(32, 32, 3)\n', (8324, 8335), True, 'import megengine.module as M\n'), ((8609, 8643), 'numpy.random.random', 'np.random.random', (['(1, 3, 224, 224)'], {}), '((1, 3, 224, 224))\n', (8625, 8643), True, 'import numpy as np\n'), ((10467, 10501), 'numpy.random.random', 'np.random.random', (['(3, 3, 224, 224)'], {}), '((3, 3, 224, 224))\n', (10483, 10501), True, 'import numpy as np\n'), ((11022, 11056), 'numpy.random.random', 'np.random.random', (['(3, 3, 224, 224)'], {}), '((3, 3, 224, 224))\n', (11038, 11056), True, 'import numpy as np\n'), ((8067, 8083), 'megengine.functional.sigmoid', 'F.sigmoid', (['(a + b)'], {}), '(a + b)\n', (8076, 8083), True, 'import megengine.functional as F\n')]
|
from fastapi import FastAPI
from sqlmodel import SQLModel, create_engine, Session, select
from datetime import datetime
from datastore.model import Label, LabelAssignment, Meter, Measurement
from pydantic import BaseModel
SQLITE_FILE_NAME = "database.db"
sqlite_url = f"sqlite:///{SQLITE_FILE_NAME}"
connect_args = {"check_same_thread": False}
engine = create_engine(sqlite_url, echo=True, connect_args=connect_args)
def create_db_and_tables():
SQLModel.metadata.create_all(engine)
app = FastAPI()
@app.on_event("startup")
def on_startup():
create_db_and_tables()
@app.get("/meters")
def read_usage():
with Session(engine) as session:
meters = session.query(Meter).all()
return meters
@app.get("/meters/{meter_id}/")
def read_meter(meter_id: int):
with Session(engine) as session:
meter = session.query(Meter).filter(Meter.id == meter_id).first()
return meter
@app.get("/meters/{meter_id}/measurements")
def read_measurements(meter_id: int, start_date: datetime = None, end_date: datetime = None):
with Session(engine) as session:
query = session.query(Measurement).filter(
Measurement.meter_id == meter_id)
if start_date:
query = query.filter(Measurement.capture_time >= start_date)
if end_date:
query = query.filter(Measurement.capture_time <= end_date)
measurements = query.all()
return measurements
class LabelAssignmentPostData(BaseModel):
label_id: int
start_time: datetime
end_time: datetime
@app.post("/meters/{meter_id}/labels")
def assign_label(meter_id: int, data: LabelAssignmentPostData):
with Session(engine) as session:
assignment = LabelAssignment(meter_id=meter_id,
label_id=data.label_id,
start_time=data.start_time,
end_time=data.end_time)
session.add(assignment)
session.commit()
return "Label assigned successfully!"
@app.get("/meters/{meter_id}/labels")
def get_assigned_labels(meter_id: int):
with Session(engine) as session:
labels = session.query(LabelAssignment.start_time, LabelAssignment.end_time, Label.name, Label.color, LabelAssignment.id).filter(
LabelAssignment.meter_id == meter_id).join(Label).all()
return labels
@app.delete("/labels/assignments/{assignment_id}")
def delete_assignment(assignment_id: int):
with Session(engine) as session:
session.query(LabelAssignment).filter(
LabelAssignment.id == assignment_id).delete()
session.commit()
return "Label assignment deleted successfully!"
@app.get("/labels")
def get_labels():
with Session(engine) as session:
labels = session.query(Label).all()
return labels
@app.post("/db/setDefaults")
def set_defaults():
clear_db()
with Session(engine) as session:
meter1 = Meter(serial_number="Meter one", id=1)
meter2 = Meter(serial_number="Meter two", id=2)
for i in range(0, 59):
session.add(Measurement(meter_id=meter1.id, voltage_phase_1=10+i,
voltage_phase_2=(i*i) % 230, voltage_phase_3=30, capture_time=datetime(2020, 1, 1, 0, 0, i)))
session.add(meter1)
session.add(meter2)
label1 = Label(name="Label one", id=1, color="red")
label2 = Label(name="Label two", id=2, color="blue")
ass1 = LabelAssignment(meter_id=meter1.id, label_id=label1.id, start_time=datetime(
2020, 1, 1, 0, 0, 30), end_time=datetime(2020, 1, 1, 0, 0, 40))
session.add(label1)
session.add(label2)
session.add(ass1)
session.commit()
return "Default set successfully!"
@app.post("/db/clear")
def clear_db():
with Session(engine) as session:
session.query(Meter).delete()
session.query(Measurement).delete()
session.query(Label).delete()
session.query(LabelAssignment).delete()
session.commit()
return "DB cleared successfuly!"
if __name__ == "__main__":
import os
import uvicorn
host = os.getenv("SMIC_HOST", "0.0.0.0")
port = int(os.getenv("SMIC_PORT", 8081))
uvicorn.run(app, host=host, port=port)
|
[
"sqlmodel.SQLModel.metadata.create_all",
"sqlmodel.Session",
"sqlmodel.create_engine"
] |
[((355, 418), 'sqlmodel.create_engine', 'create_engine', (['sqlite_url'], {'echo': '(True)', 'connect_args': 'connect_args'}), '(sqlite_url, echo=True, connect_args=connect_args)\n', (368, 418), False, 'from sqlmodel import SQLModel, create_engine, Session, select\n'), ((498, 507), 'fastapi.FastAPI', 'FastAPI', ([], {}), '()\n', (505, 507), False, 'from fastapi import FastAPI\n'), ((453, 489), 'sqlmodel.SQLModel.metadata.create_all', 'SQLModel.metadata.create_all', (['engine'], {}), '(engine)\n', (481, 489), False, 'from sqlmodel import SQLModel, create_engine, Session, select\n'), ((4336, 4369), 'os.getenv', 'os.getenv', (['"""SMIC_HOST"""', '"""0.0.0.0"""'], {}), "('SMIC_HOST', '0.0.0.0')\n", (4345, 4369), False, 'import os\n'), ((4420, 4458), 'uvicorn.run', 'uvicorn.run', (['app'], {'host': 'host', 'port': 'port'}), '(app, host=host, port=port)\n', (4431, 4458), False, 'import uvicorn\n'), ((629, 644), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (636, 644), False, 'from sqlmodel import SQLModel, create_engine, Session, select\n'), ((797, 812), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (804, 812), False, 'from sqlmodel import SQLModel, create_engine, Session, select\n'), ((1069, 1084), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (1076, 1084), False, 'from sqlmodel import SQLModel, create_engine, Session, select\n'), ((1670, 1685), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (1677, 1685), False, 'from sqlmodel import SQLModel, create_engine, Session, select\n'), ((1719, 1834), 'datastore.model.LabelAssignment', 'LabelAssignment', ([], {'meter_id': 'meter_id', 'label_id': 'data.label_id', 'start_time': 'data.start_time', 'end_time': 'data.end_time'}), '(meter_id=meter_id, label_id=data.label_id, start_time=data.\n start_time, end_time=data.end_time)\n', (1734, 1834), False, 'from datastore.model import Label, LabelAssignment, Meter, Measurement\n'), ((2282, 2297), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (2289, 2297), False, 'from sqlmodel import SQLModel, create_engine, Session, select\n'), ((2643, 2658), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (2650, 2658), False, 'from sqlmodel import SQLModel, create_engine, Session, select\n'), ((2905, 2920), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (2912, 2920), False, 'from sqlmodel import SQLModel, create_engine, Session, select\n'), ((3074, 3089), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (3081, 3089), False, 'from sqlmodel import SQLModel, create_engine, Session, select\n'), ((3119, 3157), 'datastore.model.Meter', 'Meter', ([], {'serial_number': '"""Meter one"""', 'id': '(1)'}), "(serial_number='Meter one', id=1)\n", (3124, 3157), False, 'from datastore.model import Label, LabelAssignment, Meter, Measurement\n'), ((3175, 3213), 'datastore.model.Meter', 'Meter', ([], {'serial_number': '"""Meter two"""', 'id': '(2)'}), "(serial_number='Meter two', id=2)\n", (3180, 3213), False, 'from datastore.model import Label, LabelAssignment, Meter, Measurement\n'), ((3528, 3570), 'datastore.model.Label', 'Label', ([], {'name': '"""Label one"""', 'id': '(1)', 'color': '"""red"""'}), "(name='Label one', id=1, color='red')\n", (3533, 3570), False, 'from datastore.model import Label, LabelAssignment, Meter, Measurement\n'), ((3588, 3631), 'datastore.model.Label', 'Label', ([], {'name': '"""Label two"""', 'id': '(2)', 'color': '"""blue"""'}), "(name='Label two', id=2, color='blue')\n", (3593, 3631), 
False, 'from datastore.model import Label, LabelAssignment, Meter, Measurement\n'), ((4000, 4015), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (4007, 4015), False, 'from sqlmodel import SQLModel, create_engine, Session, select\n'), ((4385, 4413), 'os.getenv', 'os.getenv', (['"""SMIC_PORT"""', '(8081)'], {}), "('SMIC_PORT', 8081)\n", (4394, 4413), False, 'import os\n'), ((2004, 2034), 'datetime.datetime', 'datetime', (['(2020)', '(1)', '(1)', '(0)', '(0)', '(30)'], {}), '(2020, 1, 1, 0, 0, 30)\n', (2012, 2034), False, 'from datetime import datetime\n'), ((2058, 2088), 'datetime.datetime', 'datetime', (['(2020)', '(1)', '(1)', '(0)', '(0)', '(40)'], {}), '(2020, 1, 1, 0, 0, 40)\n', (2066, 2088), False, 'from datetime import datetime\n'), ((3714, 3744), 'datetime.datetime', 'datetime', (['(2020)', '(1)', '(1)', '(0)', '(0)', '(30)'], {}), '(2020, 1, 1, 0, 0, 30)\n', (3722, 3744), False, 'from datetime import datetime\n'), ((3768, 3798), 'datetime.datetime', 'datetime', (['(2020)', '(1)', '(1)', '(0)', '(0)', '(40)'], {}), '(2020, 1, 1, 0, 0, 40)\n', (3776, 3798), False, 'from datetime import datetime\n'), ((3421, 3450), 'datetime.datetime', 'datetime', (['(2020)', '(1)', '(1)', '(0)', '(0)', 'i'], {}), '(2020, 1, 1, 0, 0, i)\n', (3429, 3450), False, 'from datetime import datetime\n')]
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import functools
from typing import Iterable, List, Optional, Union
import numpy as np
import megengine._internal as mgb
from megengine._internal import CompGraph, CompNode
from ..core import zeros
from ..core.graph import _use_default_if_none
from ..core.tensor import Tensor, wrap_io_tensor
from .elemwise import ceil
from .utils import _decide_comp_node_and_comp_graph
@wrap_io_tensor
def broadcast_to(inp: Tensor, shape: Union[int, Iterable[int]]) -> Tensor:
"""
Broadcast a tensor to ``shape``
:param inp: The input tensor
:param shape: The target shape
:return: The output tensor
Examples:
.. testcode::
import numpy as np
from megengine import tensor
import megengine.functional as F
data = tensor(np.arange(0, 6, dtype=np.float32).reshape(2, 3))
out = F.broadcast_to(data, (4, 2, 3))
print(out.numpy())
Outputs:
.. testoutput::
[[[0. 1. 2.]
[3. 4. 5.]]
[[0. 1. 2.]
[3. 4. 5.]]
[[0. 1. 2.]
[3. 4. 5.]]
[[0. 1. 2.]
[3. 4. 5.]]]
"""
if isinstance(shape, int):
shape = (shape,)
return mgb.opr.broadcast(inp, shape)
def _get_idx(index, axis):
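    # Build a tuple of flattened per-dimension index arrays for advanced
    # indexing: every axis except `axis` contributes its own coordinate grid
    # (a linspace 0..size-1 broadcast to the shape of `index`), while `axis`
    # itself contributes the user-supplied indices, flattened.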
index_dims = len(index.imm_shape)
idx = []
comp_node, comp_graph = _decide_comp_node_and_comp_graph(index)
for i in range(index_dims):
if i != axis:
shape = [1] * index_dims
shape[i] = index.axis_shape(i)
arange = mgb.opr.linspace(
0,
index.axis_shape(i) - 1,
index.axis_shape(i),
comp_node=comp_node,
comp_graph=comp_graph,
)
arange = (
arange.reshape(*shape)
.broadcast(index.shape)
.reshape(-1)
.astype(np.int32)
)
idx.append(arange)
else:
idx.append(index.reshape(-1))
return tuple(idx)
@wrap_io_tensor
def gather(inp: Tensor, axis: int, index: Tensor) -> Tensor:
r"""
Gather data from :attr:`inp` on :attr:`axis` using :attr:`index`.
For a 3-D tensor, the output is specified by::
out[i][j][k] = inp[index[i][j][k]][j][k] # if axis == 0
out[i][j][k] = inp[i][index[i][j][k]][k] # if axis == 1
out[i][j][k] = inp[i][j][index[i][j][k]] # if axis == 2
if :attr:`inp` is an n-dimensional tensor with size
:math:`(x_0,x_1,...,x_{i-1},x_i,x_{i+1},...,x_{n-1})` and axis=i,
then :attr:`index` must be an n-dimensional tensor with size
:math:`(x_0,x_1,...,x_{i-1},y,x_{i+1},...,x_{n-1})` where :math:`y\ge 1` and
output will have the same size as :attr:`index`.
:param inp: the source tensor
:param axis: the axis along which to index
:param index: the indices of elements to gather
Examples:
.. testcode::
import megengine.functional as F
from megengine.core import tensor
inp = tensor([
[1,2], [3,4], [5,6],
])
index = tensor([[0,2], [1,0]])
oup = F.gather(inp, 0, index)
print(oup.numpy())
Outputs:
.. testoutput::
[[1 6]
[3 2]]
"""
input_shape = inp.imm_shape
index_shape = index.imm_shape
input_dims = len(input_shape)
index_dims = len(index_shape)
if input_dims != index_dims:
raise ValueError(
"The index tensor must have same dimensions as input tensor, "
"But the input dims:{}, the index dims:{}".format(input_dims, index_dims)
)
if axis < 0 or axis >= input_dims:
raise ValueError(
"Index axis {} is output of bounds, should in range [0 {})".format(
axis, input_dims
)
)
for i in range(input_dims):
if i != axis and input_shape[i] != index_shape[i]:
raise ValueError(
"The input {} and index {} must have the same size apart from axis {}".format(
input_shape, index_shape, axis
)
)
idx = _get_idx(index, axis)
return mgb.opr.advanced_indexing(inp)[idx].reshape(
index.shape
) # pylint: disable=no-member
@wrap_io_tensor
def concat(
inps: Iterable[Tensor],
axis: int = 0,
device: Optional[CompNode] = None,
comp_graph: Optional[CompGraph] = None,
) -> Tensor:
r"""
Concat some tensors
:param inps: Input tensors to concat
:param axis: the dimension over which the tensors are concatenated. Default: 0
:param device: The comp node output on. Default: None
:param comp_graph: The graph in which output is. Default: None
:return: The output tensor
Examples:
.. testcode::
import numpy as np
from megengine import tensor
import megengine.functional as F
data1 = tensor(np.arange(0, 6, dtype=np.float32).reshape((2, 3)))
data2 = tensor(np.arange(6, 12, dtype=np.float32).reshape((2, 3)))
out = F.concat([data1, data2])
print(out.numpy())
Outputs:
.. testoutput::
[[ 0. 1. 2.]
[ 3. 4. 5.]
[ 6. 7. 8.]
[ 9. 10. 11.]]
"""
# Output buffer not supported
return mgb.opr.concat(
*list(inps), axis=axis, comp_node=device, comp_graph=comp_graph
)
@wrap_io_tensor
def scatter(inp: Tensor, axis: int, index: Tensor, source: Tensor) -> Tensor:
r"""
Writes all values from the tensor :attr:`source` into :attr:`inp` at the indices specified in the :attr:`index` tensor.
For each value in :attr:`source`, its output index is specified by its index
in :attr:`source` for ``axis != dimension`` and by the corresponding value in
:attr:`index` for ``axis = dimension``.
For a 3-D tensor, :attr:`inp` is updated as::
inp[index[i][j][k]][j][k] = source[i][j][k] # if axis == 0
inp[i][index[i][j][k]][k] = source[i][j][k] # if axis == 1
inp[i][j][index[i][j][k]] = source[i][j][k] # if axis == 2
:attr:`inp`, :attr:`index` and :attr:`source` should have same number of dimensions.
It is also required that ``source.shape(d) <= inp.shape(d)`` and ``index.shape(d) == source.shape(d)``
for all dimensions ``d``.
Moreover, the values of :attr:`index` must be between ``0`` and ``inp.shape(axis) - 1`` inclusive.
.. note::
        Please note that, due to performance issues, the result is uncertain on the GPU device
        if multiple source positions are scattered to the same destination position
        according to the index tensor.
        In the following example, oup[0][2] may come either from source[0][2] (value 0.2256)
        or from source[1][2] (value 0.5339), if index[1][2] is changed from 1 to 0.
:param inp: the inp tensor which to be scattered
:param axis: the axis along which to index
:param index: the indices of elements to scatter
:param source: the source element(s) to scatter
Examples:
.. testcode::
import numpy as np
import megengine.functional as F
from megengine.core import tensor
inp = tensor(np.zeros(shape=(3,5),dtype=np.float32))
source = tensor([[0.9935,0.9465,0.2256,0.8926,0.4396],[0.7723,0.0718,0.5939,0.357,0.4576]])
index = tensor([[0,2,0,2,1],[2,0,1,1,2]])
oup = F.scatter(inp, 0, index,source)
print(oup.numpy())
Outputs:
.. testoutput::
[[0.9935 0.0718 0.2256 0. 0. ]
[0. 0. 0.5939 0.357 0.4396]
[0.7723 0.9465 0. 0.8926 0.4576]]
"""
input_shape = inp.imm_shape
index_shape = index.imm_shape
source_shape = source.imm_shape
input_dims = len(input_shape)
index_dims = len(index_shape)
source_dims = len(source_shape)
if input_dims != index_dims or input_dims != source_dims:
raise ValueError("The input, source and index tensor must have same dimensions")
if axis < 0 or axis >= input_dims:
raise ValueError(
"Index axis {} is output of bounds, should in range [0 {})".format(
axis, input_dims
)
)
for i in range(source_dims):
if source_shape[i] > input_shape[i]:
raise ValueError(
"The each shape size for source {} must be less than or equal to input {} ".format(
source_shape, input_shape
)
)
for i in range(index_dims):
if index_shape[i] != source_shape[i]:
raise ValueError(
"The each shape size for index {} must be equal to source {} ".format(
index_shape, source_shape
)
)
for i in range(index_dims):
if i != axis and index_shape[i] > input_shape[i]:
raise ValueError(
"The index {} must be less than or equal to input {} size apart from axis {}".format(
index_shape, input_shape, axis
)
)
idx = _get_idx(index, axis)
return mgb.opr.set_advanced_indexing(inp, source.flatten())[idx]
@wrap_io_tensor
def where(mask: Tensor, x: Tensor, y: Tensor) -> Tensor:
r"""
Select elements either from Tensor x or Tensor y, according to mask.
.. math::
\textrm{out}_i = x_i \textrm{ if } \textrm{mask}_i \textrm{ is True else } y_i
:param mask: a mask used for choosing x or y
:param x: the first choice
:param y: the second choice
Examples:
.. testcode::
        import numpy as np
        from megengine import tensor
import megengine.functional as F
mask = tensor(np.array([[1, 0], [0, 1]], dtype=np.int32))
x = tensor(np.array([[1, np.inf], [np.nan, 4]],
dtype=np.float32))
y = tensor(np.array([[5, 6], [7, 8]], dtype=np.float32))
out = F.where(mask, x, y)
print(out.numpy())
Outputs:
.. testoutput::
[[1. 6.]
[7. 4.]]
"""
v0, index0 = mgb.opr.cond_take(
x, mask, mode=mgb.opr_param_defs.CondTake.Mode.EQ, val=1
)
v1, index1 = mgb.opr.cond_take(
y, mask, mode=mgb.opr_param_defs.CondTake.Mode.EQ, val=0
)
out = x.flatten()
index = mgb.opr.concat(index0, index1, axis=0)
v = mgb.opr.concat(v0, v1, axis=0)
out = mgb.opr.set_advanced_indexing(out, v)[index]
out = out.reshape(x.shape)
return out
@wrap_io_tensor
def cond_take(mask: Tensor, x: Tensor, val=1) -> Tensor:
r"""
Take elements from data if specific condition is satisfied on mask. This operator has two outputs: the first is the elements taken, and the second is the indices corresponding to those elements; they are both 1-dimensional. High-dimension input would first be flattened.
:param mask: condition param; must be the same shape with data
:param x: input tensor from which to take elements
:param val: value to be compared to by mode
Examples:
.. testcode::
        import numpy as np
        from megengine import tensor
import megengine.functional as F
mask = tensor(np.array([[1, 0], [0, 1]], dtype=np.int32))
x = tensor(np.array([[1, np.inf], [np.nan, 4]],
dtype=np.float32))
v, index = F.cond_take(mask, x, 1)
print(v, index)
Outputs:
.. testoutput::
Tensor([1. 4.]) Tensor([0 3], dtype=int32)
"""
v, index = mgb.opr.cond_take(
x, mask, mode=mgb.opr_param_defs.CondTake.Mode.EQ, val=val
)
return v, index
def shapeof(x: Tensor, axis=None):
r"""
The shape of input tensor.
"""
return x.shapeof(axis=axis)
@wrap_io_tensor
def dimshuffle(inp: Tensor, pattern: Iterable[int]) -> Tensor:
r"""
Swap shapes and strides according to given pattern
:param inp: Input tensor
:param pattern: a list of integers including 0, 1, ... , ``ndim``-1, and any number of ``'x'`` char in dimensions where this tensor should be broadcasted. For examples:
* (``'x'``) -> make a 0d (scalar) into a 1d vector
* (0, 1) -> identity for 2d vectors
* (1, 0) -> inverts the first and second dimensions
* (``'x'``, 0) -> make a row out of a 1d vector (N to 1xN)
* (0, ``'x'``) -> make a column out of a 1d vector (N to Nx1)
* (2, 0, 1) -> AxBxC to CxAxB
* (0, ``'x'``, 1) -> AxB to Ax1xB
* (1, ``'x'``, 0) -> AxB to Bx1xA
    * (1,) -> This removes dimension 0. It must be a broadcastable dimension (1xA to A)
:return: The output tensor
Examples:
.. testcode::
import numpy as np
from megengine import tensor
import megengine.functional as F
x = tensor(np.array([[1, 1], [0, 0]], dtype=np.int32))
out = F.dimshuffle(x, (1, 0))
print(out.numpy())
Outputs:
.. testoutput::
[[1 0]
[1 0]]
"""
return mgb.opr.dimshuffle(inp, pattern)
@wrap_io_tensor
def reshape(inp: Tensor, target_shape: Iterable[int]) -> Tensor:
r"""
Reshape a tensor to given target shape; total number of logical elements must
remain unchanged
:param inp: Input tensor
:param target_shape: target shape, the components would be concatenated to form the
target shape, and it can contain an element of -1 representing unspec_axis.
Examples:
.. testcode::
import numpy as np
from megengine import tensor
import megengine.functional as F
x = tensor(np.arange(12, dtype=np.int32))
out = F.reshape(x, (3, 2, 2))
print(out.numpy())
Outputs:
.. testoutput::
[[[ 0 1]
[ 2 3]]
[[ 4 5]
[ 6 7]]
[[ 8 9]
[10 11]]]
"""
return mgb.opr.reshape(inp, target_shape)
def transpose(inp: Tensor, pattern: Iterable[int]) -> Tensor:
r"""Equivalent to :func:`dimshuffle`
"""
return dimshuffle(inp, pattern)
@wrap_io_tensor
def add_axis(inp: Tensor, axis: int) -> Tensor:
r"""
Add dimension before given axis.
:param inp: Input tensor
:param axis: Place of new axes
:return: The output tensor
Examples:
.. testcode::
import numpy as np
from megengine import tensor
import megengine.functional as F
x = tensor([1, 2])
out = F.add_axis(x, 0)
print(out.shape)
Outputs:
.. testoutput::
(1, 2)
"""
if not isinstance(axis, int):
raise ValueError("axis must be int, but got type:{}".format(type(axis)))
return mgb.opr.add_axis(inp, axis)
@wrap_io_tensor
def remove_axis(inp: Tensor, axis: int) -> Tensor:
r"""
Remove dimension of shape 1.
:param inp: Input tensor
:param axis: Place of axis to be removed
:return: The output tensor
Examples:
.. testcode::
import numpy as np
from megengine import tensor
import megengine.functional as F
x = tensor(np.array([1, 2], dtype=np.int32).reshape(1, 1, 2, 1))
out = F.remove_axis(x, 3)
print(out.shape)
Outputs:
.. testoutput::
(1, 1, 2)
"""
if not isinstance(axis, int):
raise ValueError("axis must be int, but got type:{}".format(type(axis)))
return mgb.opr.remove_axis(inp, axis)
def linspace(
start: Union[int, float, Tensor],
stop: Union[int, float, Tensor],
num: Union[int, Tensor],
dtype=np.float32,
device: Optional[CompNode] = None,
comp_graph: Optional[CompGraph] = None,
) -> Tensor:
r"""
Return equally spaced numbers over a specified interval
    :param start: Starting value of the sequence, should be a scalar
    :param stop: The last value of the sequence, should be a scalar
:param num: number of values to generate
:param dtype: result data type
:return: The generated tensor
Examples:
.. testcode::
import numpy as np
import megengine.functional as F
a = F.linspace(3,10,5)
print(a.numpy())
.. testoutput::
[ 3. 4.75 6.5 8.25 10. ]
"""
if dtype is not np.float32:
raise ValueError("linspace is only implemented for float32")
device, comp_graph = _use_default_if_none(device, comp_graph)
ret = Tensor(
mgb.opr.linspace(start, stop, num, comp_node=device, comp_graph=comp_graph)
)
return ret.astype(dtype)
def arange(
start: Union[int, float, Tensor],
end: Union[int, float, Tensor],
step: Union[int, float, Tensor] = 1,
dtype=np.float32,
device: Optional[CompNode] = None,
comp_graph: Optional[CompGraph] = None,
) -> Tensor:
r"""
Returns a Tensor with values from `start` to `end` with adjacent interval `step`
    :param start: starting value of the sequence, should be a scalar
    :param end: ending value of the sequence, should be a scalar
:param step: the gap between each pair of adjacent values. Default 1
:param dtype: result data type
:return: The generated tensor
Examples:
.. testcode::
import numpy as np
import megengine.functional as F
a = F.arange(1, 5, 1)
print(a.numpy())
.. testoutput::
[1. 2. 3. 4.]
"""
if dtype is not np.float32:
raise ValueError("arange is only implemented for float32")
num = ceil((end - start) / step)
stop = start + step * (num - 1)
ret = linspace(start, stop, num, device=device, comp_graph=comp_graph)
return ret
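def _arange_linspace_example():
    # Illustrative sketch, not part of the original module: checks the arithmetic `arange`
    # uses above, i.e. num = ceil((end - start) / step) and stop = start + step * (num - 1).
    start, end, step = 1, 5, 1
    num = -(-(end - start) // step)  # ceiling division for a positive step
    stop = start + step * (num - 1)
    assert (num, stop) == (4, 4)  # so linspace(1, 4, 4) yields [1. 2. 3. 4.], as in the docstring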
def zeros_like(inp: Tensor) -> Tensor:
r"""
Returns a zero tensor with the same shape as input tensor
:param inp: input tensor
Examples:
.. testcode::
import numpy as np
from megengine import tensor
import megengine.functional as F
inp = tensor(np.arange(1, 7, dtype=np.int32).reshape(2,3))
out = F.zeros_like(inp)
print(out.numpy())
.. testoutput::
[[0 0 0]
[0 0 0]]
"""
return zeros(inp.shapeof()).astype(inp.dtype)
|
[
"megengine._internal.opr.cond_take",
"megengine._internal.opr.dimshuffle",
"megengine._internal.opr.add_axis",
"megengine._internal.opr.remove_axis",
"megengine._internal.opr.set_advanced_indexing",
"megengine._internal.opr.reshape",
"megengine._internal.opr.broadcast",
"megengine._internal.opr.linspace",
"megengine._internal.opr.advanced_indexing",
"megengine._internal.opr.concat"
] |
[((1563, 1592), 'megengine._internal.opr.broadcast', 'mgb.opr.broadcast', (['inp', 'shape'], {}), '(inp, shape)\n', (1580, 1592), True, 'import megengine._internal as mgb\n'), ((10424, 10499), 'megengine._internal.opr.cond_take', 'mgb.opr.cond_take', (['x', 'mask'], {'mode': 'mgb.opr_param_defs.CondTake.Mode.EQ', 'val': '(1)'}), '(x, mask, mode=mgb.opr_param_defs.CondTake.Mode.EQ, val=1)\n', (10441, 10499), True, 'import megengine._internal as mgb\n'), ((10531, 10606), 'megengine._internal.opr.cond_take', 'mgb.opr.cond_take', (['y', 'mask'], {'mode': 'mgb.opr_param_defs.CondTake.Mode.EQ', 'val': '(0)'}), '(y, mask, mode=mgb.opr_param_defs.CondTake.Mode.EQ, val=0)\n', (10548, 10606), True, 'import megengine._internal as mgb\n'), ((10655, 10693), 'megengine._internal.opr.concat', 'mgb.opr.concat', (['index0', 'index1'], {'axis': '(0)'}), '(index0, index1, axis=0)\n', (10669, 10693), True, 'import megengine._internal as mgb\n'), ((10702, 10732), 'megengine._internal.opr.concat', 'mgb.opr.concat', (['v0', 'v1'], {'axis': '(0)'}), '(v0, v1, axis=0)\n', (10716, 10732), True, 'import megengine._internal as mgb\n'), ((11809, 11886), 'megengine._internal.opr.cond_take', 'mgb.opr.cond_take', (['x', 'mask'], {'mode': 'mgb.opr_param_defs.CondTake.Mode.EQ', 'val': 'val'}), '(x, mask, mode=mgb.opr_param_defs.CondTake.Mode.EQ, val=val)\n', (11826, 11886), True, 'import megengine._internal as mgb\n'), ((13288, 13320), 'megengine._internal.opr.dimshuffle', 'mgb.opr.dimshuffle', (['inp', 'pattern'], {}), '(inp, pattern)\n', (13306, 13320), True, 'import megengine._internal as mgb\n'), ((14143, 14177), 'megengine._internal.opr.reshape', 'mgb.opr.reshape', (['inp', 'target_shape'], {}), '(inp, target_shape)\n', (14158, 14177), True, 'import megengine._internal as mgb\n'), ((14944, 14971), 'megengine._internal.opr.add_axis', 'mgb.opr.add_axis', (['inp', 'axis'], {}), '(inp, axis)\n', (14960, 14971), True, 'import megengine._internal as mgb\n'), ((15650, 15680), 'megengine._internal.opr.remove_axis', 'mgb.opr.remove_axis', (['inp', 'axis'], {}), '(inp, axis)\n', (15669, 15680), True, 'import megengine._internal as mgb\n'), ((10743, 10780), 'megengine._internal.opr.set_advanced_indexing', 'mgb.opr.set_advanced_indexing', (['out', 'v'], {}), '(out, v)\n', (10772, 10780), True, 'import megengine._internal as mgb\n'), ((16659, 16734), 'megengine._internal.opr.linspace', 'mgb.opr.linspace', (['start', 'stop', 'num'], {'comp_node': 'device', 'comp_graph': 'comp_graph'}), '(start, stop, num, comp_node=device, comp_graph=comp_graph)\n', (16675, 16734), True, 'import megengine._internal as mgb\n'), ((4531, 4561), 'megengine._internal.opr.advanced_indexing', 'mgb.opr.advanced_indexing', (['inp'], {}), '(inp)\n', (4556, 4561), True, 'import megengine._internal as mgb\n')]
|
# Creazione entità "base", editor support, creazione, id automatici
# https://sqlmodel.tiangolo.com/tutorial/insert/
from typing import Optional
from sqlmodel import Field, SQLModel, Session, create_engine
class Tag(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str
class ProductType(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str
sqlite_file_name = "database.db"
sqlite_url = f"sqlite:///{sqlite_file_name}"
engine = create_engine(sqlite_url, echo=True)
def create_db_and_tables():
SQLModel.metadata.create_all(engine)
def create_entities():
tag_offerta = Tag(name="Offerta")
tag_maionese = Tag(name="Con Maionese")
tag_nomayo = Tag(name="No mayo")
tipo_panino = ProductType(name="panino")
tipo_bibita = ProductType(name="bibita")
with Session(engine) as session:
session.add(tag_offerta)
session.add(tag_maionese)
session.add(tag_nomayo)
session.add(tipo_panino)
session.add(tipo_bibita)
session.commit()
print("After committing the session")
print("Tag 1:", tag_offerta)
# No refresh, no print
# https://sqlmodel.tiangolo.com/tutorial/automatic-id-none-refresh/
print("Product Type 1:", tipo_panino)
# Refresh automatica se accedo ad un attributo
print("Name of product Type 1:", tipo_panino.name)
# Refresh esplicita
session.refresh(tipo_bibita)
session.refresh(tag_maionese)
print("Product Type 2:", tipo_bibita)
print("After the session closes")
print("Tag 2:", tag_maionese)
def main():
create_db_and_tables()
create_entities()
if __name__ == "__main__":
main()
|
[
"sqlmodel.SQLModel.metadata.create_all",
"sqlmodel.Field",
"sqlmodel.create_engine",
"sqlmodel.Session"
] |
[((525, 561), 'sqlmodel.create_engine', 'create_engine', (['sqlite_url'], {'echo': '(True)'}), '(sqlite_url, echo=True)\n', (538, 561), False, 'from sqlmodel import Field, SQLModel, Session, create_engine\n'), ((265, 302), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (270, 302), False, 'from sqlmodel import Field, SQLModel, Session, create_engine\n'), ((384, 421), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (389, 421), False, 'from sqlmodel import Field, SQLModel, Session, create_engine\n'), ((596, 632), 'sqlmodel.SQLModel.metadata.create_all', 'SQLModel.metadata.create_all', (['engine'], {}), '(engine)\n', (624, 632), False, 'from sqlmodel import Field, SQLModel, Session, create_engine\n'), ((876, 891), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (883, 891), False, 'from sqlmodel import Field, SQLModel, Session, create_engine\n')]
|
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import megengine.functional as F
from megengine import Parameter
from .init import ones_, zeros_
from .module import Module
class GroupNorm(Module):
"""
    Simple implementation of GroupNorm. Only supports 4d tensors for now.
Reference: https://arxiv.org/pdf/1803.08494.pdf.
"""
def __init__(self, num_groups, num_channels, eps=1e-5, affine=True, **kwargs):
super().__init__(**kwargs)
assert num_channels % num_groups == 0
self.num_groups = num_groups
self.num_channels = num_channels
self.eps = eps
self.affine = affine
if self.affine:
self.weight = Parameter(np.ones(num_channels, dtype=np.float32))
self.bias = Parameter(np.zeros(num_channels, dtype=np.float32))
else:
self.weight = None
self.bias = None
self.reset_parameters()
def reset_parameters(self):
if self.affine:
ones_(self.weight)
zeros_(self.bias)
def forward(self, x):
N, C, H, W = x.shape
assert C == self.num_channels
x = x.reshape(N, self.num_groups, -1)
mean = x.mean(axis=2, keepdims=True)
var = (x * x).mean(axis=2, keepdims=True) - mean * mean
x = (x - mean) / F.sqrt(var + self.eps)
x = x.reshape(N, C, H, W)
if self.affine:
x = self.weight.reshape(1, -1, 1, 1) * x + self.bias.reshape(1, -1, 1, 1)
return x
def _module_info_string(self) -> str:
s = (
"groups={num_groups}, channels={num_channels}, "
"eps={eps}, affine={affine}"
)
return s.format(**self.__dict__)
class InstanceNorm(Module):
"""
    Simple implementation of InstanceNorm. Only supports 4d tensors for now.
Reference: https://arxiv.org/abs/1607.08022.
    Note that InstanceNorm is equivalent to GroupNorm with num_groups=num_channels.
"""
def __init__(self, num_channels, eps=1e-05, affine=True, **kwargs):
super().__init__(**kwargs)
self.num_channels = num_channels
self.eps = eps
self.affine = affine
if self.affine:
self.weight = Parameter(np.ones(num_channels, dtype="float32"))
self.bias = Parameter(np.zeros(num_channels, dtype="float32"))
else:
self.weight = None
self.bias = None
self.reset_parameters()
def reset_parameters(self):
if self.affine:
ones_(self.weight)
zeros_(self.bias)
def forward(self, x):
N, C, H, W = x.shape
assert C == self.num_channels
x = x.reshape(N, C, -1)
mean = x.mean(axis=2, keepdims=True)
var = (x ** 2).mean(axis=2, keepdims=True) - mean * mean
x = (x - mean) / F.sqrt(var + self.eps)
x = x.reshape(N, C, H, W)
if self.affine:
x = self.weight.reshape(1, -1, 1, 1) * x + self.bias.reshape(1, -1, 1, 1)
return x
def _module_info_string(self) -> str:
s = "channels={num_channels}, eps={eps}, affine={affine}"
return s.format(**self.__dict__)
class LayerNorm(Module):
"""
    Simple implementation of LayerNorm. Supports tensors of any shape as input.
Reference: https://arxiv.org/pdf/1803.08494.pdf.
"""
def __init__(self, normalized_shape, eps=1e-05, affine=True, **kwargs):
super().__init__(**kwargs)
if isinstance(normalized_shape, int):
normalized_shape = (normalized_shape,)
self.normalized_shape = tuple(normalized_shape)
self.eps = eps
self.affine = affine
if self.affine:
self.weight = Parameter(np.ones(self.normalized_shape, dtype="float32"))
self.bias = Parameter(np.zeros(self.normalized_shape, dtype="float32"))
else:
self.weight = None
self.bias = None
self.reset_parameters()
def reset_parameters(self):
if self.affine:
ones_(self.weight)
zeros_(self.bias)
def forward(self, x):
x_shape = x.shape
dim_delta = len(x_shape) - len(self.normalized_shape)
non_flatten_shape = x_shape[:dim_delta]
x = x.reshape(*non_flatten_shape, -1)
mean = x.mean(axis=-1, keepdims=True)
var = (x ** 2).mean(axis=-1, keepdims=True) - mean * mean
x = (x - mean) / F.sqrt(var + self.eps)
x = x.reshape(x_shape)
if self.affine:
x = self.weight * x + self.bias
return x
def _module_info_string(self) -> str:
s = "normalized_shape={normalized_shape}, eps={eps}, affine={affine}"
return s.format(**self.__dict__)
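def _normalization_shape_example():
    # Illustrative usage sketch, not part of the original module; the shapes are assumptions.
    import megengine as mge
    x = mge.tensor(np.random.randn(2, 8, 4, 4).astype("float32"))
    gn = GroupNorm(num_groups=4, num_channels=8)  # 8 channels split into 4 groups
    ln = LayerNorm(normalized_shape=(8, 4, 4))  # normalize over every non-batch dim
    print(gn(x).shape, ln(x).shape)  # both keep the input shape (2, 8, 4, 4)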
|
[
"megengine.functional.sqrt"
] |
[((1634, 1656), 'megengine.functional.sqrt', 'F.sqrt', (['(var + self.eps)'], {}), '(var + self.eps)\n', (1640, 1656), True, 'import megengine.functional as F\n'), ((3137, 3159), 'megengine.functional.sqrt', 'F.sqrt', (['(var + self.eps)'], {}), '(var + self.eps)\n', (3143, 3159), True, 'import megengine.functional as F\n'), ((4729, 4751), 'megengine.functional.sqrt', 'F.sqrt', (['(var + self.eps)'], {}), '(var + self.eps)\n', (4735, 4751), True, 'import megengine.functional as F\n'), ((1017, 1056), 'numpy.ones', 'np.ones', (['num_channels'], {'dtype': 'np.float32'}), '(num_channels, dtype=np.float32)\n', (1024, 1056), True, 'import numpy as np\n'), ((1092, 1132), 'numpy.zeros', 'np.zeros', (['num_channels'], {'dtype': 'np.float32'}), '(num_channels, dtype=np.float32)\n', (1100, 1132), True, 'import numpy as np\n'), ((2536, 2574), 'numpy.ones', 'np.ones', (['num_channels'], {'dtype': '"""float32"""'}), "(num_channels, dtype='float32')\n", (2543, 2574), True, 'import numpy as np\n'), ((2610, 2649), 'numpy.zeros', 'np.zeros', (['num_channels'], {'dtype': '"""float32"""'}), "(num_channels, dtype='float32')\n", (2618, 2649), True, 'import numpy as np\n'), ((4023, 4070), 'numpy.ones', 'np.ones', (['self.normalized_shape'], {'dtype': '"""float32"""'}), "(self.normalized_shape, dtype='float32')\n", (4030, 4070), True, 'import numpy as np\n'), ((4106, 4154), 'numpy.zeros', 'np.zeros', (['self.normalized_shape'], {'dtype': '"""float32"""'}), "(self.normalized_shape, dtype='float32')\n", (4114, 4154), True, 'import numpy as np\n')]
|
from typing import List, Union
from fastapi import APIRouter, Request
from fastapi.exceptions import HTTPException
from sqlmodel import Session, or_, select
from ..db import ActiveSession
from ..models.content import Content, ContentIncoming, ContentResponse
from ..security import AuthenticatedUser, User, get_current_user
router = APIRouter()
@router.get("/", response_model=List[ContentResponse])
async def list_contents(*, session: Session = ActiveSession):
contents = session.exec(select(Content)).all()
return contents
@router.get("/{id_or_slug}/", response_model=ContentResponse)
async def query_content(*, id_or_slug: Union[str, int], session: Session = ActiveSession):
content = session.query(Content).where(
or_(
Content.id == id_or_slug,
Content.slug == id_or_slug,
)
)
if not content:
raise HTTPException(status_code=404, detail="Content not found")
return content.first()
@router.post("/", response_model=ContentResponse, dependencies=[AuthenticatedUser])
async def create_content(
*,
session: Session = ActiveSession,
request: Request,
content: ContentIncoming,
):
    # set the ownership of the content to the current user
db_content = Content.from_orm(content)
user: User = get_current_user(request=request)
db_content.user_id = user.id
session.add(db_content)
session.commit()
session.refresh(db_content)
return db_content
@router.patch(
"/{content_id}/",
response_model=ContentResponse,
dependencies=[AuthenticatedUser],
)
async def update_content(
*,
content_id: int,
session: Session = ActiveSession,
request: Request,
patch: ContentIncoming,
):
# Query the content
content = session.get(Content, content_id)
if not content:
raise HTTPException(status_code=404, detail="Content not found")
# Check the user owns the content
current_user: User = get_current_user(request=request)
if content.user_id != current_user.id and not current_user.superuser:
raise HTTPException(status_code=403, detail="You don't own this content")
# Update the content
patch_data = patch.dict(exclude_unset=True)
for key, value in patch_data.items():
setattr(content, key, value)
# Commit the session
session.commit()
session.refresh(content)
return content
@router.delete("/{content_id}/", dependencies=[AuthenticatedUser])
def delete_content(*, session: Session = ActiveSession, request: Request, content_id: int):
content = session.get(Content, content_id)
if not content:
raise HTTPException(status_code=404, detail="Content not found")
# Check the user owns the content
current_user = get_current_user(request=request)
if content.user_id != current_user.id and not current_user.superuser:
raise HTTPException(status_code=403, detail="You don't own this content")
session.delete(content)
session.commit()
return {"ok": True}
|
[
"sqlmodel.or_",
"sqlmodel.select"
] |
[((336, 347), 'fastapi.APIRouter', 'APIRouter', ([], {}), '()\n', (345, 347), False, 'from fastapi import APIRouter, Request\n'), ((745, 802), 'sqlmodel.or_', 'or_', (['(Content.id == id_or_slug)', '(Content.slug == id_or_slug)'], {}), '(Content.id == id_or_slug, Content.slug == id_or_slug)\n', (748, 802), False, 'from sqlmodel import Session, or_, select\n'), ((878, 936), 'fastapi.exceptions.HTTPException', 'HTTPException', ([], {'status_code': '(404)', 'detail': '"""Content not found"""'}), "(status_code=404, detail='Content not found')\n", (891, 936), False, 'from fastapi.exceptions import HTTPException\n'), ((1831, 1889), 'fastapi.exceptions.HTTPException', 'HTTPException', ([], {'status_code': '(404)', 'detail': '"""Content not found"""'}), "(status_code=404, detail='Content not found')\n", (1844, 1889), False, 'from fastapi.exceptions import HTTPException\n'), ((2076, 2143), 'fastapi.exceptions.HTTPException', 'HTTPException', ([], {'status_code': '(403)', 'detail': '"""You don\'t own this content"""'}), '(status_code=403, detail="You don\'t own this content")\n', (2089, 2143), False, 'from fastapi.exceptions import HTTPException\n'), ((2635, 2693), 'fastapi.exceptions.HTTPException', 'HTTPException', ([], {'status_code': '(404)', 'detail': '"""Content not found"""'}), "(status_code=404, detail='Content not found')\n", (2648, 2693), False, 'from fastapi.exceptions import HTTPException\n'), ((2873, 2940), 'fastapi.exceptions.HTTPException', 'HTTPException', ([], {'status_code': '(403)', 'detail': '"""You don\'t own this content"""'}), '(status_code=403, detail="You don\'t own this content")\n', (2886, 2940), False, 'from fastapi.exceptions import HTTPException\n'), ((495, 510), 'sqlmodel.select', 'select', (['Content'], {}), '(Content)\n', (501, 510), False, 'from sqlmodel import Session, or_, select\n')]
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import pickle
from tempfile import TemporaryFile
import numpy as np
import megengine as mge
from megengine import Parameter, Tensor
def test_tensor_serialization():
def tensor_eq(a, b):
assert a.dtype == b.dtype
assert a.device == b.device
np.testing.assert_equal(a.numpy(), b.numpy())
with TemporaryFile() as f:
data = np.random.randint(low=0, high=7, size=[233])
a = Tensor(data, device="xpux", dtype=np.int32)
pickle.dump(a, f)
f.seek(0)
b = pickle.load(f)
np.testing.assert_equal(a.numpy(), b.numpy())
with TemporaryFile() as f:
a = Parameter(np.random.random(size=(233, 2)).astype(np.float32))
pickle.dump(a, f)
f.seek(0)
b = pickle.load(f)
assert isinstance(b, Parameter)
np.testing.assert_equal(a.numpy(), b.numpy())
with TemporaryFile() as f:
a = Tensor(np.random.random(size=(2, 233)).astype(np.float32))
pickle.dump(a, f)
f.seek(0)
b = pickle.load(f)
assert type(b) is Tensor
np.testing.assert_equal(a.numpy(), b.numpy())
with TemporaryFile() as f:
a = Tensor(np.random.random(size=(2, 233)).astype(np.float32))
mge.save(a, f)
f.seek(0)
b = mge.load(f, map_location="cpux")
assert type(b) is Tensor
assert "cpu" in str(b.device)
np.testing.assert_equal(a.numpy(), b.numpy())
with TemporaryFile() as f:
if mge.is_cuda_available():
device_org = mge.get_default_device()
mge.set_default_device("gpu0")
a = Tensor(np.random.random(size=(2, 233)).astype(np.float32))
mge.save(a, f)
f.seek(0)
mge.set_default_device("cpux")
b = mge.load(f, map_location={"gpu0": "cpu0"})
assert type(b) is Tensor
assert "cpu0" in str(b.device)
np.testing.assert_equal(a.numpy(), b.numpy())
mge.set_default_device(device_org)
|
[
"megengine.is_cuda_available",
"megengine.Tensor",
"megengine.load",
"megengine.get_default_device",
"megengine.set_default_device",
"megengine.save"
] |
[((705, 720), 'tempfile.TemporaryFile', 'TemporaryFile', ([], {}), '()\n', (718, 720), False, 'from tempfile import TemporaryFile\n'), ((742, 786), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': '(7)', 'size': '[233]'}), '(low=0, high=7, size=[233])\n', (759, 786), True, 'import numpy as np\n'), ((799, 842), 'megengine.Tensor', 'Tensor', (['data'], {'device': '"""xpux"""', 'dtype': 'np.int32'}), "(data, device='xpux', dtype=np.int32)\n", (805, 842), False, 'from megengine import Parameter, Tensor\n'), ((851, 868), 'pickle.dump', 'pickle.dump', (['a', 'f'], {}), '(a, f)\n', (862, 868), False, 'import pickle\n'), ((899, 913), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (910, 913), False, 'import pickle\n'), ((978, 993), 'tempfile.TemporaryFile', 'TemporaryFile', ([], {}), '()\n', (991, 993), False, 'from tempfile import TemporaryFile\n'), ((1082, 1099), 'pickle.dump', 'pickle.dump', (['a', 'f'], {}), '(a, f)\n', (1093, 1099), False, 'import pickle\n'), ((1130, 1144), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1141, 1144), False, 'import pickle\n'), ((1249, 1264), 'tempfile.TemporaryFile', 'TemporaryFile', ([], {}), '()\n', (1262, 1264), False, 'from tempfile import TemporaryFile\n'), ((1350, 1367), 'pickle.dump', 'pickle.dump', (['a', 'f'], {}), '(a, f)\n', (1361, 1367), False, 'import pickle\n'), ((1398, 1412), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1409, 1412), False, 'import pickle\n'), ((1510, 1525), 'tempfile.TemporaryFile', 'TemporaryFile', ([], {}), '()\n', (1523, 1525), False, 'from tempfile import TemporaryFile\n'), ((1611, 1625), 'megengine.save', 'mge.save', (['a', 'f'], {}), '(a, f)\n', (1619, 1625), True, 'import megengine as mge\n'), ((1656, 1688), 'megengine.load', 'mge.load', (['f'], {'map_location': '"""cpux"""'}), "(f, map_location='cpux')\n", (1664, 1688), True, 'import megengine as mge\n'), ((1824, 1839), 'tempfile.TemporaryFile', 'TemporaryFile', ([], {}), '()\n', (1837, 1839), False, 'from tempfile import TemporaryFile\n'), ((1857, 1880), 'megengine.is_cuda_available', 'mge.is_cuda_available', ([], {}), '()\n', (1878, 1880), True, 'import megengine as mge\n'), ((1907, 1931), 'megengine.get_default_device', 'mge.get_default_device', ([], {}), '()\n', (1929, 1931), True, 'import megengine as mge\n'), ((1944, 1974), 'megengine.set_default_device', 'mge.set_default_device', (['"""gpu0"""'], {}), "('gpu0')\n", (1966, 1974), True, 'import megengine as mge\n'), ((2062, 2076), 'megengine.save', 'mge.save', (['a', 'f'], {}), '(a, f)\n', (2070, 2076), True, 'import megengine as mge\n'), ((2111, 2141), 'megengine.set_default_device', 'mge.set_default_device', (['"""cpux"""'], {}), "('cpux')\n", (2133, 2141), True, 'import megengine as mge\n'), ((2158, 2200), 'megengine.load', 'mge.load', (['f'], {'map_location': "{'gpu0': 'cpu0'}"}), "(f, map_location={'gpu0': 'cpu0'})\n", (2166, 2200), True, 'import megengine as mge\n'), ((2351, 2385), 'megengine.set_default_device', 'mge.set_default_device', (['device_org'], {}), '(device_org)\n', (2373, 2385), True, 'import megengine as mge\n'), ((1022, 1053), 'numpy.random.random', 'np.random.random', ([], {'size': '(233, 2)'}), '(size=(233, 2))\n', (1038, 1053), True, 'import numpy as np\n'), ((1290, 1321), 'numpy.random.random', 'np.random.random', ([], {'size': '(2, 233)'}), '(size=(2, 233))\n', (1306, 1321), True, 'import numpy as np\n'), ((1551, 1582), 'numpy.random.random', 'np.random.random', ([], {'size': '(2, 233)'}), '(size=(2, 233))\n', (1567, 1582), True, 'import numpy as 
np\n'), ((1998, 2029), 'numpy.random.random', 'np.random.random', ([], {'size': '(2, 233)'}), '(size=(2, 233))\n', (2014, 2029), True, 'import numpy as np\n')]
|
from fastapi.exceptions import HTTPException
from sfm.models import Project, WorkItem
from sqlmodel import Session, select
from opencensus.ext.azure.log_exporter import AzureLogHandler
from sfm.config import get_settings
from sfm.logger import create_logger
from sfm.utils import (
create_project_auth_token,
hash_project_auth_token,
verify_admin_key,
)
app_settings = get_settings()
logger = create_logger(__name__)
def get_all(db: Session, skip: int = None, limit: int = None):
"""Get all the projects and return them."""
projects = db.exec(
select(Project).order_by(Project.id).offset(skip).limit(limit)
).all()
if not projects:
logger.debug("Projects not found")
raise HTTPException(status_code=404, detail="Projects not found")
return projects
def get_by_id(db: Session, project_id: int):
"""Get the project with corresponding id and return it."""
project = db.get(Project, project_id)
if not project:
logger.debug("Projects not found")
raise HTTPException(status_code=404, detail="Project not found")
return project
def create_project(db: Session, project_data, admin_key):
"""Take data from request and create a new project in the database."""
project_name_repeat = db.exec(
select(Project).where(Project.name == project_data.name)
).first()
if project_name_repeat is not None:
logger.debug("Database entry already exists")
raise HTTPException(status_code=409, detail="Database entry already exists")
verified_admin = verify_admin_key(admin_key)
if verified_admin:
project_temp = project_data.dict()
token = create_project_auth_token()
hashed_token = hash_project_auth_token(token)
project_temp.update({"project_auth_token_hashed": hashed_token})
project_db = Project(**project_temp)
db.add(project_db)
db.commit()
else:
logger.warning("Attempted to verify as an admin with incorrect credentials")
raise HTTPException(status_code=401, detail="Credentials are incorrect")
# Check the new record
db.refresh(project_db)
new_project = db.get(Project, project_db.id)
if new_project.name == project_data.name:
return [new_project, token] # successfully created record
else:
logger.error("Project did not store correctly in database")
return False # didn't store correctly
def delete_project(db: Session, project_id, admin_key):
"""Take a project_name and remove the row from the database."""
verified_admin = verify_admin_key(admin_key)
if verified_admin:
project = db.get(Project, project_id)
if not project:
logger.debug("Project not found")
raise HTTPException(status_code=404, detail="Project not found")
for item in project.work_items:
db.delete(item)
db.delete(project)
db.commit()
else:
logger.warning("Attempted to verify as admin with incorrect credentials")
raise HTTPException(status_code=401, detail="Credentials are incorrect")
# Check our work
row = db.get(Project, project_id)
if row:
logger.error("Project did not delete correctly")
return False # Row didn't successfully delete or another one exists
else:
return True # Successful deletion
def refresh_project_key(db: Session, project_id, admin_key):
verified_admin = verify_admin_key(admin_key)
if verified_admin:
project_db = db.get(Project, project_id)
if not project_db:
logger.debug("Project with matching id not found")
raise HTTPException(
status_code=404, detail="Project with matching id not found"
)
new_token = create_project_auth_token()
hashed_token = hash_project_auth_token(new_token)
project_db.project_auth_token_hashed = hashed_token
db.add(project_db)
db.commit()
else:
logger.warning("Attempted to verify as admin with incorrect credentials")
raise HTTPException(status_code=401, detail="Credentials are incorrect")
check = db.exec(
select(Project).where(Project.project_auth_token_hashed == hashed_token)
)
if check:
return new_token
else:
logger.error("Project auth token did not update correctly")
return False
def update_project(db: Session, project_id, project_data, admin_key):
"""Take data from request and update an existing Project in the database."""
verified_admin = verify_admin_key(admin_key)
if verified_admin:
project = db.get(Project, project_id)
if not project:
logger.debug("Project not found")
raise HTTPException(status_code=404, detail="Project not found")
project_newdata = project_data.dict(exclude_unset=True, exclude_defaults=True)
for key, value in project_newdata.items():
setattr(project, key, value)
db.add(project)
db.commit()
else:
logger.warning("Attempted to verify as admin with incorrect credentials")
raise HTTPException(status_code=401, detail="Credentials are incorrect")
# return updated item
db.refresh(project)
if project:
return project # updated record
else:
logger.error("Project did not store correctly")
return False # didn't store correctly
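def _example_direct_usage():
    # Illustrative sketch, not part of the original module: calling the CRUD helpers above
    # directly with a SQLModel session; the SQLite URL is an assumption.
    from sqlmodel import Session, create_engine
    engine = create_engine("sqlite:///sfm.db")
    with Session(engine) as db:
        projects = get_all(db, skip=0, limit=10)
        return get_by_id(db, project_id=projects[0].id)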
|
[
"sqlmodel.select"
] |
[((382, 396), 'sfm.config.get_settings', 'get_settings', ([], {}), '()\n', (394, 396), False, 'from sfm.config import get_settings\n'), ((408, 431), 'sfm.logger.create_logger', 'create_logger', (['__name__'], {}), '(__name__)\n', (421, 431), False, 'from sfm.logger import create_logger\n'), ((1568, 1595), 'sfm.utils.verify_admin_key', 'verify_admin_key', (['admin_key'], {}), '(admin_key)\n', (1584, 1595), False, 'from sfm.utils import create_project_auth_token, hash_project_auth_token, verify_admin_key\n'), ((2590, 2617), 'sfm.utils.verify_admin_key', 'verify_admin_key', (['admin_key'], {}), '(admin_key)\n', (2606, 2617), False, 'from sfm.utils import create_project_auth_token, hash_project_auth_token, verify_admin_key\n'), ((3466, 3493), 'sfm.utils.verify_admin_key', 'verify_admin_key', (['admin_key'], {}), '(admin_key)\n', (3482, 3493), False, 'from sfm.utils import create_project_auth_token, hash_project_auth_token, verify_admin_key\n'), ((4589, 4616), 'sfm.utils.verify_admin_key', 'verify_admin_key', (['admin_key'], {}), '(admin_key)\n', (4605, 4616), False, 'from sfm.utils import create_project_auth_token, hash_project_auth_token, verify_admin_key\n'), ((730, 789), 'fastapi.exceptions.HTTPException', 'HTTPException', ([], {'status_code': '(404)', 'detail': '"""Projects not found"""'}), "(status_code=404, detail='Projects not found')\n", (743, 789), False, 'from fastapi.exceptions import HTTPException\n'), ((1039, 1097), 'fastapi.exceptions.HTTPException', 'HTTPException', ([], {'status_code': '(404)', 'detail': '"""Project not found"""'}), "(status_code=404, detail='Project not found')\n", (1052, 1097), False, 'from fastapi.exceptions import HTTPException\n'), ((1475, 1545), 'fastapi.exceptions.HTTPException', 'HTTPException', ([], {'status_code': '(409)', 'detail': '"""Database entry already exists"""'}), "(status_code=409, detail='Database entry already exists')\n", (1488, 1545), False, 'from fastapi.exceptions import HTTPException\n'), ((1678, 1705), 'sfm.utils.create_project_auth_token', 'create_project_auth_token', ([], {}), '()\n', (1703, 1705), False, 'from sfm.utils import create_project_auth_token, hash_project_auth_token, verify_admin_key\n'), ((1729, 1759), 'sfm.utils.hash_project_auth_token', 'hash_project_auth_token', (['token'], {}), '(token)\n', (1752, 1759), False, 'from sfm.utils import create_project_auth_token, hash_project_auth_token, verify_admin_key\n'), ((1854, 1877), 'sfm.models.Project', 'Project', ([], {}), '(**project_temp)\n', (1861, 1877), False, 'from sfm.models import Project, WorkItem\n'), ((2034, 2100), 'fastapi.exceptions.HTTPException', 'HTTPException', ([], {'status_code': '(401)', 'detail': '"""Credentials are incorrect"""'}), "(status_code=401, detail='Credentials are incorrect')\n", (2047, 2100), False, 'from fastapi.exceptions import HTTPException\n'), ((3056, 3122), 'fastapi.exceptions.HTTPException', 'HTTPException', ([], {'status_code': '(401)', 'detail': '"""Credentials are incorrect"""'}), "(status_code=401, detail='Credentials are incorrect')\n", (3069, 3122), False, 'from fastapi.exceptions import HTTPException\n'), ((3801, 3828), 'sfm.utils.create_project_auth_token', 'create_project_auth_token', ([], {}), '()\n', (3826, 3828), False, 'from sfm.utils import create_project_auth_token, hash_project_auth_token, verify_admin_key\n'), ((3852, 3886), 'sfm.utils.hash_project_auth_token', 'hash_project_auth_token', (['new_token'], {}), '(new_token)\n', (3875, 3886), False, 'from sfm.utils import create_project_auth_token, 
hash_project_auth_token, verify_admin_key\n'), ((4100, 4166), 'fastapi.exceptions.HTTPException', 'HTTPException', ([], {'status_code': '(401)', 'detail': '"""Credentials are incorrect"""'}), "(status_code=401, detail='Credentials are incorrect')\n", (4113, 4166), False, 'from fastapi.exceptions import HTTPException\n'), ((5165, 5231), 'fastapi.exceptions.HTTPException', 'HTTPException', ([], {'status_code': '(401)', 'detail': '"""Credentials are incorrect"""'}), "(status_code=401, detail='Credentials are incorrect')\n", (5178, 5231), False, 'from fastapi.exceptions import HTTPException\n'), ((2775, 2833), 'fastapi.exceptions.HTTPException', 'HTTPException', ([], {'status_code': '(404)', 'detail': '"""Project not found"""'}), "(status_code=404, detail='Project not found')\n", (2788, 2833), False, 'from fastapi.exceptions import HTTPException\n'), ((3675, 3750), 'fastapi.exceptions.HTTPException', 'HTTPException', ([], {'status_code': '(404)', 'detail': '"""Project with matching id not found"""'}), "(status_code=404, detail='Project with matching id not found')\n", (3688, 3750), False, 'from fastapi.exceptions import HTTPException\n'), ((4774, 4832), 'fastapi.exceptions.HTTPException', 'HTTPException', ([], {'status_code': '(404)', 'detail': '"""Project not found"""'}), "(status_code=404, detail='Project not found')\n", (4787, 4832), False, 'from fastapi.exceptions import HTTPException\n'), ((4197, 4212), 'sqlmodel.select', 'select', (['Project'], {}), '(Project)\n', (4203, 4212), False, 'from sqlmodel import Session, select\n'), ((1295, 1310), 'sqlmodel.select', 'select', (['Project'], {}), '(Project)\n', (1301, 1310), False, 'from sqlmodel import Session, select\n'), ((577, 592), 'sqlmodel.select', 'select', (['Project'], {}), '(Project)\n', (583, 592), False, 'from sqlmodel import Session, select\n')]
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import functools
from typing import Callable, Optional
import megengine._internal as mgb
from ..core import set_default_device
_master_ip = None
_master_port = 0
_world_size = 0
_rank = 0
_backend = None
def init_process_group(
master_ip: str,
master_port: int,
world_size: int,
rank: int,
dev: int,
backend: Optional[str] = "nccl",
) -> None:
"""Initialize the distributed process group, and also specify the device used in the current process.
:param master_ip: IP address of the master node.
:param master_port: Port available for all processes to communicate.
:param world_size: Total number of processes participating in the job.
:param rank: Rank of the current process.
:param dev: The GPU device id to bind this process to.
:param backend: Communicator backend, currently support 'nccl' and 'ucx'
"""
global _master_ip # pylint: disable=global-statement
global _master_port # pylint: disable=global-statement
global _world_size # pylint: disable=global-statement
global _rank # pylint: disable=global-statement
global _backend # pylint: disable=global-statement
if not isinstance(master_ip, str):
raise TypeError("Expect type str but got {}".format(type(master_ip)))
if not isinstance(master_port, int):
raise TypeError("Expect type int but got {}".format(type(master_port)))
if not isinstance(world_size, int):
raise TypeError("Expect type int but got {}".format(type(world_size)))
if not isinstance(rank, int):
raise TypeError("Expect type int but got {}".format(type(rank)))
if not isinstance(backend, str):
raise TypeError("Expect type str but got {}".format(type(backend)))
_master_ip = master_ip
_master_port = master_port
_world_size = world_size
_rank = rank
_backend = backend
set_default_device(mgb.comp_node("gpu" + str(dev)))
if rank == 0:
_master_port = mgb.config.create_mm_server("0.0.0.0", master_port)
if _master_port == -1:
raise Exception("Failed to start server on port {}".format(master_port))
else:
assert master_port > 0, "master_port must be specified for non-zero rank"
def is_distributed() -> bool:
"""Return True if the distributed process group has been initialized"""
return _world_size is not None and _world_size > 1
def get_master_ip() -> str:
"""Get the IP address of the master node"""
return str(_master_ip)
def get_master_port() -> int:
"""Get the port of the rpc server on the master node"""
return _master_port
def get_world_size() -> int:
"""Get the total number of processes participating in the job"""
return _world_size
def get_rank() -> int:
"""Get the rank of the current process"""
return _rank
def get_backend() -> str:
"""Get the backend str"""
return str(_backend)
def group_barrier() -> None:
"""Block until all ranks in the group reach this barrier"""
mgb.config.group_barrier(_master_ip, _master_port, _world_size, _rank)
def synchronized(func: Callable):
"""Decorator. Decorated function will synchronize when finished.
Specifically, we use this to prevent data race during hub.load"""
@functools.wraps(func)
def _(*args, **kwargs):
if not is_distributed():
return func(*args, **kwargs)
ret = func(*args, **kwargs)
group_barrier()
return ret
return _
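def _example_worker(rank, world_size, port):
    # Illustrative sketch, not part of the original module: one process per GPU, binding
    # device `rank` and joining the group through init_process_group above.
    init_process_group("localhost", port, world_size, rank, dev=rank)
    group_barrier()
def _example_launch(world_size: int = 2, port: int = 23456):
    # Assumed launcher: a single machine with `world_size` GPUs and a free TCP `port`.
    import multiprocessing as mp
    procs = [
        mp.Process(target=_example_worker, args=(rank, world_size, port))
        for rank in range(world_size)
    ]
    for p in procs:
        p.start()
    for p in procs:
        p.join()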
|
[
"megengine._internal.config.create_mm_server",
"megengine._internal.config.group_barrier"
] |
[((3376, 3446), 'megengine._internal.config.group_barrier', 'mgb.config.group_barrier', (['_master_ip', '_master_port', '_world_size', '_rank'], {}), '(_master_ip, _master_port, _world_size, _rank)\n', (3400, 3446), True, 'import megengine._internal as mgb\n'), ((3628, 3649), 'functools.wraps', 'functools.wraps', (['func'], {}), '(func)\n', (3643, 3649), False, 'import functools\n'), ((2339, 2390), 'megengine._internal.config.create_mm_server', 'mgb.config.create_mm_server', (['"""0.0.0.0"""', 'master_port'], {}), "('0.0.0.0', master_port)\n", (2366, 2390), True, 'import megengine._internal as mgb\n')]
|
# -*- coding:utf-8 -*-
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine.functional as F
import megengine.module as M
import layers
class RPN(M.Module):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
self.box_coder = layers.BoxCoder(cfg.rpn_reg_mean, cfg.rpn_reg_std)
# check anchor settings
assert len(set(len(x) for x in cfg.anchor_scales)) == 1
assert len(set(len(x) for x in cfg.anchor_ratios)) == 1
self.num_cell_anchors = len(cfg.anchor_scales[0]) * len(cfg.anchor_ratios[0])
rpn_channel = cfg.rpn_channel
self.in_features = cfg.rpn_in_features
self.anchor_generator = layers.AnchorBoxGenerator(
anchor_scales=cfg.anchor_scales,
anchor_ratios=cfg.anchor_ratios,
strides=cfg.rpn_stride,
offset=self.cfg.anchor_offset,
)
self.matcher = layers.Matcher(
cfg.match_thresholds, cfg.match_labels, cfg.match_allow_low_quality
)
self.rpn_conv = M.Conv2d(256, rpn_channel, kernel_size=3, stride=1, padding=1)
self.rpn_cls_score = M.Conv2d(
rpn_channel, self.num_cell_anchors, kernel_size=1, stride=1
)
self.rpn_bbox_offsets = M.Conv2d(
rpn_channel, self.num_cell_anchors * 4, kernel_size=1, stride=1
)
for l in [self.rpn_conv, self.rpn_cls_score, self.rpn_bbox_offsets]:
M.init.normal_(l.weight, std=0.01)
M.init.fill_(l.bias, 0)
def forward(self, features, im_info, boxes=None):
# prediction
features = [features[x] for x in self.in_features]
# get anchors
anchors_list = self.anchor_generator(features)
pred_cls_logit_list = []
pred_bbox_offset_list = []
for x in features:
t = F.relu(self.rpn_conv(x))
scores = self.rpn_cls_score(t)
pred_cls_logit_list.append(
scores.reshape(
scores.shape[0],
self.num_cell_anchors,
scores.shape[2],
scores.shape[3],
)
)
bbox_offsets = self.rpn_bbox_offsets(t)
pred_bbox_offset_list.append(
bbox_offsets.reshape(
bbox_offsets.shape[0],
self.num_cell_anchors,
4,
bbox_offsets.shape[2],
bbox_offsets.shape[3],
)
)
# get rois from the predictions
rpn_rois = self.find_top_rpn_proposals(
pred_cls_logit_list, pred_bbox_offset_list, anchors_list, im_info
)
if self.training:
rpn_labels, rpn_offsets = self.get_ground_truth(
anchors_list, boxes, im_info[:, 4].astype("int32")
)
pred_cls_logits, pred_bbox_offsets = self.merge_rpn_score_box(
pred_cls_logit_list, pred_bbox_offset_list
)
fg_mask = rpn_labels > 0
valid_mask = rpn_labels >= 0
num_valid = valid_mask.sum()
# rpn classification loss
loss_rpn_cls = F.loss.binary_cross_entropy(
pred_cls_logits[valid_mask], rpn_labels[valid_mask]
)
# rpn regression loss
loss_rpn_bbox = layers.smooth_l1_loss(
pred_bbox_offsets[fg_mask],
rpn_offsets[fg_mask],
self.cfg.rpn_smooth_l1_beta,
).sum() / F.maximum(num_valid, 1)
loss_dict = {"loss_rpn_cls": loss_rpn_cls, "loss_rpn_bbox": loss_rpn_bbox}
return rpn_rois, loss_dict
else:
return rpn_rois
def find_top_rpn_proposals(
self, rpn_cls_score_list, rpn_bbox_offset_list, anchors_list, im_info
):
prev_nms_top_n = (
self.cfg.train_prev_nms_top_n
if self.training
else self.cfg.test_prev_nms_top_n
)
post_nms_top_n = (
self.cfg.train_post_nms_top_n
if self.training
else self.cfg.test_post_nms_top_n
)
return_rois = []
for bid in range(im_info.shape[0]):
batch_proposal_list = []
batch_score_list = []
batch_level_list = []
for l, (rpn_cls_score, rpn_bbox_offset, anchors) in enumerate(
zip(rpn_cls_score_list, rpn_bbox_offset_list, anchors_list)
):
# get proposals and scores
offsets = rpn_bbox_offset[bid].transpose(2, 3, 0, 1).reshape(-1, 4)
proposals = self.box_coder.decode(anchors, offsets)
scores = rpn_cls_score[bid].transpose(1, 2, 0).flatten()
scores.detach()
# prev nms top n
scores, order = F.topk(scores, descending=True, k=prev_nms_top_n)
proposals = proposals[order]
batch_proposal_list.append(proposals)
batch_score_list.append(scores)
batch_level_list.append(F.full_like(scores, l))
# gather proposals, scores, level
proposals = F.concat(batch_proposal_list, axis=0)
scores = F.concat(batch_score_list, axis=0)
levels = F.concat(batch_level_list, axis=0)
proposals = layers.get_clipped_boxes(proposals, im_info[bid])
# filter invalid proposals and apply total level nms
keep_mask = layers.filter_boxes(proposals)
proposals = proposals[keep_mask]
scores = scores[keep_mask]
levels = levels[keep_mask]
nms_keep_inds = layers.batched_nms(
proposals, scores, levels, self.cfg.rpn_nms_threshold, post_nms_top_n
)
# generate rois to rcnn head, rois shape (N, 5), info [batch_id, x1, y1, x2, y2]
rois = F.concat([proposals, scores.reshape(-1, 1)], axis=1)
rois = rois[nms_keep_inds]
batch_inds = F.full((rois.shape[0], 1), bid)
batch_rois = F.concat([batch_inds, rois[:, :4]], axis=1)
return_rois.append(batch_rois)
return_rois = F.concat(return_rois, axis=0)
return return_rois.detach()
def merge_rpn_score_box(self, rpn_cls_score_list, rpn_bbox_offset_list):
final_rpn_cls_score_list = []
final_rpn_bbox_offset_list = []
for bid in range(rpn_cls_score_list[0].shape[0]):
batch_rpn_cls_score_list = []
batch_rpn_bbox_offset_list = []
for i in range(len(self.in_features)):
rpn_cls_scores = rpn_cls_score_list[i][bid].transpose(1, 2, 0).flatten()
rpn_bbox_offsets = (
rpn_bbox_offset_list[i][bid].transpose(2, 3, 0, 1).reshape(-1, 4)
)
batch_rpn_cls_score_list.append(rpn_cls_scores)
batch_rpn_bbox_offset_list.append(rpn_bbox_offsets)
batch_rpn_cls_scores = F.concat(batch_rpn_cls_score_list, axis=0)
batch_rpn_bbox_offsets = F.concat(batch_rpn_bbox_offset_list, axis=0)
final_rpn_cls_score_list.append(batch_rpn_cls_scores)
final_rpn_bbox_offset_list.append(batch_rpn_bbox_offsets)
final_rpn_cls_scores = F.concat(final_rpn_cls_score_list, axis=0)
final_rpn_bbox_offsets = F.concat(final_rpn_bbox_offset_list, axis=0)
return final_rpn_cls_scores, final_rpn_bbox_offsets
def get_ground_truth(self, anchors_list, batched_gt_boxes, batched_num_gts):
anchors = F.concat(anchors_list, axis=0)
labels_list = []
offsets_list = []
for bid in range(batched_gt_boxes.shape[0]):
gt_boxes = batched_gt_boxes[bid, :batched_num_gts[bid]]
overlaps = layers.get_iou(gt_boxes[:, :4], anchors)
matched_indices, labels = self.matcher(overlaps)
offsets = self.box_coder.encode(anchors, gt_boxes[matched_indices, :4])
# sample positive labels
num_positive = int(self.cfg.num_sample_anchors * self.cfg.positive_anchor_ratio)
labels = layers.sample_labels(labels, num_positive, 1, -1)
# sample negative labels
num_positive = (labels == 1).sum().astype("int32")
num_negative = self.cfg.num_sample_anchors - num_positive
labels = layers.sample_labels(labels, num_negative, 0, -1)
labels_list.append(labels)
offsets_list.append(offsets)
return (
F.concat(labels_list, axis=0).detach(),
F.concat(offsets_list, axis=0).detach(),
)
|
[
"megengine.functional.maximum",
"megengine.module.init.normal_",
"megengine.functional.full",
"megengine.functional.topk",
"megengine.functional.concat",
"megengine.module.Conv2d",
"megengine.functional.loss.binary_cross_entropy",
"megengine.module.init.fill_",
"megengine.functional.full_like"
] |
[((580, 630), 'layers.BoxCoder', 'layers.BoxCoder', (['cfg.rpn_reg_mean', 'cfg.rpn_reg_std'], {}), '(cfg.rpn_reg_mean, cfg.rpn_reg_std)\n', (595, 630), False, 'import layers\n'), ((997, 1148), 'layers.AnchorBoxGenerator', 'layers.AnchorBoxGenerator', ([], {'anchor_scales': 'cfg.anchor_scales', 'anchor_ratios': 'cfg.anchor_ratios', 'strides': 'cfg.rpn_stride', 'offset': 'self.cfg.anchor_offset'}), '(anchor_scales=cfg.anchor_scales, anchor_ratios=\n cfg.anchor_ratios, strides=cfg.rpn_stride, offset=self.cfg.anchor_offset)\n', (1022, 1148), False, 'import layers\n'), ((1227, 1315), 'layers.Matcher', 'layers.Matcher', (['cfg.match_thresholds', 'cfg.match_labels', 'cfg.match_allow_low_quality'], {}), '(cfg.match_thresholds, cfg.match_labels, cfg.\n match_allow_low_quality)\n', (1241, 1315), False, 'import layers\n'), ((1358, 1420), 'megengine.module.Conv2d', 'M.Conv2d', (['(256)', 'rpn_channel'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(256, rpn_channel, kernel_size=3, stride=1, padding=1)\n', (1366, 1420), True, 'import megengine.module as M\n'), ((1450, 1519), 'megengine.module.Conv2d', 'M.Conv2d', (['rpn_channel', 'self.num_cell_anchors'], {'kernel_size': '(1)', 'stride': '(1)'}), '(rpn_channel, self.num_cell_anchors, kernel_size=1, stride=1)\n', (1458, 1519), True, 'import megengine.module as M\n'), ((1574, 1647), 'megengine.module.Conv2d', 'M.Conv2d', (['rpn_channel', '(self.num_cell_anchors * 4)'], {'kernel_size': '(1)', 'stride': '(1)'}), '(rpn_channel, self.num_cell_anchors * 4, kernel_size=1, stride=1)\n', (1582, 1647), True, 'import megengine.module as M\n'), ((6539, 6568), 'megengine.functional.concat', 'F.concat', (['return_rois'], {'axis': '(0)'}), '(return_rois, axis=0)\n', (6547, 6568), True, 'import megengine.functional as F\n'), ((7651, 7693), 'megengine.functional.concat', 'F.concat', (['final_rpn_cls_score_list'], {'axis': '(0)'}), '(final_rpn_cls_score_list, axis=0)\n', (7659, 7693), True, 'import megengine.functional as F\n'), ((7727, 7771), 'megengine.functional.concat', 'F.concat', (['final_rpn_bbox_offset_list'], {'axis': '(0)'}), '(final_rpn_bbox_offset_list, axis=0)\n', (7735, 7771), True, 'import megengine.functional as F\n'), ((7932, 7962), 'megengine.functional.concat', 'F.concat', (['anchors_list'], {'axis': '(0)'}), '(anchors_list, axis=0)\n', (7940, 7962), True, 'import megengine.functional as F\n'), ((1760, 1794), 'megengine.module.init.normal_', 'M.init.normal_', (['l.weight'], {'std': '(0.01)'}), '(l.weight, std=0.01)\n', (1774, 1794), True, 'import megengine.module as M\n'), ((1807, 1830), 'megengine.module.init.fill_', 'M.init.fill_', (['l.bias', '(0)'], {}), '(l.bias, 0)\n', (1819, 1830), True, 'import megengine.module as M\n'), ((3520, 3605), 'megengine.functional.loss.binary_cross_entropy', 'F.loss.binary_cross_entropy', (['pred_cls_logits[valid_mask]', 'rpn_labels[valid_mask]'], {}), '(pred_cls_logits[valid_mask], rpn_labels[valid_mask]\n )\n', (3547, 3605), True, 'import megengine.functional as F\n'), ((5526, 5563), 'megengine.functional.concat', 'F.concat', (['batch_proposal_list'], {'axis': '(0)'}), '(batch_proposal_list, axis=0)\n', (5534, 5563), True, 'import megengine.functional as F\n'), ((5585, 5619), 'megengine.functional.concat', 'F.concat', (['batch_score_list'], {'axis': '(0)'}), '(batch_score_list, axis=0)\n', (5593, 5619), True, 'import megengine.functional as F\n'), ((5641, 5675), 'megengine.functional.concat', 'F.concat', (['batch_level_list'], {'axis': '(0)'}), '(batch_level_list, axis=0)\n', (5649, 5675), True, 
'import megengine.functional as F\n'), ((5701, 5750), 'layers.get_clipped_boxes', 'layers.get_clipped_boxes', (['proposals', 'im_info[bid]'], {}), '(proposals, im_info[bid])\n', (5725, 5750), False, 'import layers\n'), ((5840, 5870), 'layers.filter_boxes', 'layers.filter_boxes', (['proposals'], {}), '(proposals)\n', (5859, 5870), False, 'import layers\n'), ((6022, 6115), 'layers.batched_nms', 'layers.batched_nms', (['proposals', 'scores', 'levels', 'self.cfg.rpn_nms_threshold', 'post_nms_top_n'], {}), '(proposals, scores, levels, self.cfg.rpn_nms_threshold,\n post_nms_top_n)\n', (6040, 6115), False, 'import layers\n'), ((6372, 6403), 'megengine.functional.full', 'F.full', (['(rois.shape[0], 1)', 'bid'], {}), '((rois.shape[0], 1), bid)\n', (6378, 6403), True, 'import megengine.functional as F\n'), ((6429, 6472), 'megengine.functional.concat', 'F.concat', (['[batch_inds, rois[:, :4]]'], {'axis': '(1)'}), '([batch_inds, rois[:, :4]], axis=1)\n', (6437, 6472), True, 'import megengine.functional as F\n'), ((7357, 7399), 'megengine.functional.concat', 'F.concat', (['batch_rpn_cls_score_list'], {'axis': '(0)'}), '(batch_rpn_cls_score_list, axis=0)\n', (7365, 7399), True, 'import megengine.functional as F\n'), ((7437, 7481), 'megengine.functional.concat', 'F.concat', (['batch_rpn_bbox_offset_list'], {'axis': '(0)'}), '(batch_rpn_bbox_offset_list, axis=0)\n', (7445, 7481), True, 'import megengine.functional as F\n'), ((8160, 8200), 'layers.get_iou', 'layers.get_iou', (['gt_boxes[:, :4]', 'anchors'], {}), '(gt_boxes[:, :4], anchors)\n', (8174, 8200), False, 'import layers\n'), ((8499, 8548), 'layers.sample_labels', 'layers.sample_labels', (['labels', 'num_positive', '(1)', '(-1)'], {}), '(labels, num_positive, 1, -1)\n', (8519, 8548), False, 'import layers\n'), ((8740, 8789), 'layers.sample_labels', 'layers.sample_labels', (['labels', 'num_negative', '(0)', '(-1)'], {}), '(labels, num_negative, 0, -1)\n', (8760, 8789), False, 'import layers\n'), ((3866, 3889), 'megengine.functional.maximum', 'F.maximum', (['num_valid', '(1)'], {}), '(num_valid, 1)\n', (3875, 3889), True, 'import megengine.functional as F\n'), ((5193, 5242), 'megengine.functional.topk', 'F.topk', (['scores'], {'descending': '(True)', 'k': 'prev_nms_top_n'}), '(scores, descending=True, k=prev_nms_top_n)\n', (5199, 5242), True, 'import megengine.functional as F\n'), ((5431, 5453), 'megengine.functional.full_like', 'F.full_like', (['scores', 'l'], {}), '(scores, l)\n', (5442, 5453), True, 'import megengine.functional as F\n'), ((8901, 8930), 'megengine.functional.concat', 'F.concat', (['labels_list'], {'axis': '(0)'}), '(labels_list, axis=0)\n', (8909, 8930), True, 'import megengine.functional as F\n'), ((8953, 8983), 'megengine.functional.concat', 'F.concat', (['offsets_list'], {'axis': '(0)'}), '(offsets_list, axis=0)\n', (8961, 8983), True, 'import megengine.functional as F\n'), ((3694, 3798), 'layers.smooth_l1_loss', 'layers.smooth_l1_loss', (['pred_bbox_offsets[fg_mask]', 'rpn_offsets[fg_mask]', 'self.cfg.rpn_smooth_l1_beta'], {}), '(pred_bbox_offsets[fg_mask], rpn_offsets[fg_mask],\n self.cfg.rpn_smooth_l1_beta)\n', (3715, 3798), False, 'import layers\n')]
|
"""domain tag
Revision ID: 3321586ad6c4
Revises: 7c2a518ed636
Create Date: 2021-11-27 19:30:48.062908
"""
import sqlalchemy as sa
import sqlmodel
import sqlmodel.sql.sqltypes
from alembic import op
# revision identifiers, used by Alembic.
revision = "3321586ad6c4"
down_revision = "7c2a518ed636"
branch_labels = None
depends_on = None
def upgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
op.add_column(
"domains",
sa.Column(
"tag", sqlmodel.sql.sqltypes.AutoString(), server_default="", nullable=False
),
)
# ### end Alembic commands ###
def downgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column("domains", "tag")
# ### end Alembic commands ###
|
[
"sqlmodel.sql.sqltypes.AutoString"
] |
[((722, 754), 'alembic.op.drop_column', 'op.drop_column', (['"""domains"""', '"""tag"""'], {}), "('domains', 'tag')\n", (736, 754), False, 'from alembic import op\n'), ((503, 537), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (535, 537), False, 'import sqlmodel\n')]
|
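The migration above adds a non-nullable `tag` column and pairs `nullable=False` with `server_default=""`, so rows that already exist receive an empty string instead of violating the NOT NULL constraint. A minimal sketch of the model-side field this implies is shown below; the `Domain` class and its other columns are assumptions for illustration, not code from this project.
# Hypothetical SQLModel counterpart of the "domains.tag" column added by the migration.
from typing import Optional
from sqlmodel import Field, SQLModel
class Domain(SQLModel, table=True):
    __tablename__ = "domains"
    id: Optional[int] = Field(default=None, primary_key=True)
    # mirrors sa.Column("tag", ..., server_default="", nullable=False)
    tag: str = Field(default="", nullable=False)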
from fastapi import APIRouter, Depends
from ..utils import engine, get_session
from ..models.capacity import Capacity
from sqlmodel import Session, select, SQLModel, and_
from sqlalchemy.exc import NoResultFound
from ..models.user import User
from ..models.team import Team
router = APIRouter(prefix="/api/capacities", tags=["capacity"])
session = Session(engine)
@router.post("/")
async def post_capacity(*, capacity: Capacity, session: Session = Depends(get_session)):
"""
Post new capacity.
Parameters
----------
capacity : Capacity
Capacity that is to be added to the database.
session : Session
SQL session that is to be used to add the capacity.
Defaults to creating a dependency on the running SQL model session.
"""
statement = select(Capacity).where(
and_(
Capacity.user_id == capacity.user_id,
Capacity.team_id == capacity.team_id,
            Capacity.year == capacity.year,
Capacity.month == capacity.month,
)
)
try:
result = session.exec(statement).one()
return False
except NoResultFound:
session.add(capacity)
session.commit()
session.refresh(capacity)
return capacity
@router.get("/")
async def get_capacities(
session: Session = Depends(get_session),
is_locked: bool = None,
user_id: int = None,
team_id: int = None,
month: int = None,
year: int = None,
):
"""
Get list of all capacities.
Parameters
----------
session : Session
        SQL session that is to be used to get a list of the capacities.
Defaults to creating a dependency on the running SQL model session.
is_locked : bool
        Whether or not the capacity is locked.
user_id : int
User id of the user in question.
team_id : int
Team id of the user's team.
month : int
Month of capacity in question.
year : int
Year of capacity in question.
"""
statement = select(Capacity)
# Select capacity by user_id, team_id, month, year
if (user_id and team_id and month and year) != None:
statement = (
select(
Capacity.id.label("capacity_id"),
User.short_name.label("user_short_name"),
Team.short_name.label("team_short_name"),
Capacity.year,
Capacity.month,
Capacity.days,
)
.select_from(Capacity)
.join(User, Capacity.user_id == User.id)
.join(Team, Capacity.team_id == Team.id)
.where(Capacity.user_id == user_id)
.where(Capacity.team_id == team_id)
.where(Capacity.month == month)
.where(Capacity.year == year)
)
result = session.exec(statement).all()
return result
@router.delete("/")
async def delete_capacities(
capacity_id: str = None,
session: Session = Depends(get_session),
):
"""
Delete a capacity
Parameters
----------
capacity_id : str
ID of the capacity that is to be removed from the database.
session : Session
SQL session that is to be used to delete the capacity.
Defaults to creating a dependency on the running SQL model session.
"""
statement = select(Capacity).where(
Capacity.id == capacity_id,
)
capacity_to_delete = session.exec(statement).one()
session.delete(capacity_to_delete)
session.commit()
return True
|
[
"sqlmodel.Session",
"sqlmodel.select",
"sqlmodel.and_"
] |
[((284, 338), 'fastapi.APIRouter', 'APIRouter', ([], {'prefix': '"""/api/capacities"""', 'tags': "['capacity']"}), "(prefix='/api/capacities', tags=['capacity'])\n", (293, 338), False, 'from fastapi import APIRouter, Depends\n'), ((349, 364), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (356, 364), False, 'from sqlmodel import Session, select, SQLModel, and_\n'), ((451, 471), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (458, 471), False, 'from fastapi import APIRouter, Depends\n'), ((1324, 1344), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (1331, 1344), False, 'from fastapi import APIRouter, Depends\n'), ((2033, 2049), 'sqlmodel.select', 'select', (['Capacity'], {}), '(Capacity)\n', (2039, 2049), False, 'from sqlmodel import Session, select, SQLModel, and_\n'), ((2976, 2996), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (2983, 2996), False, 'from fastapi import APIRouter, Depends\n'), ((828, 979), 'sqlmodel.and_', 'and_', (['(Capacity.user_id == capacity.user_id)', '(Capacity.team_id == capacity.team_id)', '(capacity.year == capacity.year)', '(Capacity.month == capacity.month)'], {}), '(Capacity.user_id == capacity.user_id, Capacity.team_id == capacity.\n team_id, capacity.year == capacity.year, Capacity.month == capacity.month)\n', (832, 979), False, 'from sqlmodel import Session, select, SQLModel, and_\n'), ((796, 812), 'sqlmodel.select', 'select', (['Capacity'], {}), '(Capacity)\n', (802, 812), False, 'from sqlmodel import Session, select, SQLModel, and_\n'), ((3337, 3353), 'sqlmodel.select', 'select', (['Capacity'], {}), '(Capacity)\n', (3343, 3353), False, 'from sqlmodel import Session, select, SQLModel, and_\n')]
|
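A short client-side sketch of how the capacity endpoints above could be exercised; the import path for `router`, the payload values, and the commented wiring lines are assumptions rather than code from this project.
# Hedged usage sketch for the /api/capacities endpoints.
from fastapi import FastAPI
from fastapi.testclient import TestClient
# from .routers.capacity import router  # hypothetical import path for the router above
app = FastAPI()
# app.include_router(router)  # mount the capacity router shown above
client = TestClient(app)
payload = {
    "user_id": 1, "team_id": 1, "year": 2022, "month": 3, "days": 15,
    "created_at": "2022-03-01T00:00:00", "updated_at": "2022-03-01T00:00:00",
}
resp = client.post("/api/capacities/", json=payload)
# posting the same user/team/year/month twice makes the endpoint return False
print(resp.status_code, resp.json())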
"""Classes for probing values of Variables, for example, along a line."""
from __future__ import absolute_import
import numpy as nm
import numpy.linalg as nla
from sfepy.base.base import get_default, basestr, Struct
from sfepy.linalg import make_axis_rotation_matrix, norm_l2_along_axis
import six
def write_results(filename, probe, results):
"""
Write probing results into a file.
Parameters
----------
filename : str or file object
The output file name.
probe : Probe subclass instance
The probe used to obtain the results.
results : dict
The dictionary of probing results. Keys are data names, values are
the probed values.
"""
fd = open(filename, 'w') if isinstance(filename, basestr) else filename
fd.write('\n'.join(probe.report()) + '\n')
for key, result in six.iteritems(results):
pars, vals = result
fd.write('\n# %s %d\n' % (key, vals.shape[-1]))
if vals.ndim == 1:
aux = nm.hstack((pars[:,None], vals[:,None]))
else:
aux = nm.hstack((pars[:,None], vals))
nm.savetxt(fd, aux)
if isinstance(filename, basestr):
fd.close()
def read_results(filename, only_names=None):
"""
Read probing results from a file.
Parameters
----------
filename : str or file object
The probe results file name.
Returns
-------
header : Struct instance
The probe data header.
results : dict
The dictionary of probing results. Keys are data names, values are
the probed values.
"""
from sfepy.base.ioutils import read_array
only_names = get_default(only_names, [])
fd = open(filename, 'r') if isinstance(filename, basestr) else filename
header = read_header(fd)
results = {}
for name, nc in get_data_name(fd):
if name not in only_names: continue
result = read_array(fd, header.n_point, nc + 1, nm.float64)
results[name] = result
return header, results
def read_header(fd):
"""
Read the probe data header from file descriptor fd.
Returns
-------
header : Struct instance
The probe data header.
"""
header = Struct(name='probe_data_header')
header.probe_class = fd.readline().strip()
aux = fd.readline().strip().split(':')[1]
header.n_point = int(aux.strip().split()[0])
details = []
while 1:
line = fd.readline().strip()
if line == '-----':
break
else:
details.append(line)
header.details = '\n'.join(details)
return header
def get_data_name(fd):
"""
Try to read next data name in file fd.
Returns
-------
name : str
The data name.
nc : int
The number of data columns.
"""
name = None
while 1:
try:
line = fd.readline()
if (len(line) == 0): break
if len(line) == 1: continue
except:
raise StopIteration
line = line.strip().split()
if (len(line) == 3) and (line[0] == '#'):
name = line[1]
nc = int(line[2])
yield name, nc
class Probe(Struct):
"""
Base class for all point probes. Enforces two points minimum.
"""
cache = Struct(name='probe_shared_evaluate_cache')
is_cyclic = False
def __init__(self, name, share_geometry=True, n_point=None, **kwargs):
"""
Parameters
----------
name : str
The probe name, set automatically by the subclasses.
share_geometry : bool
Set to True to indicate that all the probes will work on the same
domain. Certain data are then computed only for the first probe and
cached.
n_point : int
The (fixed) number of probe points, when positive. When non-positive,
the number of points is adaptively increased starting from -n_point,
until the neighboring point distance is less than the diameter of the
elements enclosing the points. When None, it is set to -10.
For additional parameters see the __init__() docstrings of the
subclasses.
"""
Struct.__init__(self, name=name, share_geometry=share_geometry,
**kwargs)
self.set_n_point(n_point)
self.options = Struct(close_limit=0.1, size_hint=None)
self.cache = Struct(name='probe_local_evaluate_cache')
self.is_refined = False
def get_evaluate_cache(self):
"""
Return the evaluate cache for domain-related data given by
`self.share_geometry`.
"""
return Probe.cache if self.share_geometry else self.cache
def set_n_point(self, n_point):
"""
Set the number of probe points.
Parameters
----------
n_point : int
The (fixed) number of probe points, when positive. When non-positive,
the number of points is adaptively increased starting from -n_point,
until the neighboring point distance is less than the diameter of the
elements enclosing the points. When None, it is set to -10.
"""
if n_point is None:
n_point = -10
if n_point <= 0:
n_point = max(-n_point, 2)
self.n_point_required = -1
else:
n_point = max(n_point, 2)
self.n_point_required = n_point
self.n_point0 = self.n_point = n_point
def set_options(self, close_limit=None, size_hint=None):
"""
Set the probe options.
Parameters
----------
close_limit : float
The maximum limit distance of a point from the closest
element allowed for extrapolation.
size_hint : float
Element size hint for the refinement of probe parametrization.
"""
if close_limit is not None:
self.options.close_limit = close_limit
if size_hint is not None:
self.options.size_hint = size_hint
def report(self):
"""Report the probe parameters."""
out = [self.__class__.__name__]
if self.n_point_required == -1:
aux = 'adaptive'
else:
aux = 'fixed'
out.append('number of points: %s (%s)' % (self.n_point, aux))
return out
def __call__(self, variable, **kwargs):
"""
Probe the given variable. The actual implementation is in self.probe(),
so that it can be overridden in subclasses.
Parameters
----------
variable : Variable instance
The variable to be sampled along the probe.
**kwargs : additional arguments
See :func:`Probe.probe()`.
"""
return self.probe(variable, **kwargs)
def probe(self, variable, mode='val', ret_points=False):
"""
Probe the given variable.
Parameters
----------
variable : Variable instance
The variable to be sampled along the probe.
mode : {'val', 'grad'}, optional
The evaluation mode: the variable value (default) or the
variable value gradient.
ret_points : bool
If True, return also the probe points.
Returns
-------
pars : array
The parametrization of the probe points.
points : array, optional
If `ret_points` is True, the coordinates of points corresponding to
`pars`, where the `variable` is evaluated.
vals : array
The probed values.
"""
refine_flag = None
ev = variable.evaluate_at
field = variable.field
cache = field.get_evaluate_cache(cache=self.get_evaluate_cache(),
share_geometry=self.share_geometry)
self.reset_refinement()
while True:
pars, points = self.get_points(refine_flag)
if not nm.isfinite(points).all():
raise ValueError('Inf/nan in probe points!')
vals, cells = ev(points, mode=mode, strategy='general',
close_limit=self.options.close_limit,
cache=cache, ret_cells=True)
if self.is_refined:
break
else:
refine_flag = self.refine_points(variable, points, cells)
if (refine_flag == False).all():
break
self.is_refined = True
if ret_points:
return pars, points, vals
else:
return pars, vals
def reset_refinement(self):
"""
Reset the probe refinement state.
"""
self.is_refined = False
self.n_point = self.n_point0
def refine_points(self, variable, points, cells):
"""
Mark intervals between points for a refinement, based on element
sizes at those points. Assumes the points to be ordered.
Returns
-------
refine_flag : bool array
True at places corresponding to intervals between subsequent points
that need to be refined.
"""
if self.n_point_required == self.n_point:
refine_flag = nm.array([False])
else:
if self.options.size_hint is None:
ed = variable.get_element_diameters(cells, 0)
pd = 0.5 * (ed[1:] + ed[:-1])
else:
pd = self.options.size_hint
dist = norm_l2_along_axis(points[1:] - points[:-1])
refine_flag = dist > pd
if self.is_cyclic:
pd1 = 0.5 * (ed[0] + ed[-1])
dist1 = nla.norm(points[0] - points[-1])
refine_flag = nm.r_[refine_flag, dist1 > pd1]
return refine_flag
@staticmethod
def refine_pars(pars, refine_flag, cyclic_val=None):
"""
Refine the probe parametrization based on the refine_flag.
"""
ii = nm.where(refine_flag)[0]
ip = ii + 1
if cyclic_val is not None:
cpars = nm.r_[pars, cyclic_val]
pp = 0.5 * (cpars[ip] + cpars[ii])
else:
pp = 0.5 * (pars[ip] + pars[ii])
pars = nm.insert(pars, ip, pp)
return pars
class PointsProbe(Probe):
"""
Probe variables in given points.
"""
def __init__(self, points, share_geometry=True):
"""
Parameters
----------
points : array_like
The coordinates of the points.
"""
points = nm.array(points, dtype=nm.float64, order='C')
if points.ndim == 1:
points.shape = points.shape + (1,)
n_point = points.shape[0]
name = 'points %d' % n_point
Probe.__init__(self, name=name, share_geometry=share_geometry,
points=points, n_point=n_point)
self.n_point_single = n_point
def report(self):
"""Report the probe parameters."""
out = Probe.report(self)
for ii, point in enumerate(self.points):
out.append('point %d: %s' % (ii, point))
out.append('-----')
return out
def refine_points(self, variable, points, cache):
"""No refinement for this probe."""
refine_flag = nm.array([False])
return refine_flag
def get_points(self, refine_flag=None):
"""
Get the probe points.
Returns
-------
pars : array_like
The independent coordinate of the probe.
points : array_like
The probe points, parametrized by pars.
"""
pars = nm.arange(self.n_point, dtype=nm.float64)
return pars, self.points
class LineProbe(Probe):
"""
Probe variables along a line.
If n_point is positive, that number of evenly spaced points is used. If
n_point is None or non-positive, an adaptive refinement based on element
diameters is used and the number of points and their spacing are determined
automatically. If it is negative, -n_point is used as an initial guess.
"""
def __init__(self, p0, p1, n_point, share_geometry=True):
"""
Parameters
----------
p0 : array_like
The coordinates of the start point.
p1 : array_like
The coordinates of the end point.
"""
p0 = nm.array(p0, dtype=nm.float64)
p1 = nm.array(p1, dtype=nm.float64)
name = 'line [%s, %s]' % (p0, p1)
Probe.__init__(self, name=name, share_geometry=share_geometry,
p0=p0, p1=p1, n_point=n_point)
dirvec = self.p1 - self.p0
self.length = nm.linalg.norm(dirvec)
self.dirvec = dirvec / self.length
def report(self):
"""Report the probe parameters."""
out = Probe.report(self)
out.append('point 0: %s' % self.p0)
out.append('point 1: %s' % self.p1)
out.append('-----')
return out
def get_points(self, refine_flag=None):
"""
Get the probe points.
Returns
-------
pars : array_like
The independent coordinate of the probe.
points : array_like
The probe points, parametrized by pars.
"""
if self.is_refined:
return self.pars, self.points
if refine_flag is None:
pars = nm.linspace(0, self.length, self.n_point)
else:
pars = Probe.refine_pars(self.pars, refine_flag)
self.n_point = pars.shape[0]
self.pars = pars
self.points = self.p0 + self.dirvec * pars[:,None]
return pars, self.points
class RayProbe(Probe):
"""
Probe variables along a ray. The points are parametrized by a function of
radial coordinates from a given point in a given direction.
"""
def __init__(self, p0, dirvec, p_fun, n_point, both_dirs,
share_geometry=True):
"""
Parameters
----------
p0 : array_like
The coordinates of the start point.
dirvec : array_like
The probe direction vector.
p_fun : function
The function returning the probe parametrization along the dirvec
direction.
both_dirs : bool
If True, the probe works, starting at p0, symmetrically in both
dirvec and -dirvec directions.
"""
p0 = nm.array(p0, dtype=nm.float64)
dirvec = nm.array(dirvec, dtype=nm.float64)
dirvec /= nla.norm(dirvec)
name = 'ray %s [%s, %s]' % (p_fun.__name__, p0, dirvec)
if both_dirs:
n_point_true = 2 * n_point
else:
n_point_true = n_point
Probe.__init__(self, name=name, share_geometry=share_geometry,
p0=p0, dirvec=dirvec, p_fun=p_fun,
n_point=n_point_true, both_dirs=both_dirs)
self.n_point_single = n_point
def report(self):
"""Report the probe parameters."""
out = Probe.report(self)
out.append('point 0: %s' % self.p0)
out.append('direction vector: %s' % self.dirvec)
out.append('both directions: %s' % self.both_dirs)
out.append('distribution function: %s' % self.p_fun.__name__)
out.append('-----')
return out
def refine_points(self, variable, points, cache):
"""No refinement for this probe."""
refine_flag = nm.array([False])
return refine_flag
def gen_points(self, sign):
"""Generate the probe points and their parametrization."""
pars = self.p_fun(nm.arange(self.n_point_single, dtype=nm.float64))
points = self.p0 + sign * self.dirvec * pars[:,None]
return pars, points
def get_points(self, refine_flag=None):
"""
Get the probe points.
Returns
-------
pars : array_like
The independent coordinate of the probe.
points : array_like
The probe points, parametrized by pars.
"""
pars, points = self.gen_points(1.0)
if self.both_dirs:
pars0, points0 = self.gen_points(-1.0)
pars = nm.concatenate((-pars0[::-1], pars))
points = nm.concatenate((points0[::-1], points))
return pars, points
class CircleProbe(Probe):
"""
Probe variables along a circle.
If n_point is positive, that number of evenly spaced points is used. If
n_point is None or non-positive, an adaptive refinement based on element
diameters is used and the number of points and their spacing are determined
automatically. If it is negative, -n_point is used as an initial guess.
"""
is_cyclic = True
def __init__(self, centre, normal, radius, n_point, share_geometry=True):
"""
Parameters
----------
centre : array_like
The coordinates of the circle centre.
normal : array_like
The normal vector perpendicular to the circle plane.
radius : float
The radius of the circle.
"""
centre = nm.array(centre, dtype=nm.float64)
normal = nm.array(normal, dtype=nm.float64)
normal /= nla.norm(normal)
name = 'circle [%s, %s, %s]' % (centre, normal, radius)
Probe.__init__(self, name=name, share_geometry=share_geometry,
centre=centre, normal=normal,
radius=radius, n_point=n_point)
def report(self):
"""Report the probe parameters."""
out = Probe.report(self)
out.append('centre: %s' % self.centre)
out.append('normal: %s' % self.normal)
out.append('radius: %s' % self.radius)
out.append('-----')
return out
def get_points(self, refine_flag=None):
"""
Get the probe points.
Returns
-------
pars : array_like
The independent coordinate of the probe.
points : array_like
The probe points, parametrized by pars.
"""
# Vector of angles.
if self.is_refined:
return self.pars, self.points
if refine_flag is None:
pars = nm.linspace(0.0, 2.0*nm.pi, self.n_point + 1)[:-1]
else:
pars = Probe.refine_pars(self.pars, refine_flag,
cyclic_val=2.0 * nm.pi)
self.n_point = pars.shape[0]
self.pars = pars
# Create the points in xy plane, centered at the origin.
x = self.radius * nm.cos(pars[:,None])
y = self.radius * nm.sin(pars[:,None])
if len(self.centre) == 3:
z = nm.zeros((self.n_point, 1), dtype=nm.float64)
points = nm.c_[x, y, z]
# Rotate to satisfy the normal, shift to the centre.
n1 = nm.array([0.0, 0.0, 1.0], dtype=nm.float64)
axis = nm.cross(n1, self.normal)
angle = nm.arccos(nm.dot(n1, self.normal))
if nla.norm(axis) < 0.1:
# n1 == self.normal
rot_mtx = nm.eye(3, dtype=nm.float64)
else:
rot_mtx = make_axis_rotation_matrix(axis, angle)
points = nm.dot(points, rot_mtx)
else:
points = nm.c_[x, y]
points += self.centre
self.points = points
return pars, points
class IntegralProbe(Struct):
"""Evaluate integral expressions."""
def __init__(self, name, problem, expressions, labels):
Struct.__init__(self, name=name, problem=problem,
expressions=expressions, labels=labels)
def __call__(self, ip, state=None, **kwargs):
return self.problem.evaluate(self.expressions[ip], state, **kwargs)
|
[
"sfepy.base.base.Struct",
"sfepy.linalg.make_axis_rotation_matrix",
"sfepy.base.base.get_default",
"sfepy.base.base.Struct.__init__",
"sfepy.linalg.norm_l2_along_axis",
"sfepy.base.ioutils.read_array"
] |
[((845, 867), 'six.iteritems', 'six.iteritems', (['results'], {}), '(results)\n', (858, 867), False, 'import six\n'), ((1663, 1690), 'sfepy.base.base.get_default', 'get_default', (['only_names', '[]'], {}), '(only_names, [])\n', (1674, 1690), False, 'from sfepy.base.base import get_default, basestr, Struct\n'), ((2218, 2250), 'sfepy.base.base.Struct', 'Struct', ([], {'name': '"""probe_data_header"""'}), "(name='probe_data_header')\n", (2224, 2250), False, 'from sfepy.base.base import get_default, basestr, Struct\n'), ((3300, 3342), 'sfepy.base.base.Struct', 'Struct', ([], {'name': '"""probe_shared_evaluate_cache"""'}), "(name='probe_shared_evaluate_cache')\n", (3306, 3342), False, 'from sfepy.base.base import get_default, basestr, Struct\n'), ((1113, 1132), 'numpy.savetxt', 'nm.savetxt', (['fd', 'aux'], {}), '(fd, aux)\n', (1123, 1132), True, 'import numpy as nm\n'), ((1916, 1966), 'sfepy.base.ioutils.read_array', 'read_array', (['fd', 'header.n_point', '(nc + 1)', 'nm.float64'], {}), '(fd, header.n_point, nc + 1, nm.float64)\n', (1926, 1966), False, 'from sfepy.base.ioutils import read_array\n'), ((4230, 4303), 'sfepy.base.base.Struct.__init__', 'Struct.__init__', (['self'], {'name': 'name', 'share_geometry': 'share_geometry'}), '(self, name=name, share_geometry=share_geometry, **kwargs)\n', (4245, 4303), False, 'from sfepy.base.base import get_default, basestr, Struct\n'), ((4387, 4426), 'sfepy.base.base.Struct', 'Struct', ([], {'close_limit': '(0.1)', 'size_hint': 'None'}), '(close_limit=0.1, size_hint=None)\n', (4393, 4426), False, 'from sfepy.base.base import get_default, basestr, Struct\n'), ((4448, 4489), 'sfepy.base.base.Struct', 'Struct', ([], {'name': '"""probe_local_evaluate_cache"""'}), "(name='probe_local_evaluate_cache')\n", (4454, 4489), False, 'from sfepy.base.base import get_default, basestr, Struct\n'), ((10325, 10348), 'numpy.insert', 'nm.insert', (['pars', 'ip', 'pp'], {}), '(pars, ip, pp)\n', (10334, 10348), True, 'import numpy as nm\n'), ((10654, 10699), 'numpy.array', 'nm.array', (['points'], {'dtype': 'nm.float64', 'order': '"""C"""'}), "(points, dtype=nm.float64, order='C')\n", (10662, 10699), True, 'import numpy as nm\n'), ((11382, 11399), 'numpy.array', 'nm.array', (['[False]'], {}), '([False])\n', (11390, 11399), True, 'import numpy as nm\n'), ((11731, 11772), 'numpy.arange', 'nm.arange', (['self.n_point'], {'dtype': 'nm.float64'}), '(self.n_point, dtype=nm.float64)\n', (11740, 11772), True, 'import numpy as nm\n'), ((12471, 12501), 'numpy.array', 'nm.array', (['p0'], {'dtype': 'nm.float64'}), '(p0, dtype=nm.float64)\n', (12479, 12501), True, 'import numpy as nm\n'), ((12515, 12545), 'numpy.array', 'nm.array', (['p1'], {'dtype': 'nm.float64'}), '(p1, dtype=nm.float64)\n', (12523, 12545), True, 'import numpy as nm\n'), ((12772, 12794), 'numpy.linalg.norm', 'nm.linalg.norm', (['dirvec'], {}), '(dirvec)\n', (12786, 12794), True, 'import numpy as nm\n'), ((14532, 14562), 'numpy.array', 'nm.array', (['p0'], {'dtype': 'nm.float64'}), '(p0, dtype=nm.float64)\n', (14540, 14562), True, 'import numpy as nm\n'), ((14580, 14614), 'numpy.array', 'nm.array', (['dirvec'], {'dtype': 'nm.float64'}), '(dirvec, dtype=nm.float64)\n', (14588, 14614), True, 'import numpy as nm\n'), ((14633, 14649), 'numpy.linalg.norm', 'nla.norm', (['dirvec'], {}), '(dirvec)\n', (14641, 14649), True, 'import numpy.linalg as nla\n'), ((15558, 15575), 'numpy.array', 'nm.array', (['[False]'], {}), '([False])\n', (15566, 15575), True, 'import numpy as nm\n'), ((17224, 17258), 'numpy.array', 'nm.array', 
(['centre'], {'dtype': 'nm.float64'}), '(centre, dtype=nm.float64)\n', (17232, 17258), True, 'import numpy as nm\n'), ((17276, 17310), 'numpy.array', 'nm.array', (['normal'], {'dtype': 'nm.float64'}), '(normal, dtype=nm.float64)\n', (17284, 17310), True, 'import numpy as nm\n'), ((17329, 17345), 'numpy.linalg.norm', 'nla.norm', (['normal'], {}), '(normal)\n', (17337, 17345), True, 'import numpy.linalg as nla\n'), ((19627, 19720), 'sfepy.base.base.Struct.__init__', 'Struct.__init__', (['self'], {'name': 'name', 'problem': 'problem', 'expressions': 'expressions', 'labels': 'labels'}), '(self, name=name, problem=problem, expressions=expressions,\n labels=labels)\n', (19642, 19720), False, 'from sfepy.base.base import get_default, basestr, Struct\n'), ((999, 1040), 'numpy.hstack', 'nm.hstack', (['(pars[:, None], vals[:, None])'], {}), '((pars[:, None], vals[:, None]))\n', (1008, 1040), True, 'import numpy as nm\n'), ((1072, 1104), 'numpy.hstack', 'nm.hstack', (['(pars[:, None], vals)'], {}), '((pars[:, None], vals))\n', (1081, 1104), True, 'import numpy as nm\n'), ((9320, 9337), 'numpy.array', 'nm.array', (['[False]'], {}), '([False])\n', (9328, 9337), True, 'import numpy as nm\n'), ((9591, 9635), 'sfepy.linalg.norm_l2_along_axis', 'norm_l2_along_axis', (['(points[1:] - points[:-1])'], {}), '(points[1:] - points[:-1])\n', (9609, 9635), False, 'from sfepy.linalg import make_axis_rotation_matrix, norm_l2_along_axis\n'), ((10077, 10098), 'numpy.where', 'nm.where', (['refine_flag'], {}), '(refine_flag)\n', (10085, 10098), True, 'import numpy as nm\n'), ((13483, 13524), 'numpy.linspace', 'nm.linspace', (['(0)', 'self.length', 'self.n_point'], {}), '(0, self.length, self.n_point)\n', (13494, 13524), True, 'import numpy as nm\n'), ((15729, 15777), 'numpy.arange', 'nm.arange', (['self.n_point_single'], {'dtype': 'nm.float64'}), '(self.n_point_single, dtype=nm.float64)\n', (15738, 15777), True, 'import numpy as nm\n'), ((16298, 16334), 'numpy.concatenate', 'nm.concatenate', (['(-pars0[::-1], pars)'], {}), '((-pars0[::-1], pars))\n', (16312, 16334), True, 'import numpy as nm\n'), ((16356, 16395), 'numpy.concatenate', 'nm.concatenate', (['(points0[::-1], points)'], {}), '((points0[::-1], points))\n', (16370, 16395), True, 'import numpy as nm\n'), ((18665, 18686), 'numpy.cos', 'nm.cos', (['pars[:, None]'], {}), '(pars[:, None])\n', (18671, 18686), True, 'import numpy as nm\n'), ((18712, 18733), 'numpy.sin', 'nm.sin', (['pars[:, None]'], {}), '(pars[:, None])\n', (18718, 18733), True, 'import numpy as nm\n'), ((18784, 18829), 'numpy.zeros', 'nm.zeros', (['(self.n_point, 1)'], {'dtype': 'nm.float64'}), '((self.n_point, 1), dtype=nm.float64)\n', (18792, 18829), True, 'import numpy as nm\n'), ((18949, 18992), 'numpy.array', 'nm.array', (['[0.0, 0.0, 1.0]'], {'dtype': 'nm.float64'}), '([0.0, 0.0, 1.0], dtype=nm.float64)\n', (18957, 18992), True, 'import numpy as nm\n'), ((19012, 19037), 'numpy.cross', 'nm.cross', (['n1', 'self.normal'], {}), '(n1, self.normal)\n', (19020, 19037), True, 'import numpy as nm\n'), ((19326, 19349), 'numpy.dot', 'nm.dot', (['points', 'rot_mtx'], {}), '(points, rot_mtx)\n', (19332, 19349), True, 'import numpy as nm\n'), ((9773, 9805), 'numpy.linalg.norm', 'nla.norm', (['(points[0] - points[-1])'], {}), '(points[0] - points[-1])\n', (9781, 9805), True, 'import numpy.linalg as nla\n'), ((18317, 18364), 'numpy.linspace', 'nm.linspace', (['(0.0)', '(2.0 * nm.pi)', '(self.n_point + 1)'], {}), '(0.0, 2.0 * nm.pi, self.n_point + 1)\n', (18328, 18364), True, 'import numpy as nm\n'), ((19068, 
19091), 'numpy.dot', 'nm.dot', (['n1', 'self.normal'], {}), '(n1, self.normal)\n', (19074, 19091), True, 'import numpy as nm\n'), ((19109, 19123), 'numpy.linalg.norm', 'nla.norm', (['axis'], {}), '(axis)\n', (19117, 19123), True, 'import numpy.linalg as nla\n'), ((19193, 19220), 'numpy.eye', 'nm.eye', (['(3)'], {'dtype': 'nm.float64'}), '(3, dtype=nm.float64)\n', (19199, 19220), True, 'import numpy as nm\n'), ((19265, 19303), 'sfepy.linalg.make_axis_rotation_matrix', 'make_axis_rotation_matrix', (['axis', 'angle'], {}), '(axis, angle)\n', (19290, 19303), False, 'from sfepy.linalg import make_axis_rotation_matrix, norm_l2_along_axis\n'), ((8032, 8051), 'numpy.isfinite', 'nm.isfinite', (['points'], {}), '(points)\n', (8043, 8051), True, 'import numpy as nm\n')]
|
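The probe classes above are driven by an sfepy variable from a solved problem; a minimal usage sketch follows, in which `u` is assumed to be a FieldVariable obtained elsewhere, so only the probing and I/O helpers defined above are exercised.
# Hedged usage sketch; `u` is an assumed sfepy FieldVariable from a solved problem.
probe = LineProbe([0.0, 0.0, 0.0], [0.1, 0.0, 0.0], n_point=-20, share_geometry=True)
probe.set_options(close_limit=0.1)
pars, vals = probe(u)                # values of u along the line
pars, grads = probe(u, mode='grad')  # or its gradient
write_results('line_probe.txt', probe, {'u': (pars, vals)})
header, results = read_results('line_probe.txt', only_names=['u'])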
from itertools import product
import numpy as np
from megengine import tensor
from megengine.module import (
Conv2d,
ConvBn2d,
ConvRelu2d,
DequantStub,
Module,
QuantStub,
)
from megengine.quantization.quantize import disable_fake_quant, quantize_qat
def test_qat_convbn2d():
in_channels = 32
out_channels = 64
kernel_size = 3
for groups, bias in product([1, 4], [True, False]):
module = ConvBn2d(
in_channels, out_channels, kernel_size, groups=groups, bias=bias
)
module.train()
qat_module = quantize_qat(module, inplace=False)
disable_fake_quant(qat_module)
inputs = tensor(np.random.randn(4, in_channels, 32, 32).astype(np.float32))
normal_outputs = module(inputs)
        qat_outputs = qat_module(inputs)
np.testing.assert_allclose(
normal_outputs.numpy(), qat_outputs.numpy(), atol=5e-6
)
np.testing.assert_allclose(
module.bn.running_mean.numpy(),
qat_module.bn.running_mean.numpy(),
atol=5e-8,
)
np.testing.assert_allclose(
module.bn.running_var.numpy(), qat_module.bn.running_var.numpy(), atol=5e-7,
)
module.eval()
normal_outputs = module(inputs)
qat_module.eval()
qat_outputs = qat_module(inputs)
np.testing.assert_allclose(
normal_outputs.numpy(), qat_outputs.numpy(), atol=5e-6
)
def test_qat_conv():
in_channels = 32
out_channels = 64
kernel_size = 3
class TestNet(Module):
def __init__(self, groups, bias):
super().__init__()
self.quant = QuantStub()
self.dequant = DequantStub()
self.conv = Conv2d(
in_channels, out_channels, kernel_size, groups=groups, bias=bias
)
self.conv_relu = ConvRelu2d(
out_channels, in_channels, kernel_size, groups=groups, bias=bias
)
def forward(self, inp):
out = self.quant(inp)
out = self.conv(out)
out = self.conv_relu(out)
out = self.dequant(out)
return out
inputs = tensor(np.random.randn(4, in_channels, 32, 32).astype(np.float32))
for groups, bias in product([1, 4], [True, False]):
net = TestNet(groups, bias)
net.train()
qat_net = quantize_qat(net, inplace=False)
disable_fake_quant(qat_net)
normal_outputs = net(inputs)
qat_outputs = qat_net(inputs)
np.testing.assert_allclose(normal_outputs.numpy(), qat_outputs.numpy())
net.eval()
normal_outputs = net(inputs)
qat_net.eval()
qat_outputs = qat_net(inputs)
np.testing.assert_allclose(normal_outputs.numpy(), qat_outputs.numpy())
|
[
"megengine.quantization.quantize.quantize_qat",
"megengine.module.QuantStub",
"megengine.module.ConvBn2d",
"megengine.quantization.quantize.disable_fake_quant",
"megengine.module.DequantStub",
"megengine.module.ConvRelu2d",
"megengine.module.Conv2d"
] |
[((390, 420), 'itertools.product', 'product', (['[1, 4]', '[True, False]'], {}), '([1, 4], [True, False])\n', (397, 420), False, 'from itertools import product\n'), ((2349, 2379), 'itertools.product', 'product', (['[1, 4]', '[True, False]'], {}), '([1, 4], [True, False])\n', (2356, 2379), False, 'from itertools import product\n'), ((439, 513), 'megengine.module.ConvBn2d', 'ConvBn2d', (['in_channels', 'out_channels', 'kernel_size'], {'groups': 'groups', 'bias': 'bias'}), '(in_channels, out_channels, kernel_size, groups=groups, bias=bias)\n', (447, 513), False, 'from megengine.module import Conv2d, ConvBn2d, ConvRelu2d, DequantStub, Module, QuantStub\n'), ((580, 615), 'megengine.quantization.quantize.quantize_qat', 'quantize_qat', (['module'], {'inplace': '(False)'}), '(module, inplace=False)\n', (592, 615), False, 'from megengine.quantization.quantize import disable_fake_quant, quantize_qat\n'), ((624, 654), 'megengine.quantization.quantize.disable_fake_quant', 'disable_fake_quant', (['qat_module'], {}), '(qat_module)\n', (642, 654), False, 'from megengine.quantization.quantize import disable_fake_quant, quantize_qat\n'), ((2455, 2487), 'megengine.quantization.quantize.quantize_qat', 'quantize_qat', (['net'], {'inplace': '(False)'}), '(net, inplace=False)\n', (2467, 2487), False, 'from megengine.quantization.quantize import disable_fake_quant, quantize_qat\n'), ((2496, 2523), 'megengine.quantization.quantize.disable_fake_quant', 'disable_fake_quant', (['qat_net'], {}), '(qat_net)\n', (2514, 2523), False, 'from megengine.quantization.quantize import disable_fake_quant, quantize_qat\n'), ((1731, 1742), 'megengine.module.QuantStub', 'QuantStub', ([], {}), '()\n', (1740, 1742), False, 'from megengine.module import Conv2d, ConvBn2d, ConvRelu2d, DequantStub, Module, QuantStub\n'), ((1770, 1783), 'megengine.module.DequantStub', 'DequantStub', ([], {}), '()\n', (1781, 1783), False, 'from megengine.module import Conv2d, ConvBn2d, ConvRelu2d, DequantStub, Module, QuantStub\n'), ((1808, 1880), 'megengine.module.Conv2d', 'Conv2d', (['in_channels', 'out_channels', 'kernel_size'], {'groups': 'groups', 'bias': 'bias'}), '(in_channels, out_channels, kernel_size, groups=groups, bias=bias)\n', (1814, 1880), False, 'from megengine.module import Conv2d, ConvBn2d, ConvRelu2d, DequantStub, Module, QuantStub\n'), ((1940, 2016), 'megengine.module.ConvRelu2d', 'ConvRelu2d', (['out_channels', 'in_channels', 'kernel_size'], {'groups': 'groups', 'bias': 'bias'}), '(out_channels, in_channels, kernel_size, groups=groups, bias=bias)\n', (1950, 2016), False, 'from megengine.module import Conv2d, ConvBn2d, ConvRelu2d, DequantStub, Module, QuantStub\n'), ((2265, 2304), 'numpy.random.randn', 'np.random.randn', (['(4)', 'in_channels', '(32)', '(32)'], {}), '(4, in_channels, 32, 32)\n', (2280, 2304), True, 'import numpy as np\n'), ((679, 718), 'numpy.random.randn', 'np.random.randn', (['(4)', 'in_channels', '(32)', '(32)'], {}), '(4, in_channels, 32, 32)\n', (694, 718), True, 'import numpy as np\n')]
|
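Both tests above follow one pattern: build a float module, derive a QAT module with `quantize_qat`, switch fake quantization off with `disable_fake_quant`, and check that the two modules agree. A condensed, self-contained sketch of that pattern is shown below; `TinyNet` is an illustrative stand-in rather than a module from the test file.
# Condensed sketch of the float-vs-QAT comparison pattern used above.
import numpy as np
from megengine import tensor
from megengine.module import Conv2d, DequantStub, Module, QuantStub
from megengine.quantization.quantize import disable_fake_quant, quantize_qat
class TinyNet(Module):
    def __init__(self):
        super().__init__()
        self.quant = QuantStub()
        self.conv = Conv2d(3, 8, 3)
        self.dequant = DequantStub()
    def forward(self, x):
        return self.dequant(self.conv(self.quant(x)))
net = TinyNet()
qat_net = quantize_qat(net, inplace=False)
disable_fake_quant(qat_net)  # with fake quant disabled, outputs should match the float net
x = tensor(np.random.randn(1, 3, 32, 32).astype("float32"))
np.testing.assert_allclose(net(x).numpy(), qat_net(x).numpy(), atol=1e-5)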
from fastapi import APIRouter, Depends, HTTPException, Query, Path
from sqlmodel import Session, select
from sqlalchemy.exc import IntegrityError
from typing import List
import datetime as dt
from app.src.common.security import get_current_user
from app.src.common.utils import profiling_api
from app.src.models.app_user import AppUser
from app.src.models.product_type import (
ProductType,
ProductTypeRead,
ProductTypeCreate,
ProductTypeUpdate,
)
from app.src.db.engine import get_session
router = APIRouter()
# For teaching purposes, validation of producttype_id is added with Path:
# - it cannot be < 1
async def get_producttype_or_404(
*,
session: Session = Depends(get_session),
producttype_id: int = Path(..., ge=1),
current_user: AppUser = Depends(get_current_user),
):
start_time = dt.datetime.now()
try:
db_pt = session.get(ProductType, producttype_id)
if db_pt:
return {
"db_pt": db_pt,
"username": current_user.username,
"start_time": start_time,
}
else:
raise HTTPException(status_code=404, detail="Product type not found")
except KeyError:
raise HTTPException(status_code=400, detail="Product type not found")
@router.get("/", response_model=List[ProductTypeRead])
# lte -> less than or equal
async def read_product_types(
*,
session: Session = Depends(get_session),
offset: int = 0,
limit: int = Query(default=100, lte=100),
current_user: AppUser = Depends(get_current_user),
):
"""
Get all the existing product types
"""
start_time = dt.datetime.now()
product_types = session.exec(select(ProductType).offset(offset).limit(limit)).all()
profiling_api("ProductType:get:all", start_time, current_user.username)
return product_types
@router.get("/{producttype_id}", response_model=ProductTypeRead)
async def read_product_type(
*, producttype_id: int, db_pt: ProductType = Depends(get_producttype_or_404)
):
"""
Get the product type by id
"""
profiling_api(
f"ProductType:read:by_id:{producttype_id}",
db_pt["start_time"],
db_pt["username"],
)
return db_pt["db_pt"]
@router.post("/", response_model=ProductTypeRead)
async def create_product_type(
*,
session: Session = Depends(get_session),
product_type: ProductTypeCreate,
current_user: AppUser = Depends(get_current_user),
):
"""
Create a product type
"""
start_time = dt.datetime.now()
try:
db_pt = ProductType.from_orm(product_type)
session.add(db_pt)
session.commit()
session.refresh(db_pt)
except IntegrityError:
raise HTTPException(
status_code=404, detail="Impossible to create product type with same name"
)
profiling_api("ProductType:insert:single", start_time, current_user.username)
return db_pt
@router.patch("/{producttype_id}", response_model=ProductTypeRead)
async def update_product_type(
*,
producttype_id: int,
session: Session = Depends(get_session),
pt: ProductTypeUpdate,
db_pt: ProductType = Depends(get_producttype_or_404),
):
"""
Modify a product type
"""
# exclude_unset=True: it would only include the values
# that were sent by the client
existing_pt = db_pt["db_pt"]
pt_data = pt.dict(exclude_unset=True)
for key, value in pt_data.items():
setattr(existing_pt, key, value)
session.add(existing_pt)
session.commit()
session.refresh(existing_pt)
profiling_api(
f"ProductType:update:by_id:{producttype_id}",
db_pt["start_time"],
db_pt["username"],
)
return existing_pt
@router.delete("/{producttype_id}")
async def delete_product_type(
*,
producttype_id: int,
session: Session = Depends(get_session),
db_pt: ProductType = Depends(get_producttype_or_404),
):
"""
Delete and remove an existing product type by id; it must be >= 1
"""
existing_db_pt = db_pt["db_pt"]
session.delete(existing_db_pt)
session.commit()
profiling_api(
f"ProductType:delete:by_id:{producttype_id}",
db_pt["start_time"],
db_pt["username"],
)
return {"ok": True}
|
[
"sqlmodel.select"
] |
[((518, 529), 'fastapi.APIRouter', 'APIRouter', ([], {}), '()\n', (527, 529), False, 'from fastapi import APIRouter, Depends, HTTPException, Query, Path\n'), ((693, 713), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (700, 713), False, 'from fastapi import APIRouter, Depends, HTTPException, Query, Path\n'), ((741, 756), 'fastapi.Path', 'Path', (['...'], {'ge': '(1)'}), '(..., ge=1)\n', (745, 756), False, 'from fastapi import APIRouter, Depends, HTTPException, Query, Path\n'), ((786, 811), 'fastapi.Depends', 'Depends', (['get_current_user'], {}), '(get_current_user)\n', (793, 811), False, 'from fastapi import APIRouter, Depends, HTTPException, Query, Path\n'), ((833, 850), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (848, 850), True, 'import datetime as dt\n'), ((1435, 1455), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (1442, 1455), False, 'from fastapi import APIRouter, Depends, HTTPException, Query, Path\n'), ((1495, 1522), 'fastapi.Query', 'Query', ([], {'default': '(100)', 'lte': '(100)'}), '(default=100, lte=100)\n', (1500, 1522), False, 'from fastapi import APIRouter, Depends, HTTPException, Query, Path\n'), ((1552, 1577), 'fastapi.Depends', 'Depends', (['get_current_user'], {}), '(get_current_user)\n', (1559, 1577), False, 'from fastapi import APIRouter, Depends, HTTPException, Query, Path\n'), ((1654, 1671), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (1669, 1671), True, 'import datetime as dt\n'), ((1764, 1835), 'app.src.common.utils.profiling_api', 'profiling_api', (['"""ProductType:get:all"""', 'start_time', 'current_user.username'], {}), "('ProductType:get:all', start_time, current_user.username)\n", (1777, 1835), False, 'from app.src.common.utils import profiling_api\n'), ((2006, 2037), 'fastapi.Depends', 'Depends', (['get_producttype_or_404'], {}), '(get_producttype_or_404)\n', (2013, 2037), False, 'from fastapi import APIRouter, Depends, HTTPException, Query, Path\n'), ((2092, 2194), 'app.src.common.utils.profiling_api', 'profiling_api', (['f"""ProductType:read:by_id:{producttype_id}"""', "db_pt['start_time']", "db_pt['username']"], {}), "(f'ProductType:read:by_id:{producttype_id}', db_pt[\n 'start_time'], db_pt['username'])\n", (2105, 2194), False, 'from app.src.common.utils import profiling_api\n'), ((2360, 2380), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (2367, 2380), False, 'from fastapi import APIRouter, Depends, HTTPException, Query, Path\n'), ((2447, 2472), 'fastapi.Depends', 'Depends', (['get_current_user'], {}), '(get_current_user)\n', (2454, 2472), False, 'from fastapi import APIRouter, Depends, HTTPException, Query, Path\n'), ((2536, 2553), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (2551, 2553), True, 'import datetime as dt\n'), ((2854, 2931), 'app.src.common.utils.profiling_api', 'profiling_api', (['"""ProductType:insert:single"""', 'start_time', 'current_user.username'], {}), "('ProductType:insert:single', start_time, current_user.username)\n", (2867, 2931), False, 'from app.src.common.utils import profiling_api\n'), ((3104, 3124), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (3111, 3124), False, 'from fastapi import APIRouter, Depends, HTTPException, Query, Path\n'), ((3178, 3209), 'fastapi.Depends', 'Depends', (['get_producttype_or_404'], {}), '(get_producttype_or_404)\n', (3185, 3209), False, 'from fastapi import APIRouter, Depends, HTTPException, Query, Path\n'), ((3592, 3696), 
'app.src.common.utils.profiling_api', 'profiling_api', (['f"""ProductType:update:by_id:{producttype_id}"""', "db_pt['start_time']", "db_pt['username']"], {}), "(f'ProductType:update:by_id:{producttype_id}', db_pt[\n 'start_time'], db_pt['username'])\n", (3605, 3696), False, 'from app.src.common.utils import profiling_api\n'), ((3870, 3890), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (3877, 3890), False, 'from fastapi import APIRouter, Depends, HTTPException, Query, Path\n'), ((3917, 3948), 'fastapi.Depends', 'Depends', (['get_producttype_or_404'], {}), '(get_producttype_or_404)\n', (3924, 3948), False, 'from fastapi import APIRouter, Depends, HTTPException, Query, Path\n'), ((4135, 4239), 'app.src.common.utils.profiling_api', 'profiling_api', (['f"""ProductType:delete:by_id:{producttype_id}"""', "db_pt['start_time']", "db_pt['username']"], {}), "(f'ProductType:delete:by_id:{producttype_id}', db_pt[\n 'start_time'], db_pt['username'])\n", (4148, 4239), False, 'from app.src.common.utils import profiling_api\n'), ((2579, 2613), 'app.src.models.product_type.ProductType.from_orm', 'ProductType.from_orm', (['product_type'], {}), '(product_type)\n', (2599, 2613), False, 'from app.src.models.product_type import ProductType, ProductTypeRead, ProductTypeCreate, ProductTypeUpdate\n'), ((1127, 1190), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': '(404)', 'detail': '"""Product type not found"""'}), "(status_code=404, detail='Product type not found')\n", (1140, 1190), False, 'from fastapi import APIRouter, Depends, HTTPException, Query, Path\n'), ((1226, 1289), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': '(400)', 'detail': '"""Product type not found"""'}), "(status_code=400, detail='Product type not found')\n", (1239, 1289), False, 'from fastapi import APIRouter, Depends, HTTPException, Query, Path\n'), ((2738, 2832), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': '(404)', 'detail': '"""Impossible to create product type with same name"""'}), "(status_code=404, detail=\n 'Impossible to create product type with same name')\n", (2751, 2832), False, 'from fastapi import APIRouter, Depends, HTTPException, Query, Path\n'), ((1705, 1724), 'sqlmodel.select', 'select', (['ProductType'], {}), '(ProductType)\n', (1711, 1724), False, 'from sqlmodel import Session, select\n')]
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import json
import os
import subprocess
import sys
import time
import numpy as np
from resnet50 import Resnet50
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
from megengine._internal.plugin import CompGraphProfiler
from megengine.core import Graph, tensor
from megengine.core.graph import get_default_graph
from megengine.functional.debug_param import (
get_conv_execution_strategy,
set_conv_execution_strategy,
)
from megengine.jit import trace
from megengine.module import BatchNorm2d, Conv2d, Linear, MaxPool2d, Module
from megengine.optimizer import SGD
sys.path.append(os.path.join(os.path.dirname(__file__), "..", "..", "..", "examples"))
def init_profiler(comp_graph=get_default_graph()):
profiler = CompGraphProfiler(comp_graph)
return profiler
def dump_profiler(profiler, filename):
with open(filename, "w") as fout:
json.dump(profiler.get(), fout, indent=2)
def print_gpu_usage():
stdout = subprocess.getoutput("nvidia-smi")
for line in stdout.split("\n"):
for item in line.split(" "):
if "MiB" in item:
print("Finish with GPU Usage", item)
break
def run_perf(
batch_size=64,
warm_up=True,
dump_prof=None,
opt_level=2,
conv_fastrun=False,
run_step=True,
track_bn_stats=True,
warm_up_iter=20,
run_iter=100,
num_gpu=None,
device=0,
server=None,
port=None,
scale_batch_size=False,
eager=False,
):
if conv_fastrun:
set_conv_execution_strategy("PROFILE")
if num_gpu:
        dist.init_process_group(server, port, num_gpu, device, device)
if scale_batch_size:
batch_size = batch_size // num_gpu
print("Run with data parallel, batch size = {} per GPU".format(batch_size))
data = tensor(np.random.randn(batch_size, 3, 224, 224).astype("float32"))
label = tensor(np.random.randint(1000, size=[batch_size,], dtype=np.int32))
net = Resnet50(track_bn_stats=track_bn_stats)
opt = SGD(net.parameters(), lr=0.01, momentum=0.9, weight_decay=1e-4)
def train_func(data, label):
logits = net(data)
loss = F.cross_entropy_with_softmax(logits, label)
if num_gpu:
loss = loss / num_gpu
opt.zero_grad()
opt.backward(loss)
return loss
train_func = trace(
train_func,
symbolic=(not eager),
opt_level=opt_level,
profiling=not (dump_prof is None),
)
if warm_up:
print("Warm up ...")
for _ in range(warm_up_iter):
opt.zero_grad()
train_func(data, label)
if run_step:
opt.step()
print_gpu_usage()
print("Running train ...")
start = time.time()
for _ in range(run_iter):
opt.zero_grad()
train_func(data, label)
if run_step:
opt.step()
time_used = time.time() - start
if dump_prof:
with open(dump_prof, "w") as fout:
json.dump(train_func.get_profile(), fout, indent=2)
return time_used / run_iter
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError("Boolean value expected.")
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Running regression test on Resnet 50",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument("--batch-size", type=int, default=64, help="batch size ")
parser.add_argument(
"--warm-up", type=str2bool, default=True, help="whether to warm up"
)
parser.add_argument(
"--dump-prof",
type=str,
default=None,
help="pass the json file path to dump the profiling result",
)
parser.add_argument("--opt-level", type=int, default=2, help="graph opt level")
parser.add_argument(
"--conv-fastrun",
type=str2bool,
default=False,
help="whether to use conv fastrun mode",
)
parser.add_argument(
"--run-step",
type=str2bool,
default=True,
help="whether to run optimizer.step()",
)
parser.add_argument(
"--track-bn-stats",
type=str2bool,
default=True,
help="whether to track bn stats",
)
parser.add_argument(
"--warm-up-iter", type=int, default=20, help="number of iters to warm up"
)
parser.add_argument(
"--run-iter", type=int, default=100, help="number of iters to collect wall time"
)
parser.add_argument("--server", default="0.0.0.0")
parser.add_argument("--port", type=int, default=2222)
parser.add_argument(
"--scale-batch-size",
type=str2bool,
default=False,
help="whether to divide batch size by number of GPUs",
)
parser.add_argument(
"--eager", type=str2bool, default=False, help="whether to use eager mode"
)
# Data parallel related
parser.add_argument("--num-gpu", type=int, default=None)
parser.add_argument("--device", type=int, default=0)
args = parser.parse_args()
print(vars(args))
os.environ["MGB_JIT_BACKEND"] = "NVRTC"
t = run_perf(**vars(args))
print("**********************************")
print("Wall time per iter {:.0f} ms".format(t * 1000))
print("**********************************")
get_default_graph().clear_device_memory()
|
[
"megengine.jit.trace",
"megengine.core.graph.get_default_graph",
"megengine.distributed.init_process_group",
"megengine.functional.cross_entropy_with_softmax",
"megengine._internal.plugin.CompGraphProfiler",
"megengine.functional.debug_param.set_conv_execution_strategy"
] |
[((1128, 1147), 'megengine.core.graph.get_default_graph', 'get_default_graph', ([], {}), '()\n', (1145, 1147), False, 'from megengine.core.graph import get_default_graph\n'), ((1165, 1194), 'megengine._internal.plugin.CompGraphProfiler', 'CompGraphProfiler', (['comp_graph'], {}), '(comp_graph)\n', (1182, 1194), False, 'from megengine._internal.plugin import CompGraphProfiler\n'), ((1382, 1416), 'subprocess.getoutput', 'subprocess.getoutput', (['"""nvidia-smi"""'], {}), "('nvidia-smi')\n", (1402, 1416), False, 'import subprocess\n'), ((2401, 2440), 'resnet50.Resnet50', 'Resnet50', ([], {'track_bn_stats': 'track_bn_stats'}), '(track_bn_stats=track_bn_stats)\n', (2409, 2440), False, 'from resnet50 import Resnet50\n'), ((2780, 2876), 'megengine.jit.trace', 'trace', (['train_func'], {'symbolic': '(not eager)', 'opt_level': 'opt_level', 'profiling': '(not dump_prof is None)'}), '(train_func, symbolic=not eager, opt_level=opt_level, profiling=not \n dump_prof is None)\n', (2785, 2876), False, 'from megengine.jit import trace\n'), ((3180, 3191), 'time.time', 'time.time', ([], {}), '()\n', (3189, 3191), False, 'import time\n'), ((3849, 3984), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Running regression test on Resnet 50"""', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), "(description='Running regression test on Resnet 50',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n", (3872, 3984), False, 'import argparse\n'), ((1039, 1064), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1054, 1064), False, 'import os\n'), ((1934, 1972), 'megengine.functional.debug_param.set_conv_execution_strategy', 'set_conv_execution_strategy', (['"""PROFILE"""'], {}), "('PROFILE')\n", (1961, 1972), False, 'from megengine.functional.debug_param import get_conv_execution_strategy, set_conv_execution_strategy\n'), ((1998, 2070), 'megengine.distributed.init_process_group', 'dist.init_process_group', (['args.server', 'args.port', 'num_gpu', 'device', 'device'], {}), '(args.server, args.port, num_gpu, device, device)\n', (2021, 2070), True, 'import megengine.distributed as dist\n'), ((2329, 2387), 'numpy.random.randint', 'np.random.randint', (['(1000)'], {'size': '[batch_size]', 'dtype': 'np.int32'}), '(1000, size=[batch_size], dtype=np.int32)\n', (2346, 2387), True, 'import numpy as np\n'), ((2591, 2634), 'megengine.functional.cross_entropy_with_softmax', 'F.cross_entropy_with_softmax', (['logits', 'label'], {}), '(logits, label)\n', (2619, 2634), True, 'import megengine.functional as F\n'), ((3339, 3350), 'time.time', 'time.time', ([], {}), '()\n', (3348, 3350), False, 'import time\n'), ((3753, 3806), 'argparse.ArgumentTypeError', 'argparse.ArgumentTypeError', (['"""Boolean value expected."""'], {}), "('Boolean value expected.')\n", (3779, 3806), False, 'import argparse\n'), ((5951, 5970), 'megengine.core.graph.get_default_graph', 'get_default_graph', ([], {}), '()\n', (5968, 5970), False, 'from megengine.core.graph import get_default_graph\n'), ((2250, 2290), 'numpy.random.randn', 'np.random.randn', (['batch_size', '(3)', '(224)', '(224)'], {}), '(batch_size, 3, 224, 224)\n', (2265, 2290), True, 'import numpy as np\n')]
|
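Besides the command-line entry point, `run_perf` can be called directly from Python; the values below mirror the argparse defaults with shorter iteration counts, and a CUDA-capable MegEngine build is assumed.
# Illustrative direct call of run_perf(), bypassing the CLI.
t = run_perf(batch_size=64, warm_up=True, warm_up_iter=5, run_iter=20,
         conv_fastrun=False, eager=False)
print("Wall time per iter {:.0f} ms".format(t * 1000))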
from sqlmodel import Session, select
from .models import Person, engine
def create_person(nome: str, idade: int):
person = Person(nome=nome, idade=idade)
with Session(engine) as session:
session.add(person)
session.commit()
session.refresh(person)
return person
def all_person():
query = select(Person)
with Session(engine) as session:
result = session.execute(query).scalars().all()
return result
|
[
"sqlmodel.Session",
"sqlmodel.select"
] |
[((335, 349), 'sqlmodel.select', 'select', (['Person'], {}), '(Person)\n', (341, 349), False, 'from sqlmodel import Session, select\n'), ((167, 182), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (174, 182), False, 'from sqlmodel import Session, select\n'), ((363, 378), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (370, 378), False, 'from sqlmodel import Session, select\n')]
|
from typing import Optional
from sqlmodel import Field, SQLModel
from datetime import datetime
class Capacity(SQLModel, table=True):
"""Create an SQLModel for capcities"""
id: Optional[int] = Field(default=None, primary_key=True)
user_id: int = Field(foreign_key="app_db.appuser.id")
team_id: int = Field(foreign_key="app_db.team.id")
year: int
month: int
days: int
created_at: datetime
updated_at: datetime
is_locked: bool = False
__table_args__ = {"schema": "app_db"}
|
[
"sqlmodel.Field"
] |
[((203, 240), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (208, 240), False, 'from sqlmodel import Field, SQLModel\n'), ((260, 298), 'sqlmodel.Field', 'Field', ([], {'foreign_key': '"""app_db.appuser.id"""'}), "(foreign_key='app_db.appuser.id')\n", (265, 298), False, 'from sqlmodel import Field, SQLModel\n'), ((318, 353), 'sqlmodel.Field', 'Field', ([], {'foreign_key': '"""app_db.team.id"""'}), "(foreign_key='app_db.team.id')\n", (323, 353), False, 'from sqlmodel import Field, SQLModel\n')]
|
import uuid
from datetime import datetime
from typing import Optional, List
import pydantic
from sqlalchemy import Column, JSON
from sqlmodel import Field, Relationship
from api.db.models.base import BaseModel, BaseTable
class SchemaDef(pydantic.BaseModel):
id: Optional[str] = None
name: Optional[str] = None
version: Optional[str] = None
attributes: Optional[List[str]] = []
class Governance(pydantic.BaseModel):
schema_def: Optional[SchemaDef] = None
cred_def_id: Optional[str] = None
cred_def_tag: Optional[str] = None
class SandboxBase(BaseModel):
tag: Optional[str] = Field(nullable=True)
governance: dict = Field(default={}, sa_column=Column(JSON))
governance_cas: dict = Field(default={}, sa_column=Column(JSON))
class Sandbox(SandboxBase, BaseTable, table=True):
lobs: List["Lob"] = Relationship(back_populates="sandbox") # noqa: F821
students: List["Student"] = Relationship(back_populates="sandbox") # noqa: F821
applicants: List["Applicant"] = Relationship(back_populates="sandbox") # noqa: F821
class SandboxCreate(SandboxBase):
tag: Optional[str] = None
governance: Optional[Governance] = None
governance_cas: Optional[Governance] = None
class SandboxUpdate(SandboxBase):
id: uuid.UUID
tag: Optional[str] = None
governance: Optional[Governance] = None
governance_cas: Optional[Governance] = None
class SandboxRead(SandboxBase):
id: uuid.UUID
created_at: datetime
updated_at: datetime
tag: Optional[str] = None
governance: Optional[Governance] = None
governance_cas: Optional[Governance] = None
|
[
"sqlmodel.Relationship",
"sqlmodel.Field"
] |
[((614, 634), 'sqlmodel.Field', 'Field', ([], {'nullable': '(True)'}), '(nullable=True)\n', (619, 634), False, 'from sqlmodel import Field, Relationship\n'), ((846, 884), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""sandbox"""'}), "(back_populates='sandbox')\n", (858, 884), False, 'from sqlmodel import Field, Relationship\n'), ((931, 969), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""sandbox"""'}), "(back_populates='sandbox')\n", (943, 969), False, 'from sqlmodel import Field, Relationship\n'), ((1020, 1058), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""sandbox"""'}), "(back_populates='sandbox')\n", (1032, 1058), False, 'from sqlmodel import Field, Relationship\n'), ((686, 698), 'sqlalchemy.Column', 'Column', (['JSON'], {}), '(JSON)\n', (692, 698), False, 'from sqlalchemy import Column, JSON\n'), ((755, 767), 'sqlalchemy.Column', 'Column', (['JSON'], {}), '(JSON)\n', (761, 767), False, 'from sqlalchemy import Column, JSON\n')]
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import cv2
import megengine as mge
import megengine.data.dataset as dataset
import megengine.jit as jit
import numpy as np
from megengine.utils.http_download import download_from_url
from official.vision.segmentation.deeplabv3plus import DeepLabV3Plus
class Config:
NUM_CLASSES = 21
IMG_SIZE = 512
IMG_MEAN = [103.530, 116.280, 123.675]
IMG_STD = [57.375, 57.120, 58.395]
cfg = Config()
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--image_path", type=str, default=None, help="inference image")
parser.add_argument("--model_path", type=str, default=None, help="inference model")
args = parser.parse_args()
net = load_model(args.model_path)
if args.image_path is None:
download_from_url("https://data.megengine.org.cn/images/cat.jpg", "test.jpg")
img = cv2.imread("test.jpg")
else:
img = cv2.imread(args.image_path)
pred = inference(img, net)
cv2.imwrite("out.jpg", pred)
def load_model(model_path):
model_dict = mge.load(model_path)
net = DeepLabV3Plus(class_num=cfg.NUM_CLASSES)
net.load_state_dict(model_dict["state_dict"])
print("load model %s" % (model_path))
net.eval()
return net
def inference(img, net):
@jit.trace(symbolic=True, opt_level=2)
def pred_fun(data, net=None):
net.eval()
pred = net(data)
return pred
img = (img.astype("float32") - np.array(cfg.IMG_MEAN)) / np.array(cfg.IMG_STD)
orih, oriw = img.shape[:2]
img = cv2.resize(img, (cfg.IMG_SIZE, cfg.IMG_SIZE))
img = img.transpose(2, 0, 1)[np.newaxis]
data = mge.tensor()
data.set_value(img)
pred = pred_fun(data, net=net)
pred = pred.numpy().squeeze().argmax(0)
pred = cv2.resize(
pred.astype("uint8"), (oriw, orih), interpolation=cv2.INTER_NEAREST
)
class_colors = dataset.PascalVOC.class_colors
out = np.zeros((orih, oriw, 3))
nids = np.unique(pred)
for t in nids:
out[pred == t] = class_colors[t]
return out
if __name__ == "__main__":
main()
|
[
"megengine.jit.trace",
"megengine.tensor",
"megengine.load",
"megengine.utils.http_download.download_from_url"
] |
[((829, 854), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (852, 854), False, 'import argparse\n'), ((1343, 1371), 'cv2.imwrite', 'cv2.imwrite', (['"""out.jpg"""', 'pred'], {}), "('out.jpg', pred)\n", (1354, 1371), False, 'import cv2\n'), ((1419, 1439), 'megengine.load', 'mge.load', (['model_path'], {}), '(model_path)\n', (1427, 1439), True, 'import megengine as mge\n'), ((1450, 1490), 'official.vision.segmentation.deeplabv3plus.DeepLabV3Plus', 'DeepLabV3Plus', ([], {'class_num': 'cfg.NUM_CLASSES'}), '(class_num=cfg.NUM_CLASSES)\n', (1463, 1490), False, 'from official.vision.segmentation.deeplabv3plus import DeepLabV3Plus\n'), ((1645, 1682), 'megengine.jit.trace', 'jit.trace', ([], {'symbolic': '(True)', 'opt_level': '(2)'}), '(symbolic=True, opt_level=2)\n', (1654, 1682), True, 'import megengine.jit as jit\n'), ((1906, 1951), 'cv2.resize', 'cv2.resize', (['img', '(cfg.IMG_SIZE, cfg.IMG_SIZE)'], {}), '(img, (cfg.IMG_SIZE, cfg.IMG_SIZE))\n', (1916, 1951), False, 'import cv2\n'), ((2009, 2021), 'megengine.tensor', 'mge.tensor', ([], {}), '()\n', (2019, 2021), True, 'import megengine as mge\n'), ((2291, 2316), 'numpy.zeros', 'np.zeros', (['(orih, oriw, 3)'], {}), '((orih, oriw, 3))\n', (2299, 2316), True, 'import numpy as np\n'), ((2328, 2343), 'numpy.unique', 'np.unique', (['pred'], {}), '(pred)\n', (2337, 2343), True, 'import numpy as np\n'), ((1141, 1218), 'megengine.utils.http_download.download_from_url', 'download_from_url', (['"""https://data.megengine.org.cn/images/cat.jpg"""', '"""test.jpg"""'], {}), "('https://data.megengine.org.cn/images/cat.jpg', 'test.jpg')\n", (1158, 1218), False, 'from megengine.utils.http_download import download_from_url\n'), ((1233, 1255), 'cv2.imread', 'cv2.imread', (['"""test.jpg"""'], {}), "('test.jpg')\n", (1243, 1255), False, 'import cv2\n'), ((1280, 1307), 'cv2.imread', 'cv2.imread', (['args.image_path'], {}), '(args.image_path)\n', (1290, 1307), False, 'import cv2\n'), ((1843, 1864), 'numpy.array', 'np.array', (['cfg.IMG_STD'], {}), '(cfg.IMG_STD)\n', (1851, 1864), True, 'import numpy as np\n'), ((1817, 1839), 'numpy.array', 'np.array', (['cfg.IMG_MEAN'], {}), '(cfg.IMG_MEAN)\n', (1825, 1839), True, 'import numpy as np\n')]
|
"""
Finite element reference mappings.
"""
import numpy as nm
from sfepy import Config
from sfepy.base.base import get_default, output
from sfepy.base.mem_usage import raise_if_too_large
from sfepy.discrete.common.mappings import Mapping
from sfepy.discrete.common.extmods.mappings import CMapping
from sfepy.discrete import PolySpace
class FEMapping(Mapping):
"""
Base class for finite element mappings.
"""
def __init__(self, coors, conn, poly_space=None, gel=None, order=1):
self.coors = coors
self.conn = conn
try:
nm.take(self.coors, self.conn)
except IndexError:
output('coordinates shape: %s' % list(coors.shape))
output('connectivity: min: %d, max: %d' % (conn.min(), conn.max()))
msg = 'incompatible connectivity and coordinates (see above)'
raise IndexError(msg)
self.n_el, self.n_ep = conn.shape
self.dim = self.coors.shape[1]
if poly_space is None:
poly_space = PolySpace.any_from_args(None, gel, order,
base='lagrange',
force_bubble=False)
self.poly_space = poly_space
self.indices = slice(None)
def get_geometry(self):
"""
Return reference element geometry as a GeometryElement instance.
"""
return self.poly_space.geometry
def get_base(self, coors, diff=False):
"""
Get base functions or their gradient evaluated in given
coordinates.
"""
bf = self.poly_space.eval_base(coors, diff=diff)
return bf
def get_physical_qps(self, qp_coors):
"""
Get physical quadrature points corresponding to given reference
element quadrature points.
Returns
-------
qps : array
The physical quadrature points ordered element by element,
i.e. with shape (n_el, n_qp, dim).
"""
bf = self.get_base(qp_coors)
qps = nm.dot(nm.atleast_2d(bf.squeeze()), self.coors[self.conn])
# Reorder so that qps are really element by element.
qps = nm.ascontiguousarray(nm.swapaxes(qps, 0, 1))
return qps
class VolumeMapping(FEMapping):
"""
Mapping from reference domain to physical domain of the same space
dimension.
"""
def get_mapping(self, qp_coors, weights, poly_space=None, ori=None,
transform=None):
"""
Get the mapping for given quadrature points, weights, and
polynomial space.
Returns
-------
cmap : CMapping instance
The volume mapping.
"""
poly_space = get_default(poly_space, self.poly_space)
bf_g = self.get_base(qp_coors, diff=True)
ebf_g = poly_space.eval_base(qp_coors, diff=True, ori=ori,
force_axis=True, transform=transform)
size = ebf_g.nbytes * self.n_el
site_config = Config()
raise_if_too_large(size, site_config.refmap_memory_factor())
flag = (ori is not None) or (ebf_g.shape[0] > 1)
cmap = CMapping(self.n_el, qp_coors.shape[0], self.dim,
poly_space.n_nod, mode='volume', flag=flag)
cmap.describe(self.coors, self.conn, bf_g, ebf_g, weights)
return cmap
class SurfaceMapping(FEMapping):
"""
Mapping from reference domain to physical domain of the space
dimension higher by one.
"""
def set_basis_indices(self, indices):
"""
Set indices to cell-based basis that give the facet-based basis.
"""
self.indices = indices
def get_base(self, coors, diff=False):
"""
Get base functions or their gradient evaluated in given
coordinates.
"""
bf = self.poly_space.eval_base(coors, diff=diff)
ii = max(self.dim - 1, 1)
return nm.ascontiguousarray(bf[..., :ii:, self.indices])
def get_mapping(self, qp_coors, weights, poly_space=None, mode='surface'):
"""
Get the mapping for given quadrature points, weights, and
polynomial space.
Returns
-------
cmap : CMapping instance
The surface mapping.
"""
poly_space = get_default(poly_space, self.poly_space)
bf_g = self.get_base(qp_coors, diff=True)
if nm.allclose(bf_g, 0.0) and self.dim > 1:
raise ValueError('zero base function gradient!')
cmap = CMapping(self.n_el, qp_coors.shape[0], self.dim,
poly_space.n_nod, mode=mode)
cmap.describe(self.coors, self.conn, bf_g, None, weights)
if self.dim == 1:
# Fix normals.
ii = nm.where(self.conn == 0)[0]
cmap.normal[ii] *= -1.0
return cmap
|
[
"sfepy.base.base.get_default",
"sfepy.discrete.common.extmods.mappings.CMapping",
"sfepy.discrete.PolySpace.any_from_args",
"sfepy.Config"
] |
[((2744, 2784), 'sfepy.base.base.get_default', 'get_default', (['poly_space', 'self.poly_space'], {}), '(poly_space, self.poly_space)\n', (2755, 2784), False, 'from sfepy.base.base import get_default, output\n'), ((3041, 3049), 'sfepy.Config', 'Config', ([], {}), '()\n', (3047, 3049), False, 'from sfepy import Config\n'), ((3193, 3290), 'sfepy.discrete.common.extmods.mappings.CMapping', 'CMapping', (['self.n_el', 'qp_coors.shape[0]', 'self.dim', 'poly_space.n_nod'], {'mode': '"""volume"""', 'flag': 'flag'}), "(self.n_el, qp_coors.shape[0], self.dim, poly_space.n_nod, mode=\n 'volume', flag=flag)\n", (3201, 3290), False, 'from sfepy.discrete.common.extmods.mappings import CMapping\n'), ((3973, 4021), 'numpy.ascontiguousarray', 'nm.ascontiguousarray', (['bf[..., :ii, self.indices]'], {}), '(bf[..., :ii, self.indices])\n', (3993, 4021), True, 'import numpy as nm\n'), ((4339, 4379), 'sfepy.base.base.get_default', 'get_default', (['poly_space', 'self.poly_space'], {}), '(poly_space, self.poly_space)\n', (4350, 4379), False, 'from sfepy.base.base import get_default, output\n'), ((4560, 4637), 'sfepy.discrete.common.extmods.mappings.CMapping', 'CMapping', (['self.n_el', 'qp_coors.shape[0]', 'self.dim', 'poly_space.n_nod'], {'mode': 'mode'}), '(self.n_el, qp_coors.shape[0], self.dim, poly_space.n_nod, mode=mode)\n', (4568, 4637), False, 'from sfepy.discrete.common.extmods.mappings import CMapping\n'), ((575, 605), 'numpy.take', 'nm.take', (['self.coors', 'self.conn'], {}), '(self.coors, self.conn)\n', (582, 605), True, 'import numpy as nm\n'), ((1025, 1103), 'sfepy.discrete.PolySpace.any_from_args', 'PolySpace.any_from_args', (['None', 'gel', 'order'], {'base': '"""lagrange"""', 'force_bubble': '(False)'}), "(None, gel, order, base='lagrange', force_bubble=False)\n", (1048, 1103), False, 'from sfepy.discrete import PolySpace\n'), ((2220, 2242), 'numpy.swapaxes', 'nm.swapaxes', (['qps', '(0)', '(1)'], {}), '(qps, 0, 1)\n', (2231, 2242), True, 'import numpy as nm\n'), ((4442, 4464), 'numpy.allclose', 'nm.allclose', (['bf_g', '(0.0)'], {}), '(bf_g, 0.0)\n', (4453, 4464), True, 'import numpy as nm\n'), ((4798, 4822), 'numpy.where', 'nm.where', (['(self.conn == 0)'], {}), '(self.conn == 0)\n', (4806, 4822), True, 'import numpy as nm\n')]
|
from datetime import datetime, date
from typing import Optional
from fastapi import APIRouter, Depends
from sqlmodel import Field, SQLModel
from ...db import get_session
from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession
router = APIRouter()
class HistoryVpi(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
history_id: int
detail: str
vpi_method: str
velum_structure: str
tonsil_enlargement_right: str
tonsil_enlargement_left: str
adenoid_hypertrophy_percent: int
tonsilectomy_right: bool
tonsilectomy_left: bool
ademoidectomy: bool
tongue_tie: bool
tongue_tie_frenectomy: bool
veloadenoid_clodure: str
gap_type: str
gap_length: str
vpi: str
speech_therapy: bool
furlow_palatoplasty: bool
furlow_palatoplasty_date: date
sphincteroplasty: bool
sphicteroplasty_date: date
obturator: bool
obturator_date: date
created_at: datetime
updated_at: datetime
created_by: int
updated_by: Optional[int] = None
@router.post("/history_vpi", response_model=HistoryVpi)
async def create_history_vpi(history_vpi: HistoryVpi, session: AsyncSession = Depends(get_session)):
session.add(history_vpi)
await session.commit()
await session.refresh(history_vpi)
return history_vpi
@router.get("/history_vpi/{id}", response_model=HistoryVpi)
async def get_history_vpi(id: int, session: AsyncSession = Depends(get_session)):
history_vpis = await session.execute(select(HistoryVpi).where(HistoryVpi.id == id))
history_vpi = history_vpis.scalars().first()
return history_vpi
@router.put("/history_vpi/{id}", response_model=HistoryVpi)
async def update_history_vpi(id: int, session: AsyncSession = Depends(get_session)):
return None
@router.delete("/history_vpi/{id}")
async def delete_history_vpi(session: AsyncSession = Depends(get_session)):
return None
|
[
"sqlmodel.Field"
] |
[((261, 272), 'fastapi.APIRouter', 'APIRouter', ([], {}), '()\n', (270, 272), False, 'from fastapi import APIRouter, Depends\n'), ((339, 376), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (344, 376), False, 'from sqlmodel import Field, SQLModel\n'), ((1212, 1232), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (1219, 1232), False, 'from fastapi import APIRouter, Depends\n'), ((1474, 1494), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (1481, 1494), False, 'from fastapi import APIRouter, Depends\n'), ((1781, 1801), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (1788, 1801), False, 'from fastapi import APIRouter, Depends\n'), ((1911, 1931), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (1918, 1931), False, 'from fastapi import APIRouter, Depends\n'), ((1538, 1556), 'sqlalchemy.select', 'select', (['HistoryVpi'], {}), '(HistoryVpi)\n', (1544, 1556), False, 'from sqlalchemy import select\n')]
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import bisect
import multiprocessing
import os
import time
# pylint: disable=import-error
import model as resnet_model
import megengine
import megengine.autodiff as autodiff
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.optimizer as optim
logging = megengine.logger.get_logger()
def main():
parser = argparse.ArgumentParser(description="MegEngine ImageNet Training")
parser.add_argument("-d", "--data", metavar="DIR", help="path to imagenet dataset")
parser.add_argument(
"-a",
"--arch",
default="resnet50",
help="model architecture (default: resnet50)",
)
parser.add_argument(
"-n",
"--ngpus",
default=None,
type=int,
help="number of GPUs per node (default: None, use all available GPUs)",
)
parser.add_argument(
"--save",
metavar="DIR",
default="output",
help="path to save checkpoint and log",
)
parser.add_argument(
"--epochs",
default=10,
type=int,
help="number of total epochs to run (default: 10)",
)
parser.add_argument(
"-b",
"--batch-size",
metavar="SIZE",
default=64,
type=int,
help="batch size for single GPU (default: 64)",
)
parser.add_argument(
"--lr",
"--learning-rate",
metavar="LR",
default=0.025,
type=float,
help="learning rate for single GPU (default: 0.025)",
)
parser.add_argument(
"--momentum", default=0.9, type=float, help="momentum (default: 0.9)"
)
parser.add_argument(
"--weight-decay", default=1e-4, type=float, help="weight decay (default: 1e-4)"
)
parser.add_argument("-j", "--workers", default=2, type=int)
parser.add_argument(
"-p",
"--print-freq",
default=20,
type=int,
metavar="N",
help="print frequency (default: 20)",
)
parser.add_argument("--dist-addr", default="localhost")
parser.add_argument("--dist-port", default=23456, type=int)
parser.add_argument("--world-size", default=1, type=int)
parser.add_argument("--rank", default=0, type=int)
parser.add_argument(
"--enable-dtr",
dest="enable_dtr",
action="store_true",
help="Enable DTR")
args = parser.parse_args()
    # create server if this is the master node
if args.rank <= 0:
server = dist.Server(port=args.dist_port) # pylint: disable=unused-variable # noqa: F841
# get device count
with multiprocessing.Pool(1) as pool:
ngpus_per_node, _ = pool.map(megengine.get_device_count, ["gpu", "cpu"])
if args.ngpus:
ngpus_per_node = args.ngpus
# launch processes
procs = []
for local_rank in range(ngpus_per_node):
p = multiprocessing.Process(
target=worker,
kwargs=dict(
rank=args.rank * ngpus_per_node + local_rank,
world_size=args.world_size * ngpus_per_node,
ngpus_per_node=ngpus_per_node,
args=args,
),
)
p.start()
procs.append(p)
# join processes
for p in procs:
p.join()
def worker(rank, world_size, ngpus_per_node, args):
# pylint: disable=too-many-statements
# enable DTR
if args.enable_dtr:
from megengine.utils.dtr import DTR
ds = DTR(memory_budget=5*1024**3)
if rank == 0:
os.makedirs(os.path.join(args.save, args.arch), exist_ok=True)
megengine.logger.set_log_file(os.path.join(args.save, args.arch, "log.txt"))
# init process group
if world_size > 1:
dist.init_process_group(
master_ip=args.dist_addr,
port=args.dist_port,
world_size=world_size,
rank=rank,
device=rank % ngpus_per_node,
backend="nccl",
)
logging.info(
"init process group rank %d / %d", dist.get_rank(), dist.get_world_size()
)
# build dataset
train_dataloader, valid_dataloader = build_dataset(args)
train_queue = iter(train_dataloader) # infinite
steps_per_epoch = 1280000 // (world_size * args.batch_size)
# build model
model = resnet_model.__dict__[args.arch]()
# Sync parameters
if world_size > 1:
dist.bcast_list_(model.parameters(), dist.WORLD)
# Autodiff gradient manager
gm = autodiff.GradManager().attach(
model.parameters(),
callbacks=dist.make_allreduce_cb("SUM") if world_size > 1 else None,
)
# Optimizer
opt = optim.SGD(
model.parameters(),
lr=args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay * world_size, # scale weight decay in "SUM" mode
)
# train and valid func
def train_step(image, label):
with gm:
logits = model(image)
loss = F.nn.cross_entropy(logits, label)
acc1, acc5 = F.topk_accuracy(logits, label, topk=(1, 5))
gm.backward(loss)
opt.step().clear_grad()
return loss, acc1, acc5
def valid_step(image, label):
logits = model(image)
loss = F.nn.cross_entropy(logits, label)
acc1, acc5 = F.topk_accuracy(logits, label, topk=(1, 5))
# calculate mean values
if world_size > 1:
loss = F.distributed.all_reduce_sum(loss) / world_size
acc1 = F.distributed.all_reduce_sum(acc1) / world_size
acc5 = F.distributed.all_reduce_sum(acc5) / world_size
return loss, acc1, acc5
# multi-step learning rate scheduler with warmup
def adjust_learning_rate(step):
lr = args.lr * 0.1 ** bisect.bisect_right(
[30 * steps_per_epoch, 60 * steps_per_epoch, 80 * steps_per_epoch], step
)
if step < 5 * steps_per_epoch: # warmup
lr = args.lr * (step / (5 * steps_per_epoch))
for param_group in opt.param_groups:
param_group["lr"] = lr
return lr
# start training
objs = AverageMeter("Loss")
top1 = AverageMeter("Acc@1")
top5 = AverageMeter("Acc@5")
clck = AverageMeter("Time")
for step in range(0, args.epochs * steps_per_epoch):
lr = adjust_learning_rate(step)
t = time.time()
image, label = next(train_queue)
image = megengine.tensor(image, dtype="float32")
label = megengine.tensor(label, dtype="int32")
loss, acc1, acc5 = train_step(image, label)
objs.update(loss.item())
top1.update(100 * acc1.item())
top5.update(100 * acc5.item())
clck.update(time.time() - t)
if step % args.print_freq == 0 and dist.get_rank() == 0:
logging.info(
"Epoch %d Step %d, LR %.4f, %s %s %s %s",
step // steps_per_epoch,
step,
lr,
objs,
top1,
top5,
clck,
)
objs.reset()
top1.reset()
top5.reset()
clck.reset()
if (step + 1) % steps_per_epoch == 0:
model.eval()
_, valid_acc1, valid_acc5 = valid(valid_step, valid_dataloader, args)
model.train()
logging.info(
"Epoch %d Test Acc@1 %.3f, Acc@5 %.3f",
(step + 1) // steps_per_epoch,
valid_acc1,
valid_acc5,
)
megengine.save(
{
"epoch": (step + 1) // steps_per_epoch,
"state_dict": model.state_dict(),
},
os.path.join(args.save, args.arch, "checkpoint.pkl"),
)
def valid(func, data_queue, args):
objs = AverageMeter("Loss")
top1 = AverageMeter("Acc@1")
top5 = AverageMeter("Acc@5")
clck = AverageMeter("Time")
t = time.time()
for step, (image, label) in enumerate(data_queue):
image = megengine.tensor(image, dtype="float32")
label = megengine.tensor(label, dtype="int32")
n = image.shape[0]
loss, acc1, acc5 = func(image, label)
objs.update(loss.item(), n)
top1.update(100 * acc1.item(), n)
top5.update(100 * acc5.item(), n)
clck.update(time.time() - t, n)
t = time.time()
if step % args.print_freq == 0 and dist.get_rank() == 0:
logging.info("Test step %d, %s %s %s %s", step, objs, top1, top5, clck)
return objs.avg, top1.avg, top5.avg
def build_dataset(args):
train_dataset = data.dataset.ImageNet(args.data, train=True)
train_sampler = data.Infinite(
data.RandomSampler(train_dataset, batch_size=args.batch_size, drop_last=True)
)
train_dataloader = data.DataLoader(
train_dataset,
sampler=train_sampler,
transform=T.Compose(
[ # Baseline Augmentation for small models
T.RandomResizedCrop(224),
T.RandomHorizontalFlip(),
T.Normalize(
mean=[103.530, 116.280, 123.675], std=[57.375, 57.120, 58.395]
), # BGR
T.ToMode("CHW"),
]
)
if args.arch in ("resnet18", "resnet34")
else T.Compose(
[ # Facebook Augmentation for large models
T.RandomResizedCrop(224),
T.RandomHorizontalFlip(),
T.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4),
T.Normalize(
mean=[103.530, 116.280, 123.675], std=[57.375, 57.120, 58.395]
), # BGR
T.ToMode("CHW"),
]
),
num_workers=args.workers,
)
valid_dataset = data.dataset.ImageNet(args.data, train=False)
valid_sampler = data.SequentialSampler(
valid_dataset, batch_size=100, drop_last=False
)
valid_dataloader = data.DataLoader(
valid_dataset,
sampler=valid_sampler,
transform=T.Compose(
[
T.Resize(256),
T.CenterCrop(224),
T.Normalize(
mean=[103.530, 116.280, 123.675], std=[57.375, 57.120, 58.395]
), # BGR
T.ToMode("CHW"),
]
),
num_workers=args.workers,
)
return train_dataloader, valid_dataloader
class AverageMeter:
"""Computes and stores the average and current value"""
def __init__(self, name, fmt=":.3f"):
self.name = name
self.fmt = fmt
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __str__(self):
fmtstr = "{name} {val" + self.fmt + "} ({avg" + self.fmt + "})"
return fmtstr.format(**self.__dict__)
if __name__ == "__main__":
main()
|
[
"megengine.functional.topk_accuracy",
"megengine.data.transform.RandomHorizontalFlip",
"megengine.distributed.init_process_group",
"megengine.utils.dtr.DTR",
"megengine.distributed.get_rank",
"megengine.functional.distributed.all_reduce_sum",
"megengine.data.transform.CenterCrop",
"megengine.distributed.get_world_size",
"megengine.tensor",
"megengine.data.transform.Resize",
"megengine.functional.nn.cross_entropy",
"megengine.data.SequentialSampler",
"megengine.distributed.Server",
"megengine.data.transform.Normalize",
"megengine.logger.get_logger",
"megengine.data.transform.ToMode",
"megengine.distributed.make_allreduce_cb",
"megengine.data.RandomSampler",
"megengine.data.transform.RandomResizedCrop",
"megengine.data.transform.ColorJitter",
"megengine.data.dataset.ImageNet",
"megengine.autodiff.GradManager"
] |
[((753, 782), 'megengine.logger.get_logger', 'megengine.logger.get_logger', ([], {}), '()\n', (780, 782), False, 'import megengine\n'), ((809, 875), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""MegEngine ImageNet Training"""'}), "(description='MegEngine ImageNet Training')\n", (832, 875), False, 'import argparse\n'), ((8402, 8413), 'time.time', 'time.time', ([], {}), '()\n', (8411, 8413), False, 'import time\n'), ((9079, 9123), 'megengine.data.dataset.ImageNet', 'data.dataset.ImageNet', (['args.data'], {'train': '(True)'}), '(args.data, train=True)\n', (9100, 9123), True, 'import megengine.data as data\n'), ((10255, 10300), 'megengine.data.dataset.ImageNet', 'data.dataset.ImageNet', (['args.data'], {'train': '(False)'}), '(args.data, train=False)\n', (10276, 10300), True, 'import megengine.data as data\n'), ((10321, 10391), 'megengine.data.SequentialSampler', 'data.SequentialSampler', (['valid_dataset'], {'batch_size': '(100)', 'drop_last': '(False)'}), '(valid_dataset, batch_size=100, drop_last=False)\n', (10343, 10391), True, 'import megengine.data as data\n'), ((2923, 2955), 'megengine.distributed.Server', 'dist.Server', ([], {'port': 'args.dist_port'}), '(port=args.dist_port)\n', (2934, 2955), True, 'import megengine.distributed as dist\n'), ((3038, 3061), 'multiprocessing.Pool', 'multiprocessing.Pool', (['(1)'], {}), '(1)\n', (3058, 3061), False, 'import multiprocessing\n'), ((3897, 3929), 'megengine.utils.dtr.DTR', 'DTR', ([], {'memory_budget': '(5 * 1024 ** 3)'}), '(memory_budget=5 * 1024 ** 3)\n', (3900, 3929), False, 'from megengine.utils.dtr import DTR\n'), ((4156, 4315), 'megengine.distributed.init_process_group', 'dist.init_process_group', ([], {'master_ip': 'args.dist_addr', 'port': 'args.dist_port', 'world_size': 'world_size', 'rank': 'rank', 'device': '(rank % ngpus_per_node)', 'backend': '"""nccl"""'}), "(master_ip=args.dist_addr, port=args.dist_port,\n world_size=world_size, rank=rank, device=rank % ngpus_per_node, backend\n ='nccl')\n", (4179, 4315), True, 'import megengine.distributed as dist\n'), ((5686, 5719), 'megengine.functional.nn.cross_entropy', 'F.nn.cross_entropy', (['logits', 'label'], {}), '(logits, label)\n', (5704, 5719), True, 'import megengine.functional as F\n'), ((5741, 5784), 'megengine.functional.topk_accuracy', 'F.topk_accuracy', (['logits', 'label'], {'topk': '(1, 5)'}), '(logits, label, topk=(1, 5))\n', (5756, 5784), True, 'import megengine.functional as F\n'), ((6781, 6792), 'time.time', 'time.time', ([], {}), '()\n', (6790, 6792), False, 'import time\n'), ((6851, 6891), 'megengine.tensor', 'megengine.tensor', (['image'], {'dtype': '"""float32"""'}), "(image, dtype='float32')\n", (6867, 6891), False, 'import megengine\n'), ((6908, 6946), 'megengine.tensor', 'megengine.tensor', (['label'], {'dtype': '"""int32"""'}), "(label, dtype='int32')\n", (6924, 6946), False, 'import megengine\n'), ((8485, 8525), 'megengine.tensor', 'megengine.tensor', (['image'], {'dtype': '"""float32"""'}), "(image, dtype='float32')\n", (8501, 8525), False, 'import megengine\n'), ((8542, 8580), 'megengine.tensor', 'megengine.tensor', (['label'], {'dtype': '"""int32"""'}), "(label, dtype='int32')\n", (8558, 8580), False, 'import megengine\n'), ((8829, 8840), 'time.time', 'time.time', ([], {}), '()\n', (8838, 8840), False, 'import time\n'), ((9167, 9244), 'megengine.data.RandomSampler', 'data.RandomSampler', (['train_dataset'], {'batch_size': 'args.batch_size', 'drop_last': '(True)'}), '(train_dataset, batch_size=args.batch_size, 
drop_last=True)\n', (9185, 9244), True, 'import megengine.data as data\n'), ((3964, 3998), 'os.path.join', 'os.path.join', (['args.save', 'args.arch'], {}), '(args.save, args.arch)\n', (3976, 3998), False, 'import os\n'), ((4053, 4098), 'os.path.join', 'os.path.join', (['args.save', 'args.arch', '"""log.txt"""'], {}), "(args.save, args.arch, 'log.txt')\n", (4065, 4098), False, 'import os\n'), ((4459, 4474), 'megengine.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (4472, 4474), True, 'import megengine.distributed as dist\n'), ((4476, 4497), 'megengine.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (4495, 4497), True, 'import megengine.distributed as dist\n'), ((4918, 4940), 'megengine.autodiff.GradManager', 'autodiff.GradManager', ([], {}), '()\n', (4938, 4940), True, 'import megengine.autodiff as autodiff\n'), ((5405, 5438), 'megengine.functional.nn.cross_entropy', 'F.nn.cross_entropy', (['logits', 'label'], {}), '(logits, label)\n', (5423, 5438), True, 'import megengine.functional as F\n'), ((5464, 5507), 'megengine.functional.topk_accuracy', 'F.topk_accuracy', (['logits', 'label'], {'topk': '(1, 5)'}), '(logits, label, topk=(1, 5))\n', (5479, 5507), True, 'import megengine.functional as F\n'), ((4995, 5024), 'megengine.distributed.make_allreduce_cb', 'dist.make_allreduce_cb', (['"""SUM"""'], {}), "('SUM')\n", (5017, 5024), True, 'import megengine.distributed as dist\n'), ((5863, 5897), 'megengine.functional.distributed.all_reduce_sum', 'F.distributed.all_reduce_sum', (['loss'], {}), '(loss)\n', (5891, 5897), True, 'import megengine.functional as F\n'), ((5930, 5964), 'megengine.functional.distributed.all_reduce_sum', 'F.distributed.all_reduce_sum', (['acc1'], {}), '(acc1)\n', (5958, 5964), True, 'import megengine.functional as F\n'), ((5997, 6031), 'megengine.functional.distributed.all_reduce_sum', 'F.distributed.all_reduce_sum', (['acc5'], {}), '(acc5)\n', (6025, 6031), True, 'import megengine.functional as F\n'), ((6197, 6294), 'bisect.bisect_right', 'bisect.bisect_right', (['[30 * steps_per_epoch, 60 * steps_per_epoch, 80 * steps_per_epoch]', 'step'], {}), '([30 * steps_per_epoch, 60 * steps_per_epoch, 80 *\n steps_per_epoch], step)\n', (6216, 6294), False, 'import bisect\n'), ((7132, 7143), 'time.time', 'time.time', ([], {}), '()\n', (7141, 7143), False, 'import time\n'), ((7193, 7208), 'megengine.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (7206, 7208), True, 'import megengine.distributed as dist\n'), ((8158, 8210), 'os.path.join', 'os.path.join', (['args.save', 'args.arch', '"""checkpoint.pkl"""'], {}), "(args.save, args.arch, 'checkpoint.pkl')\n", (8170, 8210), False, 'import os\n'), ((8797, 8808), 'time.time', 'time.time', ([], {}), '()\n', (8806, 8808), False, 'import time\n'), ((8885, 8900), 'megengine.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (8898, 8900), True, 'import megengine.distributed as dist\n'), ((10559, 10572), 'megengine.data.transform.Resize', 'T.Resize', (['(256)'], {}), '(256)\n', (10567, 10572), True, 'import megengine.data.transform as T\n'), ((10590, 10607), 'megengine.data.transform.CenterCrop', 'T.CenterCrop', (['(224)'], {}), '(224)\n', (10602, 10607), True, 'import megengine.data.transform as T\n'), ((10625, 10697), 'megengine.data.transform.Normalize', 'T.Normalize', ([], {'mean': '[103.53, 116.28, 123.675]', 'std': '[57.375, 57.12, 58.395]'}), '(mean=[103.53, 116.28, 123.675], std=[57.375, 57.12, 58.395])\n', (10636, 10697), True, 'import megengine.data.transform as T\n'), ((10763, 10778), 
'megengine.data.transform.ToMode', 'T.ToMode', (['"""CHW"""'], {}), "('CHW')\n", (10771, 10778), True, 'import megengine.data.transform as T\n'), ((9446, 9470), 'megengine.data.transform.RandomResizedCrop', 'T.RandomResizedCrop', (['(224)'], {}), '(224)\n', (9465, 9470), True, 'import megengine.data.transform as T\n'), ((9488, 9512), 'megengine.data.transform.RandomHorizontalFlip', 'T.RandomHorizontalFlip', ([], {}), '()\n', (9510, 9512), True, 'import megengine.data.transform as T\n'), ((9530, 9602), 'megengine.data.transform.Normalize', 'T.Normalize', ([], {'mean': '[103.53, 116.28, 123.675]', 'std': '[57.375, 57.12, 58.395]'}), '(mean=[103.53, 116.28, 123.675], std=[57.375, 57.12, 58.395])\n', (9541, 9602), True, 'import megengine.data.transform as T\n'), ((9668, 9683), 'megengine.data.transform.ToMode', 'T.ToMode', (['"""CHW"""'], {}), "('CHW')\n", (9676, 9683), True, 'import megengine.data.transform as T\n'), ((9854, 9878), 'megengine.data.transform.RandomResizedCrop', 'T.RandomResizedCrop', (['(224)'], {}), '(224)\n', (9873, 9878), True, 'import megengine.data.transform as T\n'), ((9896, 9920), 'megengine.data.transform.RandomHorizontalFlip', 'T.RandomHorizontalFlip', ([], {}), '()\n', (9918, 9920), True, 'import megengine.data.transform as T\n'), ((9938, 9997), 'megengine.data.transform.ColorJitter', 'T.ColorJitter', ([], {'brightness': '(0.4)', 'contrast': '(0.4)', 'saturation': '(0.4)'}), '(brightness=0.4, contrast=0.4, saturation=0.4)\n', (9951, 9997), True, 'import megengine.data.transform as T\n'), ((10015, 10087), 'megengine.data.transform.Normalize', 'T.Normalize', ([], {'mean': '[103.53, 116.28, 123.675]', 'std': '[57.375, 57.12, 58.395]'}), '(mean=[103.53, 116.28, 123.675], std=[57.375, 57.12, 58.395])\n', (10026, 10087), True, 'import megengine.data.transform as T\n'), ((10153, 10168), 'megengine.data.transform.ToMode', 'T.ToMode', (['"""CHW"""'], {}), "('CHW')\n", (10161, 10168), True, 'import megengine.data.transform as T\n')]
|
from typing import Optional, Dict, List, Any, Union
import datetime as dt
from sqlmodel import Field, Session, SQLModel, create_engine, select
import threading as th
import queue
# ~~~ Database ~~~~~~~~~~~~~~~
class Database:
def __init__(self, uri: str):
self.engine = create_engine(uri)
SQLModel.metadata.create_all(self.engine)
def create_all(self, items: List[SQLModel]):
with Session(self.engine) as session:
for item in items:
session.add(item)
session.commit()
def get_by_id(self, id: Union[str, int], model: SQLModel):
with Session(self.engine) as session:
stmt = select(model).where(model.id == id)
return session.exec(stmt).first()
def get_by_field(self, key: str, value: Any, model: SQLModel):
stmt = select(model).where(getattr(model, key) == value)
print(stmt)
return self.exec(stmt)
def exec(self, stmt: str, params = {}):
with Session(self.engine) as session:
return session.exec(stmt, params=params).all()
class DatabaseWorker(th.Thread):
def __init__(self,
uri: str,
queue: queue.Queue,
batch: int = None,
timeout: int = 10
):
super().__init__()
self.q = queue
self.db = None
self.uri = uri
self.timeout = timeout
self.batch = batch
def run(self):
self.db = Database(self.uri)
while True:
cache = []
try:
cache.append(self.q.get(timeout=self.timeout))
if self.batch:
if len(cache) % self.batch == 0:
self.db.create_all(cache)
cache = []
else:
cache = []
except queue.Empty:
self.db.create_all(cache)
break
# ~~~ Models ~~~~~~~~~~~~~~~~~
class Document(SQLModel, table=True):
id: str = Field(primary_key=True)
name: str
href: str
date: dt.datetime
text: Optional[str] = None
date_collected: dt.datetime
collected_by: str
class Paragraph(SQLModel, table=True):
id: str = Field(primary_key=True)
text: str
document_id: str = Field(foreign_key="document.id")
sentiment: str
sent_score: float
class Entity(SQLModel, table=True):
id: str = Field(primary_key=True)
name: str
description: Optional[str]
class EntityMention(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
text: str
score: Optional[float]
label: str
start: int
end: int
paragraph_id: str = Field(foreign_key="paragraph.id")
kb_id: Optional[str] = Field(foreign_key="entity.id")
class EntityFeature(SQLModel, table=True):
id: int = Field(primary_key=True)
kb_id: str = Field(foreign_key="entity.id")
key: str
value: str
|
[
"sqlmodel.create_engine",
"sqlmodel.select",
"sqlmodel.Session",
"sqlmodel.SQLModel.metadata.create_all",
"sqlmodel.Field"
] |
[((2043, 2066), 'sqlmodel.Field', 'Field', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (2048, 2066), False, 'from sqlmodel import Field, Session, SQLModel, create_engine, select\n'), ((2256, 2279), 'sqlmodel.Field', 'Field', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (2261, 2279), False, 'from sqlmodel import Field, Session, SQLModel, create_engine, select\n'), ((2317, 2349), 'sqlmodel.Field', 'Field', ([], {'foreign_key': '"""document.id"""'}), "(foreign_key='document.id')\n", (2322, 2349), False, 'from sqlmodel import Field, Session, SQLModel, create_engine, select\n'), ((2442, 2465), 'sqlmodel.Field', 'Field', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (2447, 2465), False, 'from sqlmodel import Field, Session, SQLModel, create_engine, select\n'), ((2579, 2616), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (2584, 2616), False, 'from sqlmodel import Field, Session, SQLModel, create_engine, select\n'), ((2725, 2758), 'sqlmodel.Field', 'Field', ([], {'foreign_key': '"""paragraph.id"""'}), "(foreign_key='paragraph.id')\n", (2730, 2758), False, 'from sqlmodel import Field, Session, SQLModel, create_engine, select\n'), ((2786, 2816), 'sqlmodel.Field', 'Field', ([], {'foreign_key': '"""entity.id"""'}), "(foreign_key='entity.id')\n", (2791, 2816), False, 'from sqlmodel import Field, Session, SQLModel, create_engine, select\n'), ((2875, 2898), 'sqlmodel.Field', 'Field', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (2880, 2898), False, 'from sqlmodel import Field, Session, SQLModel, create_engine, select\n'), ((2916, 2946), 'sqlmodel.Field', 'Field', ([], {'foreign_key': '"""entity.id"""'}), "(foreign_key='entity.id')\n", (2921, 2946), False, 'from sqlmodel import Field, Session, SQLModel, create_engine, select\n'), ((284, 302), 'sqlmodel.create_engine', 'create_engine', (['uri'], {}), '(uri)\n', (297, 302), False, 'from sqlmodel import Field, Session, SQLModel, create_engine, select\n'), ((311, 352), 'sqlmodel.SQLModel.metadata.create_all', 'SQLModel.metadata.create_all', (['self.engine'], {}), '(self.engine)\n', (339, 352), False, 'from sqlmodel import Field, Session, SQLModel, create_engine, select\n'), ((416, 436), 'sqlmodel.Session', 'Session', (['self.engine'], {}), '(self.engine)\n', (423, 436), False, 'from sqlmodel import Field, Session, SQLModel, create_engine, select\n'), ((620, 640), 'sqlmodel.Session', 'Session', (['self.engine'], {}), '(self.engine)\n', (627, 640), False, 'from sqlmodel import Field, Session, SQLModel, create_engine, select\n'), ((1004, 1024), 'sqlmodel.Session', 'Session', (['self.engine'], {}), '(self.engine)\n', (1011, 1024), False, 'from sqlmodel import Field, Session, SQLModel, create_engine, select\n'), ((841, 854), 'sqlmodel.select', 'select', (['model'], {}), '(model)\n', (847, 854), False, 'from sqlmodel import Field, Session, SQLModel, create_engine, select\n'), ((672, 685), 'sqlmodel.select', 'select', (['model'], {}), '(model)\n', (678, 685), False, 'from sqlmodel import Field, Session, SQLModel, create_engine, select\n')]
|
import os
import math
import argparse
from multiprocessing import Process, Queue
from tqdm import tqdm
import numpy as np
import megengine as mge
from megengine import jit
from config import config
import network
import dataset
import misc_utils
if_set_nms = True
def eval_all(args):
# model_path
saveDir = config.model_dir
evalDir = config.eval_dir
misc_utils.ensure_dir(evalDir)
model_file = os.path.join(saveDir,
'epoch_{}.pkl'.format(args.resume_weights))
assert os.path.exists(model_file)
# load data
records = misc_utils.load_json_lines(config.eval_source)
# multiprocessing
num_records = len(records)
num_devs = args.devices
num_image = math.ceil(num_records / num_devs)
result_queue = Queue(1000)
procs = []
all_results = []
for i in range(num_devs):
start = i * num_image
end = min(start + num_image, num_records)
split_records = records[start:end]
proc = Process(target=inference, args=(
model_file, i, split_records, result_queue))
proc.start()
procs.append(proc)
pbar = tqdm(total=num_records, ncols=50)
for i in range(num_records):
t = result_queue.get()
all_results.append(t)
pbar.update(1)
for p in procs:
p.join()
fpath = os.path.join(evalDir, 'dump-{}.json'.format(args.resume_weights))
misc_utils.save_json_lines(all_results, fpath)
def inference(model_file, device, records, result_queue):
@jit.trace(symbolic=False)
def val_func():
pred_boxes = net(net.inputs)
return pred_boxes
net = network.Network()
net.eval()
check_point = mge.load(model_file)
net.load_state_dict(check_point['state_dict'])
for record in records:
np.set_printoptions(precision=2, suppress=True)
net.eval()
image, gt_boxes, im_info, ID = get_data(record, device)
net.inputs["image"].set_value(image.astype(np.float32))
net.inputs["im_info"].set_value(im_info)
pred_boxes = val_func().numpy()
# nms
if if_set_nms:
from set_nms_utils import set_cpu_nms
n = pred_boxes.shape[0] // 2
idents = np.tile(np.arange(n)[:,None], (1, 2)).reshape(-1, 1)
pred_boxes = np.hstack((pred_boxes, idents))
keep = pred_boxes[:, -2] > 0.05
pred_boxes = pred_boxes[keep]
keep = set_cpu_nms(pred_boxes, 0.5)
pred_boxes = pred_boxes[keep][:, :-1]
else:
from set_nms_utils import cpu_nms
keep = pred_boxes[:, -1] > 0.05
pred_boxes = pred_boxes[keep]
keep = cpu_nms(pred_boxes, 0.5)
pred_boxes = pred_boxes[keep]
result_dict = dict(ID=ID, height=int(im_info[0, -2]), width=int(im_info[0, -1]),
dtboxes=boxes_dump(pred_boxes, False),
gtboxes=boxes_dump(gt_boxes, True))
result_queue.put_nowait(result_dict)
def boxes_dump(boxes, is_gt):
result = []
boxes = boxes.tolist()
for box in boxes:
if is_gt:
box_dict = {}
box_dict['box'] = [box[0], box[1], box[2]-box[0], box[3]-box[1]]
box_dict['tag'] = box[-1]
else:
box_dict = {}
box_dict['box'] = [box[0], box[1], box[2]-box[0], box[3]-box[1]]
box_dict['tag'] = 1
box_dict['score'] = box[-1]
result.append(box_dict)
return result
def get_data(record, device):
data = dataset.val_dataset(record)
image, gt_boxes, ID = \
data['data'], data['boxes'], data['ID']
if config.eval_resize == False:
resized_img, scale = image, 1
else:
resized_img, scale = dataset.resize_img_by_short_and_max_size(
image, config.eval_image_short_size, config.eval_image_max_size)
original_height, original_width = image.shape[0:2]
height, width = resized_img.shape[0:2]
transposed_img = np.ascontiguousarray(
resized_img.transpose(2, 0, 1)[None, :, :, :],
dtype=np.float32)
im_info = np.array([height, width, scale, original_height, original_width],
dtype=np.float32)[None, :]
return transposed_img, gt_boxes, im_info, ID
def run_test():
parser = argparse.ArgumentParser()
parser.add_argument('--resume_weights', '-r', default=None, type=str)
parser.add_argument('--devices', '-d', default=1, type=int)
args = parser.parse_args()
eval_all(args)
if __name__ == '__main__':
run_test()
|
[
"megengine.jit.trace",
"megengine.load"
] |
[((370, 400), 'misc_utils.ensure_dir', 'misc_utils.ensure_dir', (['evalDir'], {}), '(evalDir)\n', (391, 400), False, 'import misc_utils\n'), ((508, 534), 'os.path.exists', 'os.path.exists', (['model_file'], {}), '(model_file)\n', (522, 534), False, 'import os\n'), ((565, 611), 'misc_utils.load_json_lines', 'misc_utils.load_json_lines', (['config.eval_source'], {}), '(config.eval_source)\n', (591, 611), False, 'import misc_utils\n'), ((709, 742), 'math.ceil', 'math.ceil', (['(num_records / num_devs)'], {}), '(num_records / num_devs)\n', (718, 742), False, 'import math\n'), ((762, 773), 'multiprocessing.Queue', 'Queue', (['(1000)'], {}), '(1000)\n', (767, 773), False, 'from multiprocessing import Process, Queue\n'), ((1131, 1164), 'tqdm.tqdm', 'tqdm', ([], {'total': 'num_records', 'ncols': '(50)'}), '(total=num_records, ncols=50)\n', (1135, 1164), False, 'from tqdm import tqdm\n'), ((1401, 1447), 'misc_utils.save_json_lines', 'misc_utils.save_json_lines', (['all_results', 'fpath'], {}), '(all_results, fpath)\n', (1427, 1447), False, 'import misc_utils\n'), ((1512, 1537), 'megengine.jit.trace', 'jit.trace', ([], {'symbolic': '(False)'}), '(symbolic=False)\n', (1521, 1537), False, 'from megengine import jit\n'), ((1631, 1648), 'network.Network', 'network.Network', ([], {}), '()\n', (1646, 1648), False, 'import network\n'), ((1682, 1702), 'megengine.load', 'mge.load', (['model_file'], {}), '(model_file)\n', (1690, 1702), True, 'import megengine as mge\n'), ((3529, 3556), 'dataset.val_dataset', 'dataset.val_dataset', (['record'], {}), '(record)\n', (3548, 3556), False, 'import dataset\n'), ((4290, 4315), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4313, 4315), False, 'import argparse\n'), ((978, 1054), 'multiprocessing.Process', 'Process', ([], {'target': 'inference', 'args': '(model_file, i, split_records, result_queue)'}), '(target=inference, args=(model_file, i, split_records, result_queue))\n', (985, 1054), False, 'from multiprocessing import Process, Queue\n'), ((1789, 1836), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(2)', 'suppress': '(True)'}), '(precision=2, suppress=True)\n', (1808, 1836), True, 'import numpy as np\n'), ((3754, 3864), 'dataset.resize_img_by_short_and_max_size', 'dataset.resize_img_by_short_and_max_size', (['image', 'config.eval_image_short_size', 'config.eval_image_max_size'], {}), '(image, config.\n eval_image_short_size, config.eval_image_max_size)\n', (3794, 3864), False, 'import dataset\n'), ((4110, 4198), 'numpy.array', 'np.array', (['[height, width, scale, original_height, original_width]'], {'dtype': 'np.float32'}), '([height, width, scale, original_height, original_width], dtype=np.\n float32)\n', (4118, 4198), True, 'import numpy as np\n'), ((2300, 2331), 'numpy.hstack', 'np.hstack', (['(pred_boxes, idents)'], {}), '((pred_boxes, idents))\n', (2309, 2331), True, 'import numpy as np\n'), ((2437, 2465), 'set_nms_utils.set_cpu_nms', 'set_cpu_nms', (['pred_boxes', '(0.5)'], {}), '(pred_boxes, 0.5)\n', (2448, 2465), False, 'from set_nms_utils import set_cpu_nms\n'), ((2685, 2709), 'set_nms_utils.cpu_nms', 'cpu_nms', (['pred_boxes', '(0.5)'], {}), '(pred_boxes, 0.5)\n', (2692, 2709), False, 'from set_nms_utils import cpu_nms\n'), ((2230, 2242), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (2239, 2242), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine as mge
import numpy as np
from megengine import get_logger as mge_get_logger
mge_version = mge.__version__
if mge_version <= "0.6.0":
# pylint: disable=import-error, no-name-in-module
import megengine._internal as mgb
from megengine._internal import cgtools
else:
import megengine.core.tensor.megbrain_graph as G
import megengine.core._imperative_rt as rt
import megengine.utils.comp_graph_tools as cgtools
if mge_version <= "1.1.0":
from megengine.core.tensor.raw_tensor import ( # pylint: disable=no-name-in-module,import-error
as_raw_tensor as Tensor,
)
else:
from megengine.tensor import Tensor
def get_logger(*args):
return mge_get_logger(*args)
def get_mge_version():
return mge_version
def get_symvar_value(sym_var):
if mge_version <= "0.6.0":
if sym_var.inferred_value is not None:
val = sym_var.inferred_value
return val
else:
cg = sym_var.owner_graph
func = cg.compile_outonly(sym_var)
val = func()
return val
else:
if sym_var.value is not None:
return sym_var.value
else:
out_node = G.ValueOutputNode(sym_var)
cg = out_node.outputs[0].graph
func = cg.compile(out_node.outputs)
func.execute()
return out_node.get_value()
def isnum(x):
return isinstance(x, (int, float))
def isconst(x):
return x.np_data is not None
def isvar(x):
return (
isinstance(x, mgb.SymbolVar)
if mge_version <= "0.6.0"
else isinstance(x, rt.VarNode) # pylint: disable=c-extension-no-member
)
def get_shape(x):
return x._get_imm_shape() if mge_version <= "0.6.0" else x.shape
def get_dep_vars(x, type=None):
return cgtools.get_dep_vars(x, type)
def get_dtype_name(x):
return (
x.dtype.metadata["mgb_dtype"]["name"] if isinstance(x.dtype, np.dtype) else None
)
def get_opr_type(x):
return cgtools.get_opr_type(x)
def get_owner_opr_type(x):
if mge_version <= "0.6.0":
return cgtools.get_type(x._var)
else:
return cgtools.get_owner_opr_type(x._var)
def load_comp_graph_from_file(path):
if mge_version <= "0.6.0":
cg, _, outputs = mgb.load_comp_graph_from_file(path)
else:
ret = G.load_graph(path)
cg = ret.graph
outputs = ret.output_vars_list
return cg, outputs
def graph_traversal(outputs):
(
map_oprs,
map_vars,
var2oprs,
opr2receivers,
indegree2opr,
opr2indegree,
) = cgtools.graph_traversal(outputs)
return map_oprs, map_vars, var2oprs, opr2receivers, indegree2opr, opr2indegree
def get_oprs_seq(outputs, prune_reshape=True):
all_oprs = cgtools.get_oprs_seq(outputs, prune_reshape=prune_reshape)
return all_oprs
def eval_partial(inp, oup):
if not isinstance(oup, (list, tuple)):
oup = (oup,)
inputs = cgtools.get_dep_vars(oup, "Host2DeviceCopy")
if mge_version <= "0.6.0":
cg = oup[0].owner_graph
outputs = list(map(mgb.copy_output, oup))
f = cg.compile(inputs, outputs)
result = f(inp)
else:
if not isinstance(inp, (list, tuple)):
inp = (inp,)
replace_dict = {}
inp_node_list = []
for i in inputs:
inp_node = G.InputNode(
device="xpux", dtype=inputs[0].dtype, graph=inputs[0].graph
)
replace_dict[i] = inp_node.outputs[0]
inp_node_list.append(inp_node)
new_out = cgtools.replace_vars(oup, replace_dict)
out_node_list = [G.OutputNode(i) for i in new_out]
new_out_list = [i.outputs[0] for i in out_node_list]
cg = new_out_list[0].graph
func = cg.compile(new_out_list)
for node, value in zip(inp_node_list, inp):
node.set_value(Tensor(value)._dev_tensor())
func.execute()
result = [o.get_value().numpy() for o in out_node_list]
return result
|
[
"megengine.core.tensor.megbrain_graph.InputNode",
"megengine.utils.comp_graph_tools.get_owner_opr_type",
"megengine.utils.comp_graph_tools.get_dep_vars",
"megengine.utils.comp_graph_tools.get_opr_type",
"megengine.core.tensor.megbrain_graph.load_graph",
"megengine.utils.comp_graph_tools.replace_vars",
"megengine.core.tensor.megbrain_graph.ValueOutputNode",
"megengine._internal.load_comp_graph_from_file",
"megengine.utils.comp_graph_tools.get_oprs_seq",
"megengine.core.tensor.megbrain_graph.OutputNode",
"megengine.utils.comp_graph_tools.graph_traversal",
"megengine.get_logger",
"megengine.tensor.Tensor",
"megengine.utils.comp_graph_tools.get_type"
] |
[((1102, 1123), 'megengine.get_logger', 'mge_get_logger', (['*args'], {}), '(*args)\n', (1116, 1123), True, 'from megengine import get_logger as mge_get_logger\n'), ((2222, 2251), 'megengine.utils.comp_graph_tools.get_dep_vars', 'cgtools.get_dep_vars', (['x', 'type'], {}), '(x, type)\n', (2242, 2251), True, 'import megengine.utils.comp_graph_tools as cgtools\n'), ((2419, 2442), 'megengine.utils.comp_graph_tools.get_opr_type', 'cgtools.get_opr_type', (['x'], {}), '(x)\n', (2439, 2442), True, 'import megengine.utils.comp_graph_tools as cgtools\n'), ((3029, 3061), 'megengine.utils.comp_graph_tools.graph_traversal', 'cgtools.graph_traversal', (['outputs'], {}), '(outputs)\n', (3052, 3061), True, 'import megengine.utils.comp_graph_tools as cgtools\n'), ((3209, 3267), 'megengine.utils.comp_graph_tools.get_oprs_seq', 'cgtools.get_oprs_seq', (['outputs'], {'prune_reshape': 'prune_reshape'}), '(outputs, prune_reshape=prune_reshape)\n', (3229, 3267), True, 'import megengine.utils.comp_graph_tools as cgtools\n'), ((3395, 3439), 'megengine.utils.comp_graph_tools.get_dep_vars', 'cgtools.get_dep_vars', (['oup', '"""Host2DeviceCopy"""'], {}), "(oup, 'Host2DeviceCopy')\n", (3415, 3439), True, 'import megengine.utils.comp_graph_tools as cgtools\n'), ((2518, 2542), 'megengine.utils.comp_graph_tools.get_type', 'cgtools.get_type', (['x._var'], {}), '(x._var)\n', (2534, 2542), True, 'import megengine.utils.comp_graph_tools as cgtools\n'), ((2568, 2602), 'megengine.utils.comp_graph_tools.get_owner_opr_type', 'cgtools.get_owner_opr_type', (['x._var'], {}), '(x._var)\n', (2594, 2602), True, 'import megengine.utils.comp_graph_tools as cgtools\n'), ((2698, 2733), 'megengine._internal.load_comp_graph_from_file', 'mgb.load_comp_graph_from_file', (['path'], {}), '(path)\n', (2727, 2733), True, 'import megengine._internal as mgb\n'), ((2758, 2776), 'megengine.core.tensor.megbrain_graph.load_graph', 'G.load_graph', (['path'], {}), '(path)\n', (2770, 2776), True, 'import megengine.core.tensor.megbrain_graph as G\n'), ((4014, 4053), 'megengine.utils.comp_graph_tools.replace_vars', 'cgtools.replace_vars', (['oup', 'replace_dict'], {}), '(oup, replace_dict)\n', (4034, 4053), True, 'import megengine.utils.comp_graph_tools as cgtools\n'), ((1611, 1637), 'megengine.core.tensor.megbrain_graph.ValueOutputNode', 'G.ValueOutputNode', (['sym_var'], {}), '(sym_var)\n', (1628, 1637), True, 'import megengine.core.tensor.megbrain_graph as G\n'), ((3800, 3872), 'megengine.core.tensor.megbrain_graph.InputNode', 'G.InputNode', ([], {'device': '"""xpux"""', 'dtype': 'inputs[0].dtype', 'graph': 'inputs[0].graph'}), "(device='xpux', dtype=inputs[0].dtype, graph=inputs[0].graph)\n", (3811, 3872), True, 'import megengine.core.tensor.megbrain_graph as G\n'), ((4079, 4094), 'megengine.core.tensor.megbrain_graph.OutputNode', 'G.OutputNode', (['i'], {}), '(i)\n', (4091, 4094), True, 'import megengine.core.tensor.megbrain_graph as G\n'), ((4328, 4341), 'megengine.tensor.Tensor', 'Tensor', (['value'], {}), '(value)\n', (4334, 4341), False, 'from megengine.tensor import Tensor\n')]
|
# c: 21.09.2008
import os
import numpy as nm
from sfepy import data_dir
from sfepy.fem import MeshIO
filename_mesh = data_dir + '/meshes/2d/special/circle_in_square.mesh'
## filename_mesh = data_dir + '/meshes/2d/special/circle_in_square_small.mesh'
## filename_mesh = data_dir + '/meshes/3d/special/cube_sphere.mesh'
## filename_mesh = data_dir + '/meshes/2d/special/cube_cylinder.mesh'
omega = 1
omega_squared = omega**2
conf_dir = os.path.dirname(__file__)
io = MeshIO.any_from_filename(filename_mesh, prefix_dir=conf_dir)
bbox, dim = io.read_bounding_box( ret_dim = True )
geom = {3 : '3_4', 2 : '2_3'}[dim]
x_left, x_right = bbox[:,0]
regions = {
'Y' : ('all', {}),
'Y1' : ('elements of group 1', {}),
'Y2' : ('elements of group 2', {}),
'Y2_Surface': ('r.Y1 *n r.Y2', {'can_cells' : False}),
'Left' : ('nodes in (x < %f)' % (x_left + 1e-3), {}),
'Right' : ('nodes in (x > %f)' % (x_right - 1e-3), {}),
}
material_2 = {
'name' : 'inclusion',
# epoxy
'function' : 'get_inclusion_pars',
}
def get_inclusion_pars(ts, coor, mode=None, region=None, ig=None):
"""TODO: implement proper 3D -> 2D transformation of constitutive
matrices."""
if mode == 'qp':
n_nod, dim = coor.shape
        sym = (dim + 1) * dim // 2
dielectric = nm.eye( dim, dtype = nm.float64 )
# !!!
coupling = nm.ones( (dim, sym), dtype = nm.float64 )
# coupling[0,1] = 0.2
out = {
# Lame coefficients in 1e+10 Pa.
'lam' : 0.1798,
'mu' : 0.148,
# dielectric tensor
'dielectric' : dielectric,
# piezoelectric coupling
'coupling' : coupling,
'density' : 0.1142, # in 1e4 kg/m3
}
        for key, val in out.items():
out[key] = nm.tile(val, (coor.shape[0], 1, 1))
return out
functions = {
'get_inclusion_pars' : (get_inclusion_pars,),
}
field_0 = {
'name' : 'displacement',
'dtype' : nm.float64,
'shape' : dim,
'region' : 'Y',
'approx_order' : 1,
}
field_2 = {
'name' : 'potential',
'dtype' : nm.float64,
'shape' : (1,),
'region' : 'Y',
'approx_order' : 1,
}
variables = {
'u' : ('unknown field', 'displacement', 0),
'v' : ('test field', 'displacement', 'u'),
'phi' : ('unknown field', 'potential', 1),
'psi' : ('test field', 'potential', 'phi'),
}
ebcs = {
'u1' : ('Left', {'u.all' : 0.0}),
'u2' : ('Right', {'u.0' : 0.1}),
'phi' : ('Y2_Surface', {'phi.all' : 0.0}),
}
integral_1 = {
'name' : 'i1',
'kind' : 'v',
'quadrature' : 'gauss_o2_d%d' % dim,
}
equations = {
'1' : """- %f * dw_mass_vector.i1.Y( inclusion.density, v, u )
+ dw_lin_elastic_iso.i1.Y( inclusion.lam, inclusion.mu, v, u )
- dw_piezo_coupling.i1.Y2( inclusion.coupling, v, phi )
= 0""" % omega_squared,
'2' : """dw_diffusion.i1.Y( inclusion.dielectric, psi, phi )
+ dw_piezo_coupling.i1.Y2( inclusion.coupling, u, psi )
= 0""",
}
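# The two equations above form the coupled piezoelectric system: the first is the
# time-harmonic balance of momentum (inertial term scaled by omega_squared, isotropic
# linear elasticity, and the piezoelectric coupling restricted to the inclusion Y2);
# the second is the quasi-static charge balance (dielectric term plus the transposed
# coupling term).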
##
# FE assembling parameters.
fe = {
'chunk_size' : 100000
}
##
# Solvers etc.
solver_0 = {
'name' : 'ls',
'kind' : 'ls.scipy_direct',
}
solver_1 = {
'name' : 'newton',
'kind' : 'nls.newton',
'i_max' : 1,
'eps_a' : 1e-10,
'eps_r' : 1.0,
'macheps' : 1e-16,
'lin_red' : 1e-2, # Linear system error < (eps_a * lin_red).
'ls_red' : 0.1,
'ls_red_warp': 0.001,
'ls_on' : 1.1,
'ls_min' : 1e-5,
'check' : 0,
'delta' : 1e-6,
'is_plot' : False,
'problem' : 'nonlinear', # 'nonlinear' or 'linear' (ignore i_max)
}
|
[
"sfepy.fem.MeshIO.any_from_filename"
] |
[((438, 463), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (453, 463), False, 'import os\n'), ((469, 529), 'sfepy.fem.MeshIO.any_from_filename', 'MeshIO.any_from_filename', (['filename_mesh'], {'prefix_dir': 'conf_dir'}), '(filename_mesh, prefix_dir=conf_dir)\n', (493, 529), False, 'from sfepy.fem import MeshIO\n'), ((1301, 1330), 'numpy.eye', 'nm.eye', (['dim'], {'dtype': 'nm.float64'}), '(dim, dtype=nm.float64)\n', (1307, 1330), True, 'import numpy as nm\n'), ((1368, 1405), 'numpy.ones', 'nm.ones', (['(dim, sym)'], {'dtype': 'nm.float64'}), '((dim, sym), dtype=nm.float64)\n', (1375, 1405), True, 'import numpy as nm\n'), ((1824, 1859), 'numpy.tile', 'nm.tile', (['val', '(coor.shape[0], 1, 1)'], {}), '(val, (coor.shape[0], 1, 1))\n', (1831, 1859), True, 'import numpy as nm\n')]
|
from datetime import datetime
from typing import Optional
import typer
from sqlalchemy.orm.exc import UnmappedInstanceError
from sqlmodel import Session, select
from .database import engine
from .functions_aux import Status
from .tables import ToDo, Timer
app = typer.Typer()
@app.command()
def task(id: str, task: str = None,
status: Optional[Status] = typer.Option(None),
tag: str = None, remarks: str = None, project: str = None,
due_date: datetime = typer.Option(None, formats=['%Y-%m-%d']),
reminder: datetime = typer.Option(None, formats=['%Y-%m-%d'])):
"""Edit record from to-do list"""
with Session(engine) as session:
try:
query = session.get(ToDo, id)
if task is not None:
query.task = task
if tag is not None:
query.tag = tag
if remarks is not None:
query.remarks = remarks
if project is not None:
query.project = project
if status is None or status == query.status:
pass
elif status == 'done':
query.status = status
query.date_end = datetime.now().date()
elif status == 'doing' and query.status == 'done':
query.status = status
query.date_end = None
elif status == 'to do':
timer = session.exec(select(Timer).where(
Timer.id_todo == id)).all()
if len(timer) > 0:
typer.secho(f'\nTask already started\n',
fg=typer.colors.RED)
raise typer.Exit(code=1)
else:
query.status = status
query.date_end = None
else:
query.status = status
today = datetime.today()
if due_date is not None and reminder \
is not None and reminder >= due_date:
typer.secho(
f'\nreminder must be smaller than {due_date.date()}\n',
fg=typer.colors.RED)
raise typer.Exit(code=1)
elif due_date is not None and due_date <= today:
                typer.secho(f'\ndue date must be greater than {today.date()}\n',
fg=typer.colors.RED)
raise typer.Exit(code=1)
elif reminder is not None and reminder <= today:
typer.secho(
                    f'\nreminder must be greater than {today.date()}\n',
fg=typer.colors.RED)
raise typer.Exit(code=1)
elif due_date is not None and query.reminder \
is not None and due_date < query.reminder:
typer.secho(
                    f'\ndue date must be greater than {query.reminder.date()}\n',
fg=typer.colors.RED)
raise typer.Exit(code=1)
elif reminder is not None and query.due_date \
is not None and reminder >= query.due_date:
typer.secho(
f'\nreminder must be smaller than {query.due_date.date()}\n',
fg=typer.colors.RED)
raise typer.Exit(code=1)
elif reminder is not None:
query.reminder = reminder
elif due_date is not None:
query.due_date = due_date
session.add(query)
edit = typer.confirm(f"""Are you sure you want to edit:
{query}""")
if not edit:
typer.secho("Not editing",
fg=typer.colors.RED)
raise typer.Abort()
typer.secho("Editing it!",
fg=typer.colors.RED)
session.commit()
except AttributeError:
typer.secho(f'\nInvalid task id\n',
fg=typer.colors.RED)
raise typer.Exit(code=1)
except UnmappedInstanceError:
typer.secho(f'\nInvalid task id\n',
fg=typer.colors.RED)
raise typer.Exit(code=1)
@app.command()
def project(project: str, new_project: str):
"""Edit project name in tasks"""
with Session(engine) as session:
tasks = session.exec(select(ToDo).where(
ToDo.project == project)).all()
if len(tasks) > 0:
for task in tasks:
task.project = new_project
session.add(task)
edit = typer.confirm(f"""Are you sure you want to edit:
{tasks}""")
if not edit:
typer.secho("Not editing",
fg=typer.colors.RED)
raise typer.Abort()
typer.secho("Editing it!",
fg=typer.colors.RED)
session.commit()
else:
typer.secho(f'\nInvalid project\n',
fg=typer.colors.RED)
raise typer.Exit(code=1)
@app.command()
def del_task(id: str):
"""Delete task"""
try:
with Session(engine) as session:
task = session.get(ToDo, id)
timers = session.exec(select(Timer).where(
Timer.id_todo == task.id)).all()
for timer in timers:
session.delete(timer)
session.delete(task)
edit = typer.confirm(f"""Are you sure you want to delete:
{task}""")
if not edit:
typer.secho("Not deleting",
fg=typer.colors.RED)
raise typer.Abort()
typer.secho("Deleting it!",
fg=typer.colors.RED)
session.commit()
except UnmappedInstanceError:
typer.secho(f'\nInvalid task id\n',
fg=typer.colors.RED)
raise typer.Exit(code=1)
@app.command()
def del_project(project: str):
"""Delete all tasks from a project"""
with Session(engine) as session:
tasks = session.exec(select(ToDo).where(
ToDo.project == project)).all()
if len(tasks) > 0:
for task in tasks:
timers = session.exec(select(Timer).where(
Timer.id_todo == task.id)).all()
                for timer in timers:
                    session.delete(timer)
                session.delete(task)
edit = typer.confirm(f"""Are you sure you want to delete:
{tasks}""")
if not edit:
typer.secho("Not deleting",
fg=typer.colors.RED)
raise typer.Abort()
typer.secho("deleting it!",
fg=typer.colors.RED)
session.commit()
else:
typer.secho(f'\nInvalid project\n',
fg=typer.colors.RED)
raise typer.Exit(code=1)
@app.command()
def timer(id: int,
end: datetime = typer.Option('', formats=['%Y-%m-%d %H:%M:%S'])):
"""Edit record from Timer"""
with Session(engine) as session:
try:
query = session.get(Timer, id)
if end <= query.start:
typer.secho(
f'\nEnd must be >= {query.start}\n',
fg=typer.colors.RED)
raise typer.Exit(code=1)
if end >= datetime.now():
typer.secho(
f'\nEnd must be < {datetime.now()}'
)
raise typer.Exit(code=1)
query.end = end
session.add(query)
edit = typer.confirm(f"""Are you sure you want to edit:
{query}""")
if not edit:
typer.secho("Not editing",
fg=typer.colors.RED)
raise typer.Abort()
typer.secho("Editing it!",
fg=typer.colors.RED)
session.commit()
except AttributeError:
typer.secho(f'\nInvalid timer id\n',
fg=typer.colors.RED)
@app.command()
def del_timer(id: int):
"""Delete record from Timer"""
with Session(engine) as session:
try:
query = session.get(Timer, id)
session.delete(query)
edit = typer.confirm(f"""Are you sure you want to delete:
{query}""")
if not edit:
typer.secho("Not deleting",
fg=typer.colors.RED)
raise typer.Abort()
typer.secho("deleting it!",
fg=typer.colors.RED)
session.commit()
except AttributeError:
typer.secho(f'\nInvalid timer id\n',
fg=typer.colors.RED)
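# Example invocations, assuming the Typer app above is exposed as a console script
# named `todo` (the entry-point name is an assumption, not defined in this module;
# Typer maps `del_task`/`due_date` to `del-task`/`--due-date` by default):
#   todo task 3 --status done
#   todo del-task 3
#   todo timer 7 --end "2030-01-01 18:00:00"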
|
[
"sqlmodel.Session",
"sqlmodel.select"
] |
[((265, 278), 'typer.Typer', 'typer.Typer', ([], {}), '()\n', (276, 278), False, 'import typer\n'), ((368, 386), 'typer.Option', 'typer.Option', (['None'], {}), '(None)\n', (380, 386), False, 'import typer\n'), ((486, 526), 'typer.Option', 'typer.Option', (['None'], {'formats': "['%Y-%m-%d']"}), "(None, formats=['%Y-%m-%d'])\n", (498, 526), False, 'import typer\n'), ((558, 598), 'typer.Option', 'typer.Option', (['None'], {'formats': "['%Y-%m-%d']"}), "(None, formats=['%Y-%m-%d'])\n", (570, 598), False, 'import typer\n'), ((7039, 7086), 'typer.Option', 'typer.Option', (['""""""'], {'formats': "['%Y-%m-%d %H:%M:%S']"}), "('', formats=['%Y-%m-%d %H:%M:%S'])\n", (7051, 7086), False, 'import typer\n'), ((648, 663), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (655, 663), False, 'from sqlmodel import Session, select\n'), ((4296, 4311), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (4303, 4311), False, 'from sqlmodel import Session, select\n'), ((6027, 6042), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (6034, 6042), False, 'from sqlmodel import Session, select\n'), ((7131, 7146), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (7138, 7146), False, 'from sqlmodel import Session, select\n'), ((8240, 8255), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (8247, 8255), False, 'from sqlmodel import Session, select\n'), ((1880, 1896), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (1894, 1896), False, 'from datetime import datetime\n'), ((3516, 3592), 'typer.confirm', 'typer.confirm', (['f"""Are you sure you want to edit:\n {query}"""'], {}), '(f"""Are you sure you want to edit:\n {query}""")\n', (3529, 3592), False, 'import typer\n'), ((3758, 3805), 'typer.secho', 'typer.secho', (['"""Editing it!"""'], {'fg': 'typer.colors.RED'}), "('Editing it!', fg=typer.colors.RED)\n", (3769, 3805), False, 'import typer\n'), ((4571, 4643), 'typer.confirm', 'typer.confirm', (['f"""Are you sure you want to edit:\n {tasks}"""'], {}), '(f"""Are you sure you want to edit:\n {tasks}""")\n', (4584, 4643), False, 'import typer\n'), ((4809, 4856), 'typer.secho', 'typer.secho', (['"""Editing it!"""'], {'fg': 'typer.colors.RED'}), "('Editing it!', fg=typer.colors.RED)\n", (4820, 4856), False, 'import typer\n'), ((4936, 4994), 'typer.secho', 'typer.secho', (['f"""\nInvalid project\n"""'], {'fg': 'typer.colors.RED'}), '(f"""\nInvalid project\n""", fg=typer.colors.RED)\n', (4947, 4994), False, 'import typer\n'), ((5035, 5053), 'typer.Exit', 'typer.Exit', ([], {'code': '(1)'}), '(code=1)\n', (5045, 5053), False, 'import typer\n'), ((5138, 5153), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (5145, 5153), False, 'from sqlmodel import Session, select\n'), ((5434, 5507), 'typer.confirm', 'typer.confirm', (['f"""Are you sure you want to delete:\n {task}"""'], {}), '(f"""Are you sure you want to delete:\n {task}""")\n', (5447, 5507), False, 'import typer\n'), ((5674, 5722), 'typer.secho', 'typer.secho', (['"""Deleting it!"""'], {'fg': 'typer.colors.RED'}), "('Deleting it!', fg=typer.colors.RED)\n", (5685, 5722), False, 'import typer\n'), ((5818, 5876), 'typer.secho', 'typer.secho', (['f"""\nInvalid task id\n"""'], {'fg': 'typer.colors.RED'}), '(f"""\nInvalid task id\n""", fg=typer.colors.RED)\n', (5829, 5876), False, 'import typer\n'), ((5909, 5927), 'typer.Exit', 'typer.Exit', ([], {'code': '(1)'}), '(code=1)\n', (5919, 5927), False, 'import typer\n'), ((6490, 6564), 'typer.confirm', 'typer.confirm', (['f"""Are you 
sure you want to delete:\n {tasks}"""'], {}), '(f"""Are you sure you want to delete:\n {tasks}""")\n', (6503, 6564), False, 'import typer\n'), ((6731, 6779), 'typer.secho', 'typer.secho', (['"""deleting it!"""'], {'fg': 'typer.colors.RED'}), "('deleting it!', fg=typer.colors.RED)\n", (6742, 6779), False, 'import typer\n'), ((6859, 6917), 'typer.secho', 'typer.secho', (['f"""\nInvalid project\n"""'], {'fg': 'typer.colors.RED'}), '(f"""\nInvalid project\n""", fg=typer.colors.RED)\n', (6870, 6917), False, 'import typer\n'), ((6958, 6976), 'typer.Exit', 'typer.Exit', ([], {'code': '(1)'}), '(code=1)\n', (6968, 6976), False, 'import typer\n'), ((7679, 7768), 'typer.confirm', 'typer.confirm', (['f"""Are you sure you want to edit:\n {query}"""'], {}), '(\n f"""Are you sure you want to edit:\n {query}""")\n', (7692, 7768), False, 'import typer\n'), ((7929, 7976), 'typer.secho', 'typer.secho', (['"""Editing it!"""'], {'fg': 'typer.colors.RED'}), "('Editing it!', fg=typer.colors.RED)\n", (7940, 7976), False, 'import typer\n'), ((8377, 8451), 'typer.confirm', 'typer.confirm', (['f"""Are you sure you want to delete:\n {query}"""'], {}), '(f"""Are you sure you want to delete:\n {query}""")\n', (8390, 8451), False, 'import typer\n'), ((8618, 8666), 'typer.secho', 'typer.secho', (['"""deleting it!"""'], {'fg': 'typer.colors.RED'}), "('deleting it!', fg=typer.colors.RED)\n", (8629, 8666), False, 'import typer\n'), ((2174, 2192), 'typer.Exit', 'typer.Exit', ([], {'code': '(1)'}), '(code=1)\n', (2184, 2192), False, 'import typer\n'), ((3634, 3681), 'typer.secho', 'typer.secho', (['"""Not editing"""'], {'fg': 'typer.colors.RED'}), "('Not editing', fg=typer.colors.RED)\n", (3645, 3681), False, 'import typer\n'), ((3732, 3745), 'typer.Abort', 'typer.Abort', ([], {}), '()\n', (3743, 3745), False, 'import typer\n'), ((3902, 3960), 'typer.secho', 'typer.secho', (['f"""\nInvalid task id\n"""'], {'fg': 'typer.colors.RED'}), '(f"""\nInvalid task id\n""", fg=typer.colors.RED)\n', (3913, 3960), False, 'import typer\n'), ((4001, 4019), 'typer.Exit', 'typer.Exit', ([], {'code': '(1)'}), '(code=1)\n', (4011, 4019), False, 'import typer\n'), ((4070, 4128), 'typer.secho', 'typer.secho', (['f"""\nInvalid task id\n"""'], {'fg': 'typer.colors.RED'}), '(f"""\nInvalid task id\n""", fg=typer.colors.RED)\n', (4081, 4128), False, 'import typer\n'), ((4169, 4187), 'typer.Exit', 'typer.Exit', ([], {'code': '(1)'}), '(code=1)\n', (4179, 4187), False, 'import typer\n'), ((4685, 4732), 'typer.secho', 'typer.secho', (['"""Not editing"""'], {'fg': 'typer.colors.RED'}), "('Not editing', fg=typer.colors.RED)\n", (4696, 4732), False, 'import typer\n'), ((4783, 4796), 'typer.Abort', 'typer.Abort', ([], {}), '()\n', (4794, 4796), False, 'import typer\n'), ((5549, 5597), 'typer.secho', 'typer.secho', (['"""Not deleting"""'], {'fg': 'typer.colors.RED'}), "('Not deleting', fg=typer.colors.RED)\n", (5560, 5597), False, 'import typer\n'), ((5648, 5661), 'typer.Abort', 'typer.Abort', ([], {}), '()\n', (5659, 5661), False, 'import typer\n'), ((6606, 6654), 'typer.secho', 'typer.secho', (['"""Not deleting"""'], {'fg': 'typer.colors.RED'}), "('Not deleting', fg=typer.colors.RED)\n", (6617, 6654), False, 'import typer\n'), ((6705, 6718), 'typer.Abort', 'typer.Abort', ([], {}), '()\n', (6716, 6718), False, 'import typer\n'), ((7266, 7337), 'typer.secho', 'typer.secho', (['f"""\nEnd must be >= {query.start}\n"""'], {'fg': 'typer.colors.RED'}), '(f"""\nEnd must be >= {query.start}\n""", fg=typer.colors.RED)\n', (7277, 7337), False, 'import typer\n'), 
((7399, 7417), 'typer.Exit', 'typer.Exit', ([], {'code': '(1)'}), '(code=1)\n', (7409, 7417), False, 'import typer\n'), ((7440, 7454), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (7452, 7454), False, 'from datetime import datetime\n'), ((7581, 7599), 'typer.Exit', 'typer.Exit', ([], {'code': '(1)'}), '(code=1)\n', (7591, 7599), False, 'import typer\n'), ((7805, 7852), 'typer.secho', 'typer.secho', (['"""Not editing"""'], {'fg': 'typer.colors.RED'}), "('Not editing', fg=typer.colors.RED)\n", (7816, 7852), False, 'import typer\n'), ((7903, 7916), 'typer.Abort', 'typer.Abort', ([], {}), '()\n', (7914, 7916), False, 'import typer\n'), ((8073, 8132), 'typer.secho', 'typer.secho', (['f"""\nInvalid timer id\n"""'], {'fg': 'typer.colors.RED'}), '(f"""\nInvalid timer id\n""", fg=typer.colors.RED)\n', (8084, 8132), False, 'import typer\n'), ((8493, 8541), 'typer.secho', 'typer.secho', (['"""Not deleting"""'], {'fg': 'typer.colors.RED'}), "('Not deleting', fg=typer.colors.RED)\n", (8504, 8541), False, 'import typer\n'), ((8592, 8605), 'typer.Abort', 'typer.Abort', ([], {}), '()\n', (8603, 8605), False, 'import typer\n'), ((8764, 8823), 'typer.secho', 'typer.secho', (['f"""\nInvalid timer id\n"""'], {'fg': 'typer.colors.RED'}), '(f"""\nInvalid timer id\n""", fg=typer.colors.RED)\n', (8775, 8823), False, 'import typer\n'), ((2406, 2424), 'typer.Exit', 'typer.Exit', ([], {'code': '(1)'}), '(code=1)\n', (2416, 2424), False, 'import typer\n'), ((2651, 2669), 'typer.Exit', 'typer.Exit', ([], {'code': '(1)'}), '(code=1)\n', (2661, 2669), False, 'import typer\n'), ((1200, 1214), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1212, 1214), False, 'from datetime import datetime\n'), ((2966, 2984), 'typer.Exit', 'typer.Exit', ([], {'code': '(1)'}), '(code=1)\n', (2976, 2984), False, 'import typer\n'), ((4353, 4365), 'sqlmodel.select', 'select', (['ToDo'], {}), '(ToDo)\n', (4359, 4365), False, 'from sqlmodel import Session, select\n'), ((6084, 6096), 'sqlmodel.select', 'select', (['ToDo'], {}), '(ToDo)\n', (6090, 6096), False, 'from sqlmodel import Session, select\n'), ((7524, 7538), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (7536, 7538), False, 'from datetime import datetime\n'), ((1558, 1621), 'typer.secho', 'typer.secho', (['f"""\nTask already started\n"""'], {'fg': 'typer.colors.RED'}), '(f"""\nTask already started\n""", fg=typer.colors.RED)\n', (1569, 1621), False, 'import typer\n'), ((1678, 1696), 'typer.Exit', 'typer.Exit', ([], {'code': '(1)'}), '(code=1)\n', (1688, 1696), False, 'import typer\n'), ((3283, 3301), 'typer.Exit', 'typer.Exit', ([], {'code': '(1)'}), '(code=1)\n', (3293, 3301), False, 'import typer\n'), ((5241, 5254), 'sqlmodel.select', 'select', (['Timer'], {}), '(Timer)\n', (5247, 5254), False, 'from sqlmodel import Session, select\n'), ((6244, 6257), 'sqlmodel.select', 'select', (['Timer'], {}), '(Timer)\n', (6250, 6257), False, 'from sqlmodel import Session, select\n'), ((1434, 1447), 'sqlmodel.select', 'select', (['Timer'], {}), '(Timer)\n', (1440, 1447), False, 'from sqlmodel import Session, select\n')]
|
"""All Sfepy imports go in this module
"""
import numpy as np
from sfepy.base.goptions import goptions
from sfepy.discrete.fem import Field
from sfepy.discrete.fem import FEDomain as Domain
from sfepy.discrete import (
FieldVariable,
Material,
Integral,
Function,
Equation,
Equations,
Problem,
)
from sfepy.terms import Term, Terms
from sfepy.discrete.conditions import Conditions, EssentialBC
from sfepy.solvers.ls import ScipyDirect
from sfepy.solvers.nls import Newton
from sfepy.discrete import Functions
from sfepy.mesh.mesh_generators import gen_block_mesh
from sfepy.base.base import output
from toolz.curried import pipe, curry, do
goptions["verbose"] = False
output.set_output(quiet=True)
def check(ids):
"""Check that the fixed displacement nodes have been isolated
Args:
ids: the isolated IDs
Returns the unchanged IDs
>>> check([1, 2, 3, 4])
Traceback (most recent call last):
...
RuntimeError: length of ids is incorrect
"""
if len(ids) != 3:
raise RuntimeError("length of ids is incorrect")
return ids
@curry
def subdomain(i_x, domain_, eps, coords, **_):
"""Find the node IDs that will be fixed
Args:
i_x: the index (either 0 or 1) depends on direction of axes
domain_: the Sfepy domain
eps: a small value
coords: the coordinates of the nodes
Returns:
the isolated node IDs
"""
def i_y():
"""Switch the index from 0 -> 1 or from 1 -> 0
"""
return (i_x + 1) % 2
return pipe(
(coords[:, i_x] > -eps) & (coords[:, i_x] < eps),
lambda x: (coords[:, i_x] < domain_.get_mesh_bounding_box()[0][i_x] + eps) | x,
lambda x: (coords[:, i_x] > domain_.get_mesh_bounding_box()[1][i_x] - eps) | x,
lambda x: (coords[:, i_y()] < eps) & (coords[:, i_y()] > -eps) & x,
lambda x: np.where(x)[0],
check,
)
def get_bc(domain, delta_x, index, cond):
"""Make a displacement boundary condition
Args:
domain: the Sfepy domain
delta_x: the mesh spacing
index: the index (either 0 or 1) depends on direction of axes
cond: the BC dictionary
Returns:
the Sfepy boundary condition
"""
return pipe(
Function("fix_points", subdomain(index, domain, delta_x * 1e-3)),
lambda x: domain.create_region(
"region_fix_points",
"vertices by fix_points",
"vertex",
functions=Functions([x]),
),
lambda x: EssentialBC("fix_points_BC", x, cond),
)
def get_bcs(domain, delta_x):
"""Get the boundary conditions
Args:
domain: the Sfepy domain
delta_x: the mesh spacing
Returns:
the boundary conditions
"""
return Conditions(
[
get_bc(domain, delta_x, 1, {"u.0": 0.0}),
get_bc(domain, delta_x, 0, {"u.1": 0.0}),
]
)
def get_material(calc_stiffness, calc_prestress):
"""Get the material
Args:
calc_stiffness: the function for calculating the stiffness tensor
calc_prestress: the function for calculating the prestress
Returns:
the material
"""
def _material_func_(_, coors, mode=None, **__):
if mode == "qp":
return dict(D=calc_stiffness(coors), stress=calc_prestress(coors))
return None
return Material("m", function=Function("material_func", _material_func_))
def get_uv(shape, delta_x):
"""Get the fields for the displacement and test function
Args:
shape: the shape of the domain
delta_x: the mesh spacing
Returns:
tuple of field variables
"""
return pipe(
np.array(shape),
lambda x: gen_block_mesh(
x * delta_x, x + 1, np.zeros_like(shape), verbose=False
),
lambda x: Domain("domain", x),
lambda x: x.create_region("region_all", "all"),
lambda x: Field.from_args("fu", np.float64, "vector", x, approx_order=2),
lambda x: (
FieldVariable("u", "unknown", x),
FieldVariable("v", "test", x, primary_var_name="u"),
),
)
# field = Field.from_args('fu', np.float64, 'vector', region_all,
# pylint: disable=no-member
# approx_order=2)
def get_terms(u_field, v_field, calc_stiffness, calc_prestress):
"""Get the terms for the equation
Args:
u_field: the displacement field
v_field: the test function field
calc_stiffness: a function to calculate the stiffness tensor
calc_prestress: a function to calculate the prestress tensor
Returns:
a tuple of terms for the equation
"""
return (
Term.new(
"dw_lin_elastic(m.D, v, u)",
Integral("i", order=4),
v_field.field.region,
m=get_material(calc_stiffness, calc_prestress),
v=v_field,
u=u_field,
),
Term.new(
"dw_lin_prestress(m.stress, v)",
Integral("i", order=4),
v_field.field.region,
m=get_material(calc_stiffness, calc_prestress),
v=v_field,
),
)
def get_nls(evaluator):
"""Get the non-linear solver
Args:
evaluator: the problem evaluator
Returns:
the non-linear solver
"""
return Newton(
{},
lin_solver=ScipyDirect({}),
fun=evaluator.eval_residual,
fun_grad=evaluator.eval_tangent_matrix,
)
def get_problem(u_field, v_field, calc_stiffness, calc_prestress, delta_x):
"""Get the problem
Args:
u_field: the displacement field
v_field: the test function field
      calc_stiffness: a function to calculate the stiffness tensor
calc_prestress: a function to calculate the prestress tensor
delta_x: the mesh spacing
Returns:
the Sfepy problem
"""
return pipe(
get_terms(u_field, v_field, calc_stiffness, calc_prestress),
lambda x: Equation("balance_of_forces", Terms([x[0], x[1]])),
lambda x: Problem("elasticity", equations=Equations([x])),
do(lambda x: x.time_update(ebcs=get_bcs(v_field.field.region.domain, delta_x))),
do(lambda x: x.set_solver(get_nls(x.get_evaluator()))),
)
def get_displacement(vec, shape):
"""Extract reshaped displacement from output vector
Args:
vec: the output vector obtained from problem.solve()
shape: the shape of the mesh
Returns:
reshaped displacement field
"""
return pipe(
vec.create_output_dict()["u"].data,
lambda x: np.reshape(x, (tuple(y + 1 for y in shape) + x.shape[-1:])),
)
def get_strain(problem, shape):
"""Calculate the strain field
Args:
problem: the Sfepy problem
      shape: the shape of the mesh
Returns:
the reshaped strain field
"""
return get_stress_strain(problem, shape, "ev_cauchy_strain.{dim}.region_all(u)")
def get_stress(problem, shape):
"""Calculate the strain field
Args:
problem: the Sfepy problem
shape: the shape of the mesh
Returns:
the reshaped stress field
"""
return get_stress_strain(
problem, shape, "ev_cauchy_stress.{dim}.region_all(m.D, u)"
)
def get_stress_strain(problem, shape, str_):
"""Get the stress or strain field depending on the str_ argument
Args:
problem: the Sfepy problem
shape: the shape of the domain
str_: string passed to problem.evaluate to extract the stress or
strain
Returns
the reshaped stress or strain field
"""
return pipe(
np.squeeze(
problem.evaluate(
str_.format(dim=len(shape)), mode="el_avg", copy_materials=False
)
),
lambda x: np.reshape(x, (shape + x.shape[-1:])),
)
def get_data(shape, problem, vec):
"""Extract the displacement, strain and stress fields
Args:
shape: the shape of the mesh
problem: the Sfepy problem
vec: the output vector from problem.solve
Returns:
a tuple of arrays for the strain, displacement and stress fields
"""
return (
get_strain(problem, shape),
get_displacement(vec, shape),
get_stress(problem, shape),
)
def solve(calc_stiffness, calc_prestress, shape, delta_x):
"""Solve the linear elasticity problem
Args:
calc_stiffness: the function to calculate the stiffness tensor
      calc_prestress: the function to calculate the prestress tensor
shape: the shape of the mesh
delta_x: the mesh spacing
Returns:
      a tuple of arrays for the strain, displacement and stress fields
"""
return pipe(
get_uv(shape, delta_x),
lambda x: get_problem(x[0], x[1], calc_stiffness, calc_prestress, delta_x),
lambda x: get_data(shape, x, x.solve()),
)
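# A minimal usage sketch (not part of the original module). The two callables below
# are illustrative assumptions: for a 2D shape they are expected to return, per
# quadrature point, a (3, 3) stiffness matrix and a (3, 1) prestress vector.
#
# def identity_stiffness(coors):
#     return np.tile(np.eye(3), (coors.shape[0], 1, 1))
#
# def zero_prestress(coors):
#     return np.zeros((coors.shape[0], 3, 1))
#
# strain, displacement, stress = solve(identity_stiffness, zero_prestress, (5, 5), 0.1)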
|
[
"sfepy.discrete.Equations",
"sfepy.discrete.conditions.EssentialBC",
"sfepy.solvers.ls.ScipyDirect",
"sfepy.discrete.Functions",
"sfepy.terms.Terms",
"sfepy.base.base.output.set_output",
"sfepy.discrete.Integral",
"sfepy.discrete.fem.Field.from_args",
"sfepy.discrete.FieldVariable",
"sfepy.discrete.Function",
"sfepy.discrete.fem.FEDomain"
] |
[((698, 727), 'sfepy.base.base.output.set_output', 'output.set_output', ([], {'quiet': '(True)'}), '(quiet=True)\n', (715, 727), False, 'from sfepy.base.base import output\n'), ((3702, 3717), 'numpy.array', 'np.array', (['shape'], {}), '(shape)\n', (3710, 3717), True, 'import numpy as np\n'), ((2537, 2574), 'sfepy.discrete.conditions.EssentialBC', 'EssentialBC', (['"""fix_points_BC"""', 'x', 'cond'], {}), "('fix_points_BC', x, cond)\n", (2548, 2574), False, 'from sfepy.discrete.conditions import Conditions, EssentialBC\n'), ((3409, 3451), 'sfepy.discrete.Function', 'Function', (['"""material_func"""', '_material_func_'], {}), "('material_func', _material_func_)\n", (3417, 3451), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem\n'), ((3850, 3869), 'sfepy.discrete.fem.FEDomain', 'Domain', (['"""domain"""', 'x'], {}), "('domain', x)\n", (3856, 3869), True, 'from sfepy.discrete.fem import FEDomain as Domain\n'), ((3945, 4007), 'sfepy.discrete.fem.Field.from_args', 'Field.from_args', (['"""fu"""', 'np.float64', '"""vector"""', 'x'], {'approx_order': '(2)'}), "('fu', np.float64, 'vector', x, approx_order=2)\n", (3960, 4007), False, 'from sfepy.discrete.fem import Field\n'), ((4778, 4800), 'sfepy.discrete.Integral', 'Integral', (['"""i"""'], {'order': '(4)'}), "('i', order=4)\n", (4786, 4800), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem\n'), ((5028, 5050), 'sfepy.discrete.Integral', 'Integral', (['"""i"""'], {'order': '(4)'}), "('i', order=4)\n", (5036, 5050), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem\n'), ((5395, 5410), 'sfepy.solvers.ls.ScipyDirect', 'ScipyDirect', (['{}'], {}), '({})\n', (5406, 5410), False, 'from sfepy.solvers.ls import ScipyDirect\n'), ((7812, 7847), 'numpy.reshape', 'np.reshape', (['x', '(shape + x.shape[-1:])'], {}), '(x, shape + x.shape[-1:])\n', (7822, 7847), True, 'import numpy as np\n'), ((1890, 1901), 'numpy.where', 'np.where', (['x'], {}), '(x)\n', (1898, 1901), True, 'import numpy as np\n'), ((3785, 3805), 'numpy.zeros_like', 'np.zeros_like', (['shape'], {}), '(shape)\n', (3798, 3805), True, 'import numpy as np\n'), ((4041, 4073), 'sfepy.discrete.FieldVariable', 'FieldVariable', (['"""u"""', '"""unknown"""', 'x'], {}), "('u', 'unknown', x)\n", (4054, 4073), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem\n'), ((4087, 4138), 'sfepy.discrete.FieldVariable', 'FieldVariable', (['"""v"""', '"""test"""', 'x'], {'primary_var_name': '"""u"""'}), "('v', 'test', x, primary_var_name='u')\n", (4100, 4138), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem\n'), ((6038, 6057), 'sfepy.terms.Terms', 'Terms', (['[x[0], x[1]]'], {}), '([x[0], x[1]])\n', (6043, 6057), False, 'from sfepy.terms import Term, Terms\n'), ((2492, 2506), 'sfepy.discrete.Functions', 'Functions', (['[x]'], {}), '([x])\n', (2501, 2506), False, 'from sfepy.discrete import Functions\n'), ((6110, 6124), 'sfepy.discrete.Equations', 'Equations', (['[x]'], {}), '([x])\n', (6119, 6124), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem\n')]
|
from typing import TYPE_CHECKING, List, Optional
from sqlmodel import Field, Relationship, SQLModel
if TYPE_CHECKING:
from .hero import Hero
class TeamBase(SQLModel):
name: str
headquarters: str
class Team(TeamBase, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
heroes: List["Hero"] = Relationship(back_populates="team")
class TeamCreate(TeamBase):
pass
class TeamRead(TeamBase):
id: int
class TeamUpdate(SQLModel):
id: Optional[int] = None
name: Optional[str] = None
headquarters: Optional[str] = None
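# A minimal usage sketch (assumed, not part of the original module). The companion
# Hero model must also be imported so SQLAlchemy can resolve the "Hero" relationship
# before the first flush.
#
# from sqlmodel import Session, SQLModel, create_engine
# from .hero import Hero  # noqa: F401
# engine = create_engine("sqlite://")
# SQLModel.metadata.create_all(engine)
# with Session(engine) as session:
#     session.add(Team(name="Preventers", headquarters="Sharp Tower"))
#     session.commit()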
|
[
"sqlmodel.Field",
"sqlmodel.Relationship"
] |
[((285, 322), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (290, 322), False, 'from sqlmodel import Field, Relationship, SQLModel\n'), ((353, 388), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""team"""'}), "(back_populates='team')\n", (365, 388), False, 'from sqlmodel import Field, Relationship, SQLModel\n')]
|
import math
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
# ================================= GRU Implementation ==========================================================
class GRUCell(M.Module):
"""
An implementation of GRUCell.
"""
def __init__(self, input_size, hidden_size, bias=True):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.ih = M.Linear(input_size, 3 * hidden_size, bias=bias)
self.hh = M.Linear(hidden_size, 3 * hidden_size, bias=bias)
self.reset_parameters()
def reset_parameters(self):
std = 1.0 / math.sqrt(self.hidden_size)
for w in self.parameters():
M.init.uniform_(w, -std, std)
def forward(self, x, hidden):
x = F.reshape(x, (-1, x.shape[1]))
gate_x = self.ih(x)
gate_h = self.hh(hidden)
i_r, i_i, i_n = F.split(gate_x, 3, axis=1)
h_r, h_i, h_n = F.split(gate_h, 3, axis=1)
resetgate = F.sigmoid(i_r + h_r)
inputgate = F.sigmoid(i_i + h_i)
newgate = F.tanh(i_n + (resetgate * h_n))
hy = newgate + inputgate * (hidden - newgate)
return hy
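# GRUCell.forward above is the standard GRU update: resetgate and inputgate are the
# reset and update gates, newgate is the candidate state, and the new hidden state
# interpolates between the candidate and the previous hidden state:
#   r = sigmoid(i_r + h_r), z = sigmoid(i_i + h_i), n = tanh(i_n + r * h_n), h' = n + z * (h - n)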
class GRU(M.Module):
"""
An implementation of GRUModule.
"""
def __init__(
self,
input_size,
hidden_size,
num_layers,
bias=True,
batch_first=False,
dropout=0,
):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layers = num_layers
self.bias = bias
self.batch_first = batch_first
self.dropout = dropout
self.rnn_cell_list = []
self.rnn_cell_list.append(GRUCell(self.input_size, self.hidden_size, self.bias))
for l in range(1, self.num_layers):
self.rnn_cell_list.append(
GRUCell(self.hidden_size, self.hidden_size, self.bias)
)
def forward(self, input, hx=None):
if hx is None:
batch = input.shape[0] if self.batch_first else input.shape[1]
h0 = F.zeros((self.num_layers, batch, self.hidden_size))
else:
h0 = hx
outs = []
hidden = list()
for layer in range(self.num_layers):
hidden.append(h0[layer, :, :])
length = input.shape[1] if self.batch_first else input.shape[0]
for t in range(length):
for layer in range(self.num_layers):
if layer == 0:
if self.batch_first:
hidden_l = self.rnn_cell_list[layer](
input[:, t, :], hidden[layer]
)
else:
hidden_l = self.rnn_cell_list[layer](
input[t, :, :], hidden[layer]
)
else:
hidden_l = self.rnn_cell_list[layer](
hidden[layer - 1], hidden[layer]
)
                if self.dropout and (layer != self.num_layers - 1):
hidden_l = F.dropout(hidden_l, self.dropout)
hidden[layer] = hidden_l
outs.append(hidden_l)
if self.batch_first:
output = F.stack(outs, axis=1)
else:
output = F.stack(outs, axis=0)
return output
# ================================= LSTM Implementation ==========================================================
class LSTMCell(M.Module):
"""
An implementation of LSTMCell.
"""
def __init__(self, input_size, hidden_size, bias=True):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.x2h = M.Linear(input_size, 4 * hidden_size, bias=bias)
self.h2h = M.Linear(hidden_size, 4 * hidden_size, bias=bias)
self.reset_parameters()
def reset_parameters(self):
std = 1.0 / math.sqrt(self.hidden_size)
for w in self.parameters():
M.init.uniform_(w, -std, std)
def forward(self, x, hidden):
hx, cx = hidden
x = F.reshape(x, (-1, x.shape[1]))
gates = self.x2h(x) + self.h2h(hx)
ingate, forgetgate, cellgate, outgate = F.split(gates, 4, axis=1)
ingate = F.sigmoid(ingate)
forgetgate = F.sigmoid(forgetgate)
cellgate = F.tanh(cellgate)
outgate = F.sigmoid(outgate)
cy = F.mul(cx, forgetgate) + F.mul(ingate, cellgate)
hy = F.mul(outgate, F.tanh(cy))
return (hy, cy)
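# LSTMCell.forward above is the standard LSTM update: the four gates come from one
# fused linear projection, the cell state is cy = forgetgate * cx + ingate * tanh(cellgate),
# and the hidden state is hy = outgate * tanh(cy).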
class LSTM(M.Module):
"""
An implementation of LSTMModule.
"""
def __init__(
self,
input_size,
hidden_size,
num_layers,
bias=True,
batch_first=False,
dropout=0,
):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layers = num_layers
self.bias = bias
self.batch_first = batch_first
self.dropout = dropout
self.rnn_cell_list = []
self.rnn_cell_list.append(
LSTMCell(self.input_size, self.hidden_size, self.bias)
)
for l in range(1, self.num_layers):
self.rnn_cell_list.append(
LSTMCell(self.hidden_size, self.hidden_size, self.bias)
)
def forward(self, input, hx=None):
if hx is None:
batch = input.shape[0] if self.batch_first else input.shape[1]
h0 = F.zeros((self.num_layers, batch, self.hidden_size))
c0 = F.zeros((self.num_layers, batch, self.hidden_size))
else:
h0 = hx[0]
c0 = hx[1]
outs = []
hidden = list()
for layer in range(self.num_layers):
hidden.append((h0[layer, :, :], c0[layer, :, :]))
length = input.shape[1] if self.batch_first else input.shape[0]
for t in range(length):
for layer in range(self.num_layers):
if layer == 0:
inp = input[:, t, :] if self.batch_first else input[t, :, :]
hidden_l = self.rnn_cell_list[layer](
inp, (hidden[layer][0], hidden[layer][1])
)
else:
hidden_l = self.rnn_cell_list[layer](
hidden[layer - 1][0], (hidden[layer][0], hidden[layer][1])
)
                if self.dropout and (layer != self.num_layers - 1):
hidden_l = (
F.dropout(hidden_l[0], self.dropout),
F.dropout(hidden_l[1], self.dropout),
)
hidden[layer] = hidden_l
outs.append(hidden_l[0])
if self.batch_first:
output = F.stack(outs, axis=1)
else:
output = F.stack(outs, axis=0)
return output
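# A minimal smoke-test sketch (assumed, not part of the original module): one-layer
# GRU and LSTM over a (seq_len=5, batch=2, features=16) input with batch_first=False.
#
# if __name__ == "__main__":
#     x = mge.tensor(np.random.randn(5, 2, 16).astype("float32"))
#     print(GRU(16, 32, num_layers=1)(x).shape)   # -> (5, 2, 32)
#     print(LSTM(16, 32, num_layers=1)(x).shape)  # -> (5, 2, 32)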
|
[
"megengine.functional.zeros",
"megengine.functional.mul",
"megengine.functional.sigmoid",
"megengine.functional.split",
"megengine.functional.stack",
"megengine.module.Linear",
"megengine.functional.dropout",
"megengine.functional.tanh",
"megengine.functional.reshape",
"megengine.module.init.uniform_"
] |
[((520, 568), 'megengine.module.Linear', 'M.Linear', (['input_size', '(3 * hidden_size)'], {'bias': 'bias'}), '(input_size, 3 * hidden_size, bias=bias)\n', (528, 568), True, 'import megengine.module as M\n'), ((587, 636), 'megengine.module.Linear', 'M.Linear', (['hidden_size', '(3 * hidden_size)'], {'bias': 'bias'}), '(hidden_size, 3 * hidden_size, bias=bias)\n', (595, 636), True, 'import megengine.module as M\n'), ((876, 906), 'megengine.functional.reshape', 'F.reshape', (['x', '(-1, x.shape[1])'], {}), '(x, (-1, x.shape[1]))\n', (885, 906), True, 'import megengine.functional as F\n'), ((994, 1020), 'megengine.functional.split', 'F.split', (['gate_x', '(3)'], {'axis': '(1)'}), '(gate_x, 3, axis=1)\n', (1001, 1020), True, 'import megengine.functional as F\n'), ((1045, 1071), 'megengine.functional.split', 'F.split', (['gate_h', '(3)'], {'axis': '(1)'}), '(gate_h, 3, axis=1)\n', (1052, 1071), True, 'import megengine.functional as F\n'), ((1093, 1113), 'megengine.functional.sigmoid', 'F.sigmoid', (['(i_r + h_r)'], {}), '(i_r + h_r)\n', (1102, 1113), True, 'import megengine.functional as F\n'), ((1134, 1154), 'megengine.functional.sigmoid', 'F.sigmoid', (['(i_i + h_i)'], {}), '(i_i + h_i)\n', (1143, 1154), True, 'import megengine.functional as F\n'), ((1173, 1202), 'megengine.functional.tanh', 'F.tanh', (['(i_n + resetgate * h_n)'], {}), '(i_n + resetgate * h_n)\n', (1179, 1202), True, 'import megengine.functional as F\n'), ((3899, 3947), 'megengine.module.Linear', 'M.Linear', (['input_size', '(4 * hidden_size)'], {'bias': 'bias'}), '(input_size, 4 * hidden_size, bias=bias)\n', (3907, 3947), True, 'import megengine.module as M\n'), ((3967, 4016), 'megengine.module.Linear', 'M.Linear', (['hidden_size', '(4 * hidden_size)'], {'bias': 'bias'}), '(hidden_size, 4 * hidden_size, bias=bias)\n', (3975, 4016), True, 'import megengine.module as M\n'), ((4281, 4311), 'megengine.functional.reshape', 'F.reshape', (['x', '(-1, x.shape[1])'], {}), '(x, (-1, x.shape[1]))\n', (4290, 4311), True, 'import megengine.functional as F\n'), ((4405, 4430), 'megengine.functional.split', 'F.split', (['gates', '(4)'], {'axis': '(1)'}), '(gates, 4, axis=1)\n', (4412, 4430), True, 'import megengine.functional as F\n'), ((4449, 4466), 'megengine.functional.sigmoid', 'F.sigmoid', (['ingate'], {}), '(ingate)\n', (4458, 4466), True, 'import megengine.functional as F\n'), ((4488, 4509), 'megengine.functional.sigmoid', 'F.sigmoid', (['forgetgate'], {}), '(forgetgate)\n', (4497, 4509), True, 'import megengine.functional as F\n'), ((4529, 4545), 'megengine.functional.tanh', 'F.tanh', (['cellgate'], {}), '(cellgate)\n', (4535, 4545), True, 'import megengine.functional as F\n'), ((4564, 4582), 'megengine.functional.sigmoid', 'F.sigmoid', (['outgate'], {}), '(outgate)\n', (4573, 4582), True, 'import megengine.functional as F\n'), ((722, 749), 'math.sqrt', 'math.sqrt', (['self.hidden_size'], {}), '(self.hidden_size)\n', (731, 749), False, 'import math\n'), ((798, 827), 'megengine.module.init.uniform_', 'M.init.uniform_', (['w', '(-std)', 'std'], {}), '(w, -std, std)\n', (813, 827), True, 'import megengine.module as M\n'), ((2204, 2255), 'megengine.functional.zeros', 'F.zeros', (['(self.num_layers, batch, self.hidden_size)'], {}), '((self.num_layers, batch, self.hidden_size))\n', (2211, 2255), True, 'import megengine.functional as F\n'), ((3392, 3413), 'megengine.functional.stack', 'F.stack', (['outs'], {'axis': '(1)'}), '(outs, axis=1)\n', (3399, 3413), True, 'import megengine.functional as F\n'), ((3449, 3470), 
'megengine.functional.stack', 'F.stack', (['outs'], {'axis': '(0)'}), '(outs, axis=0)\n', (3456, 3470), True, 'import megengine.functional as F\n'), ((4102, 4129), 'math.sqrt', 'math.sqrt', (['self.hidden_size'], {}), '(self.hidden_size)\n', (4111, 4129), False, 'import math\n'), ((4178, 4207), 'megengine.module.init.uniform_', 'M.init.uniform_', (['w', '(-std)', 'std'], {}), '(w, -std, std)\n', (4193, 4207), True, 'import megengine.module as M\n'), ((4597, 4618), 'megengine.functional.mul', 'F.mul', (['cx', 'forgetgate'], {}), '(cx, forgetgate)\n', (4602, 4618), True, 'import megengine.functional as F\n'), ((4621, 4644), 'megengine.functional.mul', 'F.mul', (['ingate', 'cellgate'], {}), '(ingate, cellgate)\n', (4626, 4644), True, 'import megengine.functional as F\n'), ((4674, 4684), 'megengine.functional.tanh', 'F.tanh', (['cy'], {}), '(cy)\n', (4680, 4684), True, 'import megengine.functional as F\n'), ((5661, 5712), 'megengine.functional.zeros', 'F.zeros', (['(self.num_layers, batch, self.hidden_size)'], {}), '((self.num_layers, batch, self.hidden_size))\n', (5668, 5712), True, 'import megengine.functional as F\n'), ((5730, 5781), 'megengine.functional.zeros', 'F.zeros', (['(self.num_layers, batch, self.hidden_size)'], {}), '((self.num_layers, batch, self.hidden_size))\n', (5737, 5781), True, 'import megengine.functional as F\n'), ((6974, 6995), 'megengine.functional.stack', 'F.stack', (['outs'], {'axis': '(1)'}), '(outs, axis=1)\n', (6981, 6995), True, 'import megengine.functional as F\n'), ((7031, 7052), 'megengine.functional.stack', 'F.stack', (['outs'], {'axis': '(0)'}), '(outs, axis=0)\n', (7038, 7052), True, 'import megengine.functional as F\n'), ((3230, 3263), 'megengine.functional.dropout', 'F.dropout', (['hidden_l', 'self.dropout'], {}), '(hidden_l, self.dropout)\n', (3239, 3263), True, 'import megengine.functional as F\n'), ((6721, 6757), 'megengine.functional.dropout', 'F.dropout', (['hidden_l[0]', 'self.dropout'], {}), '(hidden_l[0], self.dropout)\n', (6730, 6757), True, 'import megengine.functional as F\n'), ((6783, 6819), 'megengine.functional.dropout', 'F.dropout', (['hidden_l[1]', 'self.dropout'], {}), '(hidden_l[1], self.dropout)\n', (6792, 6819), True, 'import megengine.functional as F\n')]
|
import os.path as osp
from abc import ABCMeta, abstractmethod
import megengine as mge
import megengine.distributed as dist
from megengine.optimizer.optimizer import Optimizer
from megengine.module import Module
from edit.utils import mkdir_or_exist, build_from_cfg, get_root_logger
from ..hook import Hook, HOOKS, get_priority
module_ckpt_suffix = "_module.mge"
optim_ckpt_suffix = "_optim.mge"
class BaseRunner(metaclass=ABCMeta):
"""The base class of Runner, a training helper for Mge.
All subclasses should implement the following APIs:
- ``run()``
- ``train()``
- ``test()``
- ``save_checkpoint()``
- ``resume()``
Args:
model (:obj:`megengine.module.Module`): The model to be run.
optimizers_cfg (dict): optimizer configs
work_dir (str, optional): The working directory to save checkpoints and logs. Defaults to None.
"""
def __init__(self, model, optimizers_cfg=None, work_dir=None):
assert hasattr(model, 'train_step')
assert hasattr(model, 'test_step')
assert hasattr(model, 'create_gradmanager_and_optimizers')
assert hasattr(model, 'cal_for_eval')
self.model = model
self.optimizers_cfg = optimizers_cfg
self.logger = get_root_logger()
self.work_dir = work_dir
assert self.work_dir is not None
# get model name from the model class
self._model_name = self.model.__class__.__name__
self.mode = None
self._hooks = []
self._epoch = 0
self._iter = 0
self._inner_iter = 0
self._max_epochs = 0
self._max_iters = 0
@property
def model_name(self):
"""str: Name of the model, usually the module class name."""
return self._model_name
@property
def hooks(self):
"""list[:obj:`Hook`]: A list of registered hooks."""
return self._hooks
@property
def epoch(self):
"""int: Current epoch."""
return self._epoch
@property
def iter(self):
"""int: Current iteration."""
return self._iter
@property
def inner_iter(self):
"""int: Iteration in an epoch."""
return self._inner_iter
@property
def max_epochs(self):
"""int: Maximum training epochs."""
return self._max_epochs
@property
def max_iters(self):
"""int: Maximum training iterations."""
return self._max_iters
@abstractmethod
def train(self, data_loader):
pass
@abstractmethod
def test(self, data_loader):
pass
@abstractmethod
def run(self, data_loaders, workflow, max_iters):
pass
@abstractmethod
def save_checkpoint(self, out_dir, create_symlink=True):
pass
@abstractmethod
def resume(self, path2checkpoint):
pass
@abstractmethod
def register_training_hooks(self, lr_config, checkpoint_config, log_config):
"""Register default hooks for training.
Default hooks include:
- LrUpdaterHook
- CheckpointSaverHook
- log_config
"""
pass
def create_gradmanager_and_optimizers(self):
self.model.create_gradmanager_and_optimizers(self.optimizers_cfg)
def sync_model_params(self):
if dist.is_distributed():
self.logger.info("syncing the model's parameters...")
dist.bcast_list_(self.model.parameters(), dist.WORLD)
else:
pass # do nothing
def current_lr(self):
"""Get current learning rates.
Returns:
list[float] | dict[str, list[float]]: Current learning rates of all
param groups. If the runner has a dict of optimizers, this
method will return a dict.
"""
raise NotImplementedError("")
# if isinstance(self.optimizer, Optimizer):
# lr = [group['lr'] for group in self.optimizer.param_groups]
# elif isinstance(self.optimizer, dict):
# lr = dict()
# for name, optim in self.optimizer.items():
# lr[name] = [group['lr'] for group in optim.param_groups]
# else:
# raise RuntimeError('lr is not applicable because optimizer does not exist.')
# return lr
def current_momentum(self):
"""Get current momentums.
Returns:
list[float] | dict[str, list[float]]: Current momentums of all
param groups. If the runner has a dict of optimizers, this
method will return a dict.
"""
raise NotImplementedError("")
# def _get_momentum(optimizer):
# momentums = []
# for group in optimizer.param_groups:
# if 'momentum' in group.keys():
# momentums.append(group['momentum'])
# elif 'betas' in group.keys():
# momentums.append(group['betas'][0])
# else:
# momentums.append(0)
# return momentums
#
# if self.optimizer is None:
# raise RuntimeError('momentum is not applicable because optimizer does not exist.')
# elif isinstance(self.optimizer, Optimizer):
# momentums = _get_momentum(self.optimizer)
# elif isinstance(self.optimizer, dict):
# momentums = dict()
# for name, optim in self.optimizer.items():
# momentums[name] = _get_momentum(optim)
# return momentums
def register_hook(self, hook, priority='NORMAL'):
"""Register a hook into the hook list.
The hook will be inserted into a priority queue, with the specified
priority (See :class:`Priority` for details of priorities).
For hooks with the same priority, they will be triggered in the same
order as they are registered.
Args:
hook (:obj:`Hook`): The hook to be registered.
priority (int or str or :obj:`Priority`): Hook priority.
Lower value means higher priority.
"""
assert isinstance(hook, Hook)
if hasattr(hook, 'priority'):
raise ValueError('"priority" is a reserved attribute for hook')
priority = get_priority(priority)
hook.priority = priority
# insert the hook to a sorted list
inserted = False
for i in range(len(self._hooks) - 1, -1, -1):
if priority >= self._hooks[i].priority:
self._hooks.insert(i + 1, hook)
inserted = True
break
if not inserted:
self._hooks.insert(0, hook)
def call_hook(self, fn_name):
"""Call all hooks.
Args:
fn_name (str): The function name in each hook to be called, such as
"before_train_epoch".
"""
for hook in self._hooks:
getattr(hook, fn_name)(self)
def load_checkpoint(self, path2checkpoint, load_optim=True):
"""
:param path2checkpoint: e.g. workdirs/xxxxx/checkpoint/epoch_10
:return: dict
"""
assert osp.exists(path2checkpoint), "{} do not exist".format(path2checkpoint)
dirname = osp.split(path2checkpoint)[-1]
epoch, nums = dirname.split("_")
assert epoch in ("epoch", )
self.logger.info('load checkpoint from {}'.format(path2checkpoint))
        # iterate over every submodule of the model that has an optimizer config and load its checkpoint
res = dict()
res['nums'] = int(nums)
for submodule_name in self.optimizers_cfg.keys():
submodule = getattr(self.model, submodule_name, None)
assert submodule is not None, "model should have submodule {}".format(submodule_name)
assert isinstance(submodule, Module), "submodule should be instance of mge.module.Module"
if dist.get_rank() == 0:
module_state_dict = mge.load(osp.join(path2checkpoint, submodule_name + module_ckpt_suffix))
submodule.load_state_dict(module_state_dict, strict = False)
if load_optim:
optim_state_dict = mge.load(osp.join(path2checkpoint, submodule_name + optim_ckpt_suffix))
res[submodule_name] = optim_state_dict
return res
def register_momentum_hook(self, momentum_config):
if momentum_config is None:
return
if isinstance(momentum_config, dict):
assert 'policy' in momentum_config
policy_type = momentum_config.pop('policy')
# If the type of policy is all in lower case, e.g., 'cyclic',
# then its first letter will be capitalized, e.g., to be 'Cyclic'.
# This is for the convenient usage of momentum updater.
# Since this is not applicable for `CosineAnealingMomentumUpdater`,
# the string will not be changed if it contains capital letters.
if policy_type == policy_type.lower():
policy_type = policy_type.title()
hook_type = policy_type + 'MomentumUpdaterHook'
momentum_config['type'] = hook_type
hook = build_from_cfg(momentum_config, HOOKS)
else:
hook = momentum_config
self.register_hook(hook)
def register_optimizer_hook(self, optimizer_config):
if optimizer_config is None:
return
if isinstance(optimizer_config, dict):
optimizer_config.setdefault('type', 'OptimizerHook')
hook = build_from_cfg(optimizer_config, HOOKS)
else:
hook = optimizer_config
self.register_hook(hook)
def register_lr_hook(self, lr_config):
if isinstance(lr_config, dict):
assert 'policy' in lr_config
policy_type = lr_config.pop('policy')
# If the type of policy is all in lower case, e.g., 'cyclic',
# then its first letter will be capitalized, e.g., to be 'Cyclic'.
# This is for the convenient usage of Lr updater.
# Since this is not applicable for `CosineAnealingLrUpdater`,
# the string will not be changed if it contains capital letters.
if policy_type == policy_type.lower():
policy_type = policy_type.title()
hook_type = policy_type + 'LrUpdaterHook'
lr_config['type'] = hook_type
hook = build_from_cfg(lr_config, HOOKS)
else:
hook = lr_config
self.register_hook(hook)
def register_checkpoint_hook(self, checkpoint_config):
if isinstance(checkpoint_config, dict):
checkpoint_config.setdefault('type', 'CheckpointHook')
hook = build_from_cfg(checkpoint_config, HOOKS)
else:
hook = checkpoint_config
self.register_hook(hook)
def register_logger_hooks(self, log_config):
log_interval = log_config['interval']
for info in log_config['hooks']:
logger_hook = build_from_cfg(info, HOOKS, default_args=dict(interval=log_interval))
self.register_hook(logger_hook, priority='HIGH')
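# A minimal hook sketch (illustrative only; a concrete runner subclass implementing
# the abstract methods is assumed). call_hook() invokes any method whose name matches
# the fn_name it is given, e.g. "before_train_epoch":
#
# class PrintEpochHook(Hook):
#     def before_train_epoch(self, runner):
#         runner.logger.info("starting epoch %d", runner.epoch)
#
# runner.register_hook(PrintEpochHook(), priority='LOW')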
|
[
"megengine.distributed.is_distributed",
"megengine.distributed.get_rank"
] |
[((1254, 1271), 'edit.utils.get_root_logger', 'get_root_logger', ([], {}), '()\n', (1269, 1271), False, 'from edit.utils import mkdir_or_exist, build_from_cfg, get_root_logger\n'), ((3305, 3326), 'megengine.distributed.is_distributed', 'dist.is_distributed', ([], {}), '()\n', (3324, 3326), True, 'import megengine.distributed as dist\n'), ((7156, 7183), 'os.path.exists', 'osp.exists', (['path2checkpoint'], {}), '(path2checkpoint)\n', (7166, 7183), True, 'import os.path as osp\n'), ((7245, 7271), 'os.path.split', 'osp.split', (['path2checkpoint'], {}), '(path2checkpoint)\n', (7254, 7271), True, 'import os.path as osp\n'), ((9150, 9188), 'edit.utils.build_from_cfg', 'build_from_cfg', (['momentum_config', 'HOOKS'], {}), '(momentum_config, HOOKS)\n', (9164, 9188), False, 'from edit.utils import mkdir_or_exist, build_from_cfg, get_root_logger\n'), ((9516, 9555), 'edit.utils.build_from_cfg', 'build_from_cfg', (['optimizer_config', 'HOOKS'], {}), '(optimizer_config, HOOKS)\n', (9530, 9555), False, 'from edit.utils import mkdir_or_exist, build_from_cfg, get_root_logger\n'), ((10396, 10428), 'edit.utils.build_from_cfg', 'build_from_cfg', (['lr_config', 'HOOKS'], {}), '(lr_config, HOOKS)\n', (10410, 10428), False, 'from edit.utils import mkdir_or_exist, build_from_cfg, get_root_logger\n'), ((10699, 10739), 'edit.utils.build_from_cfg', 'build_from_cfg', (['checkpoint_config', 'HOOKS'], {}), '(checkpoint_config, HOOKS)\n', (10713, 10739), False, 'from edit.utils import mkdir_or_exist, build_from_cfg, get_root_logger\n'), ((7868, 7883), 'megengine.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (7881, 7883), True, 'import megengine.distributed as dist\n'), ((7935, 7997), 'os.path.join', 'osp.join', (['path2checkpoint', '(submodule_name + module_ckpt_suffix)'], {}), '(path2checkpoint, submodule_name + module_ckpt_suffix)\n', (7943, 7997), True, 'import os.path as osp\n'), ((8147, 8208), 'os.path.join', 'osp.join', (['path2checkpoint', '(submodule_name + optim_ckpt_suffix)'], {}), '(path2checkpoint, submodule_name + optim_ckpt_suffix)\n', (8155, 8208), True, 'import os.path as osp\n')]
|
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import pickle
from collections import defaultdict
from itertools import chain
import numpy as np
import megengine.functional as F
import megengine.module as M
import megengine.module.qat as qat
from megengine.module.identity import Identity
from megengine.traced_module import trace_module
from megengine.traced_module.expr import CallFunction, CallMethod, Expr, GetAttr, Input
from megengine.traced_module.node import ModuleNode, Node, TensorNode
class IdentityMod(M.Module):
def forward(self, x):
return x
class MyBlock(M.Module):
def __init__(self, in_channels=3, channels=3):
super(MyBlock, self).__init__()
self.conv1 = M.Conv2d(in_channels, channels, 3, 1, padding=1, bias=False)
self.bn1 = M.BatchNorm2d(channels)
self.nothing = IdentityMod()
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = F.relu(x) + 1
x = self.nothing(x)
return x
class MyModule(M.Module):
def __init__(self):
super(MyModule, self).__init__()
self.block0 = MyBlock()
self.block1 = MyBlock()
self.nothing = IdentityMod()
def forward(self, x):
x = self.block0(x)
x = self.block1(x)
x = self.nothing(x)
return x
class MyBlock1(M.Module):
def forward(self, a):
y = F.concat([a, a])
return a, y
class MyModule1(M.Module):
def __init__(self):
super().__init__()
self.block0 = MyBlock1()
self.block1 = MyBlock1()
def forward(self, a):
a, y1 = self.block0(a)
a = a + 1
a, y2 = self.block1(a)
return a, y1 + y2
class NewModule(M.Module):
def __init__(self, traced_module):
super(NewModule, self).__init__()
self.module = traced_module
def forward(self, x):
x = x - 1
x = self.module(x)
x = x + 1
return x
def _check_expr_users(traced_module):
node_user = defaultdict(list)
for expr in traced_module.graph._exprs:
for node in expr.inputs:
node_user[node].append(expr)
for node in traced_module.graph.nodes():
node.users.sort(key=lambda m: m._id)
node_user[node].sort(key=lambda m: m._id)
assert node.users == node_user[node]
def _init_cls(cls):
module = cls()
x = F.ones((1, 3, 3, 3))
y = module(x)
traced_module = trace_module(module, x)
return traced_module, x, y
def _init_block():
return _init_cls(MyBlock)
def _init_module():
return _init_cls(MyModule)
def test_search():
traced_module, *_ = _init_block()
graph = traced_module.graph
relu_expr = graph.get_function_by_type(F.relu).as_unique()
assert isinstance(relu_expr, CallFunction) and relu_expr.func == F.relu
conv_node = graph.get_module_by_type(M.Conv2d).as_unique()
assert isinstance(conv_node, ModuleNode) and conv_node.module_type == M.Conv2d
add_expr = graph.get_method_by_type("__add__").as_unique()
assert isinstance(add_expr, CallMethod) and add_expr.method == "__add__"
conv_node = graph.get_node_by_name("MyBlock_conv1").as_unique()
assert isinstance(conv_node, ModuleNode) and conv_node.module_type == M.Conv2d
def test_producer_and_users():
traced_module, *_ = _init_module()
def _check(exprs):
for expr in exprs:
for n in chain(expr.inputs, expr.outputs):
if not isinstance(n.expr, Input):
assert n.expr in exprs
for e in n.users:
assert e in exprs
assert n in e.inputs
for mod in traced_module.modules():
if not hasattr(mod, "argdef_graph_map"):
continue
for g in mod.argdef_graph_map.values():
_check(g._exprs)
def test_insert():
traced_module, x, expect = _init_block()
graph = traced_module.graph
relu_out = graph.get_function_by_type(F.relu).as_unique().outputs[0]
with graph.insert_exprs():
neg_out = F.neg(relu_out)
graph.replace_node({relu_out: neg_out})
graph.compile()
np.testing.assert_allclose(expect - 1, 1 - traced_module(x), atol=1e-6)
def test_insert_module():
class Neg(M.Module):
def __init__(self, name):
super().__init__(name)
self.identity = M.Identity()
self.identity_list = [M.Identity(), M.Identity()]
self.identity_dict = {"0": M.Identity(), "1": M.Identity()}
self.param = F.zeros((1,))
def forward(self, x):
x = self.identity(x)
for m in self.identity_dict:
x = self.identity_dict[m](x)
for m in self.identity_list:
x = m(x)
return F.neg(x) + self.param
traced_module, x, expect = _init_block()
graph = traced_module.graph
relu_out = graph.get_function_by_type(F.relu).as_unique().outputs[0]
self = graph.inputs[0]
setattr(traced_module, "neg", Neg(name="neg"))
setattr(traced_module, "neg2", Neg(name="neg"))
setattr(traced_module, "param", F.zeros((1,)))
with graph.insert_exprs():
neg_out = self.neg(relu_out)
neg_out = self.neg2(relu_out)
neg_out = neg_out + self.param
graph.replace_node({relu_out: neg_out})
graph.compile()
np.testing.assert_allclose(expect - 1, 1 - traced_module(x), atol=1e-6)
assert traced_module.neg.graph is not None
assert traced_module.neg2.graph is not None
assert traced_module.neg2.param is not None
assert len(traced_module.neg.graph._exprs) == 13
for n in traced_module.graph.nodes():
if isinstance(n, TensorNode):
assert n.value is None
def test_insert_qat_module():
class concat(qat.Concat):
pass
traced_module, x, expect = _init_block()
graph = traced_module.graph
self = graph.inputs[0]
out = graph.outputs[0]
setattr(traced_module, "cat_0", qat.Concat())
setattr(traced_module, "cat_1", concat())
with graph.insert_exprs():
x_0 = self.cat_0([out, out])
x_1 = self.cat_1([out, x_0])
graph.replace_node({out: x_1})
graph.compile()
x = F.copy(x)
np.testing.assert_allclose(
F.concat([expect, expect, expect]), traced_module(x), atol=1e-6
)
assert not hasattr(traced_module.cat_0, "graph")
assert traced_module.cat_1.graph is not None
def test_add_input_and_output():
traced_module, x, y = _init_module()
data_node = traced_module.graph.add_input_node(shape=(1, 3, 224, 224), name="data")
traced_module.graph.add_output_node(data_node)
assert data_node.name == "data"
assert traced_module.graph.inputs[-1] == data_node
assert len(traced_module.graph.inputs) == 3
assert len(traced_module.graph.outputs) == 2
y1, y2 = traced_module(x, x)
np.testing.assert_equal(y1.numpy(), y.numpy())
np.testing.assert_equal(y2.numpy(), x.numpy())
y1, y2 = traced_module(x, y)
np.testing.assert_equal(y2.numpy(), y.numpy())
traced_module.graph.reset_outputs(
({"orig_out": traced_module.graph.outputs[0]}, traced_module.graph.outputs[1])
)
out = traced_module(x, x)
assert isinstance(out, tuple)
assert isinstance(out[0], dict)
np.testing.assert_equal(out[0]["orig_out"].numpy(), y.numpy())
np.testing.assert_equal(out[1].numpy(), x.numpy())
def test_delete():
traced_module, x, expect = _init_block()
graph = traced_module.graph
relu_expr = graph.get_function_by_type(F.relu).as_unique()
node = relu_expr.outputs
repl_node = relu_expr.inputs
graph.replace_node({node[0]: repl_node[0]})
graph.compile()
np.testing.assert_allclose(expect - 1, F.relu(traced_module(x) - 1), atol=1e-6)
# clear graph
graph.replace_node({graph.outputs[0]: graph.inputs[1]})
graph.compile()
np.testing.assert_equal(len(list(graph._exprs)), 0)
np.testing.assert_equal(traced_module(x).numpy(), x.numpy())
def test_flatten():
traced_module, x, expect = _init_module()
traced_module = traced_module.flatten()
assert len(traced_module.graph._exprs) == 12
np.testing.assert_equal(expect.numpy(), traced_module(x).numpy())
traced_module = traced_module.flatten()
assert len(traced_module.graph._exprs) == 12
np.testing.assert_equal(expect.numpy(), traced_module(x).numpy())
traced_module, x, expect = _init_cls(MyModule1)
traced_module = traced_module.flatten()
_check_expr_users(traced_module)
def test_id_and_name():
def _check_id(traced_module):
_total_ids = traced_module.graph._total_ids
node_ids = [n._id for n in traced_module.graph.nodes().as_list()]
assert len(set(node_ids)) == len(node_ids)
assert max(node_ids) + 1 == _total_ids[0]
expr_ids = [n._id for n in traced_module.graph.exprs().as_list()]
assert len(set(expr_ids)) == len(expr_ids)
assert max(expr_ids) + 1 == _total_ids[1]
def _check_name(flatened_module):
node_names = [n._name for n in flatened_module.graph.nodes().as_list()]
assert len(set(node_names)) == len(node_names)
traced_module, x, expect = _init_module()
_check_id(traced_module)
flattened_module = traced_module.flatten()
_check_id(flattened_module)
_check_name(flattened_module)
# pickle check
obj = pickle.dumps(traced_module)
traced_module = pickle.loads(obj)
Node._set_next_id(159)
Expr._set_next_id(1024)
graph = traced_module.graph
for expr in graph.get_function_by_type(F.relu).as_list():
relu_out = expr.outputs[0]
cur_graph = expr.top_graph
with cur_graph.insert_exprs():
neg_out = F.neg(relu_out)
cur_graph.replace_node({relu_out: neg_out})
cur_graph.compile()
_check_id(traced_module)
flattened_module = traced_module.flatten()
_check_id(flattened_module)
_check_name(flattened_module)
# check trace TracedModule
obj = pickle.dumps(traced_module)
traced_module = pickle.loads(obj)
module = NewModule(traced_module)
traced_module = trace_module(module, x)
_check_id(traced_module)
flattened_module = traced_module.flatten()
_check_id(flattened_module)
_check_name(flattened_module)
def test_set_node_name():
traced_module, x, expect = _init_module()
graph = traced_module.graph
output_node = graph.outputs[0]
def rename(name):
output_node.name = name
np.testing.assert_raises(AssertionError, rename, "block1_out")
rename("output")
np.testing.assert_equal(str(graph.outputs[0]), "output")
def test_set_graph_name():
traced_module, x, expect = _init_module()
graph = traced_module.graph
output_node = graph.outputs[0]
node_name = output_node.name
graph.name = "Top"
node = graph.get_node_by_name("{}_{}".format("Top", node_name)).as_unique()
assert node is output_node
def test_extra_block():
class PostProcess(M.Module):
def forward(self, x):
return x * 2
class Net(M.Module):
def __init__(self, traced_module):
super().__init__()
self.post_process = PostProcess()
self.traced_module = traced_module
def forward(self, x):
x = self.traced_module(x)
x = self.post_process(x)
return x
traced_module, x, expect = _init_block()
module = Net(traced_module)
np.testing.assert_allclose(2 * expect, module(x), atol=1e-6)
traced_module = trace_module(module, x)
np.testing.assert_allclose(2 * expect, traced_module(x), atol=1e-6)
|
[
"megengine.functional.relu",
"megengine.traced_module.trace_module",
"megengine.traced_module.node.Node._set_next_id",
"megengine.functional.zeros",
"megengine.functional.ones",
"megengine.functional.neg",
"megengine.module.Conv2d",
"megengine.module.qat.Concat",
"megengine.traced_module.expr.Expr._set_next_id",
"megengine.functional.copy",
"megengine.module.Identity",
"megengine.module.BatchNorm2d",
"megengine.functional.concat"
] |
[((2316, 2333), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (2327, 2333), False, 'from collections import defaultdict\n'), ((2686, 2706), 'megengine.functional.ones', 'F.ones', (['(1, 3, 3, 3)'], {}), '((1, 3, 3, 3))\n', (2692, 2706), True, 'import megengine.functional as F\n'), ((2745, 2768), 'megengine.traced_module.trace_module', 'trace_module', (['module', 'x'], {}), '(module, x)\n', (2757, 2768), False, 'from megengine.traced_module import trace_module\n'), ((6518, 6527), 'megengine.functional.copy', 'F.copy', (['x'], {}), '(x)\n', (6524, 6527), True, 'import megengine.functional as F\n'), ((9703, 9730), 'pickle.dumps', 'pickle.dumps', (['traced_module'], {}), '(traced_module)\n', (9715, 9730), False, 'import pickle\n'), ((9751, 9768), 'pickle.loads', 'pickle.loads', (['obj'], {}), '(obj)\n', (9763, 9768), False, 'import pickle\n'), ((9773, 9795), 'megengine.traced_module.node.Node._set_next_id', 'Node._set_next_id', (['(159)'], {}), '(159)\n', (9790, 9795), False, 'from megengine.traced_module.node import ModuleNode, Node, TensorNode\n'), ((9800, 9823), 'megengine.traced_module.expr.Expr._set_next_id', 'Expr._set_next_id', (['(1024)'], {}), '(1024)\n', (9817, 9823), False, 'from megengine.traced_module.expr import CallFunction, CallMethod, Expr, GetAttr, Input\n'), ((10331, 10358), 'pickle.dumps', 'pickle.dumps', (['traced_module'], {}), '(traced_module)\n', (10343, 10358), False, 'import pickle\n'), ((10379, 10396), 'pickle.loads', 'pickle.loads', (['obj'], {}), '(obj)\n', (10391, 10396), False, 'import pickle\n'), ((10455, 10478), 'megengine.traced_module.trace_module', 'trace_module', (['module', 'x'], {}), '(module, x)\n', (10467, 10478), False, 'from megengine.traced_module import trace_module\n'), ((10823, 10885), 'numpy.testing.assert_raises', 'np.testing.assert_raises', (['AssertionError', 'rename', '"""block1_out"""'], {}), "(AssertionError, rename, 'block1_out')\n", (10847, 10885), True, 'import numpy as np\n'), ((11876, 11899), 'megengine.traced_module.trace_module', 'trace_module', (['module', 'x'], {}), '(module, x)\n', (11888, 11899), False, 'from megengine.traced_module import trace_module\n'), ((1016, 1076), 'megengine.module.Conv2d', 'M.Conv2d', (['in_channels', 'channels', '(3)', '(1)'], {'padding': '(1)', 'bias': '(False)'}), '(in_channels, channels, 3, 1, padding=1, bias=False)\n', (1024, 1076), True, 'import megengine.module as M\n'), ((1096, 1119), 'megengine.module.BatchNorm2d', 'M.BatchNorm2d', (['channels'], {}), '(channels)\n', (1109, 1119), True, 'import megengine.module as M\n'), ((1691, 1707), 'megengine.functional.concat', 'F.concat', (['[a, a]'], {}), '([a, a])\n', (1699, 1707), True, 'import megengine.functional as F\n'), ((4366, 4381), 'megengine.functional.neg', 'F.neg', (['relu_out'], {}), '(relu_out)\n', (4371, 4381), True, 'import megengine.functional as F\n'), ((5432, 5445), 'megengine.functional.zeros', 'F.zeros', (['(1,)'], {}), '((1,))\n', (5439, 5445), True, 'import megengine.functional as F\n'), ((6288, 6300), 'megengine.module.qat.Concat', 'qat.Concat', ([], {}), '()\n', (6298, 6300), True, 'import megengine.module.qat as qat\n'), ((6568, 6602), 'megengine.functional.concat', 'F.concat', (['[expect, expect, expect]'], {}), '([expect, expect, expect])\n', (6576, 6602), True, 'import megengine.functional as F\n'), ((1246, 1255), 'megengine.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (1252, 1255), True, 'import megengine.functional as F\n'), ((3718, 3750), 'itertools.chain', 'chain', (['expr.inputs', 
'expr.outputs'], {}), '(expr.inputs, expr.outputs)\n', (3723, 3750), False, 'from itertools import chain\n'), ((4672, 4684), 'megengine.module.Identity', 'M.Identity', ([], {}), '()\n', (4682, 4684), True, 'import megengine.module as M\n'), ((4844, 4857), 'megengine.functional.zeros', 'F.zeros', (['(1,)'], {}), '((1,))\n', (4851, 4857), True, 'import megengine.functional as F\n'), ((10050, 10065), 'megengine.functional.neg', 'F.neg', (['relu_out'], {}), '(relu_out)\n', (10055, 10065), True, 'import megengine.functional as F\n'), ((4719, 4731), 'megengine.module.Identity', 'M.Identity', ([], {}), '()\n', (4729, 4731), True, 'import megengine.module as M\n'), ((4733, 4745), 'megengine.module.Identity', 'M.Identity', ([], {}), '()\n', (4743, 4745), True, 'import megengine.module as M\n'), ((4786, 4798), 'megengine.module.Identity', 'M.Identity', ([], {}), '()\n', (4796, 4798), True, 'import megengine.module as M\n'), ((4805, 4817), 'megengine.module.Identity', 'M.Identity', ([], {}), '()\n', (4815, 4817), True, 'import megengine.module as M\n'), ((5093, 5101), 'megengine.functional.neg', 'F.neg', (['x'], {}), '(x)\n', (5098, 5101), True, 'import megengine.functional as F\n')]
|
from typing import List, Optional
from sqlmodel import Field, Relationship, SQLModel, create_engine
class Weapon(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str
hero: "Hero" = Relationship(back_populates="weapon")
class Power(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str
hero_id: int = Field(foreign_key="hero.id")
hero: "Hero" = Relationship(back_populates="powers")
class Team(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str
headquarters: str
heroes: List["Hero"] = Relationship(back_populates="team")
class Hero(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str
secret_name: str
age: Optional[int] = None
team_id: Optional[int] = Field(default=None, foreign_key="team.id")
team: Optional[Team] = Relationship(back_populates="heroes")
weapon_id: Optional[int] = Field(default=None, foreign_key="weapon.id")
weapon: Optional[Weapon] = Relationship(back_populates="owner")
powers: List[Power] = Relationship(back_populates="hero")
sqlite_file_name = "database.db"
sqlite_url = f"sqlite:///{sqlite_file_name}"
engine = create_engine(sqlite_url, echo=True)
def create_db_and_tables():
SQLModel.metadata.create_all(engine)
def main():
create_db_and_tables()
if __name__ == "__main__":
main()
|
[
"sqlmodel.Relationship",
"sqlmodel.SQLModel.metadata.create_all",
"sqlmodel.create_engine",
"sqlmodel.Field"
] |
[((1289, 1325), 'sqlmodel.create_engine', 'create_engine', (['sqlite_url'], {'echo': '(True)'}), '(sqlite_url, echo=True)\n', (1302, 1325), False, 'from sqlmodel import Field, Relationship, SQLModel, create_engine\n'), ((163, 200), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (168, 200), False, 'from sqlmodel import Field, Relationship, SQLModel, create_engine\n'), ((235, 272), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""weapon"""'}), "(back_populates='weapon')\n", (247, 272), False, 'from sqlmodel import Field, Relationship, SQLModel, create_engine\n'), ((334, 371), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (339, 371), False, 'from sqlmodel import Field, Relationship, SQLModel, create_engine\n'), ((406, 434), 'sqlmodel.Field', 'Field', ([], {'foreign_key': '"""hero.id"""'}), "(foreign_key='hero.id')\n", (411, 434), False, 'from sqlmodel import Field, Relationship, SQLModel, create_engine\n'), ((454, 491), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""powers"""'}), "(back_populates='powers')\n", (466, 491), False, 'from sqlmodel import Field, Relationship, SQLModel, create_engine\n'), ((552, 589), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (557, 589), False, 'from sqlmodel import Field, Relationship, SQLModel, create_engine\n'), ((654, 689), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""team"""'}), "(back_populates='team')\n", (666, 689), False, 'from sqlmodel import Field, Relationship, SQLModel, create_engine\n'), ((750, 787), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (755, 787), False, 'from sqlmodel import Field, Relationship, SQLModel, create_engine\n'), ((883, 925), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'foreign_key': '"""team.id"""'}), "(default=None, foreign_key='team.id')\n", (888, 925), False, 'from sqlmodel import Field, Relationship, SQLModel, create_engine\n'), ((953, 990), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""heroes"""'}), "(back_populates='heroes')\n", (965, 990), False, 'from sqlmodel import Field, Relationship, SQLModel, create_engine\n'), ((1023, 1067), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'foreign_key': '"""weapon.id"""'}), "(default=None, foreign_key='weapon.id')\n", (1028, 1067), False, 'from sqlmodel import Field, Relationship, SQLModel, create_engine\n'), ((1099, 1135), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""owner"""'}), "(back_populates='owner')\n", (1111, 1135), False, 'from sqlmodel import Field, Relationship, SQLModel, create_engine\n'), ((1163, 1198), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""hero"""'}), "(back_populates='hero')\n", (1175, 1198), False, 'from sqlmodel import Field, Relationship, SQLModel, create_engine\n'), ((1360, 1396), 'sqlmodel.SQLModel.metadata.create_all', 'SQLModel.metadata.create_all', (['engine'], {}), '(engine)\n', (1388, 1396), False, 'from sqlmodel import Field, Relationship, SQLModel, create_engine\n')]
|
import os
import time
from megengine.distributed.group import is_distributed
import megengine.distributed as dist
from megengine.data.dataloader import DataLoader
from edit.core.hook import Hook
from edit.utils import to_list, is_list_of, get_logger, mkdir_or_exist
class EvalIterHook(Hook):
"""evaluation hook by iteration-based.
This hook will regularly perform evaluation in a given interval
Args:
dataloader (DataLoader): A mge dataloader.
        interval (int): Evaluation interval. Default: 10000.
eval_kwargs (dict): Other eval kwargs. It contains:
save_image (bool): Whether to save image.
save_path (str): The path to save image.
"""
def __init__(self, dataloader, **eval_kwargs):
if not isinstance(dataloader, DataLoader):
raise TypeError('dataloader must be a mge DataLoader, but got {}'.format(type(dataloader)))
self.dataloader = dataloader
self.eval_kwargs = eval_kwargs
self.interval = self.eval_kwargs.pop('interval', 10000)
self.save_image = self.eval_kwargs.pop('save_image', False)
self.save_path = self.eval_kwargs.pop('save_path', None)
self.log_path = self.eval_kwargs.pop('log_path', None)
self.multi_process = self.eval_kwargs.pop('multi_process', False)
self.ensemble = self.eval_kwargs.pop('ensemble', False)
mkdir_or_exist(self.save_path)
self.logger = get_logger(name = "EvalIterHook", log_file=self.log_path) # only for rank0
if is_distributed():
self.local_rank = dist.get_rank()
self.nranks = dist.get_world_size()
else:
self.local_rank = 0
self.nranks = 1
def after_train_iter(self, runner):
if not self.every_n_iters(runner, self.interval):
return
self.logger.info("start to eval for iter: {}".format(runner.iter+1))
save_path = os.path.join(self.save_path, "iter_{}".format(runner.iter+1))
mkdir_or_exist(save_path)
results = [] # list of dict
if self.multi_process:
assert is_distributed(), "when set multiprocess eval, you should use multi process training"
raise NotImplementedError("not support multi process for eval now")
        elif self.local_rank == 0:  # let rank 0 handle all of the evaluation
for data in self.dataloader:
outputs = runner.model.test_step(data, save_image=self.save_image, save_path=save_path, ensemble=self.ensemble)
result = runner.model.cal_for_eval(outputs, data)
assert isinstance(result, list)
results += result
self.evaluate(results, runner.iter+1)
else:
pass
if is_distributed():
dist.group_barrier()
def evaluate(self, results, iters):
"""Evaluation function.
Args:
            results (list of dict): Model forward results.
            iters (int): Current iteration number.
"""
        save_path = os.path.join(self.save_path, "iter_{}".format(iters))  # save extra information here, e.g. an SVG of per-frame values in VSR.
eval_res = self.dataloader.dataset.evaluate(results, save_path)
self.logger.info("***** eval results for {} iters: *****".format(iters))
for name, val in eval_res.items():
self.logger.info("metric: {} average_val: {:.4f}".format(name, val))
|
[
"megengine.distributed.group.is_distributed",
"megengine.distributed.get_rank",
"megengine.distributed.group_barrier",
"megengine.distributed.get_world_size"
] |
[((1392, 1422), 'edit.utils.mkdir_or_exist', 'mkdir_or_exist', (['self.save_path'], {}), '(self.save_path)\n', (1406, 1422), False, 'from edit.utils import to_list, is_list_of, get_logger, mkdir_or_exist\n'), ((1445, 1500), 'edit.utils.get_logger', 'get_logger', ([], {'name': '"""EvalIterHook"""', 'log_file': 'self.log_path'}), "(name='EvalIterHook', log_file=self.log_path)\n", (1455, 1500), False, 'from edit.utils import to_list, is_list_of, get_logger, mkdir_or_exist\n'), ((1540, 1556), 'megengine.distributed.group.is_distributed', 'is_distributed', ([], {}), '()\n', (1554, 1556), False, 'from megengine.distributed.group import is_distributed\n'), ((2012, 2037), 'edit.utils.mkdir_or_exist', 'mkdir_or_exist', (['save_path'], {}), '(save_path)\n', (2026, 2037), False, 'from edit.utils import to_list, is_list_of, get_logger, mkdir_or_exist\n'), ((2752, 2768), 'megengine.distributed.group.is_distributed', 'is_distributed', ([], {}), '()\n', (2766, 2768), False, 'from megengine.distributed.group import is_distributed\n'), ((1588, 1603), 'megengine.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (1601, 1603), True, 'import megengine.distributed as dist\n'), ((1630, 1651), 'megengine.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (1649, 1651), True, 'import megengine.distributed as dist\n'), ((2125, 2141), 'megengine.distributed.group.is_distributed', 'is_distributed', ([], {}), '()\n', (2139, 2141), False, 'from megengine.distributed.group import is_distributed\n'), ((2782, 2802), 'megengine.distributed.group_barrier', 'dist.group_barrier', ([], {}), '()\n', (2800, 2802), True, 'import megengine.distributed as dist\n')]
|
"""v1-contact-invitation-key
Revision ID: <KEY>
Revises: e40469d1045a
Create Date: 2022-05-11 15:23:29.495804
"""
from alembic import op
import sqlalchemy as sa
import sqlmodel
# revision identifiers, used by Alembic.
revision = "<KEY>"
down_revision = "e40469d1045a"
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column(
"contact",
sa.Column("invitation_key", sqlmodel.sql.sqltypes.AutoString(), nullable=True),
)
op.execute("UPDATE contact SET invitation_key = 'v0'")
op.alter_column("contact", "invitation_key", nullable=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column("contact", "invitation_key")
# ### end Alembic commands ###
|
[
"sqlmodel.sql.sqltypes.AutoString"
] |
[((529, 583), 'alembic.op.execute', 'op.execute', (['"""UPDATE contact SET invitation_key = \'v0\'"""'], {}), '("UPDATE contact SET invitation_key = \'v0\'")\n', (539, 583), False, 'from alembic import op\n'), ((588, 648), 'alembic.op.alter_column', 'op.alter_column', (['"""contact"""', '"""invitation_key"""'], {'nullable': '(False)'}), "('contact', 'invitation_key', nullable=False)\n", (603, 648), False, 'from alembic import op\n'), ((773, 816), 'alembic.op.drop_column', 'op.drop_column', (['"""contact"""', '"""invitation_key"""'], {}), "('contact', 'invitation_key')\n", (787, 816), False, 'from alembic import op\n'), ((467, 501), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (499, 501), False, 'import sqlmodel\n')]
|
from typing import Optional
import uuid as uuid_pkg
from sqlmodel import SQLModel, Field
from datetime import datetime
class BaseUUIDModel(SQLModel):
id: uuid_pkg.UUID = Field(
default_factory=uuid_pkg.uuid4,
primary_key=True,
index=True,
nullable=False,
)
updated_at: Optional[datetime]
created_at: Optional[datetime]
|
[
"sqlmodel.Field"
] |
[((175, 262), 'sqlmodel.Field', 'Field', ([], {'default_factory': 'uuid_pkg.uuid4', 'primary_key': '(True)', 'index': '(True)', 'nullable': '(False)'}), '(default_factory=uuid_pkg.uuid4, primary_key=True, index=True,\n nullable=False)\n', (180, 262), False, 'from sqlmodel import SQLModel, Field\n')]
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import platform
import re
import subprocess
import sys
from math import ceil
import numpy as np
import pytest
import megengine as mge
import megengine.autodiff as ad
import megengine.distributed as dist
import megengine.functional as F
from megengine.device import get_default_device, set_default_device
from megengine.functional.debug_param import set_conv_execution_strategy
from megengine.module import AvgPool2d, BatchNorm2d, Conv2d, Linear, Module
from megengine.optimizer import SGD
from megengine.tensor import Tensor
p_num = 4
def get_gpu_name():
try:
gpu_info = subprocess.check_output(
["nvidia-smi", "--query-gpu=gpu_name", "--format=csv,noheader"]
)
gpu_info = gpu_info.decode("ascii").split("\n")[0]
except:
gpu_info = "None"
return gpu_info
def get_cpu_name():
cpu_info = "None"
try:
cpu_info = subprocess.check_output(["cat", "/proc/cpuinfo"]).decode("ascii")
for line in cpu_info.split("\n"):
if "model name" in line:
return re.sub(".*model name.*:", "", line, 1).strip()
except:
pass
return cpu_info
def get_xpu_name():
if mge.is_cuda_available():
return get_gpu_name()
else:
return get_cpu_name()
class MnistNet(Module):
def __init__(self, has_bn=True):
super().__init__()
self.conv0 = Conv2d(1, 20, kernel_size=5, bias=True)
self.pool0 = AvgPool2d(2)
self.conv1 = Conv2d(20, 20, kernel_size=5, bias=True)
self.pool1 = AvgPool2d(2)
self.fc0 = Linear(20 * 4 * 4, 500, bias=True)
self.fc1 = Linear(500, 10, bias=True)
self.bn0 = None
self.bn1 = None
if has_bn:
self.bn0 = BatchNorm2d(20)
self.bn1 = BatchNorm2d(20)
def forward(self, x):
x = self.conv0(x)
if self.bn0:
x = self.bn0(x)
x = F.relu(x)
x = self.pool0(x)
x = self.conv1(x)
if self.bn1:
x = self.bn1(x)
x = F.relu(x)
x = self.pool1(x)
x = F.flatten(x, 1)
x = self.fc0(x)
x = F.relu(x)
x = self.fc1(x)
return x
def train(data, label, net, opt, gm):
opt.clear_grad()
with gm:
pred = net(data)
loss = F.nn.cross_entropy(pred, label)
gm.backward(loss)
opt.step()
return loss
def update_model(model_path):
"""
Update the dumped model with test cases for new reference values.
The model with pre-trained weights is trained for one iter with the test data attached.
    The loss and the updated net state dict are dumped.
.. code-block:: python
from test_dp_correctness import update_model
update_model('mnist_model_with_test.mge') # for gpu
update_model('mnist_model_with_test_cpu.mge') # for cpu
"""
net = MnistNet(has_bn=True)
checkpoint = mge.load(model_path)
net.load_state_dict(checkpoint["net_init"])
lr = checkpoint["sgd_lr"]
opt = SGD(net.parameters(), lr=lr)
gm = ad.GradManager().attach(
net.parameters(), callbacks=[dist.make_allreduce_cb("MEAN", dist.WORLD)]
)
data = Tensor(checkpoint["data"], dtype=np.float32)
label = Tensor(checkpoint["label"], dtype=np.int32)
opt.clear_grad()
    loss = train(data, label, net=net, opt=opt, gm=gm)
opt.step()
xpu_name = get_xpu_name()
checkpoint.update(
{"net_updated": net.state_dict(), "loss": loss.numpy(), "xpu": xpu_name}
)
mge.serialization.save(checkpoint, model_path)
def run_test(
model_path, use_jit, use_symbolic, sublinear_memory_config=None, max_err=None,
):
"""
Load the model with test cases and run the training for one iter.
    The loss and updated weights are compared with reference values to verify correctness.
Dump a new file with updated result by calling update_model
if you think the test fails due to numerical rounding errors instead of bugs.
Please think twice before you do so.
"""
checkpoint = mge.load(model_path)
data = checkpoint["data"]
label = checkpoint["label"]
@dist.launcher
def worker(max_err):
net = MnistNet(has_bn=True)
net.load_state_dict(checkpoint["net_init"])
lr = checkpoint["sgd_lr"]
opt = SGD(net.parameters(), lr=lr)
gm = ad.GradManager().attach(
net.parameters(), callbacks=[dist.make_allreduce_cb("MEAN", dist.WORLD)]
)
        # use the same data and label on every GPU
        # so that the result does not depend on the number of GPUs
data_train = Tensor(data)
label_train = Tensor(label)
loss = train(data_train, label_train, net, opt, gm)
np.testing.assert_allclose(loss.numpy(), checkpoint["loss"], atol=max_err)
if dist.get_rank():
return
for param, param_ref in zip(
net.state_dict().items(), checkpoint["net_updated"].items()
):
assert param[0] == param_ref[0]
if "bn" in param[0]:
ref = param_ref[1].reshape(param[1].shape)
np.testing.assert_allclose(param[1], ref, atol=max_err)
else:
np.testing.assert_allclose(param[1], param_ref[1], atol=max_err)
worker(max_err)
@pytest.mark.require_ngpu(2)
@pytest.mark.isolated_distributed
def test_dp_correctness():
model_name = "mnist_model_with_test.mge"
model_path = os.path.join(os.path.dirname(__file__), model_name)
set_conv_execution_strategy("HEURISTIC_REPRODUCIBLE")
run_test(model_path, False, False, max_err=1e-5)
|
[
"megengine.autodiff.GradManager",
"megengine.module.Conv2d",
"megengine.module.AvgPool2d",
"megengine.functional.nn.cross_entropy",
"megengine.functional.debug_param.set_conv_execution_strategy",
"megengine.distributed.make_allreduce_cb",
"megengine.serialization.save",
"megengine.is_cuda_available",
"megengine.module.BatchNorm2d",
"megengine.load",
"megengine.functional.relu",
"megengine.functional.flatten",
"megengine.distributed.get_rank",
"megengine.tensor.Tensor",
"megengine.module.Linear"
] |
[((5688, 5715), 'pytest.mark.require_ngpu', 'pytest.mark.require_ngpu', (['(2)'], {}), '(2)\n', (5712, 5715), False, 'import pytest\n'), ((1564, 1587), 'megengine.is_cuda_available', 'mge.is_cuda_available', ([], {}), '()\n', (1585, 1587), True, 'import megengine as mge\n'), ((3294, 3314), 'megengine.load', 'mge.load', (['model_path'], {}), '(model_path)\n', (3302, 3314), True, 'import megengine as mge\n'), ((3566, 3610), 'megengine.tensor.Tensor', 'Tensor', (["checkpoint['data']"], {'dtype': 'np.float32'}), "(checkpoint['data'], dtype=np.float32)\n", (3572, 3610), False, 'from megengine.tensor import Tensor\n'), ((3623, 3666), 'megengine.tensor.Tensor', 'Tensor', (["checkpoint['label']"], {'dtype': 'np.int32'}), "(checkpoint['label'], dtype=np.int32)\n", (3629, 3666), False, 'from megengine.tensor import Tensor\n'), ((3898, 3944), 'megengine.serialization.save', 'mge.serialization.save', (['checkpoint', 'model_path'], {}), '(checkpoint, model_path)\n', (3920, 3944), True, 'import megengine as mge\n'), ((4434, 4454), 'megengine.load', 'mge.load', (['model_path'], {}), '(model_path)\n', (4442, 4454), True, 'import megengine as mge\n'), ((5895, 5948), 'megengine.functional.debug_param.set_conv_execution_strategy', 'set_conv_execution_strategy', (['"""HEURISTIC_REPRODUCIBLE"""'], {}), "('HEURISTIC_REPRODUCIBLE')\n", (5922, 5948), False, 'from megengine.functional.debug_param import set_conv_execution_strategy\n'), ((975, 1067), 'subprocess.check_output', 'subprocess.check_output', (["['nvidia-smi', '--query-gpu=gpu_name', '--format=csv,noheader']"], {}), "(['nvidia-smi', '--query-gpu=gpu_name',\n '--format=csv,noheader'])\n", (998, 1067), False, 'import subprocess\n'), ((1770, 1809), 'megengine.module.Conv2d', 'Conv2d', (['(1)', '(20)'], {'kernel_size': '(5)', 'bias': '(True)'}), '(1, 20, kernel_size=5, bias=True)\n', (1776, 1809), False, 'from megengine.module import AvgPool2d, BatchNorm2d, Conv2d, Linear, Module\n'), ((1831, 1843), 'megengine.module.AvgPool2d', 'AvgPool2d', (['(2)'], {}), '(2)\n', (1840, 1843), False, 'from megengine.module import AvgPool2d, BatchNorm2d, Conv2d, Linear, Module\n'), ((1865, 1905), 'megengine.module.Conv2d', 'Conv2d', (['(20)', '(20)'], {'kernel_size': '(5)', 'bias': '(True)'}), '(20, 20, kernel_size=5, bias=True)\n', (1871, 1905), False, 'from megengine.module import AvgPool2d, BatchNorm2d, Conv2d, Linear, Module\n'), ((1927, 1939), 'megengine.module.AvgPool2d', 'AvgPool2d', (['(2)'], {}), '(2)\n', (1936, 1939), False, 'from megengine.module import AvgPool2d, BatchNorm2d, Conv2d, Linear, Module\n'), ((1959, 1993), 'megengine.module.Linear', 'Linear', (['(20 * 4 * 4)', '(500)'], {'bias': '(True)'}), '(20 * 4 * 4, 500, bias=True)\n', (1965, 1993), False, 'from megengine.module import AvgPool2d, BatchNorm2d, Conv2d, Linear, Module\n'), ((2013, 2039), 'megengine.module.Linear', 'Linear', (['(500)', '(10)'], {'bias': '(True)'}), '(500, 10, bias=True)\n', (2019, 2039), False, 'from megengine.module import AvgPool2d, BatchNorm2d, Conv2d, Linear, Module\n'), ((2299, 2308), 'megengine.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (2305, 2308), True, 'import megengine.functional as F\n'), ((2422, 2431), 'megengine.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (2428, 2431), True, 'import megengine.functional as F\n'), ((2470, 2485), 'megengine.functional.flatten', 'F.flatten', (['x', '(1)'], {}), '(x, 1)\n', (2479, 2485), True, 'import megengine.functional as F\n'), ((2522, 2531), 'megengine.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (2528, 2531), True, 
'import megengine.functional as F\n'), ((2687, 2718), 'megengine.functional.nn.cross_entropy', 'F.nn.cross_entropy', (['pred', 'label'], {}), '(pred, label)\n', (2705, 2718), True, 'import megengine.functional as F\n'), ((4995, 5007), 'megengine.tensor.Tensor', 'Tensor', (['data'], {}), '(data)\n', (5001, 5007), False, 'from megengine.tensor import Tensor\n'), ((5030, 5043), 'megengine.tensor.Tensor', 'Tensor', (['label'], {}), '(label)\n', (5036, 5043), False, 'from megengine.tensor import Tensor\n'), ((5201, 5216), 'megengine.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (5214, 5216), True, 'import megengine.distributed as dist\n'), ((5852, 5877), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (5867, 5877), False, 'import os\n'), ((2130, 2145), 'megengine.module.BatchNorm2d', 'BatchNorm2d', (['(20)'], {}), '(20)\n', (2141, 2145), False, 'from megengine.module import AvgPool2d, BatchNorm2d, Conv2d, Linear, Module\n'), ((2169, 2184), 'megengine.module.BatchNorm2d', 'BatchNorm2d', (['(20)'], {}), '(20)\n', (2180, 2184), False, 'from megengine.module import AvgPool2d, BatchNorm2d, Conv2d, Linear, Module\n'), ((3442, 3458), 'megengine.autodiff.GradManager', 'ad.GradManager', ([], {}), '()\n', (3456, 3458), True, 'import megengine.autodiff as ad\n'), ((1275, 1324), 'subprocess.check_output', 'subprocess.check_output', (["['cat', '/proc/cpuinfo']"], {}), "(['cat', '/proc/cpuinfo'])\n", (1298, 1324), False, 'import subprocess\n'), ((3504, 3546), 'megengine.distributed.make_allreduce_cb', 'dist.make_allreduce_cb', (['"""MEAN"""', 'dist.WORLD'], {}), "('MEAN', dist.WORLD)\n", (3526, 3546), True, 'import megengine.distributed as dist\n'), ((4741, 4757), 'megengine.autodiff.GradManager', 'ad.GradManager', ([], {}), '()\n', (4755, 4757), True, 'import megengine.autodiff as ad\n'), ((5509, 5564), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['param[1]', 'ref'], {'atol': 'max_err'}), '(param[1], ref, atol=max_err)\n', (5535, 5564), True, 'import numpy as np\n'), ((5599, 5663), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['param[1]', 'param_ref[1]'], {'atol': 'max_err'}), '(param[1], param_ref[1], atol=max_err)\n', (5625, 5663), True, 'import numpy as np\n'), ((4807, 4849), 'megengine.distributed.make_allreduce_cb', 'dist.make_allreduce_cb', (['"""MEAN"""', 'dist.WORLD'], {}), "('MEAN', dist.WORLD)\n", (4829, 4849), True, 'import megengine.distributed as dist\n'), ((1443, 1481), 're.sub', 're.sub', (['""".*model name.*:"""', '""""""', 'line', '(1)'], {}), "('.*model name.*:', '', line, 1)\n", (1449, 1481), False, 'import re\n')]
|
"""init
Revision ID: f9c634db477d
Revises:
Create Date: 2021-09-10 00:24:32.718895
"""
from alembic import op
import sqlalchemy as sa
import sqlmodel
# revision identifiers, used by Alembic.
revision = 'f9c634db477d'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('song',
sa.Column('name', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('artist', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('id', sa.Integer(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_song_artist'), 'song', ['artist'], unique=False)
op.create_index(op.f('ix_song_id'), 'song', ['id'], unique=False)
op.create_index(op.f('ix_song_name'), 'song', ['name'], unique=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_song_name'), table_name='song')
op.drop_index(op.f('ix_song_id'), table_name='song')
op.drop_index(op.f('ix_song_artist'), table_name='song')
op.drop_table('song')
# ### end Alembic commands ###
|
[
"sqlmodel.sql.sqltypes.AutoString"
] |
[((1158, 1179), 'alembic.op.drop_table', 'op.drop_table', (['"""song"""'], {}), "('song')\n", (1171, 1179), False, 'from alembic import op\n'), ((599, 628), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""'], {}), "('id')\n", (622, 628), True, 'import sqlalchemy as sa\n'), ((655, 677), 'alembic.op.f', 'op.f', (['"""ix_song_artist"""'], {}), "('ix_song_artist')\n", (659, 677), False, 'from alembic import op\n'), ((733, 751), 'alembic.op.f', 'op.f', (['"""ix_song_id"""'], {}), "('ix_song_id')\n", (737, 751), False, 'from alembic import op\n'), ((803, 823), 'alembic.op.f', 'op.f', (['"""ix_song_name"""'], {}), "('ix_song_name')\n", (807, 823), False, 'from alembic import op\n'), ((995, 1015), 'alembic.op.f', 'op.f', (['"""ix_song_name"""'], {}), "('ix_song_name')\n", (999, 1015), False, 'from alembic import op\n'), ((1054, 1072), 'alembic.op.f', 'op.f', (['"""ix_song_id"""'], {}), "('ix_song_id')\n", (1058, 1072), False, 'from alembic import op\n'), ((1111, 1133), 'alembic.op.f', 'op.f', (['"""ix_song_artist"""'], {}), "('ix_song_artist')\n", (1115, 1133), False, 'from alembic import op\n'), ((415, 449), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (447, 449), False, 'import sqlmodel\n'), ((492, 526), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (524, 526), False, 'import sqlmodel\n'), ((565, 577), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (575, 577), True, 'import sqlalchemy as sa\n')]
|
"""Initial migration
Revision ID: 5f31ff8814e7
Revises:
Create Date: 2022-04-30 19:39:20.164043+00:00
"""
# pylint: disable=no-member, invalid-name, missing-function-docstring, unused-import
import sqlalchemy as sa
import sqlalchemy_utils
import sqlmodel
from alembic import op
# revision identifiers, used by Alembic.
revision = "5f31ff8814e7"
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"database",
sa.Column("name", sa.String(), nullable=True),
sa.Column("created_at", sa.DateTime(timezone=True), nullable=True),
sa.Column("updated_at", sa.DateTime(timezone=True), nullable=True),
sa.Column("id", sa.Integer(), nullable=True),
sa.Column("description", sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column("URI", sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column("read_only", sa.Boolean(), nullable=True),
sa.Column("async", sa.Boolean(), nullable=True),
sa.Column("cost", sa.Float(), nullable=True),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("name"),
)
op.create_index(op.f("ix_database_URI"), "database", ["URI"], unique=False)
op.create_index(op.f("ix_database_async"), "database", ["async"], unique=False)
op.create_index(op.f("ix_database_cost"), "database", ["cost"], unique=False)
op.create_index(
op.f("ix_database_description"),
"database",
["description"],
unique=False,
)
op.create_index(op.f("ix_database_id"), "database", ["id"], unique=False)
op.create_index(
op.f("ix_database_read_only"),
"database",
["read_only"],
unique=False,
)
op.create_table(
"node",
sa.Column("name", sa.String(), nullable=True),
sa.Column("created_at", sa.DateTime(timezone=True), nullable=True),
sa.Column("updated_at", sa.DateTime(timezone=True), nullable=True),
sa.Column(
"type",
sa.Enum("SOURCE", "TRANSFORM", "METRIC", "DIMENSION", name="nodetype"),
nullable=True,
),
sa.Column("id", sa.Integer(), nullable=True),
sa.Column("description", sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column("expression", sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("name"),
)
op.create_index(op.f("ix_node_description"), "node", ["description"], unique=False)
op.create_index(op.f("ix_node_expression"), "node", ["expression"], unique=False)
op.create_index(op.f("ix_node_id"), "node", ["id"], unique=False)
op.create_table(
"column",
sa.Column(
"type",
sa.Enum(
"BYTES",
"STR",
"FLOAT",
"INT",
"DECIMAL",
"BOOL",
"DATETIME",
"DATE",
"TIME",
"TIMEDELTA",
"LIST",
"DICT",
name="columntype",
),
nullable=True,
),
sa.Column("id", sa.Integer(), nullable=True),
sa.Column("name", sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column("dimension_id", sa.Integer(), nullable=True),
sa.Column(
"dimension_column",
sqlmodel.sql.sqltypes.AutoString(),
nullable=True,
),
sa.ForeignKeyConstraint(
["dimension_id"],
["node.id"],
),
sa.PrimaryKeyConstraint("id"),
)
op.create_index(
op.f("ix_column_dimension_column"),
"column",
["dimension_column"],
unique=False,
)
op.create_index(
op.f("ix_column_dimension_id"),
"column",
["dimension_id"],
unique=False,
)
op.create_index(op.f("ix_column_id"), "column", ["id"], unique=False)
op.create_index(op.f("ix_column_name"), "column", ["name"], unique=False)
op.create_table(
"noderelationship",
sa.Column("parent_id", sa.Integer(), nullable=True),
sa.Column("child_id", sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(
["child_id"],
["node.id"],
),
sa.ForeignKeyConstraint(
["parent_id"],
["node.id"],
),
sa.PrimaryKeyConstraint("parent_id", "child_id"),
)
op.create_index(
op.f("ix_noderelationship_child_id"),
"noderelationship",
["child_id"],
unique=False,
)
op.create_index(
op.f("ix_noderelationship_parent_id"),
"noderelationship",
["parent_id"],
unique=False,
)
op.create_table(
"query",
sa.Column("id", sqlalchemy_utils.types.uuid.UUIDType(), nullable=False),
sa.Column("database_id", sa.Integer(), nullable=False),
sa.Column("catalog", sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column("schema_", sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column(
"submitted_query",
sqlmodel.sql.sqltypes.AutoString(),
nullable=False,
),
sa.Column("executed_query", sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column("scheduled", sa.DateTime(), nullable=True),
sa.Column("started", sa.DateTime(), nullable=True),
sa.Column("finished", sa.DateTime(), nullable=True),
sa.Column("state", sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column("progress", sa.Float(), nullable=True),
sa.ForeignKeyConstraint(
["database_id"],
["database.id"],
),
sa.PrimaryKeyConstraint("id"),
)
op.create_index(op.f("ix_query_catalog"), "query", ["catalog"], unique=False)
op.create_index(
op.f("ix_query_database_id"),
"query",
["database_id"],
unique=False,
)
op.create_index(
op.f("ix_query_executed_query"),
"query",
["executed_query"],
unique=False,
)
op.create_index(op.f("ix_query_finished"), "query", ["finished"], unique=False)
op.create_index(op.f("ix_query_progress"), "query", ["progress"], unique=False)
op.create_index(op.f("ix_query_scheduled"), "query", ["scheduled"], unique=False)
op.create_index(op.f("ix_query_schema_"), "query", ["schema_"], unique=False)
op.create_index(op.f("ix_query_started"), "query", ["started"], unique=False)
op.create_index(op.f("ix_query_state"), "query", ["state"], unique=False)
op.create_index(
op.f("ix_query_submitted_query"),
"query",
["submitted_query"],
unique=False,
)
op.create_table(
"table",
sa.Column("id", sa.Integer(), nullable=True),
sa.Column("node_id", sa.Integer(), nullable=False),
sa.Column("database_id", sa.Integer(), nullable=False),
sa.Column("catalog", sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column("schema_", sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column("table", sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column("cost", sa.Float(), nullable=True),
sa.ForeignKeyConstraint(
["database_id"],
["database.id"],
),
sa.ForeignKeyConstraint(
["node_id"],
["node.id"],
),
sa.PrimaryKeyConstraint("id"),
)
op.create_index(op.f("ix_table_catalog"), "table", ["catalog"], unique=False)
op.create_index(op.f("ix_table_cost"), "table", ["cost"], unique=False)
op.create_index(
op.f("ix_table_database_id"),
"table",
["database_id"],
unique=False,
)
op.create_index(op.f("ix_table_id"), "table", ["id"], unique=False)
op.create_index(op.f("ix_table_node_id"), "table", ["node_id"], unique=False)
op.create_index(op.f("ix_table_schema_"), "table", ["schema_"], unique=False)
op.create_index(op.f("ix_table_table"), "table", ["table"], unique=False)
op.create_table(
"nodecolumns",
sa.Column("node_id", sa.Integer(), nullable=True),
sa.Column("column_id", sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(
["column_id"],
["column.id"],
),
sa.ForeignKeyConstraint(
["node_id"],
["node.id"],
),
sa.PrimaryKeyConstraint("node_id", "column_id"),
)
op.create_index(
op.f("ix_nodecolumns_column_id"),
"nodecolumns",
["column_id"],
unique=False,
)
op.create_index(
op.f("ix_nodecolumns_node_id"),
"nodecolumns",
["node_id"],
unique=False,
)
op.create_table(
"tablecolumns",
sa.Column("table_id", sa.Integer(), nullable=True),
sa.Column("column_id", sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(
["column_id"],
["column.id"],
),
sa.ForeignKeyConstraint(
["table_id"],
["table.id"],
),
sa.PrimaryKeyConstraint("table_id", "column_id"),
)
op.create_index(
op.f("ix_tablecolumns_column_id"),
"tablecolumns",
["column_id"],
unique=False,
)
op.create_index(
op.f("ix_tablecolumns_table_id"),
"tablecolumns",
["table_id"],
unique=False,
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f("ix_tablecolumns_table_id"), table_name="tablecolumns")
op.drop_index(op.f("ix_tablecolumns_column_id"), table_name="tablecolumns")
op.drop_table("tablecolumns")
op.drop_index(op.f("ix_nodecolumns_node_id"), table_name="nodecolumns")
op.drop_index(op.f("ix_nodecolumns_column_id"), table_name="nodecolumns")
op.drop_table("nodecolumns")
op.drop_index(op.f("ix_table_table"), table_name="table")
op.drop_index(op.f("ix_table_schema_"), table_name="table")
op.drop_index(op.f("ix_table_node_id"), table_name="table")
op.drop_index(op.f("ix_table_id"), table_name="table")
op.drop_index(op.f("ix_table_database_id"), table_name="table")
op.drop_index(op.f("ix_table_cost"), table_name="table")
op.drop_index(op.f("ix_table_catalog"), table_name="table")
op.drop_table("table")
op.drop_index(op.f("ix_query_submitted_query"), table_name="query")
op.drop_index(op.f("ix_query_state"), table_name="query")
op.drop_index(op.f("ix_query_started"), table_name="query")
op.drop_index(op.f("ix_query_schema_"), table_name="query")
op.drop_index(op.f("ix_query_scheduled"), table_name="query")
op.drop_index(op.f("ix_query_progress"), table_name="query")
op.drop_index(op.f("ix_query_finished"), table_name="query")
op.drop_index(op.f("ix_query_executed_query"), table_name="query")
op.drop_index(op.f("ix_query_database_id"), table_name="query")
op.drop_index(op.f("ix_query_catalog"), table_name="query")
op.drop_table("query")
op.drop_index(op.f("ix_noderelationship_parent_id"), table_name="noderelationship")
op.drop_index(op.f("ix_noderelationship_child_id"), table_name="noderelationship")
op.drop_table("noderelationship")
op.drop_index(op.f("ix_column_name"), table_name="column")
op.drop_index(op.f("ix_column_id"), table_name="column")
op.drop_index(op.f("ix_column_dimension_id"), table_name="column")
op.drop_index(op.f("ix_column_dimension_column"), table_name="column")
op.drop_table("column")
op.drop_index(op.f("ix_node_id"), table_name="node")
op.drop_index(op.f("ix_node_expression"), table_name="node")
op.drop_index(op.f("ix_node_description"), table_name="node")
op.drop_table("node")
op.drop_index(op.f("ix_database_read_only"), table_name="database")
op.drop_index(op.f("ix_database_id"), table_name="database")
op.drop_index(op.f("ix_database_description"), table_name="database")
op.drop_index(op.f("ix_database_cost"), table_name="database")
op.drop_index(op.f("ix_database_async"), table_name="database")
op.drop_index(op.f("ix_database_URI"), table_name="database")
op.drop_table("database")
# ### end Alembic commands ###
|
[
"sqlmodel.sql.sqltypes.AutoString"
] |
[((9906, 9935), 'alembic.op.drop_table', 'op.drop_table', (['"""tablecolumns"""'], {}), "('tablecolumns')\n", (9919, 9935), False, 'from alembic import op\n'), ((10094, 10122), 'alembic.op.drop_table', 'op.drop_table', (['"""nodecolumns"""'], {}), "('nodecolumns')\n", (10107, 10122), False, 'from alembic import op\n'), ((10569, 10591), 'alembic.op.drop_table', 'op.drop_table', (['"""table"""'], {}), "('table')\n", (10582, 10591), False, 'from alembic import op\n'), ((11257, 11279), 'alembic.op.drop_table', 'op.drop_table', (['"""query"""'], {}), "('query')\n", (11270, 11279), False, 'from alembic import op\n'), ((11459, 11492), 'alembic.op.drop_table', 'op.drop_table', (['"""noderelationship"""'], {}), "('noderelationship')\n", (11472, 11492), False, 'from alembic import op\n'), ((11767, 11790), 'alembic.op.drop_table', 'op.drop_table', (['"""column"""'], {}), "('column')\n", (11780, 11790), False, 'from alembic import op\n'), ((11983, 12004), 'alembic.op.drop_table', 'op.drop_table', (['"""node"""'], {}), "('node')\n", (11996, 12004), False, 'from alembic import op\n'), ((12421, 12446), 'alembic.op.drop_table', 'op.drop_table', (['"""database"""'], {}), "('database')\n", (12434, 12446), False, 'from alembic import op\n'), ((1138, 1167), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""'], {}), "('id')\n", (1161, 1167), True, 'import sqlalchemy as sa\n'), ((1177, 1204), 'sqlalchemy.UniqueConstraint', 'sa.UniqueConstraint', (['"""name"""'], {}), "('name')\n", (1196, 1204), True, 'import sqlalchemy as sa\n'), ((1232, 1255), 'alembic.op.f', 'op.f', (['"""ix_database_URI"""'], {}), "('ix_database_URI')\n", (1236, 1255), False, 'from alembic import op\n'), ((1312, 1337), 'alembic.op.f', 'op.f', (['"""ix_database_async"""'], {}), "('ix_database_async')\n", (1316, 1337), False, 'from alembic import op\n'), ((1396, 1420), 'alembic.op.f', 'op.f', (['"""ix_database_cost"""'], {}), "('ix_database_cost')\n", (1400, 1420), False, 'from alembic import op\n'), ((1487, 1518), 'alembic.op.f', 'op.f', (['"""ix_database_description"""'], {}), "('ix_database_description')\n", (1491, 1518), False, 'from alembic import op\n'), ((1613, 1635), 'alembic.op.f', 'op.f', (['"""ix_database_id"""'], {}), "('ix_database_id')\n", (1617, 1635), False, 'from alembic import op\n'), ((1700, 1729), 'alembic.op.f', 'op.f', (['"""ix_database_read_only"""'], {}), "('ix_database_read_only')\n", (1704, 1729), False, 'from alembic import op\n'), ((2438, 2467), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""'], {}), "('id')\n", (2461, 2467), True, 'import sqlalchemy as sa\n'), ((2477, 2504), 'sqlalchemy.UniqueConstraint', 'sa.UniqueConstraint', (['"""name"""'], {}), "('name')\n", (2496, 2504), True, 'import sqlalchemy as sa\n'), ((2532, 2559), 'alembic.op.f', 'op.f', (['"""ix_node_description"""'], {}), "('ix_node_description')\n", (2536, 2559), False, 'from alembic import op\n'), ((2620, 2646), 'alembic.op.f', 'op.f', (['"""ix_node_expression"""'], {}), "('ix_node_expression')\n", (2624, 2646), False, 'from alembic import op\n'), ((2706, 2724), 'alembic.op.f', 'op.f', (['"""ix_node_id"""'], {}), "('ix_node_id')\n", (2710, 2724), False, 'from alembic import op\n'), ((3585, 3639), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['dimension_id']", "['node.id']"], {}), "(['dimension_id'], ['node.id'])\n", (3608, 3639), True, 'import sqlalchemy as sa\n'), ((3684, 3713), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""'], {}), "('id')\n", (3707, 
3713), True, 'import sqlalchemy as sa\n'), ((3750, 3784), 'alembic.op.f', 'op.f', (['"""ix_column_dimension_column"""'], {}), "('ix_column_dimension_column')\n", (3754, 3784), False, 'from alembic import op\n'), ((3891, 3921), 'alembic.op.f', 'op.f', (['"""ix_column_dimension_id"""'], {}), "('ix_column_dimension_id')\n", (3895, 3921), False, 'from alembic import op\n'), ((4015, 4035), 'alembic.op.f', 'op.f', (['"""ix_column_id"""'], {}), "('ix_column_id')\n", (4019, 4035), False, 'from alembic import op\n'), ((4089, 4111), 'alembic.op.f', 'op.f', (['"""ix_column_name"""'], {}), "('ix_column_name')\n", (4093, 4111), False, 'from alembic import op\n'), ((4325, 4375), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['child_id']", "['node.id']"], {}), "(['child_id'], ['node.id'])\n", (4348, 4375), True, 'import sqlalchemy as sa\n'), ((4420, 4471), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['parent_id']", "['node.id']"], {}), "(['parent_id'], ['node.id'])\n", (4443, 4471), True, 'import sqlalchemy as sa\n'), ((4516, 4564), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""parent_id"""', '"""child_id"""'], {}), "('parent_id', 'child_id')\n", (4539, 4564), True, 'import sqlalchemy as sa\n'), ((4601, 4637), 'alembic.op.f', 'op.f', (['"""ix_noderelationship_child_id"""'], {}), "('ix_noderelationship_child_id')\n", (4605, 4637), False, 'from alembic import op\n'), ((4746, 4783), 'alembic.op.f', 'op.f', (['"""ix_noderelationship_parent_id"""'], {}), "('ix_noderelationship_parent_id')\n", (4750, 4783), False, 'from alembic import op\n'), ((5762, 5819), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['database_id']", "['database.id']"], {}), "(['database_id'], ['database.id'])\n", (5785, 5819), True, 'import sqlalchemy as sa\n'), ((5864, 5893), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""'], {}), "('id')\n", (5887, 5893), True, 'import sqlalchemy as sa\n'), ((5921, 5945), 'alembic.op.f', 'op.f', (['"""ix_query_catalog"""'], {}), "('ix_query_catalog')\n", (5925, 5945), False, 'from alembic import op\n'), ((6012, 6040), 'alembic.op.f', 'op.f', (['"""ix_query_database_id"""'], {}), "('ix_query_database_id')\n", (6016, 6040), False, 'from alembic import op\n'), ((6141, 6172), 'alembic.op.f', 'op.f', (['"""ix_query_executed_query"""'], {}), "('ix_query_executed_query')\n", (6145, 6172), False, 'from alembic import op\n'), ((6267, 6292), 'alembic.op.f', 'op.f', (['"""ix_query_finished"""'], {}), "('ix_query_finished')\n", (6271, 6292), False, 'from alembic import op\n'), ((6351, 6376), 'alembic.op.f', 'op.f', (['"""ix_query_progress"""'], {}), "('ix_query_progress')\n", (6355, 6376), False, 'from alembic import op\n'), ((6435, 6461), 'alembic.op.f', 'op.f', (['"""ix_query_scheduled"""'], {}), "('ix_query_scheduled')\n", (6439, 6461), False, 'from alembic import op\n'), ((6521, 6545), 'alembic.op.f', 'op.f', (['"""ix_query_schema_"""'], {}), "('ix_query_schema_')\n", (6525, 6545), False, 'from alembic import op\n'), ((6603, 6627), 'alembic.op.f', 'op.f', (['"""ix_query_started"""'], {}), "('ix_query_started')\n", (6607, 6627), False, 'from alembic import op\n'), ((6685, 6707), 'alembic.op.f', 'op.f', (['"""ix_query_state"""'], {}), "('ix_query_state')\n", (6689, 6707), False, 'from alembic import op\n'), ((6772, 6804), 'alembic.op.f', 'op.f', (['"""ix_query_submitted_query"""'], {}), "('ix_query_submitted_query')\n", (6776, 6804), False, 'from alembic import op\n'), ((7400, 7457), 
'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['database_id']", "['database.id']"], {}), "(['database_id'], ['database.id'])\n", (7423, 7457), True, 'import sqlalchemy as sa\n'), ((7502, 7551), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['node_id']", "['node.id']"], {}), "(['node_id'], ['node.id'])\n", (7525, 7551), True, 'import sqlalchemy as sa\n'), ((7596, 7625), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""'], {}), "('id')\n", (7619, 7625), True, 'import sqlalchemy as sa\n'), ((7653, 7677), 'alembic.op.f', 'op.f', (['"""ix_table_catalog"""'], {}), "('ix_table_catalog')\n", (7657, 7677), False, 'from alembic import op\n'), ((7735, 7756), 'alembic.op.f', 'op.f', (['"""ix_table_cost"""'], {}), "('ix_table_cost')\n", (7739, 7756), False, 'from alembic import op\n'), ((7820, 7848), 'alembic.op.f', 'op.f', (['"""ix_table_database_id"""'], {}), "('ix_table_database_id')\n", (7824, 7848), False, 'from alembic import op\n'), ((7940, 7959), 'alembic.op.f', 'op.f', (['"""ix_table_id"""'], {}), "('ix_table_id')\n", (7944, 7959), False, 'from alembic import op\n'), ((8012, 8036), 'alembic.op.f', 'op.f', (['"""ix_table_node_id"""'], {}), "('ix_table_node_id')\n", (8016, 8036), False, 'from alembic import op\n'), ((8094, 8118), 'alembic.op.f', 'op.f', (['"""ix_table_schema_"""'], {}), "('ix_table_schema_')\n", (8098, 8118), False, 'from alembic import op\n'), ((8176, 8198), 'alembic.op.f', 'op.f', (['"""ix_table_table"""'], {}), "('ix_table_table')\n", (8180, 8198), False, 'from alembic import op\n'), ((8406, 8459), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['column_id']", "['column.id']"], {}), "(['column_id'], ['column.id'])\n", (8429, 8459), True, 'import sqlalchemy as sa\n'), ((8504, 8553), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['node_id']", "['node.id']"], {}), "(['node_id'], ['node.id'])\n", (8527, 8553), True, 'import sqlalchemy as sa\n'), ((8598, 8645), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""node_id"""', '"""column_id"""'], {}), "('node_id', 'column_id')\n", (8621, 8645), True, 'import sqlalchemy as sa\n'), ((8682, 8714), 'alembic.op.f', 'op.f', (['"""ix_nodecolumns_column_id"""'], {}), "('ix_nodecolumns_column_id')\n", (8686, 8714), False, 'from alembic import op\n'), ((8819, 8849), 'alembic.op.f', 'op.f', (['"""ix_nodecolumns_node_id"""'], {}), "('ix_nodecolumns_node_id')\n", (8823, 8849), False, 'from alembic import op\n'), ((9097, 9150), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['column_id']", "['column.id']"], {}), "(['column_id'], ['column.id'])\n", (9120, 9150), True, 'import sqlalchemy as sa\n'), ((9195, 9246), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['table_id']", "['table.id']"], {}), "(['table_id'], ['table.id'])\n", (9218, 9246), True, 'import sqlalchemy as sa\n'), ((9291, 9339), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""table_id"""', '"""column_id"""'], {}), "('table_id', 'column_id')\n", (9314, 9339), True, 'import sqlalchemy as sa\n'), ((9376, 9409), 'alembic.op.f', 'op.f', (['"""ix_tablecolumns_column_id"""'], {}), "('ix_tablecolumns_column_id')\n", (9380, 9409), False, 'from alembic import op\n'), ((9515, 9547), 'alembic.op.f', 'op.f', (['"""ix_tablecolumns_table_id"""'], {}), "('ix_tablecolumns_table_id')\n", (9519, 9547), False, 'from alembic import op\n'), ((9761, 9793), 'alembic.op.f', 'op.f', (['"""ix_tablecolumns_table_id"""'], {}), 
"('ix_tablecolumns_table_id')\n", (9765, 9793), False, 'from alembic import op\n'), ((9840, 9873), 'alembic.op.f', 'op.f', (['"""ix_tablecolumns_column_id"""'], {}), "('ix_tablecolumns_column_id')\n", (9844, 9873), False, 'from alembic import op\n'), ((9954, 9984), 'alembic.op.f', 'op.f', (['"""ix_nodecolumns_node_id"""'], {}), "('ix_nodecolumns_node_id')\n", (9958, 9984), False, 'from alembic import op\n'), ((10030, 10062), 'alembic.op.f', 'op.f', (['"""ix_nodecolumns_column_id"""'], {}), "('ix_nodecolumns_column_id')\n", (10034, 10062), False, 'from alembic import op\n'), ((10141, 10163), 'alembic.op.f', 'op.f', (['"""ix_table_table"""'], {}), "('ix_table_table')\n", (10145, 10163), False, 'from alembic import op\n'), ((10203, 10227), 'alembic.op.f', 'op.f', (['"""ix_table_schema_"""'], {}), "('ix_table_schema_')\n", (10207, 10227), False, 'from alembic import op\n'), ((10267, 10291), 'alembic.op.f', 'op.f', (['"""ix_table_node_id"""'], {}), "('ix_table_node_id')\n", (10271, 10291), False, 'from alembic import op\n'), ((10331, 10350), 'alembic.op.f', 'op.f', (['"""ix_table_id"""'], {}), "('ix_table_id')\n", (10335, 10350), False, 'from alembic import op\n'), ((10390, 10418), 'alembic.op.f', 'op.f', (['"""ix_table_database_id"""'], {}), "('ix_table_database_id')\n", (10394, 10418), False, 'from alembic import op\n'), ((10458, 10479), 'alembic.op.f', 'op.f', (['"""ix_table_cost"""'], {}), "('ix_table_cost')\n", (10462, 10479), False, 'from alembic import op\n'), ((10519, 10543), 'alembic.op.f', 'op.f', (['"""ix_table_catalog"""'], {}), "('ix_table_catalog')\n", (10523, 10543), False, 'from alembic import op\n'), ((10610, 10642), 'alembic.op.f', 'op.f', (['"""ix_query_submitted_query"""'], {}), "('ix_query_submitted_query')\n", (10614, 10642), False, 'from alembic import op\n'), ((10682, 10704), 'alembic.op.f', 'op.f', (['"""ix_query_state"""'], {}), "('ix_query_state')\n", (10686, 10704), False, 'from alembic import op\n'), ((10744, 10768), 'alembic.op.f', 'op.f', (['"""ix_query_started"""'], {}), "('ix_query_started')\n", (10748, 10768), False, 'from alembic import op\n'), ((10808, 10832), 'alembic.op.f', 'op.f', (['"""ix_query_schema_"""'], {}), "('ix_query_schema_')\n", (10812, 10832), False, 'from alembic import op\n'), ((10872, 10898), 'alembic.op.f', 'op.f', (['"""ix_query_scheduled"""'], {}), "('ix_query_scheduled')\n", (10876, 10898), False, 'from alembic import op\n'), ((10938, 10963), 'alembic.op.f', 'op.f', (['"""ix_query_progress"""'], {}), "('ix_query_progress')\n", (10942, 10963), False, 'from alembic import op\n'), ((11003, 11028), 'alembic.op.f', 'op.f', (['"""ix_query_finished"""'], {}), "('ix_query_finished')\n", (11007, 11028), False, 'from alembic import op\n'), ((11068, 11099), 'alembic.op.f', 'op.f', (['"""ix_query_executed_query"""'], {}), "('ix_query_executed_query')\n", (11072, 11099), False, 'from alembic import op\n'), ((11139, 11167), 'alembic.op.f', 'op.f', (['"""ix_query_database_id"""'], {}), "('ix_query_database_id')\n", (11143, 11167), False, 'from alembic import op\n'), ((11207, 11231), 'alembic.op.f', 'op.f', (['"""ix_query_catalog"""'], {}), "('ix_query_catalog')\n", (11211, 11231), False, 'from alembic import op\n'), ((11298, 11335), 'alembic.op.f', 'op.f', (['"""ix_noderelationship_parent_id"""'], {}), "('ix_noderelationship_parent_id')\n", (11302, 11335), False, 'from alembic import op\n'), ((11386, 11422), 'alembic.op.f', 'op.f', (['"""ix_noderelationship_child_id"""'], {}), "('ix_noderelationship_child_id')\n", (11390, 11422), False, 'from alembic 
import op\n'), ((11511, 11533), 'alembic.op.f', 'op.f', (['"""ix_column_name"""'], {}), "('ix_column_name')\n", (11515, 11533), False, 'from alembic import op\n'), ((11574, 11594), 'alembic.op.f', 'op.f', (['"""ix_column_id"""'], {}), "('ix_column_id')\n", (11578, 11594), False, 'from alembic import op\n'), ((11635, 11665), 'alembic.op.f', 'op.f', (['"""ix_column_dimension_id"""'], {}), "('ix_column_dimension_id')\n", (11639, 11665), False, 'from alembic import op\n'), ((11706, 11740), 'alembic.op.f', 'op.f', (['"""ix_column_dimension_column"""'], {}), "('ix_column_dimension_column')\n", (11710, 11740), False, 'from alembic import op\n'), ((11809, 11827), 'alembic.op.f', 'op.f', (['"""ix_node_id"""'], {}), "('ix_node_id')\n", (11813, 11827), False, 'from alembic import op\n'), ((11866, 11892), 'alembic.op.f', 'op.f', (['"""ix_node_expression"""'], {}), "('ix_node_expression')\n", (11870, 11892), False, 'from alembic import op\n'), ((11931, 11958), 'alembic.op.f', 'op.f', (['"""ix_node_description"""'], {}), "('ix_node_description')\n", (11935, 11958), False, 'from alembic import op\n'), ((12023, 12052), 'alembic.op.f', 'op.f', (['"""ix_database_read_only"""'], {}), "('ix_database_read_only')\n", (12027, 12052), False, 'from alembic import op\n'), ((12095, 12117), 'alembic.op.f', 'op.f', (['"""ix_database_id"""'], {}), "('ix_database_id')\n", (12099, 12117), False, 'from alembic import op\n'), ((12160, 12191), 'alembic.op.f', 'op.f', (['"""ix_database_description"""'], {}), "('ix_database_description')\n", (12164, 12191), False, 'from alembic import op\n'), ((12234, 12258), 'alembic.op.f', 'op.f', (['"""ix_database_cost"""'], {}), "('ix_database_cost')\n", (12238, 12258), False, 'from alembic import op\n'), ((12301, 12326), 'alembic.op.f', 'op.f', (['"""ix_database_async"""'], {}), "('ix_database_async')\n", (12305, 12326), False, 'from alembic import op\n'), ((12369, 12392), 'alembic.op.f', 'op.f', (['"""ix_database_URI"""'], {}), "('ix_database_URI')\n", (12373, 12392), False, 'from alembic import op\n'), ((560, 571), 'sqlalchemy.String', 'sa.String', ([], {}), '()\n', (569, 571), True, 'import sqlalchemy as sa\n'), ((621, 647), 'sqlalchemy.DateTime', 'sa.DateTime', ([], {'timezone': '(True)'}), '(timezone=True)\n', (632, 647), True, 'import sqlalchemy as sa\n'), ((697, 723), 'sqlalchemy.DateTime', 'sa.DateTime', ([], {'timezone': '(True)'}), '(timezone=True)\n', (708, 723), True, 'import sqlalchemy as sa\n'), ((765, 777), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (775, 777), True, 'import sqlalchemy as sa\n'), ((828, 862), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (860, 862), False, 'import sqlmodel\n'), ((905, 939), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (937, 939), False, 'import sqlmodel\n'), ((989, 1001), 'sqlalchemy.Boolean', 'sa.Boolean', ([], {}), '()\n', (999, 1001), True, 'import sqlalchemy as sa\n'), ((1046, 1058), 'sqlalchemy.Boolean', 'sa.Boolean', ([], {}), '()\n', (1056, 1058), True, 'import sqlalchemy as sa\n'), ((1102, 1112), 'sqlalchemy.Float', 'sa.Float', ([], {}), '()\n', (1110, 1112), True, 'import sqlalchemy as sa\n'), ((1865, 1876), 'sqlalchemy.String', 'sa.String', ([], {}), '()\n', (1874, 1876), True, 'import sqlalchemy as sa\n'), ((1926, 1952), 'sqlalchemy.DateTime', 'sa.DateTime', ([], {'timezone': '(True)'}), '(timezone=True)\n', (1937, 1952), True, 'import sqlalchemy as sa\n'), ((2002, 2028), 'sqlalchemy.DateTime', 'sa.DateTime', ([], {'timezone': 
'(True)'}), '(timezone=True)\n', (2013, 2028), True, 'import sqlalchemy as sa\n'), ((2097, 2167), 'sqlalchemy.Enum', 'sa.Enum', (['"""SOURCE"""', '"""TRANSFORM"""', '"""METRIC"""', '"""DIMENSION"""'], {'name': '"""nodetype"""'}), "('SOURCE', 'TRANSFORM', 'METRIC', 'DIMENSION', name='nodetype')\n", (2104, 2167), True, 'import sqlalchemy as sa\n'), ((2231, 2243), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (2241, 2243), True, 'import sqlalchemy as sa\n'), ((2294, 2328), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (2326, 2328), False, 'import sqlmodel\n'), ((2378, 2412), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (2410, 2412), False, 'import sqlmodel\n'), ((2846, 2984), 'sqlalchemy.Enum', 'sa.Enum', (['"""BYTES"""', '"""STR"""', '"""FLOAT"""', '"""INT"""', '"""DECIMAL"""', '"""BOOL"""', '"""DATETIME"""', '"""DATE"""', '"""TIME"""', '"""TIMEDELTA"""', '"""LIST"""', '"""DICT"""'], {'name': '"""columntype"""'}), "('BYTES', 'STR', 'FLOAT', 'INT', 'DECIMAL', 'BOOL', 'DATETIME',\n 'DATE', 'TIME', 'TIMEDELTA', 'LIST', 'DICT', name='columntype')\n", (2853, 2984), True, 'import sqlalchemy as sa\n'), ((3267, 3279), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (3277, 3279), True, 'import sqlalchemy as sa\n'), ((3323, 3357), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (3355, 3357), False, 'import sqlmodel\n'), ((3410, 3422), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (3420, 3422), True, 'import sqlalchemy as sa\n'), ((3503, 3537), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (3535, 3537), False, 'import sqlmodel\n'), ((4227, 4239), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (4237, 4239), True, 'import sqlalchemy as sa\n'), ((4287, 4299), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (4297, 4299), True, 'import sqlalchemy as sa\n'), ((4926, 4964), 'sqlalchemy_utils.types.uuid.UUIDType', 'sqlalchemy_utils.types.uuid.UUIDType', ([], {}), '()\n', (4962, 4964), False, 'import sqlalchemy_utils\n'), ((5016, 5028), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (5026, 5028), True, 'import sqlalchemy as sa\n'), ((5076, 5110), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (5108, 5110), False, 'import sqlmodel\n'), ((5157, 5191), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (5189, 5191), False, 'import sqlmodel\n'), ((5271, 5305), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (5303, 5305), False, 'import sqlmodel\n'), ((5382, 5416), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (5414, 5416), False, 'import sqlmodel\n'), ((5465, 5478), 'sqlalchemy.DateTime', 'sa.DateTime', ([], {}), '()\n', (5476, 5478), True, 'import sqlalchemy as sa\n'), ((5525, 5538), 'sqlalchemy.DateTime', 'sa.DateTime', ([], {}), '()\n', (5536, 5538), True, 'import sqlalchemy as sa\n'), ((5586, 5599), 'sqlalchemy.DateTime', 'sa.DateTime', ([], {}), '()\n', (5597, 5599), True, 'import sqlalchemy as sa\n'), ((5644, 5678), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (5676, 5678), False, 'import sqlmodel\n'), ((5726, 5736), 'sqlalchemy.Float', 'sa.Float', ([], {}), '()\n', (5734, 5736), True, 'import sqlalchemy as sa\n'), ((6942, 6954), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', 
(6952, 6954), True, 'import sqlalchemy as sa\n'), ((7001, 7013), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (7011, 7013), True, 'import sqlalchemy as sa\n'), ((7065, 7077), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (7075, 7077), True, 'import sqlalchemy as sa\n'), ((7125, 7159), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (7157, 7159), False, 'import sqlmodel\n'), ((7206, 7240), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (7238, 7240), False, 'import sqlmodel\n'), ((7285, 7319), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (7317, 7319), False, 'import sqlmodel\n'), ((7364, 7374), 'sqlalchemy.Float', 'sa.Float', ([], {}), '()\n', (7372, 7374), True, 'import sqlalchemy as sa\n'), ((8307, 8319), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (8317, 8319), True, 'import sqlalchemy as sa\n'), ((8368, 8380), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (8378, 8380), True, 'import sqlalchemy as sa\n'), ((8998, 9010), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (9008, 9010), True, 'import sqlalchemy as sa\n'), ((9059, 9071), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (9069, 9071), True, 'import sqlalchemy as sa\n')]
|
# from sqlmodel import select
from app.src.db.engine import get_db
from sqlmodel import Session, select
from app.src.db.manager import create_table
from app.src.models.db.product import Product
from app.src.models.db.product_type import ProductType
from app.src.models.db.tag import Tag
def test_data():
    # create the tables (how to make them dependent on a session?)
engine = get_db()
create_table()
with Session(engine) as session:
# define data
type_panini = ProductType(name="Panino", description="Tutto ciò che è panino")
cibo_tag = Tag(name="cibo")
panino = Product(
name="panino",
description="panino buono",
price=3.30,
available=True,
product_type=type_panini,
tags=[cibo_tag],
)
kebab = Product(
name="kebab",
description="senza cipolla",
price=4,
available=True,
product_type=type_panini,
tags=[cibo_tag],
)
session.add(type_panini)
session.add(cibo_tag)
session.add(panino)
session.add(kebab)
statement = select(Product)
results = session.exec(statement)
products = results.all()
assert len(products) >= 1
        statement = select(Product).where(Product.name == "kebab")
        results = session.exec(statement)
products = results.all()
assert len(products) != 0
session.rollback()
|
[
"sqlmodel.Session",
"sqlmodel.select"
] |
[((387, 395), 'app.src.db.engine.get_db', 'get_db', ([], {}), '()\n', (393, 395), False, 'from app.src.db.engine import get_db\n'), ((400, 414), 'app.src.db.manager.create_table', 'create_table', ([], {}), '()\n', (412, 414), False, 'from app.src.db.manager import create_table\n'), ((425, 440), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (432, 440), False, 'from sqlmodel import Session, select\n'), ((497, 561), 'app.src.models.db.product_type.ProductType', 'ProductType', ([], {'name': '"""Panino"""', 'description': '"""Tutto ciò che è panino"""'}), "(name='Panino', description='Tutto ciò che è panino')\n", (508, 561), False, 'from app.src.models.db.product_type import ProductType\n'), ((581, 597), 'app.src.models.db.tag.Tag', 'Tag', ([], {'name': '"""cibo"""'}), "(name='cibo')\n", (584, 597), False, 'from app.src.models.db.tag import Tag\n'), ((615, 740), 'app.src.models.db.product.Product', 'Product', ([], {'name': '"""panino"""', 'description': '"""panino buono"""', 'price': '(3.3)', 'available': '(True)', 'product_type': 'type_panini', 'tags': '[cibo_tag]'}), "(name='panino', description='panino buono', price=3.3, available=\n True, product_type=type_panini, tags=[cibo_tag])\n", (622, 740), False, 'from app.src.models.db.product import Product\n'), ((836, 958), 'app.src.models.db.product.Product', 'Product', ([], {'name': '"""kebab"""', 'description': '"""senza cipolla"""', 'price': '(4)', 'available': '(True)', 'product_type': 'type_panini', 'tags': '[cibo_tag]'}), "(name='kebab', description='senza cipolla', price=4, available=True,\n product_type=type_panini, tags=[cibo_tag])\n", (843, 958), False, 'from app.src.models.db.product import Product\n'), ((1178, 1193), 'sqlmodel.select', 'select', (['Product'], {}), '(Product)\n', (1184, 1193), False, 'from sqlmodel import Session, select\n'), ((1322, 1337), 'sqlmodel.select', 'select', (['Product'], {}), '(Product)\n', (1328, 1337), False, 'from sqlmodel import Session, select\n')]
|
from pathlib import Path
import pytest
from sqlmodel import select
from kfs import db
@pytest.fixture()
def base_dir(tmp_path: Path) -> Path:
return tmp_path
@pytest.fixture()
def sql_file_path(base_dir: Path) -> Path:
return base_dir / "kfs.db"
@pytest.fixture()
def sqlite_url(sql_file_path: Path) -> str:
return f"sqlite:///{sql_file_path}"
@pytest.fixture(autouse=True)
def database(sqlite_url: str) -> None:
db.init(sqlite_url)
def test_init(sql_file_path: Path) -> None:
"""After init, the database has been created and the file exists"""
assert sql_file_path.exists()
def test_database() -> None:
with db.get_session() as session:
# Create a new file with a tag
tag = db.Tag(category="vendor", value="chevron")
file = db.File(name="test_file.csv", path="/some/directory", tags=[tag])
session.add(file)
session.commit()
with db.get_session() as session:
# Retrieve the file from database
read_file = session.exec(select(db.File)).one()
assert file is not read_file
assert read_file.name == "test_file.csv"
assert read_file.path == "/some/directory"
assert len(read_file.tags) == 1
read_tag = read_file.tags[0]
assert read_tag.category == "vendor"
assert read_tag.value == "chevron"
assert len(read_tag.files) == 1
|
[
"sqlmodel.select"
] |
[((91, 107), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (105, 107), False, 'import pytest\n'), ((169, 185), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (183, 185), False, 'import pytest\n'), ((263, 279), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (277, 279), False, 'import pytest\n'), ((367, 395), 'pytest.fixture', 'pytest.fixture', ([], {'autouse': '(True)'}), '(autouse=True)\n', (381, 395), False, 'import pytest\n'), ((439, 458), 'kfs.db.init', 'db.init', (['sqlite_url'], {}), '(sqlite_url)\n', (446, 458), False, 'from kfs import db\n'), ((651, 667), 'kfs.db.get_session', 'db.get_session', ([], {}), '()\n', (665, 667), False, 'from kfs import db\n'), ((733, 775), 'kfs.db.Tag', 'db.Tag', ([], {'category': '"""vendor"""', 'value': '"""chevron"""'}), "(category='vendor', value='chevron')\n", (739, 775), False, 'from kfs import db\n'), ((791, 856), 'kfs.db.File', 'db.File', ([], {'name': '"""test_file.csv"""', 'path': '"""/some/directory"""', 'tags': '[tag]'}), "(name='test_file.csv', path='/some/directory', tags=[tag])\n", (798, 856), False, 'from kfs import db\n'), ((918, 934), 'kfs.db.get_session', 'db.get_session', ([], {}), '()\n', (932, 934), False, 'from kfs import db\n'), ((1022, 1037), 'sqlmodel.select', 'select', (['db.File'], {}), '(db.File)\n', (1028, 1037), False, 'from sqlmodel import select\n')]
|
"""
Database related APIs.
"""
import logging
from typing import List
from fastapi import APIRouter, Depends
from sqlmodel import Session, select
from datajunction.models.database import Database
from datajunction.utils import get_session
_logger = logging.getLogger(__name__)
router = APIRouter()
@router.get("/databases/", response_model=List[Database])
def read_databases(*, session: Session = Depends(get_session)) -> List[Database]:
"""
List the available databases.
"""
return session.exec(select(Database)).all()
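# Minimal client-side sketch (hypothetical: assumes this router is mounted on a
# FastAPI application object named `app`, which is not defined in this module):
#
#     from fastapi.testclient import TestClient
#     client = TestClient(app)
#     databases = client.get("/databases/").json()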
|
[
"sqlmodel.select"
] |
[((253, 280), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (270, 280), False, 'import logging\n'), ((290, 301), 'fastapi.APIRouter', 'APIRouter', ([], {}), '()\n', (299, 301), False, 'from fastapi import APIRouter, Depends\n'), ((403, 423), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (410, 423), False, 'from fastapi import APIRouter, Depends\n'), ((518, 534), 'sqlmodel.select', 'select', (['Database'], {}), '(Database)\n', (524, 534), False, 'from sqlmodel import Session, select\n')]
|
# This example implements the macroscopic homogenized Biot-Darcy-Brinkman model of flow in deformable
# double porous media.
# The mathematical model is described in:
#
#<NAME>., <NAME>., <NAME>.
#The Biot-Darcy-Brinkman model of flow in deformable double porous media; homogenization and numerical modelling.
# Computers and Mathematics with applications, 78(9):3044-3066, 2019,
# https://doi.org/10.1016/j.camwa.2019.04.004
#
# Run simulation:
#
# ./simple.py example_perfusion_BDB/perf_BDB_mac.py
#
# The results are stored in `example_perfusion_BDB/results/macro` directory.
#
import numpy as nm
from sfepy.homogenization.micmac import get_homog_coefs_linear
from sfepy.homogenization.utils import define_box_regions
from sfepy.discrete.fem.mesh import Mesh
import os.path as osp
material_cache = {}
data_dir = 'example_perfusion_BDB'
def coefs2qp(coefs, nqp):
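    # Broadcast each (matrix-valued) homogenized coefficient to all nqp quadrature points.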
out = {}
for k, v in coefs.items():
if type(v) not in [nm.ndarray, float]:
continue
if type(v) is nm.ndarray:
if len(v.shape) >= 3:
out[k] = v
out[k] = nm.tile(v, (nqp, 1, 1))
return out
# Get raw homogenized coefficients, recalculate them if necessary
def get_raw_coefs(problem):
if 'raw_coefs' not in material_cache:
micro_filename = material_cache['meso_filename']
coefs_filename = 'coefs_meso'
coefs_filename = osp.join(problem.conf.options.get('output_dir', '.'),
coefs_filename) + '.h5'
coefs = get_homog_coefs_linear(0, 0, None,
micro_filename=micro_filename, coefs_filename=coefs_filename)
coefs['B'] = coefs['B'][:, nm.newaxis]
material_cache['raw_coefs'] = coefs
return material_cache['raw_coefs']
#Get homogenized coefficients in quadrature points
def get_homog(coors,pb, mode, **kwargs):
if not (mode == 'qp'):
return
nqp = coors.shape[0]
coefs=get_raw_coefs(pb)
for k in coefs.keys():
v = coefs[k]
if type(v) is nm.ndarray:
if len(v.shape) == 0:
coefs[k] = v.reshape((1, 1))
elif len(v.shape) == 1:
coefs[k] = v[:, nm.newaxis]
elif isinstance(v, float):
coefs[k] = nm.array([[v]])
out = coefs2qp(coefs, nqp)
return out
#Definition of dirichlet boundary conditions
def get_ebc( coors, amplitude, cg1, cg2,const=False):
"""
Define the essential boundary conditions as a function of coordinates
`coors` of region nodes.
"""
y = coors[:, 1] - cg1
z = coors[:, 2] - cg2
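    # Paraboloid profile w.r.t. the cross-section centre of gravity (cg1, cg2), maximal
    # at the centre; with const=True a uniform value `amplitude` is used instead.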
val = amplitude*((cg1**2 - (abs(y)**2))+(cg2**2 - (abs(z)**2)))
if const:
        val = nm.ones_like(y) * amplitude
return val
#Returns value of \phi_c\bar{w}^{mes} as a material function
def get_ebc_mat( coors,pb, mode, amplitude, cg1, cg2,konst=False):
if mode == 'qp':
val = get_ebc( coors, amplitude, cg1, cg2,konst)
phic = get_raw_coefs(pb)['vol']["fraction_Zc"]
v_w1 = val[:, nm.newaxis, nm.newaxis]
return {'val': v_w1*phic}
#Definition of boundary conditions for numerical example at http://sfepy.org/sfepy_examples/example_perfusion_BDB/
def define_bc(cg1,cg2, val_in=1e2, val_out=1e2):
funs = {
'w_in': (lambda ts, coor, bc, problem, **kwargs:
get_ebc( coor, val_in, cg1, cg2),),
'w_out': (lambda ts, coor, bc, problem, **kwargs:
get_ebc( coor, val_out, cg1, cg2),),
'w_in_mat': (lambda ts,coor, problem, mode=None, **kwargs:
get_ebc_mat( coor, problem, mode, val_in,
cg1, cg2),),
'w_out_mat': (lambda ts,coor, problem, mode=None, **kwargs:
get_ebc_mat( coor, problem, mode, val_out,
cg1, cg2),),
}
mats = {
'w_in': 'w_in_mat',
'w_out': 'w_out_mat',
}
ebcs = {
'fix_u_in': ('In', {'u.all': 0.0}),
'fix_u_out': ('Out', {'u.all': 0.0}),
'w_in': ('In', {'w.0': 'w_in','w.[1,2]': 0.0}),
'w_out': ('Out', {'w.0': 'w_out','w.[1,2]': 0.0}),
'wB_dirichlet':('Bottom',{'w.2' :0.0,'u.2':0.0}),
'WT_dirichlet':('Top',{'w.2' :0.0,'u.2':0.0}),
'wN_dirichlet':('Near',{'w.1' :0.0,'u.1':0.0}),
'wF_dirichlet':('Far',{'w.1' :0.0,'u.1':0.0}),
}
lcbcs = {
'imv': ('Omega', {'ls.all' : None}, None, 'integral_mean_value')
}
return ebcs, funs, mats, lcbcs
#Definition of macroscopic problem
def define(filename_mesh=None,cg1=None, cg2=None):
if filename_mesh is None:
filename_mesh = osp.join(data_dir, 'macro_perf.vtk')
cg1, cg2 = 0.0015, 0.0015 # y and z coordinates of center of gravity
mesh = Mesh.from_file(filename_mesh)
poroela_mezo_file = osp.join(data_dir,'perf_BDB_mes.py')
material_cache['meso_filename']=poroela_mezo_file
bbox = mesh.get_bounding_box()
regions = define_box_regions(mesh.dim, bbox[0], bbox[1], eps=1e-6)
regions.update({
'Omega': 'all',
'Wall': ('r.Top +v r.Bottom +v r.Far +v r.Near', 'facet'),
'In': ('r.Left -v r.Wall', 'facet'),
'Out': ('r.Right -v r.Wall', 'facet'),
})
ebcs, bc_funs, mats, lcbcs = define_bc(cg1,cg2,val_in=1.e4,val_out=1.e4)
fields = {
'displacement': ('real', 'vector', 'Omega', 1),
'pressure': ('real', 'scalar', 'Omega', 1),
'velocity': ('real', 'vector', 'Omega', 2),
}
variables = {
#Displacement
'u': ('unknown field', 'displacement'),
'v': ('test field', 'displacement', 'u'),
#Pressure
'p': ('unknown field', 'pressure'),
'q': ('test field', 'pressure', 'p'),
'ls': ('unknown field', 'pressure'),
'lv': ('test field', 'pressure', 'ls'),
#Velocity
'w': ('unknown field', 'velocity'),
'z': ('test field', 'velocity', 'w'),
}
functions = {
'get_homog': (lambda ts, coors, problem, mode=None, **kwargs: \
get_homog(coors,problem, mode, **kwargs),), }
functions.update(bc_funs)
materials = {
'hom': 'get_homog',
}
materials.update(mats)
integrals = {
'i': 4,
"is": ("s", 4),
}
#Definition of solvers
solvers = {
'ls': ('ls.mumps', {}),
'newton': ('nls.newton',
{'i_max': 2,
'eps_a': 1e-12,
'eps_r': 1e-3,
'problem': 'nonlinear',
})
}
#Definition of macroscopic equations, see (43)
equations = {
'eq1': """
dw_lin_elastic.i.Omega(hom.A, v, u)
- dw_biot.i.Omega(hom.B, v, p)
- dw_v_dot_grad_s.i.Omega(hom.PT, v, p)
- dw_volume_dot.i.Omega(hom.H, v, w)
= 0""",
'eq2': """
dw_diffusion.i.Omega(hom.K, q, p)
- dw_v_dot_grad_s.i.Omega(hom.P, w, q)+ dw_volume_dot.i.Omega( q,ls )
= + dw_surface_integrate.is.In(w_in.val, q) - dw_surface_integrate.is.Out(w_out.val, q)
""",
'eq3': """
dw_lin_elastic.i.Omega(hom.S, z, w)
+ dw_volume_dot.i.Omega(hom.H, z, w)
+ dw_v_dot_grad_s.i.Omega(hom.PT, z, p)
= 0""",
'eq_imv': 'dw_volume_dot.i.Omega( lv, p ) = 0',
}
options = {
'output_dir': data_dir + '/results/macro',
'ls': 'ls',
'nls': 'newton',
'micro_filename' : poroela_mezo_file,
'absolute_mesh_path': True,
}
return locals()
|
[
"sfepy.homogenization.micmac.get_homog_coefs_linear",
"sfepy.homogenization.utils.define_box_regions",
"sfepy.discrete.fem.mesh.Mesh.from_file"
] |
[((4957, 4986), 'sfepy.discrete.fem.mesh.Mesh.from_file', 'Mesh.from_file', (['filename_mesh'], {}), '(filename_mesh)\n', (4971, 4986), False, 'from sfepy.discrete.fem.mesh import Mesh\n'), ((5012, 5049), 'os.path.join', 'osp.join', (['data_dir', '"""perf_BDB_mes.py"""'], {}), "(data_dir, 'perf_BDB_mes.py')\n", (5020, 5049), True, 'import os.path as osp\n'), ((5157, 5214), 'sfepy.homogenization.utils.define_box_regions', 'define_box_regions', (['mesh.dim', 'bbox[0]', 'bbox[1]'], {'eps': '(1e-06)'}), '(mesh.dim, bbox[0], bbox[1], eps=1e-06)\n', (5175, 5214), False, 'from sfepy.homogenization.utils import define_box_regions\n'), ((1140, 1163), 'numpy.tile', 'nm.tile', (['v', '(nqp, 1, 1)'], {}), '(v, (nqp, 1, 1))\n', (1147, 1163), True, 'import numpy as nm\n'), ((1576, 1676), 'sfepy.homogenization.micmac.get_homog_coefs_linear', 'get_homog_coefs_linear', (['(0)', '(0)', 'None'], {'micro_filename': 'micro_filename', 'coefs_filename': 'coefs_filename'}), '(0, 0, None, micro_filename=micro_filename,\n coefs_filename=coefs_filename)\n', (1598, 1676), False, 'from sfepy.homogenization.micmac import get_homog_coefs_linear\n'), ((4827, 4863), 'os.path.join', 'osp.join', (['data_dir', '"""macro_perf.vtk"""'], {}), "(data_dir, 'macro_perf.vtk')\n", (4835, 4863), True, 'import os.path as osp\n'), ((2800, 2815), 'numpy.ones_like', 'nm.ones_like', (['y'], {}), '(y)\n', (2812, 2815), True, 'import numpy as nm\n'), ((2352, 2367), 'numpy.array', 'nm.array', (['[[v]]'], {}), '([[v]])\n', (2360, 2367), True, 'import numpy as nm\n')]
|
"""
Functions to visualize the CMesh geometry and topology.
"""
from sfepy.postprocess.plot_dofs import _get_axes, _to2d
def plot_wireframe(ax, cmesh, color='k'):
"""
Plot a finite element mesh as a wireframe using edges connectivity.
"""
coors = cmesh.coors
coors = _to2d(coors)
dim = cmesh.dim
ax = _get_axes(ax, dim)
edges = cmesh.get_conn(1, 0)
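    # The (1, 0) connectivity stores, for each edge, the ids of its two vertices;
    # reshaping to (num_edges, 2) yields one vertex pair per row.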
for edge_vertices in edges.indices.reshape((edges.num, 2)):
cc = coors[edge_vertices]
ax.plot(*cc.T, color=color)
return ax
def plot_entities(ax, cmesh, edim, color='b', size=10):
"""
Plot mesh topology entities using scatter plot.
"""
coors = cmesh.get_centroids(edim)
coors = _to2d(coors)
dim = cmesh.dim
ax = _get_axes(ax, dim)
ax.scatter(*coors.T, s=size, c=color)
return ax
def label_global_entities(ax, cmesh, edim, color='b', fontsize=10):
"""
Label mesh topology entities using global ids.
"""
coors = cmesh.get_centroids(edim)
coors = _to2d(coors)
dim = cmesh.dim
ax = _get_axes(ax, dim)
for ii, cc in enumerate(coors):
ax.text(*cc.T, s=ii, color=color, fontsize=fontsize)
return ax
def label_local_entities(ax, cmesh, edim, color='b', fontsize=10):
"""
Label mesh topology entities using cell-local ids.
"""
coors = cmesh.get_centroids(edim)
coors = _to2d(coors)
dim = cmesh.dim
centres = cmesh.get_centroids(dim)
cmesh.setup_connectivity(dim, edim)
conn = cmesh.get_conn(dim, edim)
off = conn.offsets
ax = _get_axes(ax, dim)
eps = 0.1
oeps = 1.0 - eps
    for ii in range(conn.num):
for ic, ie in enumerate(conn.indices[off[ii]:off[ii+1]]):
# Shift labels towards the cell centre.
cc = oeps * coors[ie] + eps * centres[ii]
ax.text(*cc.T, s=ic, color=color, fontsize=fontsize)
return ax
|
[
"sfepy.postprocess.plot_dofs._to2d",
"sfepy.postprocess.plot_dofs._get_axes"
] |
[((288, 300), 'sfepy.postprocess.plot_dofs._to2d', '_to2d', (['coors'], {}), '(coors)\n', (293, 300), False, 'from sfepy.postprocess.plot_dofs import _get_axes, _to2d\n'), ((331, 349), 'sfepy.postprocess.plot_dofs._get_axes', '_get_axes', (['ax', 'dim'], {}), '(ax, dim)\n', (340, 349), False, 'from sfepy.postprocess.plot_dofs import _get_axes, _to2d\n'), ((708, 720), 'sfepy.postprocess.plot_dofs._to2d', '_to2d', (['coors'], {}), '(coors)\n', (713, 720), False, 'from sfepy.postprocess.plot_dofs import _get_axes, _to2d\n'), ((751, 769), 'sfepy.postprocess.plot_dofs._get_axes', '_get_axes', (['ax', 'dim'], {}), '(ax, dim)\n', (760, 769), False, 'from sfepy.postprocess.plot_dofs import _get_axes, _to2d\n'), ((1013, 1025), 'sfepy.postprocess.plot_dofs._to2d', '_to2d', (['coors'], {}), '(coors)\n', (1018, 1025), False, 'from sfepy.postprocess.plot_dofs import _get_axes, _to2d\n'), ((1056, 1074), 'sfepy.postprocess.plot_dofs._get_axes', '_get_axes', (['ax', 'dim'], {}), '(ax, dim)\n', (1065, 1074), False, 'from sfepy.postprocess.plot_dofs import _get_axes, _to2d\n'), ((1377, 1389), 'sfepy.postprocess.plot_dofs._to2d', '_to2d', (['coors'], {}), '(coors)\n', (1382, 1389), False, 'from sfepy.postprocess.plot_dofs import _get_axes, _to2d\n'), ((1560, 1578), 'sfepy.postprocess.plot_dofs._get_axes', '_get_axes', (['ax', 'dim'], {}), '(ax, dim)\n', (1569, 1578), False, 'from sfepy.postprocess.plot_dofs import _get_axes, _to2d\n')]
|
from sqlmodel import select
from sqlalchemy.sql import expression
from sqlalchemy.exc import SQLAlchemyError
from sqlalchemy.orm import defer
from typing import Any
from app.db import models, pagination, session_scope
from app.logs import fastapi_logger
def get_user(email: str) -> Any:
""" Get User Data based on email"""
try:
with session_scope() as db:
statement = select(models.User).where(
models.User.email == email).options(defer('password'))
results = db.exec(statement)
data = results.one()
return data
except SQLAlchemyError as e:
fastapi_logger.exception("get_user")
return None
def get_user_password(email: str) -> Any:
""" Get User Password based on email"""
try:
with session_scope() as db:
statement = select(models.User).where(
models.User.email == email)
results = db.exec(statement)
data = results.one()
return data
except SQLAlchemyError as e:
fastapi_logger.exception("get_user")
return None
def get_active_user(email: str) -> Any:
""" Get User Data based on email and active status"""
try:
with session_scope() as db:
statement = select(models.User).where(
models.User.email == email).where(
models.User.is_active == expression.true()).options(defer('password'))
results = db.exec(statement)
data = results.one()
return data
except SQLAlchemyError as e:
fastapi_logger.exception("get_user")
return None
|
[
"sqlmodel.select"
] |
[((363, 378), 'app.db.session_scope', 'session_scope', ([], {}), '()\n', (376, 378), False, 'from app.db import models, pagination, session_scope\n'), ((654, 690), 'app.logs.fastapi_logger.exception', 'fastapi_logger.exception', (['"""get_user"""'], {}), "('get_user')\n", (678, 690), False, 'from app.logs import fastapi_logger\n'), ((826, 841), 'app.db.session_scope', 'session_scope', ([], {}), '()\n', (839, 841), False, 'from app.db import models, pagination, session_scope\n'), ((1090, 1126), 'app.logs.fastapi_logger.exception', 'fastapi_logger.exception', (['"""get_user"""'], {}), "('get_user')\n", (1114, 1126), False, 'from app.logs import fastapi_logger\n'), ((1274, 1289), 'app.db.session_scope', 'session_scope', ([], {}), '()\n', (1287, 1289), False, 'from app.db import models, pagination, session_scope\n'), ((1633, 1669), 'app.logs.fastapi_logger.exception', 'fastapi_logger.exception', (['"""get_user"""'], {}), "('get_user')\n", (1657, 1669), False, 'from app.logs import fastapi_logger\n'), ((491, 508), 'sqlalchemy.orm.defer', 'defer', (['"""password"""'], {}), "('password')\n", (496, 508), False, 'from sqlalchemy.orm import defer\n'), ((1470, 1487), 'sqlalchemy.orm.defer', 'defer', (['"""password"""'], {}), "('password')\n", (1475, 1487), False, 'from sqlalchemy.orm import defer\n'), ((874, 893), 'sqlmodel.select', 'select', (['models.User'], {}), '(models.User)\n', (880, 893), False, 'from sqlmodel import select\n'), ((411, 430), 'sqlmodel.select', 'select', (['models.User'], {}), '(models.User)\n', (417, 430), False, 'from sqlmodel import select\n'), ((1443, 1460), 'sqlalchemy.sql.expression.true', 'expression.true', ([], {}), '()\n', (1458, 1460), False, 'from sqlalchemy.sql import expression\n'), ((1322, 1341), 'sqlmodel.select', 'select', (['models.User'], {}), '(models.User)\n', (1328, 1341), False, 'from sqlmodel import select\n')]
|
from datetime import date
from typing import Optional
from sqlmodel import SQLModel, Field
# if TYPE_CHECKING:
# from app.src.models.db.order import Order
class AppUserBase(SQLModel):
username: str
name: Optional[str]
surname: Optional[str]
birth_date: Optional[date]
email: str
password: str
isAdmin: bool = False
class AppUser(AppUserBase, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
class AppUserCreate(AppUserBase):
pass
class AppUserRead(AppUserBase):
id: int
# In the update model, all attributes must be optional
class AppUserUpdate(AppUserBase):
name: Optional[str] = None
surname: Optional[str] = None
birth_date: Optional[date] = None
username: Optional[str] = None
email: str
password: str
isAdmin: Optional[bool] = None
|
[
"sqlmodel.Field"
] |
[((415, 452), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (420, 452), False, 'from sqlmodel import SQLModel, Field\n')]
|
import scrapy
from imdb_rating.dependencies.models import Movie
from pydantic import ValidationError
from scrapy.crawler import CrawlerProcess
from sqlmodel import Session, select
class IMDBSpider(scrapy.Spider):
name = "imdb"
custom_settings = {"FEED_EXPORT_ENCODING": "utf-8"}
def start_requests(self):
"""
This method is called by Scrapy to start the crawl.
"""
self.start = self.start.strftime("%Y-%m-%d")
self.end = self.end.strftime("%Y-%m-%d")
yield scrapy.Request(
url=f"https://www.imdb.com/search/title/?title_type=feature&year={self.start},{self.end}&start=1",
callback=self.parse,
)
def parse(self, response):
"""
This method is called by Scrapy to parse the response.
Parameters
----------
response : scrapy.http.Response
The response from the server.
Yields
------
scrapy.http.Request
The next request to be crawled.
"""
for film in response.xpath('//*[@id="main"]/div/div[3]/div/div'):
try:
title = film.xpath(".//div[3]/h3/a/text()").get()
except:
title = None
try:
year = (
film.xpath(".//div[3]/h3/span[2]/text()")
.get()
.split(" ")[-1]
.replace("(", "")
.replace(")", "")
)
except:
year = None
try:
rating = film.xpath(".//div[3]/div/div/strong/text()").get()
except:
rating = None
try:
duration = film.css("span.runtime::text").get().replace(" min", "")
except:
duration = None
try:
                votes = film.xpath(".//div[3]/p[4]/span[2]/@data-value").get()
except:
votes = None
try:
genres = film.css("span.genre::text").get().split(", ")
genres = [genre.strip() for genre in genres]
genres.extend([None for _ in range(3 - len(genres))])
genre1, genre2, genre3 = genres[:3]
except:
genre1, genre2, genre3 = None, None, None
try:
certificate = film.css("span.certificate::text").get()
except:
certificate = None
try:
synopsis = film.xpath(".//div[3]/p[2]/text()").get().strip()
except:
synopsis = None
try:
image = film.xpath(".//div[2]/a/img/@loadlate").get().split("._V1_")[0]
except:
image = None
try:
cast = film.xpath(".//div[3]/p[3]/*/text()").getall()
split = cast.index("|")
directors = cast[:split]
directors.extend([None for _ in range(3 - len(directors))])
director1, director2, director3 = directors[:3]
actors = cast[split + 1 :]
actors.extend([None for _ in range(3 - len(actors))])
actor1, actor2, actor3 = actors[:3]
except:
actor1, actor2, actor3 = None, None, None
director1, director2, director3 = None, None, None
try:
movie = Movie.validate(
dict(
title=title,
year=year,
actual_rating=rating,
votes=votes,
duration=duration,
certificate=certificate,
synopsis=synopsis,
image=image,
actor1=actor1,
actor2=actor2,
actor3=actor3,
director1=director1,
director2=director2,
director3=director3,
genre1=genre1,
genre2=genre2,
genre3=genre3,
)
)
with Session(self.engine) as session:
statement = select(Movie).where(
Movie.title == movie.title and Movie.year == movie.year
)
results = session.exec(statement)
movie_orig = results.first()
if movie_orig:
movie_orig_values = dict(movie_orig)
movie_orig_values.pop("id")
movie_new_values = dict(movie)
movie_new_values.pop("id")
if movie_orig_values != movie_new_values:
for key, value in movie_new_values.items():
setattr(movie_orig, key, value)
session.add(movie_orig)
session.commit()
else:
session.add(movie)
session.commit()
except ValidationError:
continue
try:
next_page = response.css("a.next-page::attr(href)").get()
yield response.follow(next_page, callback=self.parse)
except:
pass
if __name__ == "__main__":
from datetime import datetime, timedelta
start = datetime.today() - timedelta(days=90)
end = datetime.today() + timedelta(days=30)
process = CrawlerProcess({"FEED_FORMAT": "json", "FEED_URI": f"data/test.json"})
process.crawl(IMDBSpider, start=start, end=end)
process.start()
|
[
"sqlmodel.Session",
"sqlmodel.select"
] |
[((5642, 5712), 'scrapy.crawler.CrawlerProcess', 'CrawlerProcess', (["{'FEED_FORMAT': 'json', 'FEED_URI': f'data/test.json'}"], {}), "({'FEED_FORMAT': 'json', 'FEED_URI': f'data/test.json'})\n", (5656, 5712), False, 'from scrapy.crawler import CrawlerProcess\n'), ((5541, 5557), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (5555, 5557), False, 'from datetime import datetime, timedelta\n'), ((5560, 5578), 'datetime.timedelta', 'timedelta', ([], {'days': '(90)'}), '(days=90)\n', (5569, 5578), False, 'from datetime import datetime, timedelta\n'), ((5589, 5605), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (5603, 5605), False, 'from datetime import datetime, timedelta\n'), ((5608, 5626), 'datetime.timedelta', 'timedelta', ([], {'days': '(30)'}), '(days=30)\n', (5617, 5626), False, 'from datetime import datetime, timedelta\n'), ((520, 664), 'scrapy.Request', 'scrapy.Request', ([], {'url': 'f"""https://www.imdb.com/search/title/?title_type=feature&year={self.start},{self.end}&start=1"""', 'callback': 'self.parse'}), "(url=\n f'https://www.imdb.com/search/title/?title_type=feature&year={self.start},{self.end}&start=1'\n , callback=self.parse)\n", (534, 664), False, 'import scrapy\n'), ((4252, 4272), 'sqlmodel.Session', 'Session', (['self.engine'], {}), '(self.engine)\n', (4259, 4272), False, 'from sqlmodel import Session, select\n'), ((4317, 4330), 'sqlmodel.select', 'select', (['Movie'], {}), '(Movie)\n', (4323, 4330), False, 'from sqlmodel import Session, select\n')]
|
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
import copy
from typing import Any, Mapping
import megengine as mge
import megengine.functional as F
import megengine.module as M
from .modules import SE, activation, conv2d, gap2d, linear, norm2d
__all__ = ["build_head", "ClsHead", "MBV3Head", "VGGHead"]
def build_head(
w_in: int, head_args: Mapping[str, Any] = None, norm_name: str = "BN", act_name: str = "relu"
) -> M.Module:
"""The factory function to build head.
Note:
if ``head_args`` is ``None`` or ``head_args["name"]`` is ``None``, this function will do
nothing and return ``None``.
Args:
w_in: input width.
head_args: head args. Default: ``None``
norm_name: default normalization function, will be overridden by the same key in
``head_args``. Default: ``"BN"``
act_name: default activation function, will be overridden by the same key in ``head_args``.
Default: ``"relu"``
Returns:
A head.
"""
if head_args is None:
return None
head_args = copy.deepcopy(head_args)
head_name = head_args.pop("name", None)
if head_name is None:
return None
head_args["w_in"] = w_in
head_args.setdefault("norm_name", norm_name)
head_args.setdefault("act_name", act_name)
if callable(head_name):
return head_name(**head_args)
if isinstance(head_name, str):
head_funcs = {
"ClsHead": ClsHead,
"MBV3Head": MBV3Head,
"VGGHead": VGGHead,
}
if head_name in head_funcs:
return head_funcs[head_name](**head_args)
raise ValueError(f"Head '{head_name}' not supported")
class ClsHead(M.Module):
"""Cls head: Conv, BN, Act, AvgPool, FC.
Args:
w_in: input width.
w_out: output width, normally the number of classes. Default: ``1000``
width: width for first conv in head, conv will be omitted if set to 0. Default: ``0``
dropout_prob: dropout probability. Default: ``0.0``
norm_name: normalization function. Default: ``"BN"``
act_name: activation function. Default: ``"relu"``
bias: whether fc has bias. Default: ``True``
"""
def __init__(
self,
w_in: int,
w_out: int = 1000,
width: int = 0,
dropout_prob: float = 0.0,
norm_name: str = "BN",
act_name: str = "relu",
bias: bool = True,
):
super().__init__()
self.width = width
if self.width > 0:
self.conv = conv2d(w_in, self.width, 1)
self.bn = norm2d(norm_name, self.width)
self.act = activation(act_name)
w_in = self.width
self.avg_pool = gap2d()
if dropout_prob > 0.0:
self.dropout = M.Dropout(dropout_prob)
self.fc = linear(w_in, w_out, bias=bias)
def forward(self, x: mge.Tensor) -> mge.Tensor:
if self.width > 0:
x = self.conv(x)
x = self.bn(x)
x = self.act(x)
x = self.avg_pool(x)
x = F.flatten(x, 1)
if getattr(self, "dropout", None) is not None:
x = self.dropout(x)
x = self.fc(x)
return x
class MBV3Head(M.Module):
"""MobileNet V3 head: Conv, BN, Act, AvgPool, SE, FC, Act, FC.
Args:
w_in: input width.
w_out: output width, normally the number of classes.
width: width for first conv in head.
w_h: width for first linear in head.
dropout_prob: dropout probability. Default: ``0.0``
se_r: Squeeze-and-Excitation (SE) ratio. Default: ``0.0``
norm_name: normalization function. Default: ``"BN"``
act_name: activation function. Default: ``"hswish"``
bias: whether fc has bias. Default: ``True``
"""
def __init__(
self,
w_in: int,
w_out: int = 1000,
width: int = 960,
w_h: int = 1280,
dropout_prob: float = 0.0,
se_r: float = 0.0,
norm_name: str = "BN",
act_name: str = "hswish",
bias: bool = True,
):
super().__init__()
self.conv = conv2d(w_in, width, 1)
self.bn = norm2d(norm_name, width)
self.act = activation(act_name)
self.avg_pool = gap2d()
if se_r > 0.0:
self.se = SE(width, int(se_r * width), act_name)
self.h_fc = linear(width, w_h, bias=bias)
self.h_act = activation(act_name)
if dropout_prob > 0.0:
self.dropout = M.Dropout(dropout_prob)
self.fc = linear(w_h, w_out, bias=bias)
def forward(self, x: mge.Tensor) -> mge.Tensor:
x = self.conv(x)
x = self.bn(x)
x = self.act(x)
x = self.avg_pool(x)
if getattr(self, "se", None) is not None:
x = self.se(x)
x = F.flatten(x, 1)
x = self.h_fc(x)
x = self.h_act(x)
if getattr(self, "dropout", None) is not None:
x = self.dropout(x)
x = self.fc(x)
return x
class VGGHead(M.Module):
"""VGG head: AvgPool, [FC, Act, Dropout] x2, FC.
Args:
w_in: input width.
w_out: output width, normally the number of classes. Default: ``1000``
width: width for linear in head. Default: ``4096``
dropout_prob: dropout probability. Default: ``0.5``
act_name: activation function. Default: ``"relu"``
"""
def __init__(
self,
w_in: int,
w_out: int = 1000,
width: int = 4096,
dropout_prob: float = 0.5,
act_name: str = "relu",
**kwargs,
):
super().__init__()
self.avg_pool = gap2d(7)
self.classifier = M.Sequential(
linear(w_in * 7 * 7, width, bias=True),
activation(act_name),
M.Dropout(dropout_prob),
linear(width, width, bias=True),
activation(act_name),
M.Dropout(dropout_prob),
linear(width, w_out, bias=True),
)
def forward(self, x: mge.Tensor) -> mge.Tensor:
x = self.avg_pool(x)
x = F.flatten(x, 1)
x = self.classifier(x)
return x
|
[
"megengine.module.Dropout",
"megengine.functional.flatten"
] |
[((1112, 1136), 'copy.deepcopy', 'copy.deepcopy', (['head_args'], {}), '(head_args)\n', (1125, 1136), False, 'import copy\n'), ((3120, 3135), 'megengine.functional.flatten', 'F.flatten', (['x', '(1)'], {}), '(x, 1)\n', (3129, 3135), True, 'import megengine.functional as F\n'), ((4881, 4896), 'megengine.functional.flatten', 'F.flatten', (['x', '(1)'], {}), '(x, 1)\n', (4890, 4896), True, 'import megengine.functional as F\n'), ((6144, 6159), 'megengine.functional.flatten', 'F.flatten', (['x', '(1)'], {}), '(x, 1)\n', (6153, 6159), True, 'import megengine.functional as F\n'), ((2842, 2865), 'megengine.module.Dropout', 'M.Dropout', (['dropout_prob'], {}), '(dropout_prob)\n', (2851, 2865), True, 'import megengine.module as M\n'), ((4566, 4589), 'megengine.module.Dropout', 'M.Dropout', (['dropout_prob'], {}), '(dropout_prob)\n', (4575, 4589), True, 'import megengine.module as M\n'), ((5854, 5877), 'megengine.module.Dropout', 'M.Dropout', (['dropout_prob'], {}), '(dropout_prob)\n', (5863, 5877), True, 'import megengine.module as M\n'), ((5970, 5993), 'megengine.module.Dropout', 'M.Dropout', (['dropout_prob'], {}), '(dropout_prob)\n', (5979, 5993), True, 'import megengine.module as M\n')]
|
"""
Elapsed time measurement utilities.
"""
import time
from sfepy.base.base import Struct
class Timer(Struct):
def __init__(self, name='timer', start=False):
Struct.__init__(self, name=name)
self.time_function = time.perf_counter
self.reset()
if start:
self.start()
def reset(self):
self.t0 = self.t1 = None
self.total = self.dt = 0.0
def start(self, reset=False):
if reset: self.reset()
self.t1 = None
self.t0 = self.time_function()
def stop(self):
self.t1 = self.time_function()
if self.t0 is None:
raise ValueError('timer "%s" was not started!' % self.name)
self.dt = self.t1 - self.t0
self.total += self.dt
return self.dt
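# Minimal usage sketch (hypothetical timed section):
#
#     timer = Timer(start=True)
#     ...                    # code to measure
#     dt = timer.stop()      # elapsed seconds of this interval
#     timer.start()
#     timer.stop()
#     print(timer.total)     # accumulated time over all start/stop cycles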
|
[
"sfepy.base.base.Struct.__init__"
] |
[((174, 206), 'sfepy.base.base.Struct.__init__', 'Struct.__init__', (['self'], {'name': 'name'}), '(self, name=name)\n', (189, 206), False, 'from sfepy.base.base import Struct\n')]
|
from typing import Optional
from sqlmodel import Field, Session, SQLModel, create_engine
def test_query(clear_sqlmodel):
class Hero(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str
secret_name: str
age: Optional[int] = None
hero_1 = Hero(name="Deadpond", secret_name="<NAME>")
engine = create_engine("sqlite://")
SQLModel.metadata.create_all(engine)
with Session(engine) as session:
session.add(hero_1)
session.commit()
session.refresh(hero_1)
with Session(engine) as session:
query_hero = session.query(Hero).first()
assert query_hero
assert query_hero.name == hero_1.name
|
[
"sqlmodel.SQLModel.metadata.create_all",
"sqlmodel.Field",
"sqlmodel.create_engine",
"sqlmodel.Session"
] |
[((377, 403), 'sqlmodel.create_engine', 'create_engine', (['"""sqlite://"""'], {}), "('sqlite://')\n", (390, 403), False, 'from sqlmodel import Field, Session, SQLModel, create_engine\n'), ((409, 445), 'sqlmodel.SQLModel.metadata.create_all', 'SQLModel.metadata.create_all', (['engine'], {}), '(engine)\n', (437, 445), False, 'from sqlmodel import Field, Session, SQLModel, create_engine\n'), ((190, 227), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (195, 227), False, 'from sqlmodel import Field, Session, SQLModel, create_engine\n'), ((455, 470), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (462, 470), False, 'from sqlmodel import Field, Session, SQLModel, create_engine\n'), ((578, 593), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (585, 593), False, 'from sqlmodel import Field, Session, SQLModel, create_engine\n')]
|
from __future__ import annotations
from fastapi import Depends
from fastapi.security import OAuth2PasswordBearer
from sqlmodel import Session, select
from src.database import engine
from src.models import User
from src.services.auth import decode_jwt
from .http_exceptions import credentials_exception
def get_db():
with Session(engine) as session:
yield session
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="/auth/sign-in")
def get_current_user(
db: Session = Depends(get_db), token: str = Depends(oauth2_scheme)
):
token_data = decode_jwt(token)
if token_data is None or token_data.email is None:
raise credentials_exception
stmt = select(User).where(User.email == token_data.email)
user = db.exec(stmt).first()
if user is None:
raise credentials_exception
return user
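# Minimal usage sketch (hypothetical route, assuming an APIRouter named `router`):
#
#     @router.get("/users/me")
#     def read_me(current_user: User = Depends(get_current_user)):
#         return current_user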
|
[
"sqlmodel.Session",
"sqlmodel.select"
] |
[((398, 444), 'fastapi.security.OAuth2PasswordBearer', 'OAuth2PasswordBearer', ([], {'tokenUrl': '"""/auth/sign-in"""'}), "(tokenUrl='/auth/sign-in')\n", (418, 444), False, 'from fastapi.security import OAuth2PasswordBearer\n'), ((487, 502), 'fastapi.Depends', 'Depends', (['get_db'], {}), '(get_db)\n', (494, 502), False, 'from fastapi import Depends\n'), ((517, 539), 'fastapi.Depends', 'Depends', (['oauth2_scheme'], {}), '(oauth2_scheme)\n', (524, 539), False, 'from fastapi import Depends\n'), ((560, 577), 'src.services.auth.decode_jwt', 'decode_jwt', (['token'], {}), '(token)\n', (570, 577), False, 'from src.services.auth import decode_jwt\n'), ((330, 345), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (337, 345), False, 'from sqlmodel import Session, select\n'), ((681, 693), 'sqlmodel.select', 'select', (['User'], {}), '(User)\n', (687, 693), False, 'from sqlmodel import Session, select\n')]
|
"""initial-db-tables
Revision ID: d925cb39480e
Revises:
Create Date: 2022-05-05 11:45:18.781171
"""
from alembic import op
import sqlalchemy as sa
import sqlmodel
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = "d925cb39480e"
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"contact",
sa.Column(
"contact_id",
postgresql.UUID(as_uuid=True),
server_default=sa.text("gen_random_uuid()"),
nullable=False,
),
sa.Column("tags", postgresql.ARRAY(sa.String()), nullable=True),
sa.Column(
"created_at",
postgresql.TIMESTAMP(),
server_default=sa.text("now()"),
nullable=False,
),
sa.Column(
"updated_at",
postgresql.TIMESTAMP(),
server_default=sa.text("now()"),
nullable=False,
),
sa.Column("author_status", sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column("endorse_status", sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column("connection_id", sqlmodel.sql.sqltypes.GUID(), nullable=False),
sa.Column("connection_protocol", sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column(
"connection_alias", sqlmodel.sql.sqltypes.AutoString(), nullable=True
),
sa.Column("public_did", sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column("state", sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.PrimaryKeyConstraint("contact_id"),
)
op.create_table(
"endorserequest",
sa.Column(
"endorse_request_id",
postgresql.UUID(as_uuid=True),
server_default=sa.text("gen_random_uuid()"),
nullable=False,
),
sa.Column("tags", postgresql.ARRAY(sa.String()), nullable=True),
sa.Column(
"created_at",
postgresql.TIMESTAMP(),
server_default=sa.text("now()"),
nullable=False,
),
sa.Column(
"updated_at",
postgresql.TIMESTAMP(),
server_default=sa.text("now()"),
nullable=False,
),
sa.Column("transaction_id", sqlmodel.sql.sqltypes.GUID(), nullable=False),
sa.Column("connection_id", sqlmodel.sql.sqltypes.GUID(), nullable=False),
sa.Column("endorser_did", sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column("author_did", sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column(
"transaction_type", sqlmodel.sql.sqltypes.AutoString(), nullable=False
),
sa.Column("state", sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column("ledger_txn", sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.PrimaryKeyConstraint("endorse_request_id"),
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table("endorserequest")
op.drop_table("contact")
# ### end Alembic commands ###
|
[
"sqlmodel.sql.sqltypes.GUID",
"sqlmodel.sql.sqltypes.AutoString"
] |
[((3169, 3200), 'alembic.op.drop_table', 'op.drop_table', (['"""endorserequest"""'], {}), "('endorserequest')\n", (3182, 3200), False, 'from alembic import op\n'), ((3205, 3229), 'alembic.op.drop_table', 'op.drop_table', (['"""contact"""'], {}), "('contact')\n", (3218, 3229), False, 'from alembic import op\n'), ((1683, 1720), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""contact_id"""'], {}), "('contact_id')\n", (1706, 1720), True, 'import sqlalchemy as sa\n'), ((2992, 3037), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""endorse_request_id"""'], {}), "('endorse_request_id')\n", (3015, 3037), True, 'import sqlalchemy as sa\n'), ((516, 545), 'sqlalchemy.dialects.postgresql.UUID', 'postgresql.UUID', ([], {'as_uuid': '(True)'}), '(as_uuid=True)\n', (531, 545), False, 'from sqlalchemy.dialects import postgresql\n'), ((773, 795), 'sqlalchemy.dialects.postgresql.TIMESTAMP', 'postgresql.TIMESTAMP', ([], {}), '()\n', (793, 795), False, 'from sqlalchemy.dialects import postgresql\n'), ((938, 960), 'sqlalchemy.dialects.postgresql.TIMESTAMP', 'postgresql.TIMESTAMP', ([], {}), '()\n', (958, 960), False, 'from sqlalchemy.dialects import postgresql\n'), ((1081, 1115), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (1113, 1115), False, 'import sqlmodel\n'), ((1170, 1204), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (1202, 1204), False, 'import sqlmodel\n'), ((1258, 1286), 'sqlmodel.sql.sqltypes.GUID', 'sqlmodel.sql.sqltypes.GUID', ([], {}), '()\n', (1284, 1286), False, 'import sqlmodel\n'), ((1346, 1380), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (1378, 1380), False, 'import sqlmodel\n'), ((1450, 1484), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (1482, 1484), False, 'import sqlmodel\n'), ((1543, 1577), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (1575, 1577), False, 'import sqlmodel\n'), ((1622, 1656), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (1654, 1656), False, 'import sqlmodel\n'), ((1840, 1869), 'sqlalchemy.dialects.postgresql.UUID', 'postgresql.UUID', ([], {'as_uuid': '(True)'}), '(as_uuid=True)\n', (1855, 1869), False, 'from sqlalchemy.dialects import postgresql\n'), ((2097, 2119), 'sqlalchemy.dialects.postgresql.TIMESTAMP', 'postgresql.TIMESTAMP', ([], {}), '()\n', (2117, 2119), False, 'from sqlalchemy.dialects import postgresql\n'), ((2262, 2284), 'sqlalchemy.dialects.postgresql.TIMESTAMP', 'postgresql.TIMESTAMP', ([], {}), '()\n', (2282, 2284), False, 'from sqlalchemy.dialects import postgresql\n'), ((2406, 2434), 'sqlmodel.sql.sqltypes.GUID', 'sqlmodel.sql.sqltypes.GUID', ([], {}), '()\n', (2432, 2434), False, 'import sqlmodel\n'), ((2488, 2516), 'sqlmodel.sql.sqltypes.GUID', 'sqlmodel.sql.sqltypes.GUID', ([], {}), '()\n', (2514, 2516), False, 'import sqlmodel\n'), ((2569, 2603), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (2601, 2603), False, 'import sqlmodel\n'), ((2654, 2688), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (2686, 2688), False, 'import sqlmodel\n'), ((2758, 2792), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (2790, 2792), False, 'import sqlmodel\n'), ((2847, 2881), 'sqlmodel.sql.sqltypes.AutoString', 
'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (2879, 2881), False, 'import sqlmodel\n'), ((2932, 2966), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (2964, 2966), False, 'import sqlmodel\n'), ((574, 602), 'sqlalchemy.text', 'sa.text', (['"""gen_random_uuid()"""'], {}), "('gen_random_uuid()')\n", (581, 602), True, 'import sqlalchemy as sa\n'), ((686, 697), 'sqlalchemy.String', 'sa.String', ([], {}), '()\n', (695, 697), True, 'import sqlalchemy as sa\n'), ((824, 840), 'sqlalchemy.text', 'sa.text', (['"""now()"""'], {}), "('now()')\n", (831, 840), True, 'import sqlalchemy as sa\n'), ((989, 1005), 'sqlalchemy.text', 'sa.text', (['"""now()"""'], {}), "('now()')\n", (996, 1005), True, 'import sqlalchemy as sa\n'), ((1898, 1926), 'sqlalchemy.text', 'sa.text', (['"""gen_random_uuid()"""'], {}), "('gen_random_uuid()')\n", (1905, 1926), True, 'import sqlalchemy as sa\n'), ((2010, 2021), 'sqlalchemy.String', 'sa.String', ([], {}), '()\n', (2019, 2021), True, 'import sqlalchemy as sa\n'), ((2148, 2164), 'sqlalchemy.text', 'sa.text', (['"""now()"""'], {}), "('now()')\n", (2155, 2164), True, 'import sqlalchemy as sa\n'), ((2313, 2329), 'sqlalchemy.text', 'sa.text', (['"""now()"""'], {}), "('now()')\n", (2320, 2329), True, 'import sqlalchemy as sa\n')]
|
from datetime import date
from typing import List, Optional
from api.ecoindex.models.responses import ApiEcoindex
from api.models.enums import Version
from sqlalchemy.ext.asyncio.session import AsyncSession
from sqlmodel import select
from db.helper import date_filter
async def get_host_list_db(
session: AsyncSession,
version: Optional[Version] = Version.v1,
q: Optional[str] = None,
date_from: Optional[date] = None,
date_to: Optional[date] = None,
page: Optional[int] = 1,
size: Optional[int] = 50,
) -> List[str]:
statement = (
select(ApiEcoindex.host)
.where(ApiEcoindex.version == version.get_version_number())
.offset(size * (page - 1))
.limit(size)
)
if q:
statement = statement.filter(ApiEcoindex.host.like(f"%{q}%"))
statement = date_filter(statement=statement, date_from=date_from, date_to=date_to)
statement = statement.group_by(ApiEcoindex.host).order_by(ApiEcoindex.host)
hosts = await session.execute(statement)
return hosts.scalars().all()
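# Hedged usage sketch (argument values are illustrative): fetch the second page
# of hosts analysed with API version v1 whose name contains "example", within a
# given date range.
#
#   hosts = await get_host_list_db(
#       session,
#       version=Version.v1,
#       q="example",
#       date_from=date(2022, 1, 1),
#       date_to=date(2022, 12, 31),
#       page=2,
#       size=50,
#   )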
async def get_count_hosts_db(
session: AsyncSession,
version: Optional[Version] = Version.v1,
q: Optional[str] = None,
date_from: Optional[date] = None,
date_to: Optional[date] = None,
) -> int:
sub_statement = (
f"SELECT host FROM apiecoindex WHERE version = {version.get_version_number()}"
)
if q:
sub_statement += f" AND host LIKE '%{q}%'"
if date_from:
sub_statement += f" AND date >= '{date_from}'"
if date_to:
sub_statement += f" AND date <= '{date_to}'"
sub_statement += " GROUP BY host"
statement = f"SELECT count(*) FROM ({sub_statement}) t"
result = await session.execute(statement=statement)
return result.scalar()
|
[
"sqlmodel.select"
] |
[((830, 900), 'db.helper.date_filter', 'date_filter', ([], {'statement': 'statement', 'date_from': 'date_from', 'date_to': 'date_to'}), '(statement=statement, date_from=date_from, date_to=date_to)\n', (841, 900), False, 'from db.helper import date_filter\n'), ((780, 811), 'api.ecoindex.models.responses.ApiEcoindex.host.like', 'ApiEcoindex.host.like', (['f"""%{q}%"""'], {}), "(f'%{q}%')\n", (801, 811), False, 'from api.ecoindex.models.responses import ApiEcoindex\n'), ((577, 601), 'sqlmodel.select', 'select', (['ApiEcoindex.host'], {}), '(ApiEcoindex.host)\n', (583, 601), False, 'from sqlmodel import select\n')]
|
"""Initial Migration3
Revision ID: 849d12c13c8a
Revises:
Create Date: 2021-12-19 17:06:36.345137
"""
import sqlalchemy as sa
import sqlmodel
from alembic import op
# revision identifiers, used by Alembic.
revision = '849d12c13c8a'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
    op.create_table(
        'user',
        sa.Column('id', sa.Integer(), nullable=True),
        sa.Column('username', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
        sa.Column('email', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
        sa.Column('password_hashed', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
        sa.Column('is_admin', sa.Boolean(), nullable=False),
        sa.Column('is_disabled', sa.Boolean(), nullable=False),
        sa.Column('is_verified', sa.Boolean(), nullable=False),
        sa.PrimaryKeyConstraint('id', 'username', 'email')
)
op.create_index(op.f('ix_user_email'), 'user', ['email'], unique=False)
op.create_index(op.f('ix_user_id'), 'user', ['id'], unique=False)
op.create_index(op.f('ix_user_is_admin'), 'user', ['is_admin'], unique=False)
op.create_index(op.f('ix_user_is_disabled'), 'user', ['is_disabled'], unique=False)
op.create_index(op.f('ix_user_is_verified'), 'user', ['is_verified'], unique=False)
op.create_index(op.f('ix_user_password_hashed'), 'user', ['password_hashed'], unique=False)
op.create_index(op.f('ix_user_username'), 'user', ['username'], unique=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_user_username'), table_name='user')
op.drop_index(op.f('ix_user_password_hashed'), table_name='user')
op.drop_index(op.f('ix_user_is_verified'), table_name='user')
op.drop_index(op.f('ix_user_is_disabled'), table_name='user')
op.drop_index(op.f('ix_user_is_admin'), table_name='user')
op.drop_index(op.f('ix_user_id'), table_name='user')
op.drop_index(op.f('ix_user_email'), table_name='user')
op.drop_table('user')
# ### end Alembic commands ###
|
[
"sqlmodel.sql.sqltypes.AutoString"
] |
[((2106, 2127), 'alembic.op.drop_table', 'op.drop_table', (['"""user"""'], {}), "('user')\n", (2119, 2127), False, 'from alembic import op\n'), ((898, 948), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""', '"""username"""', '"""email"""'], {}), "('id', 'username', 'email')\n", (921, 948), True, 'import sqlalchemy as sa\n'), ((975, 996), 'alembic.op.f', 'op.f', (['"""ix_user_email"""'], {}), "('ix_user_email')\n", (979, 996), False, 'from alembic import op\n'), ((1051, 1069), 'alembic.op.f', 'op.f', (['"""ix_user_id"""'], {}), "('ix_user_id')\n", (1055, 1069), False, 'from alembic import op\n'), ((1121, 1145), 'alembic.op.f', 'op.f', (['"""ix_user_is_admin"""'], {}), "('ix_user_is_admin')\n", (1125, 1145), False, 'from alembic import op\n'), ((1203, 1230), 'alembic.op.f', 'op.f', (['"""ix_user_is_disabled"""'], {}), "('ix_user_is_disabled')\n", (1207, 1230), False, 'from alembic import op\n'), ((1291, 1318), 'alembic.op.f', 'op.f', (['"""ix_user_is_verified"""'], {}), "('ix_user_is_verified')\n", (1295, 1318), False, 'from alembic import op\n'), ((1379, 1410), 'alembic.op.f', 'op.f', (['"""ix_user_password_hashed"""'], {}), "('ix_user_password_hashed')\n", (1383, 1410), False, 'from alembic import op\n'), ((1475, 1499), 'alembic.op.f', 'op.f', (['"""ix_user_username"""'], {}), "('ix_user_username')\n", (1479, 1499), False, 'from alembic import op\n'), ((1675, 1699), 'alembic.op.f', 'op.f', (['"""ix_user_username"""'], {}), "('ix_user_username')\n", (1679, 1699), False, 'from alembic import op\n'), ((1738, 1769), 'alembic.op.f', 'op.f', (['"""ix_user_password_hashed"""'], {}), "('ix_user_password_hashed')\n", (1742, 1769), False, 'from alembic import op\n'), ((1808, 1835), 'alembic.op.f', 'op.f', (['"""ix_user_is_verified"""'], {}), "('ix_user_is_verified')\n", (1812, 1835), False, 'from alembic import op\n'), ((1874, 1901), 'alembic.op.f', 'op.f', (['"""ix_user_is_disabled"""'], {}), "('ix_user_is_disabled')\n", (1878, 1901), False, 'from alembic import op\n'), ((1940, 1964), 'alembic.op.f', 'op.f', (['"""ix_user_is_admin"""'], {}), "('ix_user_is_admin')\n", (1944, 1964), False, 'from alembic import op\n'), ((2003, 2021), 'alembic.op.f', 'op.f', (['"""ix_user_id"""'], {}), "('ix_user_id')\n", (2007, 2021), False, 'from alembic import op\n'), ((2060, 2081), 'alembic.op.f', 'op.f', (['"""ix_user_email"""'], {}), "('ix_user_email')\n", (2064, 2081), False, 'from alembic import op\n'), ((434, 446), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (444, 446), True, 'import sqlalchemy as sa\n'), ((494, 528), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (526, 528), False, 'import sqlmodel\n'), ((574, 608), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (606, 608), False, 'import sqlmodel\n'), ((664, 698), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (696, 698), False, 'import sqlmodel\n'), ((747, 759), 'sqlalchemy.Boolean', 'sa.Boolean', ([], {}), '()\n', (757, 759), True, 'import sqlalchemy as sa\n'), ((803, 815), 'sqlalchemy.Boolean', 'sa.Boolean', ([], {}), '()\n', (813, 815), True, 'import sqlalchemy as sa\n'), ((867, 879), 'sqlalchemy.Boolean', 'sa.Boolean', ([], {}), '()\n', (877, 879), True, 'import sqlalchemy as sa\n')]
|
r"""
Diametrically point loaded 2-D disk. See :ref:`sec-primer`.
Find :math:`\ul{u}` such that:
.. math::
\int_{\Omega} D_{ijkl}\ e_{ij}(\ul{v}) e_{kl}(\ul{u})
= 0
\;, \quad \forall \ul{v} \;,
where
.. math::
D_{ijkl} = \mu (\delta_{ik} \delta_{jl}+\delta_{il} \delta_{jk}) +
\lambda \ \delta_{ij} \delta_{kl}
\;.
"""
from __future__ import absolute_import
from sfepy.mechanics.matcoefs import stiffness_from_youngpoisson
from sfepy.discrete.fem.utils import refine_mesh
from sfepy import data_dir
# Fix the mesh file name if you run this file outside the SfePy directory.
filename_mesh = data_dir + '/meshes/2d/its2D.mesh'
refinement_level = 0
filename_mesh = refine_mesh(filename_mesh, refinement_level)
output_dir = '.' # set this to a valid directory you have write access to
young = 2000.0 # Young's modulus [MPa]
poisson = 0.4 # Poisson's ratio
options = {
'output_dir' : output_dir,
}
regions = {
'Omega' : 'all',
'Left' : ('vertices in (x < 0.001)', 'facet'),
'Bottom' : ('vertices in (y < 0.001)', 'facet'),
'Top' : ('vertex 2', 'vertex'),
}
materials = {
'Asphalt' : ({'D': stiffness_from_youngpoisson(2, young, poisson)},),
'Load' : ({'.val' : [0.0, -1000.0]},),
}
fields = {
'displacement': ('real', 'vector', 'Omega', 1),
}
equations = {
'balance_of_forces' :
"""dw_lin_elastic.2.Omega(Asphalt.D, v, u)
= dw_point_load.0.Top(Load.val, v)""",
}
variables = {
'u' : ('unknown field', 'displacement', 0),
'v' : ('test field', 'displacement', 'u'),
}
ebcs = {
'XSym' : ('Bottom', {'u.1' : 0.0}),
'YSym' : ('Left', {'u.0' : 0.0}),
}
solvers = {
'ls' : ('ls.scipy_direct', {}),
'newton' : ('nls.newton', {
'i_max' : 1,
'eps_a' : 1e-6,
}),
}
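# Usage note (a hedged sketch, not part of the original description file):
# declarative problem files like this one are normally run through SfePy's
# "simple" application, e.g. `python simple.py <this_file>.py` from a source
# checkout, or `sfepy-run simple <this_file>.py` where console scripts are
# installed.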
|
[
"sfepy.discrete.fem.utils.refine_mesh",
"sfepy.mechanics.matcoefs.stiffness_from_youngpoisson"
] |
[((691, 735), 'sfepy.discrete.fem.utils.refine_mesh', 'refine_mesh', (['filename_mesh', 'refinement_level'], {}), '(filename_mesh, refinement_level)\n', (702, 735), False, 'from sfepy.discrete.fem.utils import refine_mesh\n'), ((1144, 1190), 'sfepy.mechanics.matcoefs.stiffness_from_youngpoisson', 'stiffness_from_youngpoisson', (['(2)', 'young', 'poisson'], {}), '(2, young, poisson)\n', (1171, 1190), False, 'from sfepy.mechanics.matcoefs import stiffness_from_youngpoisson\n')]
|
import logging
import os
import secrets
import aioredis
import boto3
import pytest
import redis as pyredis
from fastapi.testclient import TestClient
from sqlalchemy import text
from sqlalchemy_utils import create_database, database_exists, drop_database
from sqlmodel import Session, create_engine
from iris.agent.settings import AgentSettings
from iris.api.authentication import (
current_active_user,
current_superuser,
current_verified_user,
)
from iris.api.main import app
from iris.api.settings import APISettings
from iris.commons.clickhouse import ClickHouse
from iris.commons.dependencies import get_settings
from iris.commons.models.base import Base
from iris.commons.redis import Redis
from iris.commons.settings import CommonSettings
from iris.commons.storage import Storage
from iris.commons.utils import json_serializer
from iris.worker import WorkerSettings
pytest.register_assert_rewrite("tests.assertions")
pytest_plugins = ["tests.fixtures.models", "tests.fixtures.storage"]
def should_cleanup():
return os.environ.get("IRIS_TEST_CLEANUP", "") != "0"
@pytest.fixture
def logger():
return logging.getLogger(__name__)
@pytest.fixture
def settings():
namespace = secrets.token_hex(nbytes=4)
print(f"@{namespace}", end=" ")
# Redis has 16 databases by default, we use the last one for testing.
return CommonSettings(
CLICKHOUSE_PUBLIC_USER="public",
CLICKHOUSE_DATABASE="iris_test",
DATABASE_URL=f"postgresql://iris:[email protected]/iris-test-{namespace}",
S3_PREFIX=f"iris-test-{namespace}",
S3_PUBLIC_RESOURCES=["arn:aws:s3:::test-public-exports/*"],
REDIS_NAMESPACE=f"iris-test-{namespace}",
REDIS_URL="redis://default:[email protected]?db=15",
RETRY_TIMEOUT=-1,
)
@pytest.fixture
def api_settings(settings):
return APISettings(
API_CORS_ALLOW_ORIGIN="https://example.org,http://localhost:8000",
**settings.dict(),
)
@pytest.fixture
def agent_settings(settings, tmp_path):
return AgentSettings(
**settings.dict(),
AGENT_CARACAL_SNIFFER_WAIT_TIME=1,
AGENT_MIN_TTL=0,
AGENT_RESULTS_DIR_PATH=tmp_path / "agent_results",
AGENT_TARGETS_DIR_PATH=tmp_path / "agent_targets",
)
@pytest.fixture
def worker_settings(settings, tmp_path):
return WorkerSettings(
**settings.dict(),
WORKER_RESULTS_DIR_PATH=tmp_path / "worker_results",
WORKER_MAX_OPEN_FILES=128,
)
@pytest.fixture
def clickhouse(settings, logger):
return ClickHouse(settings, logger)
@pytest.fixture
def engine(settings):
engine = create_engine(settings.DATABASE_URL, json_serializer=json_serializer)
if not database_exists(engine.url):
create_database(engine.url)
Base.metadata.create_all(engine)
return engine
@pytest.fixture
async def redis(settings, logger):
client = aioredis.from_url(settings.REDIS_URL, decode_responses=True)
yield Redis(client, settings, logger)
await client.close()
@pytest.fixture
def session(engine):
with Session(engine) as session:
yield session
@pytest.fixture
def storage(settings, logger):
return Storage(settings, logger)
@pytest.fixture
def make_client(engine, api_settings):
def _make_client(user=None):
if user and user.is_active:
app.dependency_overrides[current_active_user] = lambda: user
if user and user.is_active and user.is_verified:
app.dependency_overrides[current_verified_user] = lambda: user
if user and user.is_active and user.is_verified and user.is_superuser:
app.dependency_overrides[current_superuser] = lambda: user
app.dependency_overrides[get_settings] = lambda: api_settings
return TestClient(app)
yield _make_client
app.dependency_overrides.clear()
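# Hedged usage sketch (test, fixture and route names are illustrative): a test
# requests a client bound to a specific user, so the authentication dependency
# overrides above apply only for that test.
#
#   def test_get_profile(make_client, verified_user):
#       client = make_client(verified_user)
#       response = client.get("/profile")
#       assert response.status_code == 200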
@pytest.fixture(autouse=True, scope="session")
def cleanup_redis():
yield
if should_cleanup():
redis_ = pyredis.from_url("redis://default:[email protected]?db=15")
redis_.flushdb()
redis_.close()
@pytest.fixture(autouse=True, scope="session")
def cleanup_database():
yield
if should_cleanup():
# TODO: Cleanup/simplify this code.
engine = create_engine("postgresql://iris:[email protected]")
with engine.connect() as conn:
databases = conn.execute(
text(
"""
SELECT datname
FROM pg_database
WHERE datistemplate = false AND datname LIKE 'iris-test-%'
"""
)
).all()
for (database,) in databases:
drop_database(
f"postgresql://iris:[email protected]/{database}"
)
@pytest.fixture(autouse=True, scope="session")
def cleanup_s3():
yield
if should_cleanup():
s3 = boto3.client(
"s3",
aws_access_key_id="minioadmin",
aws_secret_access_key="minioadmin",
endpoint_url="http://minio.docker.localhost",
)
buckets = s3.list_buckets()
buckets = [x["Name"] for x in buckets["Buckets"]]
for bucket in buckets:
if "test-" in bucket:
objects = s3.list_objects_v2(Bucket=bucket)
if objects["KeyCount"]:
objects = [{"Key": x["Key"]} for x in objects.get("Contents", [])]
s3.delete_objects(Bucket=bucket, Delete=dict(Objects=objects))
s3.delete_bucket(Bucket=bucket)
# https://github.com/boto/botocore/pull/1810
s3._endpoint.http_session._manager.clear()
|
[
"sqlmodel.create_engine",
"sqlmodel.Session"
] |
[((887, 937), 'pytest.register_assert_rewrite', 'pytest.register_assert_rewrite', (['"""tests.assertions"""'], {}), "('tests.assertions')\n", (917, 937), False, 'import pytest\n'), ((3884, 3929), 'pytest.fixture', 'pytest.fixture', ([], {'autouse': '(True)', 'scope': '"""session"""'}), "(autouse=True, scope='session')\n", (3898, 3929), False, 'import pytest\n'), ((4124, 4169), 'pytest.fixture', 'pytest.fixture', ([], {'autouse': '(True)', 'scope': '"""session"""'}), "(autouse=True, scope='session')\n", (4138, 4169), False, 'import pytest\n'), ((4865, 4910), 'pytest.fixture', 'pytest.fixture', ([], {'autouse': '(True)', 'scope': '"""session"""'}), "(autouse=True, scope='session')\n", (4879, 4910), False, 'import pytest\n'), ((1132, 1159), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1149, 1159), False, 'import logging\n'), ((1210, 1237), 'secrets.token_hex', 'secrets.token_hex', ([], {'nbytes': '(4)'}), '(nbytes=4)\n', (1227, 1237), False, 'import secrets\n'), ((1359, 1775), 'iris.commons.settings.CommonSettings', 'CommonSettings', ([], {'CLICKHOUSE_PUBLIC_USER': '"""public"""', 'CLICKHOUSE_DATABASE': '"""iris_test"""', 'DATABASE_URL': 'f"""postgresql://iris:[email protected]/iris-test-{namespace}"""', 'S3_PREFIX': 'f"""iris-test-{namespace}"""', 'S3_PUBLIC_RESOURCES': "['arn:aws:s3:::test-public-exports/*']", 'REDIS_NAMESPACE': 'f"""iris-test-{namespace}"""', 'REDIS_URL': '"""redis://default:[email protected]?db=15"""', 'RETRY_TIMEOUT': '(-1)'}), "(CLICKHOUSE_PUBLIC_USER='public', CLICKHOUSE_DATABASE=\n 'iris_test', DATABASE_URL=\n f'postgresql://iris:[email protected]/iris-test-{namespace}',\n S3_PREFIX=f'iris-test-{namespace}', S3_PUBLIC_RESOURCES=[\n 'arn:aws:s3:::test-public-exports/*'], REDIS_NAMESPACE=\n f'iris-test-{namespace}', REDIS_URL=\n 'redis://default:[email protected]?db=15', RETRY_TIMEOUT=-1)\n", (1373, 1775), False, 'from iris.commons.settings import CommonSettings\n'), ((2577, 2605), 'iris.commons.clickhouse.ClickHouse', 'ClickHouse', (['settings', 'logger'], {}), '(settings, logger)\n', (2587, 2605), False, 'from iris.commons.clickhouse import ClickHouse\n'), ((2659, 2728), 'sqlmodel.create_engine', 'create_engine', (['settings.DATABASE_URL'], {'json_serializer': 'json_serializer'}), '(settings.DATABASE_URL, json_serializer=json_serializer)\n', (2672, 2728), False, 'from sqlmodel import Session, create_engine\n'), ((2809, 2841), 'iris.commons.models.base.Base.metadata.create_all', 'Base.metadata.create_all', (['engine'], {}), '(engine)\n', (2833, 2841), False, 'from iris.commons.models.base import Base\n'), ((2926, 2986), 'aioredis.from_url', 'aioredis.from_url', (['settings.REDIS_URL'], {'decode_responses': '(True)'}), '(settings.REDIS_URL, decode_responses=True)\n', (2943, 2986), False, 'import aioredis\n'), ((3212, 3237), 'iris.commons.storage.Storage', 'Storage', (['settings', 'logger'], {}), '(settings, logger)\n', (3219, 3237), False, 'from iris.commons.storage import Storage\n'), ((3848, 3880), 'iris.api.main.app.dependency_overrides.clear', 'app.dependency_overrides.clear', ([], {}), '()\n', (3878, 3880), False, 'from iris.api.main import app\n'), ((1042, 1081), 'os.environ.get', 'os.environ.get', (['"""IRIS_TEST_CLEANUP"""', '""""""'], {}), "('IRIS_TEST_CLEANUP', '')\n", (1056, 1081), False, 'import os\n'), ((2740, 2767), 'sqlalchemy_utils.database_exists', 'database_exists', (['engine.url'], {}), '(engine.url)\n', (2755, 2767), False, 'from sqlalchemy_utils import create_database, database_exists, drop_database\n'), ((2777, 
2804), 'sqlalchemy_utils.create_database', 'create_database', (['engine.url'], {}), '(engine.url)\n', (2792, 2804), False, 'from sqlalchemy_utils import create_database, database_exists, drop_database\n'), ((2997, 3028), 'iris.commons.redis.Redis', 'Redis', (['client', 'settings', 'logger'], {}), '(client, settings, logger)\n', (3002, 3028), False, 'from iris.commons.redis import Redis\n'), ((3102, 3117), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (3109, 3117), False, 'from sqlmodel import Session, create_engine\n'), ((3804, 3819), 'fastapi.testclient.TestClient', 'TestClient', (['app'], {}), '(app)\n', (3814, 3819), False, 'from fastapi.testclient import TestClient\n'), ((4003, 4072), 'redis.from_url', 'pyredis.from_url', (['"""redis://default:[email protected]?db=15"""'], {}), "('redis://default:[email protected]?db=15')\n", (4019, 4072), True, 'import redis as pyredis\n'), ((4290, 4355), 'sqlmodel.create_engine', 'create_engine', (['"""postgresql://iris:[email protected]"""'], {}), "('postgresql://iris:[email protected]')\n", (4303, 4355), False, 'from sqlmodel import Session, create_engine\n'), ((4977, 5114), 'boto3.client', 'boto3.client', (['"""s3"""'], {'aws_access_key_id': '"""minioadmin"""', 'aws_secret_access_key': '"""minioadmin"""', 'endpoint_url': '"""http://minio.docker.localhost"""'}), "('s3', aws_access_key_id='minioadmin', aws_secret_access_key=\n 'minioadmin', endpoint_url='http://minio.docker.localhost')\n", (4989, 5114), False, 'import boto3\n'), ((4754, 4831), 'sqlalchemy_utils.drop_database', 'drop_database', (['f"""postgresql://iris:[email protected]/{database}"""'], {}), "(f'postgresql://iris:[email protected]/{database}')\n", (4767, 4831), False, 'from sqlalchemy_utils import create_database, database_exists, drop_database\n'), ((4449, 4655), 'sqlalchemy.text', 'text', (['"""\n SELECT datname\n FROM pg_database\n WHERE datistemplate = false AND datname LIKE \'iris-test-%\'\n """'], {}), '(\n """\n SELECT datname\n FROM pg_database\n WHERE datistemplate = false AND datname LIKE \'iris-test-%\'\n """\n )\n', (4453, 4655), False, 'from sqlalchemy import text\n')]
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import multiprocessing
import time
# pylint: disable=import-error
import model as snet_model
import megengine
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
logging = megengine.logger.get_logger()
def main():
parser = argparse.ArgumentParser(description="MegEngine ImageNet Training")
parser.add_argument("-d", "--data", metavar="DIR", help="path to imagenet dataset")
parser.add_argument(
"-a",
"--arch",
default="shufflenet_v2_x1_0",
help="model architecture (default: shufflenet_v2_x1_0)",
)
parser.add_argument(
"-n",
"--ngpus",
default=None,
type=int,
help="number of GPUs per node (default: None, use all available GPUs)",
)
parser.add_argument(
"-m", "--model", metavar="PKL", default=None, help="path to model checkpoint"
)
parser.add_argument("-j", "--workers", default=2, type=int)
    parser.add_argument(
        "-p",
        "--print-freq",
        default=20,
        type=int,
        metavar="N",
        help="print frequency (default: 20)",
    )
parser.add_argument("--dist-addr", default="localhost")
parser.add_argument("--dist-port", default=23456, type=int)
parser.add_argument("--world-size", default=1, type=int)
parser.add_argument("--rank", default=0, type=int)
args = parser.parse_args()
    # create the server if this process is the master
if args.rank <= 0:
server = dist.Server(port=args.dist_port) # pylint: disable=unused-variable # noqa: F841
# get device count
with multiprocessing.Pool(1) as pool:
ngpus_per_node, _ = pool.map(megengine.get_device_count, ["gpu", "cpu"])
if args.ngpus:
ngpus_per_node = args.ngpus
# launch processes
procs = []
for local_rank in range(ngpus_per_node):
p = multiprocessing.Process(
target=worker,
kwargs=dict(
rank=args.rank * ngpus_per_node + local_rank,
world_size=args.world_size * ngpus_per_node,
ngpus_per_node=ngpus_per_node,
args=args,
),
)
p.start()
procs.append(p)
# join processes
for p in procs:
p.join()
def worker(rank, world_size, ngpus_per_node, args):
# init process group
if world_size > 1:
dist.init_process_group(
master_ip=args.dist_addr,
port=args.dist_port,
world_size=world_size,
rank=rank,
device=rank % ngpus_per_node,
backend="nccl",
)
logging.info(
"init process group rank %d / %d", dist.get_rank(), dist.get_world_size()
)
# build dataset
_, valid_dataloader = build_dataset(args)
# build model
model = snet_model.__dict__[args.arch](pretrained=args.model is None)
if args.model is not None:
logging.info("load from checkpoint %s", args.model)
checkpoint = megengine.load(args.model)
if "state_dict" in checkpoint:
state_dict = checkpoint["state_dict"]
model.load_state_dict(state_dict)
def valid_step(image, label):
logits = model(image)
loss = F.nn.cross_entropy(logits, label)
acc1, acc5 = F.topk_accuracy(logits, label, topk=(1, 5))
# calculate mean values
if world_size > 1:
loss = F.distributed.all_reduce_sum(loss) / world_size
acc1 = F.distributed.all_reduce_sum(acc1) / world_size
acc5 = F.distributed.all_reduce_sum(acc5) / world_size
return loss, acc1, acc5
model.eval()
_, valid_acc1, valid_acc5 = valid(valid_step, valid_dataloader, args)
logging.info(
"Test Acc@1 %.3f, Acc@5 %.3f",
valid_acc1,
valid_acc5,
)
def valid(func, data_queue, args):
objs = AverageMeter("Loss")
top1 = AverageMeter("Acc@1")
top5 = AverageMeter("Acc@5")
clck = AverageMeter("Time")
t = time.time()
for step, (image, label) in enumerate(data_queue):
image = megengine.tensor(image, dtype="float32")
label = megengine.tensor(label, dtype="int32")
n = image.shape[0]
loss, acc1, acc5 = func(image, label)
objs.update(loss.item(), n)
top1.update(100 * acc1.item(), n)
top5.update(100 * acc5.item(), n)
clck.update(time.time() - t, n)
t = time.time()
if step % args.print_freq == 0 and dist.get_rank() == 0:
logging.info("Test step %d, %s %s %s %s", step, objs, top1, top5, clck)
return objs.avg, top1.avg, top5.avg
def build_dataset(args):
train_dataloader = None
valid_dataset = data.dataset.ImageNet(args.data, train=False)
valid_sampler = data.SequentialSampler(
valid_dataset, batch_size=100, drop_last=False
)
valid_dataloader = data.DataLoader(
valid_dataset,
sampler=valid_sampler,
transform=T.Compose(
[
T.Resize(256),
T.CenterCrop(224),
T.Normalize(
mean=[103.530, 116.280, 123.675], std=[57.375, 57.120, 58.395]
), # BGR
T.ToMode("CHW"),
]
),
num_workers=args.workers,
)
return train_dataloader, valid_dataloader
class AverageMeter:
"""Computes and stores the average and current value"""
def __init__(self, name, fmt=":.3f"):
self.name = name
self.fmt = fmt
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __str__(self):
fmtstr = "{name} {val" + self.fmt + "} ({avg" + self.fmt + "})"
return fmtstr.format(**self.__dict__)
if __name__ == "__main__":
main()
|
[
"megengine.data.dataset.ImageNet",
"megengine.logger.get_logger",
"megengine.functional.nn.cross_entropy",
"megengine.distributed.init_process_group",
"megengine.functional.topk_accuracy",
"megengine.data.SequentialSampler",
"megengine.tensor",
"megengine.distributed.Server",
"megengine.functional.distributed.all_reduce_sum",
"megengine.load",
"megengine.data.transform.Normalize",
"megengine.data.transform.CenterCrop",
"megengine.distributed.get_rank",
"megengine.data.transform.Resize",
"megengine.distributed.get_world_size",
"megengine.data.transform.ToMode"
] |
[((653, 682), 'megengine.logger.get_logger', 'megengine.logger.get_logger', ([], {}), '()\n', (680, 682), False, 'import megengine\n'), ((710, 776), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""MegEngine ImageNet Training"""'}), "(description='MegEngine ImageNet Training')\n", (733, 776), False, 'import argparse\n'), ((4432, 4443), 'time.time', 'time.time', ([], {}), '()\n', (4441, 4443), False, 'import time\n'), ((5137, 5182), 'megengine.data.dataset.ImageNet', 'data.dataset.ImageNet', (['args.data'], {'train': '(False)'}), '(args.data, train=False)\n', (5158, 5182), True, 'import megengine.data as data\n'), ((5203, 5273), 'megengine.data.SequentialSampler', 'data.SequentialSampler', (['valid_dataset'], {'batch_size': '(100)', 'drop_last': '(False)'}), '(valid_dataset, batch_size=100, drop_last=False)\n', (5225, 5273), True, 'import megengine.data as data\n'), ((1918, 1950), 'megengine.distributed.Server', 'dist.Server', ([], {'port': 'args.dist_port'}), '(port=args.dist_port)\n', (1929, 1950), True, 'import megengine.distributed as dist\n'), ((2033, 2056), 'multiprocessing.Pool', 'multiprocessing.Pool', (['(1)'], {}), '(1)\n', (2053, 2056), False, 'import multiprocessing\n'), ((2808, 2967), 'megengine.distributed.init_process_group', 'dist.init_process_group', ([], {'master_ip': 'args.dist_addr', 'port': 'args.dist_port', 'world_size': 'world_size', 'rank': 'rank', 'device': '(rank % ngpus_per_node)', 'backend': '"""nccl"""'}), "(master_ip=args.dist_addr, port=args.dist_port,\n world_size=world_size, rank=rank, device=rank % ngpus_per_node, backend\n ='nccl')\n", (2831, 2967), True, 'import megengine.distributed as dist\n'), ((3432, 3458), 'megengine.load', 'megengine.load', (['args.model'], {}), '(args.model)\n', (3446, 3458), False, 'import megengine\n'), ((3670, 3703), 'megengine.functional.nn.cross_entropy', 'F.nn.cross_entropy', (['logits', 'label'], {}), '(logits, label)\n', (3688, 3703), True, 'import megengine.functional as F\n'), ((3725, 3768), 'megengine.functional.topk_accuracy', 'F.topk_accuracy', (['logits', 'label'], {'topk': '(1, 5)'}), '(logits, label, topk=(1, 5))\n', (3740, 3768), True, 'import megengine.functional as F\n'), ((4515, 4555), 'megengine.tensor', 'megengine.tensor', (['image'], {'dtype': '"""float32"""'}), "(image, dtype='float32')\n", (4531, 4555), False, 'import megengine\n'), ((4572, 4610), 'megengine.tensor', 'megengine.tensor', (['label'], {'dtype': '"""int32"""'}), "(label, dtype='int32')\n", (4588, 4610), False, 'import megengine\n'), ((4859, 4870), 'time.time', 'time.time', ([], {}), '()\n', (4868, 4870), False, 'import time\n'), ((3111, 3126), 'megengine.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (3124, 3126), True, 'import megengine.distributed as dist\n'), ((3128, 3149), 'megengine.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (3147, 3149), True, 'import megengine.distributed as dist\n'), ((3847, 3881), 'megengine.functional.distributed.all_reduce_sum', 'F.distributed.all_reduce_sum', (['loss'], {}), '(loss)\n', (3875, 3881), True, 'import megengine.functional as F\n'), ((3914, 3948), 'megengine.functional.distributed.all_reduce_sum', 'F.distributed.all_reduce_sum', (['acc1'], {}), '(acc1)\n', (3942, 3948), True, 'import megengine.functional as F\n'), ((3981, 4015), 'megengine.functional.distributed.all_reduce_sum', 'F.distributed.all_reduce_sum', (['acc5'], {}), '(acc5)\n', (4009, 4015), True, 'import megengine.functional as F\n'), ((4827, 4838), 'time.time', 'time.time', 
([], {}), '()\n', (4836, 4838), False, 'import time\n'), ((4915, 4930), 'megengine.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (4928, 4930), True, 'import megengine.distributed as dist\n'), ((5441, 5454), 'megengine.data.transform.Resize', 'T.Resize', (['(256)'], {}), '(256)\n', (5449, 5454), True, 'import megengine.data.transform as T\n'), ((5472, 5489), 'megengine.data.transform.CenterCrop', 'T.CenterCrop', (['(224)'], {}), '(224)\n', (5484, 5489), True, 'import megengine.data.transform as T\n'), ((5507, 5579), 'megengine.data.transform.Normalize', 'T.Normalize', ([], {'mean': '[103.53, 116.28, 123.675]', 'std': '[57.375, 57.12, 58.395]'}), '(mean=[103.53, 116.28, 123.675], std=[57.375, 57.12, 58.395])\n', (5518, 5579), True, 'import megengine.data.transform as T\n'), ((5645, 5660), 'megengine.data.transform.ToMode', 'T.ToMode', (['"""CHW"""'], {}), "('CHW')\n", (5653, 5660), True, 'import megengine.data.transform as T\n')]
|
from datetime import date
from typing import List, Optional
from uuid import UUID
from api.ecoindex.models.responses import ApiEcoindex
from api.helper import new_uuid
from api.models.enums import Version
from ecoindex_scraper.models import Result
from sqlalchemy import func
from sqlalchemy.ext.asyncio.session import AsyncSession
from sqlalchemy.sql.expression import asc
from sqlmodel import select
from db.helper import date_filter
async def save_ecoindex_result_db(
session: AsyncSession,
ecoindex_result: Result,
version: Optional[Version] = Version.v1,
) -> ApiEcoindex:
ranking = await get_rank_analysis_db(
ecoindex=ecoindex_result, session=session, version=version
)
total_results = await get_count_analysis_db(session=session, version=version)
db_ecoindex = ApiEcoindex(
id=new_uuid(),
date=ecoindex_result.date,
url=ecoindex_result.url,
host=ecoindex_result.url.host,
width=ecoindex_result.width,
height=ecoindex_result.height,
size=ecoindex_result.size,
nodes=ecoindex_result.nodes,
requests=ecoindex_result.requests,
grade=ecoindex_result.grade,
score=ecoindex_result.score,
ges=ecoindex_result.ges,
water=ecoindex_result.water,
page_type=ecoindex_result.page_type,
version=version.get_version_number(),
initial_ranking=ranking if ranking else total_results + 1,
initial_total_results=total_results + 1,
)
session.add(db_ecoindex)
await session.commit()
await session.refresh(db_ecoindex)
return db_ecoindex
async def get_count_analysis_db(
session: AsyncSession,
version: Optional[Version] = Version.v1,
host: Optional[str] = None,
date_from: Optional[date] = None,
date_to: Optional[date] = None,
) -> int:
statement = f"SELECT count(*) FROM apiecoindex WHERE version = {version.get_version_number()}"
if host:
statement += f" AND host = '{host}'"
if date_from:
statement += f" AND date >= '{date_from}'"
if date_to:
statement += f" AND date <= '{date_to}'"
result = await session.execute(statement=statement)
return result.scalar()
async def get_rank_analysis_db(
ecoindex: Result, session: AsyncSession, version: Optional[Version] = Version.v1
) -> Optional[int]:
result = await session.execute(
(
"SELECT ranking FROM ("
"SELECT *, ROW_NUMBER() OVER (ORDER BY score DESC) ranking "
"FROM apiecoindex "
f"WHERE version={version.get_version_number()} "
"ORDER BY score DESC) t "
f"WHERE score <= {ecoindex.score} "
"LIMIT 1;"
)
)
return result.scalar()
async def get_ecoindex_result_list_db(
session: AsyncSession,
version: Optional[Version] = Version.v1,
host: Optional[str] = None,
date_from: Optional[date] = None,
date_to: Optional[date] = None,
page: Optional[int] = 1,
size: Optional[int] = 50,
) -> List[ApiEcoindex]:
statement = (
select(ApiEcoindex)
.where(ApiEcoindex.version == version.get_version_number())
.offset((page - 1) * size)
.limit(size)
)
if host:
statement = statement.where(ApiEcoindex.host == host)
statement = date_filter(statement=statement, date_from=date_from, date_to=date_to)
ecoindexes = await session.execute(statement.order_by(asc("date")))
return ecoindexes.scalars().all()
async def get_ecoindex_result_by_id_db(
session: AsyncSession, id: UUID, version: Optional[Version] = Version.v1
) -> ApiEcoindex:
statement = (
select(ApiEcoindex)
.where(ApiEcoindex.id == id)
.where(ApiEcoindex.version == version.get_version_number())
)
ecoindex = await session.execute(statement)
return ecoindex.scalar_one_or_none()
async def get_count_daily_request_per_host(session: AsyncSession, host: str) -> int:
statement = select(ApiEcoindex).where(
func.date(ApiEcoindex.date) == date.today(), ApiEcoindex.host == host
)
results = await session.execute(statement)
return len(results.all())
|
[
"sqlmodel.select"
] |
[((3331, 3401), 'db.helper.date_filter', 'date_filter', ([], {'statement': 'statement', 'date_from': 'date_from', 'date_to': 'date_to'}), '(statement=statement, date_from=date_from, date_to=date_to)\n', (3342, 3401), False, 'from db.helper import date_filter\n'), ((833, 843), 'api.helper.new_uuid', 'new_uuid', ([], {}), '()\n', (841, 843), False, 'from api.helper import new_uuid\n'), ((4001, 4020), 'sqlmodel.select', 'select', (['ApiEcoindex'], {}), '(ApiEcoindex)\n', (4007, 4020), False, 'from sqlmodel import select\n'), ((4036, 4063), 'sqlalchemy.func.date', 'func.date', (['ApiEcoindex.date'], {}), '(ApiEcoindex.date)\n', (4045, 4063), False, 'from sqlalchemy import func\n'), ((4067, 4079), 'datetime.date.today', 'date.today', ([], {}), '()\n', (4077, 4079), False, 'from datetime import date\n'), ((3461, 3472), 'sqlalchemy.sql.expression.asc', 'asc', (['"""date"""'], {}), "('date')\n", (3464, 3472), False, 'from sqlalchemy.sql.expression import asc\n'), ((3677, 3696), 'sqlmodel.select', 'select', (['ApiEcoindex'], {}), '(ApiEcoindex)\n', (3683, 3696), False, 'from sqlmodel import select\n'), ((3089, 3108), 'sqlmodel.select', 'select', (['ApiEcoindex'], {}), '(ApiEcoindex)\n', (3095, 3108), False, 'from sqlmodel import select\n')]
|
#!/usr/bin/env python3
from sqlalchemy.orm import sessionmaker
from sqlmodel import create_engine
from . import constants
#postgres_url = f"postgresql://postgres:{constants.POSTGRES_PW}@localhost/billsim"
if constants.POSTGRES_URL is None:
postgres_url = f"postgresql://postgres:{constants.POSTGRES_PW}@localhost"
else:
postgres_url = constants.POSTGRES_URL
engine = create_engine(postgres_url, echo=True)
SessionLocal = sessionmaker(autocommit=False,
autoflush=False,
expire_on_commit=False,
bind=engine)
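# A minimal usage sketch (not part of the original module; the function name is
# illustrative): a generator-style session provider built on SessionLocal, e.g.
# for use as a FastAPI dependency.
def get_session():
    db = SessionLocal()
    try:
        yield db
    finally:
        db.close()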
|
[
"sqlmodel.create_engine"
] |
[((379, 417), 'sqlmodel.create_engine', 'create_engine', (['postgres_url'], {'echo': '(True)'}), '(postgres_url, echo=True)\n', (392, 417), False, 'from sqlmodel import create_engine\n'), ((434, 522), 'sqlalchemy.orm.sessionmaker', 'sessionmaker', ([], {'autocommit': '(False)', 'autoflush': '(False)', 'expire_on_commit': '(False)', 'bind': 'engine'}), '(autocommit=False, autoflush=False, expire_on_commit=False,\n bind=engine)\n', (446, 522), False, 'from sqlalchemy.orm import sessionmaker\n')]
|
import logging
import os
import pickle
import numpy as np
import h5py
from megengine.data import DataLoader
from megengine.data.dataset import Dataset
from megengine.data.sampler import RandomSampler, SequentialSampler
import megengine.distributed as dist
from dataset.transformations import fetch_transform
from common import utils
_logger = logging.getLogger(__name__)
class ModelNetNpy(Dataset):
def __init__(self, dataset_path: str, dataset_mode: str, subset: str = "train", categories=None, transform=None):
self._logger = logging.getLogger(self.__class__.__name__)
self._root = dataset_path
self._subset = subset
self._is_master = dist.get_rank() == 0
metadata_fpath = os.path.join(self._root, "modelnet_{}_{}.pickle".format(dataset_mode, subset))
utils.master_logger(self._logger, "Loading data from {} for {}".format(metadata_fpath, subset), self._is_master)
if not os.path.exists(os.path.join(dataset_path)):
            raise FileNotFoundError("Dataset path not found: {}".format(dataset_path))
with open(os.path.join(dataset_path, "shape_names.txt")) as fid:
self._classes = [l.strip() for l in fid]
self._category2idx = {e[1]: e[0] for e in enumerate(self._classes)}
self._idx2category = self._classes
if categories is not None:
categories_idx = [self._category2idx[c] for c in categories]
utils.master_logger(self._logger, "Categories used: {}.".format(categories_idx), self._is_master)
self._classes = categories
else:
categories_idx = None
utils.master_logger(self._logger, "Using all categories.", self._is_master)
self._data = self._read_pickle_files(os.path.join(dataset_path, "modelnet_{}_{}.pickle".format(dataset_mode, subset)),
categories_idx)
self._transform = transform
utils.master_logger(self._logger, "Loaded {} {} instances.".format(len(self._data), subset), self._is_master)
@property
def classes(self):
return self._classes
@staticmethod
def _read_pickle_files(fnames, categories):
all_data_dict = []
with open(fnames, "rb") as f:
data = pickle.load(f)
for category in categories:
all_data_dict.extend(data[category])
return all_data_dict
def to_category(self, i):
return self._idx2category[i]
def __getitem__(self, item):
data_path = self._data[item]
# load and process data
points = np.load(data_path)
idx = np.array(int(os.path.splitext(os.path.basename(data_path))[0].split("_")[1]))
label = np.array(int(os.path.splitext(os.path.basename(data_path))[0].split("_")[3]))
sample = {"points": points, "label": label, "idx": idx}
if self._transform:
sample = self._transform(sample)
return sample
def __len__(self):
return len(self._data)
def fetch_dataloader(params):
utils.master_logger(_logger, "Dataset type: {}, transform type: {}".format(params.dataset_type, params.transform_type),
dist.get_rank() == 0)
train_transforms, test_transforms = fetch_transform(params)
if params.dataset_type == "modelnet_os":
dataset_path = "./dataset/data/modelnet_os"
train_categories = [line.rstrip("\n") for line in open("./dataset/data/modelnet40_half1_rm_rotate.txt")]
val_categories = [line.rstrip("\n") for line in open("./dataset/data/modelnet40_half1_rm_rotate.txt")]
test_categories = [line.rstrip("\n") for line in open("./dataset/data/modelnet40_half2_rm_rotate.txt")]
train_categories.sort()
val_categories.sort()
test_categories.sort()
train_ds = ModelNetNpy(dataset_path, dataset_mode="os", subset="train", categories=train_categories, transform=train_transforms)
val_ds = ModelNetNpy(dataset_path, dataset_mode="os", subset="val", categories=val_categories, transform=test_transforms)
test_ds = ModelNetNpy(dataset_path, dataset_mode="os", subset="test", categories=test_categories, transform=test_transforms)
elif params.dataset_type == "modelnet_ts":
dataset_path = "./dataset/data/modelnet_ts"
train_categories = [line.rstrip("\n") for line in open("./dataset/data/modelnet40_half1_rm_rotate.txt")]
val_categories = [line.rstrip("\n") for line in open("./dataset/data/modelnet40_half1_rm_rotate.txt")]
test_categories = [line.rstrip("\n") for line in open("./dataset/data/modelnet40_half2_rm_rotate.txt")]
train_categories.sort()
val_categories.sort()
test_categories.sort()
train_ds = ModelNetNpy(dataset_path, dataset_mode="ts", subset="train", categories=train_categories, transform=train_transforms)
val_ds = ModelNetNpy(dataset_path, dataset_mode="ts", subset="val", categories=val_categories, transform=test_transforms)
test_ds = ModelNetNpy(dataset_path, dataset_mode="ts", subset="test", categories=test_categories, transform=test_transforms)
dataloaders = {}
    # add default train data loader
train_sampler = RandomSampler(train_ds, batch_size=params.train_batch_size, drop_last=True)
train_dl = DataLoader(train_ds, train_sampler, num_workers=params.num_workers)
dataloaders["train"] = train_dl
    # choose the val or test data loader for evaluation
for split in ["val", "test"]:
if split in params.eval_type:
if split == "val":
val_sampler = SequentialSampler(val_ds, batch_size=params.eval_batch_size)
dl = DataLoader(val_ds, val_sampler, num_workers=params.num_workers)
elif split == "test":
test_sampler = SequentialSampler(test_ds, batch_size=params.eval_batch_size)
dl = DataLoader(test_ds, test_sampler, num_workers=params.num_workers)
else:
raise ValueError("Unknown eval_type in params, should in [val, test]")
dataloaders[split] = dl
else:
dataloaders[split] = None
return dataloaders
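# Hedged usage sketch (assumes a `params` object with the fields read above):
# build the loaders and iterate over the training split; each batch carries the
# "points", "label" and "idx" fields produced by ModelNetNpy.
#
#   dataloaders = fetch_dataloader(params)
#   for batch in dataloaders["train"]:
#       ...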
|
[
"megengine.data.DataLoader",
"megengine.data.sampler.RandomSampler",
"megengine.data.sampler.SequentialSampler",
"megengine.distributed.get_rank"
] |
[((347, 374), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (364, 374), False, 'import logging\n'), ((3269, 3292), 'dataset.transformations.fetch_transform', 'fetch_transform', (['params'], {}), '(params)\n', (3284, 3292), False, 'from dataset.transformations import fetch_transform\n'), ((5226, 5301), 'megengine.data.sampler.RandomSampler', 'RandomSampler', (['train_ds'], {'batch_size': 'params.train_batch_size', 'drop_last': '(True)'}), '(train_ds, batch_size=params.train_batch_size, drop_last=True)\n', (5239, 5301), False, 'from megengine.data.sampler import RandomSampler, SequentialSampler\n'), ((5317, 5384), 'megengine.data.DataLoader', 'DataLoader', (['train_ds', 'train_sampler'], {'num_workers': 'params.num_workers'}), '(train_ds, train_sampler, num_workers=params.num_workers)\n', (5327, 5384), False, 'from megengine.data import DataLoader\n'), ((546, 588), 'logging.getLogger', 'logging.getLogger', (['self.__class__.__name__'], {}), '(self.__class__.__name__)\n', (563, 588), False, 'import logging\n'), ((2606, 2624), 'numpy.load', 'np.load', (['data_path'], {}), '(data_path)\n', (2613, 2624), True, 'import numpy as np\n'), ((679, 694), 'megengine.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (692, 694), True, 'import megengine.distributed as dist\n'), ((1646, 1721), 'common.utils.master_logger', 'utils.master_logger', (['self._logger', '"""Using all categories."""', 'self._is_master'], {}), "(self._logger, 'Using all categories.', self._is_master)\n", (1665, 1721), False, 'from common import utils\n'), ((2285, 2299), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2296, 2299), False, 'import pickle\n'), ((3206, 3221), 'megengine.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (3219, 3221), True, 'import megengine.distributed as dist\n'), ((957, 983), 'os.path.join', 'os.path.join', (['dataset_path'], {}), '(dataset_path)\n', (969, 983), False, 'import os\n'), ((1093, 1138), 'os.path.join', 'os.path.join', (['dataset_path', '"""shape_names.txt"""'], {}), "(dataset_path, 'shape_names.txt')\n", (1105, 1138), False, 'import os\n'), ((5605, 5665), 'megengine.data.sampler.SequentialSampler', 'SequentialSampler', (['val_ds'], {'batch_size': 'params.eval_batch_size'}), '(val_ds, batch_size=params.eval_batch_size)\n', (5622, 5665), False, 'from megengine.data.sampler import RandomSampler, SequentialSampler\n'), ((5687, 5750), 'megengine.data.DataLoader', 'DataLoader', (['val_ds', 'val_sampler'], {'num_workers': 'params.num_workers'}), '(val_ds, val_sampler, num_workers=params.num_workers)\n', (5697, 5750), False, 'from megengine.data import DataLoader\n'), ((5816, 5877), 'megengine.data.sampler.SequentialSampler', 'SequentialSampler', (['test_ds'], {'batch_size': 'params.eval_batch_size'}), '(test_ds, batch_size=params.eval_batch_size)\n', (5833, 5877), False, 'from megengine.data.sampler import RandomSampler, SequentialSampler\n'), ((5899, 5964), 'megengine.data.DataLoader', 'DataLoader', (['test_ds', 'test_sampler'], {'num_workers': 'params.num_workers'}), '(test_ds, test_sampler, num_workers=params.num_workers)\n', (5909, 5964), False, 'from megengine.data import DataLoader\n'), ((2669, 2696), 'os.path.basename', 'os.path.basename', (['data_path'], {}), '(data_path)\n', (2685, 2696), False, 'import os\n'), ((2763, 2790), 'os.path.basename', 'os.path.basename', (['data_path'], {}), '(data_path)\n', (2779, 2790), False, 'import os\n')]
|
import numpy as np
import megengine as mge
import megengine.functional as F
from common import se3, so3
def compute_losses(data_batch, endpoints, params):
loss = {}
# compute losses
if params.loss_type == "omnet":
num_iter = len(endpoints["all_pose_pair"])
for i in range(num_iter):
# mask loss
src_cls_pair, ref_cls_pair = endpoints["all_src_cls_pair"][i], endpoints["all_ref_cls_pair"][i]
src_cls = F.nn.frequency_weighted_cross_entropy(src_cls_pair[1], src_cls_pair[0], weight=mge.tensor([0.7, 0.3]))
ref_cls = F.nn.frequency_weighted_cross_entropy(ref_cls_pair[1], ref_cls_pair[0], weight=mge.tensor([0.7, 0.3]))
loss["cls_{}".format(i)] = (src_cls + ref_cls) / 2.0
# reg loss
pose_pair = endpoints["all_pose_pair"][i]
loss["quat_{}".format(i)] = F.nn.l1_loss(pose_pair[1][:, :4], pose_pair[0][:, :4]) * params.loss_alpha1
loss["translate_{}".format(i)] = F.nn.square_loss(pose_pair[1][:, 4:], pose_pair[0][:, 4:]) * params.loss_alpha2
# total loss
total_losses = []
for k in loss:
total_losses.append(loss[k])
loss["total"] = F.sum(F.concat(total_losses))
else:
raise NotImplementedError
return loss
def compute_metrics(data_batch, endpoints, params):
metrics = {}
gt_transforms = endpoints["transform_pair"][0]
pred_transforms = endpoints["transform_pair"][1]
# Euler angles, Individual translation errors (Deep Closest Point convention)
if "prnet" in params.transform_type:
r_gt_euler_deg = so3.mge_dcm2euler(gt_transforms[:, :3, :3], seq="zyx")
r_pred_euler_deg = so3.mge_dcm2euler(pred_transforms[:, :3, :3], seq="zyx")
else:
r_gt_euler_deg = so3.mge_dcm2euler(gt_transforms[:, :3, :3], seq="xyz")
r_pred_euler_deg = so3.mge_dcm2euler(pred_transforms[:, :3, :3], seq="xyz")
t_gt = gt_transforms[:, :3, 3]
t_pred = pred_transforms[:, :3, 3]
r_mse = F.mean((r_gt_euler_deg - r_pred_euler_deg)**2, axis=1)
r_mae = F.mean(F.abs(r_gt_euler_deg - r_pred_euler_deg), axis=1)
t_mse = F.mean((t_gt - t_pred)**2, axis=1)
t_mae = F.mean(F.abs(t_gt - t_pred), axis=1)
r_mse = F.mean(r_mse)
t_mse = F.mean(t_mse)
r_mae = F.mean(r_mae)
t_mae = F.mean(t_mae)
    # Rotation, translation errors (isotropic, i.e. doesn't depend on error
    # direction, which is more representative of the actual error)
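    # For the residual rotation R = R_gt^-1 @ R_pred, trace(R) = 1 + 2*cos(theta),
    # so theta = arccos((trace(R) - 1) / 2) below is the geodesic rotation error
    # (converted to degrees); the translation error is the norm of the residual
    # translation column.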
concatenated = se3.mge_concatenate(se3.mge_inverse(gt_transforms), pred_transforms)
rot_trace = concatenated[:, 0, 0] + concatenated[:, 1, 1] + concatenated[:, 2, 2]
residual_rotdeg = F.acos(F.clip(0.5 * (rot_trace - 1), -1.0, 1.0)) * 180.0 / np.pi
residual_transmag = F.norm(concatenated[:, :, 3], axis=-1)
err_r = F.mean(residual_rotdeg)
err_t = F.mean(residual_transmag)
# weighted score of isotropic errors
score = err_r * 0.01 + err_t
metrics = {"R_MSE": r_mse, "R_MAE": r_mae, "t_MSE": t_mse, "t_MAE": t_mae, "Err_R": err_r, "Err_t": err_t, "score": score}
# metrics = utils.tensor_mge(metrics, check_on=False)
return metrics
|
[
"megengine.tensor",
"megengine.functional.nn.l1_loss",
"megengine.functional.clip",
"megengine.functional.nn.square_loss",
"megengine.functional.mean",
"megengine.functional.norm",
"megengine.functional.abs",
"megengine.functional.concat"
] |
[((2027, 2083), 'megengine.functional.mean', 'F.mean', (['((r_gt_euler_deg - r_pred_euler_deg) ** 2)'], {'axis': '(1)'}), '((r_gt_euler_deg - r_pred_euler_deg) ** 2, axis=1)\n', (2033, 2083), True, 'import megengine.functional as F\n'), ((2163, 2199), 'megengine.functional.mean', 'F.mean', (['((t_gt - t_pred) ** 2)'], {'axis': '(1)'}), '((t_gt - t_pred) ** 2, axis=1)\n', (2169, 2199), True, 'import megengine.functional as F\n'), ((2260, 2273), 'megengine.functional.mean', 'F.mean', (['r_mse'], {}), '(r_mse)\n', (2266, 2273), True, 'import megengine.functional as F\n'), ((2286, 2299), 'megengine.functional.mean', 'F.mean', (['t_mse'], {}), '(t_mse)\n', (2292, 2299), True, 'import megengine.functional as F\n'), ((2312, 2325), 'megengine.functional.mean', 'F.mean', (['r_mae'], {}), '(r_mae)\n', (2318, 2325), True, 'import megengine.functional as F\n'), ((2338, 2351), 'megengine.functional.mean', 'F.mean', (['t_mae'], {}), '(t_mae)\n', (2344, 2351), True, 'import megengine.functional as F\n'), ((2781, 2819), 'megengine.functional.norm', 'F.norm', (['concatenated[:, :, 3]'], {'axis': '(-1)'}), '(concatenated[:, :, 3], axis=-1)\n', (2787, 2819), True, 'import megengine.functional as F\n'), ((2832, 2855), 'megengine.functional.mean', 'F.mean', (['residual_rotdeg'], {}), '(residual_rotdeg)\n', (2838, 2855), True, 'import megengine.functional as F\n'), ((2868, 2893), 'megengine.functional.mean', 'F.mean', (['residual_transmag'], {}), '(residual_transmag)\n', (2874, 2893), True, 'import megengine.functional as F\n'), ((1627, 1681), 'common.so3.mge_dcm2euler', 'so3.mge_dcm2euler', (['gt_transforms[:, :3, :3]'], {'seq': '"""zyx"""'}), "(gt_transforms[:, :3, :3], seq='zyx')\n", (1644, 1681), False, 'from common import se3, so3\n'), ((1709, 1765), 'common.so3.mge_dcm2euler', 'so3.mge_dcm2euler', (['pred_transforms[:, :3, :3]'], {'seq': '"""zyx"""'}), "(pred_transforms[:, :3, :3], seq='zyx')\n", (1726, 1765), False, 'from common import se3, so3\n'), ((1801, 1855), 'common.so3.mge_dcm2euler', 'so3.mge_dcm2euler', (['gt_transforms[:, :3, :3]'], {'seq': '"""xyz"""'}), "(gt_transforms[:, :3, :3], seq='xyz')\n", (1818, 1855), False, 'from common import se3, so3\n'), ((1883, 1939), 'common.so3.mge_dcm2euler', 'so3.mge_dcm2euler', (['pred_transforms[:, :3, :3]'], {'seq': '"""xyz"""'}), "(pred_transforms[:, :3, :3], seq='xyz')\n", (1900, 1939), False, 'from common import se3, so3\n'), ((2101, 2141), 'megengine.functional.abs', 'F.abs', (['(r_gt_euler_deg - r_pred_euler_deg)'], {}), '(r_gt_euler_deg - r_pred_euler_deg)\n', (2106, 2141), True, 'import megengine.functional as F\n'), ((2217, 2237), 'megengine.functional.abs', 'F.abs', (['(t_gt - t_pred)'], {}), '(t_gt - t_pred)\n', (2222, 2237), True, 'import megengine.functional as F\n'), ((2535, 2565), 'common.se3.mge_inverse', 'se3.mge_inverse', (['gt_transforms'], {}), '(gt_transforms)\n', (2550, 2565), False, 'from common import se3, so3\n'), ((1219, 1241), 'megengine.functional.concat', 'F.concat', (['total_losses'], {}), '(total_losses)\n', (1227, 1241), True, 'import megengine.functional as F\n'), ((877, 931), 'megengine.functional.nn.l1_loss', 'F.nn.l1_loss', (['pose_pair[1][:, :4]', 'pose_pair[0][:, :4]'], {}), '(pose_pair[1][:, :4], pose_pair[0][:, :4])\n', (889, 931), True, 'import megengine.functional as F\n'), ((998, 1056), 'megengine.functional.nn.square_loss', 'F.nn.square_loss', (['pose_pair[1][:, 4:]', 'pose_pair[0][:, 4:]'], {}), '(pose_pair[1][:, 4:], pose_pair[0][:, 4:])\n', (1014, 1056), True, 'import megengine.functional as F\n'), ((2699, 
2739), 'megengine.functional.clip', 'F.clip', (['(0.5 * (rot_trace - 1))', '(-1.0)', '(1.0)'], {}), '(0.5 * (rot_trace - 1), -1.0, 1.0)\n', (2705, 2739), True, 'import megengine.functional as F\n'), ((546, 568), 'megengine.tensor', 'mge.tensor', (['[0.7, 0.3]'], {}), '([0.7, 0.3])\n', (556, 568), True, 'import megengine as mge\n'), ((671, 693), 'megengine.tensor', 'mge.tensor', (['[0.7, 0.3]'], {}), '([0.7, 0.3])\n', (681, 693), True, 'import megengine as mge\n')]
|
"""Add participant and application
Revision ID: 58d2280520b8
Revises:
Create Date: 2022-02-12 07:30:30.427270+00:00
"""
import sqlalchemy as sa
import sqlmodel
from alembic import op
# revision identifiers, used by Alembic.
revision = "58d2280520b8"
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"participants",
sa.Column("first_name", sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column("last_name", sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column("email", sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column("id", sa.Integer(), nullable=False),
sa.PrimaryKeyConstraint("id"),
)
op.create_table(
"applications",
sa.Column(
"gender",
sa.Enum("MALE", "FEMALE", "NON_BINARY", name="gender"),
nullable=True,
),
sa.Column(
"race_ethnicity",
sa.Enum(
"AMERICAN_INDIAN",
"ASIAN",
"PACIFIC_ISLANDER",
"BLACK",
"HISPANIC",
"CAUCASIAN",
"MULTIPLE_OTHER",
name="raceethnicity",
),
nullable=True,
),
sa.Column("participant_id", sa.Integer(), nullable=False),
sa.Column("level_of_study", sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column("graduation_year", sa.Integer(), nullable=False),
sa.Column("major", sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column("date_of_birth", sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column("hackathons_attended", sa.Integer(), nullable=False),
sa.Column("portfolio_url", sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column("vcs_url", sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column(
"shipping_address", sqlmodel.sql.sqltypes.AutoString(), nullable=True
),
sa.Column("share_information", sa.Boolean(), nullable=False),
sa.ForeignKeyConstraint(
["participant_id"],
["participants.id"],
),
sa.PrimaryKeyConstraint("participant_id"),
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table("applications")
op.drop_table("participants")
# ### end Alembic commands ###
|
[
"sqlmodel.sql.sqltypes.AutoString"
] |
[((2458, 2487), 'alembic.op.drop_table', 'op.drop_table', (['"""applications"""'], {}), "('applications')\n", (2471, 2487), False, 'from alembic import op\n'), ((2492, 2521), 'alembic.op.drop_table', 'op.drop_table', (['"""participants"""'], {}), "('participants')\n", (2505, 2521), False, 'from alembic import op\n'), ((753, 782), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""'], {}), "('id')\n", (776, 782), True, 'import sqlalchemy as sa\n'), ((2176, 2240), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['participant_id']", "['participants.id']"], {}), "(['participant_id'], ['participants.id'])\n", (2199, 2240), True, 'import sqlalchemy as sa\n'), ((2285, 2326), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""participant_id"""'], {}), "('participant_id')\n", (2308, 2326), True, 'import sqlalchemy as sa\n'), ((473, 507), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (505, 507), False, 'import sqlmodel\n'), ((557, 591), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (589, 591), False, 'import sqlmodel\n'), ((637, 671), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (669, 671), False, 'import sqlmodel\n'), ((714, 726), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (724, 726), True, 'import sqlalchemy as sa\n'), ((888, 942), 'sqlalchemy.Enum', 'sa.Enum', (['"""MALE"""', '"""FEMALE"""', '"""NON_BINARY"""'], {'name': '"""gender"""'}), "('MALE', 'FEMALE', 'NON_BINARY', name='gender')\n", (895, 942), True, 'import sqlalchemy as sa\n'), ((1043, 1176), 'sqlalchemy.Enum', 'sa.Enum', (['"""AMERICAN_INDIAN"""', '"""ASIAN"""', '"""PACIFIC_ISLANDER"""', '"""BLACK"""', '"""HISPANIC"""', '"""CAUCASIAN"""', '"""MULTIPLE_OTHER"""'], {'name': '"""raceethnicity"""'}), "('AMERICAN_INDIAN', 'ASIAN', 'PACIFIC_ISLANDER', 'BLACK', 'HISPANIC',\n 'CAUCASIAN', 'MULTIPLE_OTHER', name='raceethnicity')\n", (1050, 1176), True, 'import sqlalchemy as sa\n'), ((1391, 1403), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (1401, 1403), True, 'import sqlalchemy as sa\n'), ((1458, 1492), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (1490, 1492), False, 'import sqlmodel\n'), ((1548, 1560), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (1558, 1560), True, 'import sqlalchemy as sa\n'), ((1606, 1640), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (1638, 1640), False, 'import sqlmodel\n'), ((1693, 1727), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (1725, 1727), False, 'import sqlmodel\n'), ((1787, 1799), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (1797, 1799), True, 'import sqlalchemy as sa\n'), ((1853, 1887), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (1885, 1887), False, 'import sqlmodel\n'), ((1934, 1968), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (1966, 1968), False, 'import sqlmodel\n'), ((2037, 2071), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (2069, 2071), False, 'import sqlmodel\n'), ((2137, 2149), 'sqlalchemy.Boolean', 'sa.Boolean', ([], {}), '()\n', (2147, 2149), True, 'import sqlalchemy as sa\n')]
|
from typing import TYPE_CHECKING, List, Optional, Type
from uuid import UUID
from sqlalchemy import event
from sqlalchemy.schema import Column, ForeignKey, UniqueConstraint
from sqlmodel import Field, Relationship
from sqlmodel.sql.sqltypes import GUID
from joj.horse.models.base import DomainURLORMModel, url_pre_save
from joj.horse.models.link_tables import ProblemProblemSetLink
from joj.horse.schemas.problem import ProblemDetail, WithLatestRecordType
from joj.horse.services.db import db_session
if TYPE_CHECKING:
from joj.horse.models import (
Domain,
ProblemConfig,
ProblemGroup,
ProblemSet,
Record,
User,
)
class Problem(DomainURLORMModel, ProblemDetail, table=True): # type: ignore[call-arg]
__tablename__ = "problems"
__table_args__ = (UniqueConstraint("domain_id", "url"),)
domain_id: UUID = Field(
sa_column=Column(
GUID, ForeignKey("domains.id", ondelete="CASCADE"), nullable=False
)
)
domain: "Domain" = Relationship(back_populates="problems")
owner_id: Optional[UUID] = Field(
sa_column=Column(
GUID, ForeignKey("users.id", ondelete="SET NULL"), nullable=True
)
)
owner: Optional["User"] = Relationship(back_populates="owned_problems")
problem_group_id: Optional[UUID] = Field(
sa_column=Column(
GUID, ForeignKey("problem_groups.id", ondelete="SET NULL"), nullable=True
)
)
problem_group: Optional["ProblemGroup"] = Relationship(back_populates="problems")
problem_sets: List["ProblemSet"] = Relationship(
back_populates="problems",
link_model=ProblemProblemSetLink,
)
problem_problem_set_links: List[ProblemProblemSetLink] = Relationship(
back_populates="problem",
)
records: List["Record"] = Relationship(back_populates="problem")
problem_configs: List["ProblemConfig"] = Relationship(back_populates="problem")
@classmethod
async def get_problems_with_record_states(
cls,
result_cls: Type[WithLatestRecordType],
problem_set_id: Optional[UUID],
problems: List["Problem"],
user_id: UUID,
) -> List[WithLatestRecordType]:
from joj.horse import models
problem_ids = [problem.id for problem in problems]
records = await models.Record.get_user_latest_records(
problem_set_id=problem_set_id, problem_ids=problem_ids, user_id=user_id
)
problems = [
result_cls(**problems[i].dict(), latest_record=records[i])
for i, record in enumerate(records)
]
return problems
async def get_latest_problem_config(self) -> Optional["ProblemConfig"]:
from joj.horse import models
statement = (
models.ProblemConfig.sql_select()
.where(models.ProblemConfig.problem_id == self.id)
.order_by(models.ProblemConfig.created_at.desc()) # type: ignore
.limit(1)
)
async with db_session() as session:
results = await session.exec(statement)
return results.one_or_none()
event.listen(Problem, "before_insert", url_pre_save)
event.listen(Problem, "before_update", url_pre_save)
|
[
"sqlmodel.Relationship"
] |
[((3154, 3206), 'sqlalchemy.event.listen', 'event.listen', (['Problem', '"""before_insert"""', 'url_pre_save'], {}), "(Problem, 'before_insert', url_pre_save)\n", (3166, 3206), False, 'from sqlalchemy import event\n'), ((3207, 3259), 'sqlalchemy.event.listen', 'event.listen', (['Problem', '"""before_update"""', 'url_pre_save'], {}), "(Problem, 'before_update', url_pre_save)\n", (3219, 3259), False, 'from sqlalchemy import event\n'), ((1029, 1068), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""problems"""'}), "(back_populates='problems')\n", (1041, 1068), False, 'from sqlmodel import Field, Relationship\n'), ((1257, 1302), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""owned_problems"""'}), "(back_populates='owned_problems')\n", (1269, 1302), False, 'from sqlmodel import Field, Relationship\n'), ((1524, 1563), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""problems"""'}), "(back_populates='problems')\n", (1536, 1563), False, 'from sqlmodel import Field, Relationship\n'), ((1604, 1677), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""problems"""', 'link_model': 'ProblemProblemSetLink'}), "(back_populates='problems', link_model=ProblemProblemSetLink)\n", (1616, 1677), False, 'from sqlmodel import Field, Relationship\n'), ((1762, 1800), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""problem"""'}), "(back_populates='problem')\n", (1774, 1800), False, 'from sqlmodel import Field, Relationship\n'), ((1847, 1885), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""problem"""'}), "(back_populates='problem')\n", (1859, 1885), False, 'from sqlmodel import Field, Relationship\n'), ((1931, 1969), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""problem"""'}), "(back_populates='problem')\n", (1943, 1969), False, 'from sqlmodel import Field, Relationship\n'), ((816, 852), 'sqlalchemy.schema.UniqueConstraint', 'UniqueConstraint', (['"""domain_id"""', '"""url"""'], {}), "('domain_id', 'url')\n", (832, 852), False, 'from sqlalchemy.schema import Column, ForeignKey, UniqueConstraint\n'), ((2352, 2466), 'joj.horse.models.Record.get_user_latest_records', 'models.Record.get_user_latest_records', ([], {'problem_set_id': 'problem_set_id', 'problem_ids': 'problem_ids', 'user_id': 'user_id'}), '(problem_set_id=problem_set_id,\n problem_ids=problem_ids, user_id=user_id)\n', (2389, 2466), False, 'from joj.horse import models\n'), ((3034, 3046), 'joj.horse.services.db.db_session', 'db_session', ([], {}), '()\n', (3044, 3046), False, 'from joj.horse.services.db import db_session\n'), ((929, 973), 'sqlalchemy.schema.ForeignKey', 'ForeignKey', (['"""domains.id"""'], {'ondelete': '"""CASCADE"""'}), "('domains.id', ondelete='CASCADE')\n", (939, 973), False, 'from sqlalchemy.schema import Column, ForeignKey, UniqueConstraint\n'), ((1152, 1195), 'sqlalchemy.schema.ForeignKey', 'ForeignKey', (['"""users.id"""'], {'ondelete': '"""SET NULL"""'}), "('users.id', ondelete='SET NULL')\n", (1162, 1195), False, 'from sqlalchemy.schema import Column, ForeignKey, UniqueConstraint\n'), ((1394, 1446), 'sqlalchemy.schema.ForeignKey', 'ForeignKey', (['"""problem_groups.id"""'], {'ondelete': '"""SET NULL"""'}), "('problem_groups.id', ondelete='SET NULL')\n", (1404, 1446), False, 'from sqlalchemy.schema import Column, ForeignKey, UniqueConstraint\n'), ((2927, 2965), 'joj.horse.models.ProblemConfig.created_at.desc', 'models.ProblemConfig.created_at.desc', ([], {}), '()\n', (2963, 2965), False, 'from joj.horse import models\n'), ((2808, 2841), 'joj.horse.models.ProblemConfig.sql_select', 'models.ProblemConfig.sql_select', ([], {}), '()\n', (2839, 2841), False, 'from joj.horse import models\n')]
|
from copy import copy
from sfepy.base.base import output, get_default, Struct
from sfepy.applications import PDESolverApp, Application
from coefs_base import MiniAppBase
def insert_sub_reqs(reqs, levels, req_info):
"""Recursively build all requirements in correct order."""
all_reqs = []
for _, req in enumerate(reqs):
# Coefficients are referenced as 'c.<name>'...
areq = req
if req.startswith('c.'):
areq = req[2:]
try:
rargs = req_info[areq]
except KeyError:
raise ValueError('requirement "%s" is not defined!' % req)
sub_reqs = rargs.get('requires', [])
if req in levels:
raise ValueError('circular requirement "%s"!' % (req))
if sub_reqs:
levels.append(req)
all_reqs.extend(insert_sub_reqs(sub_reqs, levels, req_info))
levels.pop()
if req in all_reqs:
raise ValueError('circular requirement "%s"!' % (req))
else:
all_reqs.append(req)
return all_reqs
class HomogenizationEngine(PDESolverApp):
@staticmethod
def process_options(options):
get = options.get
return Struct(coefs=get('coefs', None,
'missing "coefs" in options!'),
requirements=get('requirements', None,
'missing "requirements" in options!'),
compute_only=get('compute_only', None),
save_format=get('save_format', 'vtk'),
dump_format=get('dump_format', 'h5'),
coefs_info=get('coefs_info', None))
def __init__(self, problem, options, app_options=None,
volume=None, output_prefix='he:', **kwargs):
"""Bypasses PDESolverApp.__init__()!"""
Application.__init__(self, problem.conf, options, output_prefix,
**kwargs)
self.problem = problem
self.setup_options(app_options=app_options)
self.setup_output_info(self.problem, self.options)
if volume is None:
self.volume = self.problem.evaluate(self.app_options.total_volume)
else:
self.volume = volume
def setup_options(self, app_options=None):
PDESolverApp.setup_options(self)
app_options = get_default(app_options, self.conf.options)
po = HomogenizationEngine.process_options
self.app_options += po(app_options)
def compute_requirements(self, requirements, dependencies, store):
problem = self.problem
opts = self.app_options
req_info = getattr(self.conf, opts.requirements)
requires = insert_sub_reqs(copy(requirements), [], req_info)
for req in requires:
if req in dependencies and (dependencies[req] is not None):
continue
output('computing dependency %s...' % req)
rargs = req_info[req]
mini_app = MiniAppBase.any_from_conf(req, problem, rargs)
mini_app.setup_output(save_format=opts.save_format,
dump_format=opts.dump_format,
post_process_hook=self.post_process_hook,
file_per_var=opts.file_per_var)
store(mini_app)
problem.clear_equations()
# Pass only the direct dependencies, not the indirect ones.
dep_requires = rargs.get('requires', [])
data = {}
for key in dep_requires:
data[key] = dependencies[key]
dep = mini_app(data=data)
dependencies[req] = dep
output('...done')
return dependencies
def call(self, ret_all=False):
problem = self.problem
opts = self.app_options
coef_info = getattr(self.conf, opts.coefs)
compute_names = set(get_default(opts.compute_only, coef_info.keys()))
compute_names = ['c.' + key for key in compute_names]
is_store_filenames = coef_info.pop('filenames', None) is not None
try:
compute_names.remove('c.filenames')
except:
pass
dependencies = {}
save_names = {}
dump_names = {}
def store_filenames(app):
if not '(not_set)' in app.get_save_name_base():
save_names[app.name] = app.get_save_name_base()
if not '(not_set)' in app.get_dump_name_base():
dump_names[app.name] = app.get_dump_name_base()
# Some coefficients can require other coefficients - resolve their
# order here.
req_info = self.conf.get(opts.requirements, {})
info = copy(coef_info)
info.update(req_info)
all_deps = set(compute_names)
sorted_names = []
for coef_name in compute_names:
cargs = coef_info[coef_name[2:]]
requires = cargs.get('requires', [])
deps = insert_sub_reqs(copy(requires), [], info)
all_deps.update(deps)
aux = [key for key in deps if key.startswith('c.')] + [coef_name]
sorted_names.extend(aux)
sorted_coef_names = []
for name in sorted_names:
if name[2:] not in sorted_coef_names:
sorted_coef_names.append(name[2:])
coefs = Struct()
for coef_name in sorted_coef_names:
cargs = coef_info[coef_name]
output('computing %s...' % coef_name)
requires = cargs.get('requires', [])
requirements = [name for name in requires if not
name.startswith('c.')]
self.compute_requirements(requirements, dependencies,
store_filenames)
for name in requires:
if name.startswith('c.'):
dependencies[name] = getattr(coefs, name[2:])
mini_app = MiniAppBase.any_from_conf(coef_name, problem, cargs)
problem.clear_equations()
# Pass only the direct dependencies, not the indirect ones.
data = {}
for key in requires:
data[key] = dependencies[key]
val = mini_app(self.volume, data=data)
setattr(coefs, coef_name, val)
output('...done')
# remove "auxiliary" coefs
for coef_name in sorted_coef_names:
cstat = coef_info[coef_name].get('status', 'main')
if cstat == 'auxiliary':
delattr(coefs, coef_name)
# Store filenames of all requirements as a "coefficient".
if is_store_filenames:
coefs.save_names = save_names
coefs.dump_names = dump_names
if opts.coefs_info is not None:
coefs.info = opts.coefs_info
if ret_all:
return coefs, dependencies
else:
return coefs
|
[
"sfepy.base.base.output",
"sfepy.applications.Application.__init__",
"sfepy.base.base.Struct",
"sfepy.applications.PDESolverApp.setup_options",
"sfepy.base.base.get_default"
] |
[((1859, 1933), 'sfepy.applications.Application.__init__', 'Application.__init__', (['self', 'problem.conf', 'options', 'output_prefix'], {}), '(self, problem.conf, options, output_prefix, **kwargs)\n', (1879, 1933), False, 'from sfepy.applications import PDESolverApp, Application\n'), ((2316, 2348), 'sfepy.applications.PDESolverApp.setup_options', 'PDESolverApp.setup_options', (['self'], {}), '(self)\n', (2342, 2348), False, 'from sfepy.applications import PDESolverApp, Application\n'), ((2371, 2414), 'sfepy.base.base.get_default', 'get_default', (['app_options', 'self.conf.options'], {}), '(app_options, self.conf.options)\n', (2382, 2414), False, 'from sfepy.base.base import output, get_default, Struct\n'), ((4753, 4768), 'copy.copy', 'copy', (['coef_info'], {}), '(coef_info)\n', (4757, 4768), False, 'from copy import copy\n'), ((5392, 5400), 'sfepy.base.base.Struct', 'Struct', ([], {}), '()\n', (5398, 5400), False, 'from sfepy.base.base import output, get_default, Struct\n'), ((2739, 2757), 'copy.copy', 'copy', (['requirements'], {}), '(requirements)\n', (2743, 2757), False, 'from copy import copy\n'), ((2913, 2955), 'sfepy.base.base.output', 'output', (["('computing dependency %s...' % req)"], {}), "('computing dependency %s...' % req)\n", (2919, 2955), False, 'from sfepy.base.base import output, get_default, Struct\n'), ((3015, 3061), 'coefs_base.MiniAppBase.any_from_conf', 'MiniAppBase.any_from_conf', (['req', 'problem', 'rargs'], {}), '(req, problem, rargs)\n', (3040, 3061), False, 'from coefs_base import MiniAppBase\n'), ((3718, 3735), 'sfepy.base.base.output', 'output', (['"""...done"""'], {}), "('...done')\n", (3724, 3735), False, 'from sfepy.base.base import output, get_default, Struct\n'), ((5498, 5535), 'sfepy.base.base.output', 'output', (["('computing %s...' % coef_name)"], {}), "('computing %s...' % coef_name)\n", (5504, 5535), False, 'from sfepy.base.base import output, get_default, Struct\n'), ((5986, 6038), 'coefs_base.MiniAppBase.any_from_conf', 'MiniAppBase.any_from_conf', (['coef_name', 'problem', 'cargs'], {}), '(coef_name, problem, cargs)\n', (6011, 6038), False, 'from coefs_base import MiniAppBase\n'), ((6359, 6376), 'sfepy.base.base.output', 'output', (['"""...done"""'], {}), "('...done')\n", (6365, 6376), False, 'from sfepy.base.base import output, get_default, Struct\n'), ((5032, 5046), 'copy.copy', 'copy', (['requires'], {}), '(requires)\n', (5036, 5046), False, 'from copy import copy\n')]
|