Dataset columns: code (string, 110 to 64.5k characters), apis (list of API names), extract_api (string, 123 to 69.9k characters).
import os.path as op
import numpy as nm
import sfepy
from sfepy.discrete.common import Field
import sfepy.discrete.common.global_interp as gi
from sfepy.base.testing import TestCommon
class Test(TestCommon):
@staticmethod
def from_conf(conf, options):
test = Test(conf=conf, options=options)
return test
def test_ref_coors_fem(self):
from sfepy.discrete.fem import Mesh, FEDomain
mesh = Mesh.from_file('meshes/3d/special/cross3d.mesh',
prefix_dir=sfepy.data_dir)
domain = FEDomain('domain', mesh)
omega = domain.create_region('Omega', 'all')
field = Field.from_args('linear', nm.float64, 'scalar', omega,
approx_order=1)
mcoors = field.domain.get_mesh_coors()
conn = field.domain.get_conn()
bbox = field.domain.get_mesh_bounding_box()
ray = nm.linspace(bbox[0, 0], bbox[1, 0], 7)
coors = nm.zeros((ray.shape[0], 3), dtype=nm.float64)
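        # Three probe rays along the x, y and z axes; the same coordinate
        # array is reused and zeroed between yields.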
def gen_rays():
coors[:, 0] = ray
yield coors
coors.fill(0.0)
coors[:, 1] = ray
yield coors
coors.fill(0.0)
coors[:, 2] = ray
yield coors
ok = True
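        # Basis evaluation context bound to the field geometry; setting
        # ctx.iel selects the cell before each evaluate() call.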
ctx = field.create_basis_context()._geo_ctx
for ir, coors in enumerate(gen_rays()):
self.report('ray %d' % ir)
ref_coors, cells, status = gi.get_ref_coors(field, coors,
strategy='general',
close_limit=0.0,
verbose=False)
self.report(ref_coors)
self.report(cells)
self.report(status)
# In the distorted cell 2, the Newton method finds a solution
# outside of the cell. This will be fixed when box constraints
# are applied.
_ok = nm.all((status == 0) | ((cells == 2) & (status == 3)))
if not _ok:
self.report('wrong status %s for ray %d!' % (status, ir))
ok = ok and _ok
for ic, cell in enumerate(cells):
ctx.iel = cell
bf = ctx.evaluate(ref_coors[ic:ic+1], check_errors=False)
cell_coors = mcoors[conn[cell]]
coor = nm.dot(bf, cell_coors).ravel()
_ok = nm.allclose(coor, coors[ic], atol=1e-14, rtol=0.0)
if not _ok:
self.report('ray %d point %d:' % (ir, ic))
self.report(' - wrong reference coordinates %s!'
% ref_coors[ic])
self.report(' - given point: %s' % coors[ic])
self.report(' - found point: %s' % coor)
ok = ok and _ok
return ok
def test_ref_coors_iga(self):
from sfepy.discrete.iga.domain import IGDomain
domain = IGDomain.from_file(op.join(sfepy.data_dir,
'meshes/iga/block2d.iga'))
omega = domain.create_region('Omega', 'all')
field = Field.from_args('iga', nm.float64, 'scalar', omega,
approx_order='iga', poly_space_base='iga')
mcoors = field.nurbs.cps
conn = field.get_econn('volume', field.region)
bbox = domain.eval_mesh.get_bounding_box()
ray = nm.linspace(bbox[0, 0], bbox[1, 0], 11)
coors = nm.c_[ray, ray]
ref_coors, cells, status = gi.get_ref_coors(field, coors,
strategy='general',
close_limit=0.0,
verbose=False)
self.report(ref_coors)
self.report(cells)
self.report(status)
ok = nm.all(status == 0)
ctx = field.create_basis_context()
for ic, cell in enumerate(cells):
ctx.iel = cell
bf = ctx.evaluate(ref_coors[ic:ic+1])
cell_coors = mcoors[conn[cell]]
coor = nm.dot(bf, cell_coors).ravel()
_ok = nm.allclose(coor, coors[ic], atol=1e-14, rtol=0.0)
if not _ok:
self.report('point %d:' % ic)
self.report(' - wrong reference coordinates %s!'
% ref_coors[ic])
self.report(' - given point: %s' % coors[ic])
self.report(' - found point: %s' % coor)
ok = ok and _ok
return ok
apis:
[
"sfepy.discrete.common.Field.from_args",
"sfepy.discrete.common.global_interp.get_ref_coors",
"sfepy.discrete.fem.Mesh.from_file",
"sfepy.discrete.fem.FEDomain"
]
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
import megengine as mge
import megengine.module as M
import numpy as np
import pytest
from basecls.layers import SE, DropPath, conv2d, gap2d, linear, norm2d, pool2d
@pytest.mark.parametrize("w_in", [4])
@pytest.mark.parametrize("w_out", [8])
@pytest.mark.parametrize("k", [3, 5])
@pytest.mark.parametrize("stride", [1, 2])
@pytest.mark.parametrize("dilation", [1, 2])
@pytest.mark.parametrize("groups", [1, 2])
@pytest.mark.parametrize("bias", [True, False])
def test_conv2d(w_in, w_out, k, stride, dilation, groups, bias):
m = conv2d(w_in, w_out, k, stride=stride, dilation=dilation, groups=groups, bias=bias)
assert isinstance(m, M.Conv2d)
m(mge.random.normal(size=(2, 4, 8, 8)))
@pytest.mark.parametrize("drop_prob", [0.0, 0.5])
def test_drop_path(drop_prob):
m = DropPath(drop_prob)
assert isinstance(m, M.Module)
m.training = True
m(mge.random.normal(size=(2, 4, 8, 8)))
m.training = False
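    # In eval mode, DropPath must act as the identity mapping.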
x = np.random.rand(2, 4, 8, 8).astype("float32")
y = m(mge.Tensor(x)).numpy()
np.testing.assert_allclose(y, x, rtol=1e-4, atol=1e-6)
@pytest.mark.parametrize("shape", [1, (7, 7)])
def test_gap2d(shape):
m = gap2d(shape)
assert isinstance(m, M.AdaptiveAvgPool2d)
m(mge.random.normal(size=(2, 4, 8, 8)))
@pytest.mark.parametrize("w_in", [4])
@pytest.mark.parametrize("w_out", [8])
@pytest.mark.parametrize("bias", [True, False])
def test_linear(w_in, w_out, bias):
m = linear(w_in, w_out, bias=bias)
assert isinstance(m, M.Linear)
m(mge.random.normal(size=(2, 8, 4)))
# TODO: "GN", "IN" and "LN" need different hyper-parameters
@pytest.mark.parametrize("name", [None, "BN", "SyncBN"])
@pytest.mark.parametrize("w_in", [4])
def test_norm2d(name, w_in):
m = norm2d(name, w_in)
assert isinstance(m, M.Module)
m(mge.random.normal(size=(2, 4, 8, 8)))
@pytest.mark.parametrize("k", [3, 5])
@pytest.mark.parametrize("stride", [1, 2])
@pytest.mark.parametrize("name", ["avg", "max"])
def test_pool2d(k, stride, name):
m = pool2d(k, stride=stride, name=name)
assert isinstance(m, M.Module)
m(mge.random.normal(size=(2, 4, 8, 8)))
@pytest.mark.parametrize("w_in", [8])
@pytest.mark.parametrize("w_se", [4])
@pytest.mark.parametrize("act_name", ["relu", "silu"])
def test_se(w_in, w_se, act_name):
m = SE(w_in, w_se, act_name)
assert isinstance(m, M.Module)
m(mge.random.normal(size=(2, 8, 8, 8)))
apis:
[
"megengine.random.normal",
"megengine.Tensor"
]
import numpy as nm
from sfepy.base.base import Struct
class SplineBox(Struct):
"""
    B-spline parametrization of a geometry. The geometry can be modified
    by moving the spline control points.
"""
@staticmethod
def mmax(x, y):
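        # Element-wise maximum of the vector x and the scalar y.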
n = len(x)
        aux = nm.zeros((2, n), dtype=int)
aux[0,:] = x[:]
aux[1,:] = nm.ones((1,n)) * y
out = nm.max(aux, axis=0)
return out
@staticmethod
def spsorted(meshsites, sites):
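        # For each site, return the number of meshsites preceding it in
        # the sorted union, i.e. the index of its knot span.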
n1 = len(meshsites)
n2 = len(sites)
aux = nm.zeros((n1 + n2,), dtype=nm.double)
aux[:n1] = meshsites
aux[n1:] = sites
inx = nm.argsort(aux)
out = nm.where(inx >= len(meshsites)) - nm.arange(len(sites))
return out[0]
@staticmethod
def augknt(knots, k, mults=1):
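        # Build a clamped knot vector: boundary knots repeated k times,
        # interior knots repeated `mults` times.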
if mults > 1:
aux = []
            for j in knots[1:-1]:
                aux += [j] * mults
else:
aux = knots[1:-1]
augknot = [knots[0]] * k + list(aux) + [knots[-1]] * k
return augknot
@staticmethod
def spcol(knots, k, tau):
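        # B-spline collocation matrix: values of all order-k basis
        # functions evaluated at the sites tau (Cox-de Boor recursion).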
npk = len(knots)
n = npk - k
nrows = tau.shape[0]
pts = tau
km1 = k - 1
savl = SplineBox.mmax(SplineBox.spsorted(knots[:n], pts), k)
b = nm.zeros((nrows, k), dtype=nm.double)
b[:,0] = nm.ones((nrows,), dtype=nm.double)
for j in range(km1):
            saved = nm.zeros((nrows,), dtype=nm.double)
for r in range(j+1):
tr = knots[savl + r] - pts
tl = pts - knots[savl + r - j - 1]
term = nm.double(b[:,r]) / (tr + tl)
b[:,r] = saved + tr * term
saved = tl * term
b[:,j+1] = saved
idx = nm.where((tau < knots[0]) | (tau > knots[npk-1]))[0]
if len(idx) > 0:
b[idx,:] = 0
idx1 = nm.tile(nm.arange(1-nrows, 1), (k,))
idx2 = nm.tile(nrows * savl, (k,))
idx3 = nm.tile(nrows * nm.arange(-km1, 1), (nrows, 1))
idx3 = nm.reshape(idx3, (nrows * k,), order='F')
width = n + km1 + km1
nn = nrows * width
cc = nm.zeros((nn,), dtype=nm.double)
idx = idx1 + idx2 + idx3 - 1
cc[idx] = b.reshape((len(idx),), order='F')
idx1 = nm.tile(nm.arange(1-nrows, 1), (n,))
idx2 = nm.tile(nrows * nm.arange(1, n + 1), (nrows, 1))
idx2 = nm.reshape(idx2, (nrows * n,), order='F')
idx = idx1 + idx2 - 1
colloc = cc[idx].reshape((nrows, n), order='F')
return colloc
@staticmethod
def create_spb(bbox, coors, nsg=None):
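        # Build the per-axis knot vectors and collocation matrices and
        # the regular grid of control points for the given bounding box.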
if type(bbox) is not nm.ndarray:
bbox = nm.array(bbox)
if type(coors) is not nm.ndarray:
coors = nm.array(coors)
dim = coors.shape[1]
axes = []
if nsg is None:
            nsg = nm.ones((dim,), dtype=int)
else:
nsg = nm.array(nsg)
cpt = []
ncpt = []
for idim in range(dim):
axes.append({})
aux = nm.linspace(bbox[idim,0], bbox[idim,1], nsg[idim] + 1)
knots = nm.array(SplineBox.augknt(aux, 4))
axes[idim]['knots'] = knots
nn = 4 + nsg[idim] - 1
ncpt.append(nn)
cpt.append(nm.zeros((nn,), dtype=nm.double))
for j in range(nn):
cpt[idim][j] = nm.sum(knots[(j+1):(j+4)]) / 3.0
inx = nm.argsort(coors[:,idim])
aux = SplineBox.spcol(knots, 4, coors[inx,idim])
axes[idim]['bsc'] = nm.zeros_like(aux)
axes[idim]['bsc'][inx,:] = aux[:,:]
ncpts = nm.prod(ncpt)
cpoints = nm.zeros((ncpts, dim), dtype=nm.double)
        cpoints_idx = nm.zeros(ncpt, dtype=int)
n2 = nm.prod(ncpt[:2])
aux = nm.arange(n2).reshape(ncpt[:2], order='F')
if dim == 2:
idx = nm.mgrid[0:ncpt[1],0:ncpt[0]]
cpoints[:,0] = cpt[0][idx[1].reshape((ncpts,))]
cpoints[:,1] = cpt[1][idx[0].reshape((ncpts,))]
cpoints_idx[:,:] = aux
elif dim == 3:
idx = nm.mgrid[0:ncpt[2],0:ncpt[1],0:ncpt[0]]
cpoints[:,0] = cpt[0][idx[2].reshape((ncpts,))]
cpoints[:,1] = cpt[1][idx[1].reshape((ncpts,))]
cpoints[:,2] = cpt[2][idx[0].reshape((ncpts,))]
for j in range(ncpt[2]):
cpoints_idx[:,:,j] = aux + j * n2
return axes, cpoints, cpoints_idx
def __init__(self, bbox, coors,
name='spbox', **kwargs):
"""
Create a SplineBox.
Parameters
----------
bbox : array
Mesh bounding box.
coors : array
Coordinates of mesh nodes.
name : str
Object name.
"""
Struct.__init__(self, name=name, **kwargs)
self.axes, self.control_points, self.control_points_idx\
= self.create_spb(bbox, coors)
self.dim = self.control_points.shape[1]
self.coors_shape = (self.axes[0]['bsc'].shape[0], self.dim)
self.control_points0 = self.control_points.copy()
def get_control_points(self, init=False):
"""
        Get the spline control points coordinates.

        Parameters
        ----------
        init : bool
            If True, return the initial control points.

        Returns
        -------
        cpt_coors : array
            The coordinates of the spline control points.
"""
if init:
return self.control_points0
else:
return self.control_points
def set_control_points(self, cpt_coors, add=False):
"""
Set spline control points position.
Parameters
----------
cpt_coors : array
The coordinates of the spline control points.
add : bool
If True, coors += cpt_coors
"""
if add:
self.control_points += cpt_coors
else:
self.control_points = cpt_coors.copy()
def change_shape(self, cpoint, val):
"""
Change shape of spline parametrization.
Parameters
----------
cpoint : list
The indices of the spline control point.
val : array
Displacement.
"""
idx = self.control_points_idx[cpoint]
self.control_points[idx] += val
def evaluate(self, cp_coors=None):
"""
        Evaluate the SplineBox.

        Parameters
        ----------
        cp_coors : array, optional
            If not None, use as the control points coordinates.

        Returns
        -------
        coors : array
            The coordinates corresponding to the actual spline control
            points position.
"""
coors = nm.zeros((self.axes[0]['bsc'].shape[0], self.dim),
dtype=nm.double)
if cp_coors is None:
cp_coors = self.control_points
cptidx = self.control_points_idx
nn = self.control_points_idx.shape
if self.dim == 2:
for i in range(nn[0]):
for j in range(nn[1]):
aux = self.axes[0]['bsc'][:,i] * self.axes[1]['bsc'][:,j]
inx = cptidx[i,j]
coors += nm.dot(aux[:,nm.newaxis],
cp_coors[inx,:][nm.newaxis,:])
elif self.dim == 3:
for i in range(nn[0]):
for j in range(nn[1]):
aux = self.axes[0]['bsc'][:,i] * self.axes[1]['bsc'][:,j]
for k in range(nn[2]):
inx = cptidx[i,j,k]
aux2 = aux * self.axes[2]['bsc'][:,k]
coors += nm.dot(aux2[:,nm.newaxis],
cp_coors[inx,:][nm.newaxis,:])
return coors
def dvelocity(self, cpoint, dir):
"""
Evaluate derivative of spline in a given control point and direction.
Parameters
----------
cpoint : list
The indices of the spline control point.
dir : array
The directional vector.
Returns
-------
dvel : array
The design velocity field.
"""
if type(dir) is not nm.ndarray:
dir = nm.array(dir)
dvel = nm.zeros((self.axes[0]['bsc'].shape[0], self.dim),
dtype=nm.double)
ax = self.axes
if self.dim == 2:
aux = ax[0]['bsc'][:,cpoint[0]] * ax[1]['bsc'][:,cpoint[1]]
dvel = nm.dot(aux[:,nm.newaxis], dir[nm.newaxis,:])
elif self.dim == 3:
aux = ax[0]['bsc'][:,cpoint[0]] * ax[1]['bsc'][:,cpoint[1]]\
* ax[2]['bsc'][:,cpoint[2]]
dvel = nm.dot(aux[:,nm.newaxis], dir[nm.newaxis,:])
return dvel
def write_vtk(self, filename):
cptidx = self.control_points_idx
ncpt = cptidx.shape
nnd = nm.prod(ncpt)
f = open(filename, 'w')
f.write("# vtk DataFile Version 2.6\nspbox file\n"
"ASCII\nDATASET UNSTRUCTURED_GRID\n\n")
f.write("POINTS %d float\n" % nnd)
if self.dim == 2:
nel = (ncpt[0] - 1) * ncpt[1] + (ncpt[1] - 1) * ncpt[0]
for cpt in self.control_points:
f.write("%e %e 0.0\n" % (cpt[0], cpt[1]))
f.write("\nCELLS %d %d\n" % (nel, 3 * nel))
for i in range(ncpt[0]):
for j in range(ncpt[1] - 1):
inx1 = cptidx[i,j]
inx2 = cptidx[i,j + 1]
f.write("2 %d %d\n" % (inx1, inx2))
for i in range(ncpt[0] - 1):
for j in range(ncpt[1]):
inx1 = cptidx[i,j]
inx2 = cptidx[i + 1,j]
f.write("2 %d %d\n" % (inx1, inx2))
elif self.dim == 3:
nel = ((ncpt[0] - 1) * ncpt[1] + (ncpt[1] - 1) * ncpt[0]) * ncpt[2]
nel += ncpt[0] * ncpt[1] * (ncpt[2] - 1)
for cpt in self.control_points:
f.write("%e %e %e\n" % (cpt[0], cpt[1], cpt[2]))
f.write("\nCELLS %d %d\n" % (nel, 3 * nel))
for k in range(ncpt[2]):
for i in range(ncpt[0]):
for j in range(ncpt[1] - 1):
inx1 = cptidx[i, j, k]
inx2 = cptidx[i, j + 1, k]
f.write("2 %d %d\n" % (inx1, inx2))
for i in range(ncpt[0] - 1):
for j in range(ncpt[1]):
inx1 = cptidx[i, j, k]
inx2 = cptidx[i + 1, j, k]
f.write("2 %d %d\n" % (inx1, inx2))
for k in range(ncpt[2] - 1):
for i in range(ncpt[0]):
for j in range(ncpt[1]):
inx1 = cptidx[i, j, k]
inx2 = cptidx[i, j, k + 1]
f.write("2 %d %d\n" % (inx1, inx2))
f.write("\nCELL_TYPES %d\n" % nel )
f.write("3\n" * nel)
f.close()
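A minimal usage sketch (the coordinates here are hypothetical; any 2D
points inside the given bounding box would do):

    import numpy as nm
    coors = nm.random.rand(100, 2)  # hypothetical mesh nodes in [0, 1]^2
    box = SplineBox([[0., 1.], [0., 1.]], coors)
    box.change_shape((1, 1), nm.array([0.1, 0.0]))  # move one control point
    new_coors = box.evaluate()  # deformed node coordinates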
apis:
[
"sfepy.base.base.Struct.__init__"
]
from __future__ import absolute_import
import os.path as op
import numpy as nm
from sfepy import data_dir
from sfepy.base.testing import TestCommon
import six
def init_vec(variables):
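    # Random test vector spanning all DOFs of all variables.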
return nm.random.rand(variables.di.ptr[-1])
def check_vec(self, vec, ii, ok, conds, variables):
from sfepy.discrete.common.dof_info import expand_nodes_to_equations
for var_name, var_conds in six.iteritems(conds.group_by_variables()):
var = variables[var_name]
for cond in var_conds:
cond.canonize_dof_names(var.dofs)
self.report('%d: %s %s: %s %s'
% (ii, var.name,
cond.name, cond.region.name, cond.dofs[0]))
nods = var.field.get_dofs_in_region(cond.region)
eq = expand_nodes_to_equations(nods, cond.dofs[0], var.dofs)
off = variables.di.indx[var_name].start
n_nod = len(nods)
for cdof, dof_name in enumerate(cond.dofs[0]):
idof = var.dofs.index(dof_name)
eqs = eq[n_nod * cdof : n_nod * (cdof + 1)]
_ok = nm.allclose(vec[off + eqs], idof,
atol=1e-14, rtol=0.0)
if not _ok:
self.report(' %s: failed! (all of %s == %f)'
% (dof_name, vec[off + eqs], idof))
ok = ok and _ok
return ok
class Test(TestCommon):
@staticmethod
def from_conf(conf, options):
from sfepy.discrete import FieldVariable, Variables, Problem
from sfepy.discrete.fem import Mesh, FEDomain, Field
mesh = Mesh.from_file(data_dir + '/meshes/2d/square_unit_tri.mesh')
domain = FEDomain('domain', mesh)
omega = domain.create_region('Omega', 'all')
domain.create_region('Left',
'vertices in (x < -0.499)',
'facet')
domain.create_region('LeftStrip',
'vertices in (x < -0.499)'
' & (y > -0.199) & (y < 0.199)',
'facet')
domain.create_region('LeftFix',
'r.Left -v r.LeftStrip',
'facet')
domain.create_region('Right',
'vertices in (x > 0.499)',
'facet')
domain.create_region('RightStrip',
'vertices in (x > 0.499)'
' & (y > -0.199) & (y < 0.199)',
'facet')
domain.create_region('RightFix',
'r.Right -v r.RightStrip',
'facet')
fu = Field.from_args('fu', nm.float64, 'vector', omega, approx_order=2)
u = FieldVariable('u', 'unknown', fu)
fp = Field.from_args('fp', nm.float64, 'scalar', omega, approx_order=2)
p = FieldVariable('p', 'unknown', fp)
pb = Problem('test', domain=domain, fields=[fu, fp],
auto_conf=False, auto_solvers=False)
test = Test(problem=pb, variables=Variables([u, p]),
conf=conf, options=options)
return test
def test_ics(self):
from sfepy.discrete.conditions import Conditions, InitialCondition
variables = self.variables
omega = self.problem.domain.regions['Omega']
all_ics = []
all_ics.append(InitialCondition('ic0', omega,
{'p.all' : 0.0}))
all_ics.append(InitialCondition('ic1', omega,
{'u.1' : 1.0}))
all_ics.append(InitialCondition('ic2', omega,
{'u.all' : nm.array([0.0, 1.0])}))
all_ics.append(InitialCondition('ic3', omega,
{'p.0' : 0.0,
'u.0' : 0.0, 'u.1' : 1.0}))
ok = True
for ii, ics in enumerate(all_ics):
if not isinstance(ics, list): ics = [ics]
ics = Conditions(ics)
variables.setup_initial_conditions(ics, functions=None)
vec = init_vec(variables)
variables.apply_ic(vec)
ok = check_vec(self, vec, ii, ok, ics, variables)
return ok
def test_ebcs(self):
from sfepy.discrete.conditions import Conditions, EssentialBC
variables = self.variables
regions = self.problem.domain.regions
all_ebcs = []
all_ebcs.append(EssentialBC('fix_u1', regions['LeftFix'],
{'u.all' : nm.array([0.0, 1.0])}))
all_ebcs.append(EssentialBC('fix_u2', regions['LeftStrip'],
{'u.0' : 0.0, 'u.1' : 1.0}))
all_ebcs.append(EssentialBC('fix_p1', regions['RightFix'],
{'p.all' : 0.0}))
all_ebcs.append(EssentialBC('fix_p2', regions['RightStrip'],
{'p.0' : 0.0}))
all_ebcs.append([EssentialBC('fix_p3', regions['Right'],
{'p.0' : 0.0}),
EssentialBC('fix_u3', regions['Left'],
{'u.0' : 0.0, 'u.1' : 1.0})])
ok = True
for ii, bcs in enumerate(all_ebcs):
if not isinstance(bcs, list): bcs = [bcs]
ebcs = Conditions(bcs)
variables.equation_mapping(ebcs=ebcs, epbcs=None,
ts=None, functions=None)
vec = init_vec(variables)
variables.apply_ebc(vec)
ok = check_vec(self, vec, ii, ok, ebcs, variables)
return ok
def test_epbcs(self):
from sfepy.discrete import Function, Functions
from sfepy.discrete.conditions import Conditions, PeriodicBC
from sfepy.discrete.common.dof_info import expand_nodes_to_equations
from sfepy.discrete.fem.periodic import match_y_line
variables = self.variables
regions = self.problem.domain.regions
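        # Wrap the raw coordinate-matching function so the periodic BC
        # below can refer to it by name.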
match_y_line = Function('match_y_line', match_y_line)
pbc = PeriodicBC('pbc', [regions['LeftStrip'], regions['RightStrip']],
{'u.[1,0]' : 'u.[0,1]'}, match='match_y_line')
functions = Functions([match_y_line])
epbcs = Conditions([pbc])
variables.equation_mapping(ebcs=None, epbcs=epbcs,
ts=None, functions=functions)
vec = init_vec(variables)
variables.apply_ebc(vec)
var = variables['u']
var_bcs = epbcs.group_by_variables()['u']
bc = var_bcs['pbc']
bc.canonize_dof_names(var.dofs)
nods0 = var.field.get_dofs_in_region(bc.regions[0])
nods1 = var.field.get_dofs_in_region(bc.regions[1])
coors0 = var.field.get_coor(nods0)
coors1 = var.field.get_coor(nods1)
i0, i1 = match_y_line(coors0, coors1)
eq0 = expand_nodes_to_equations(nods0[i0], bc.dofs[0], var.dofs)
eq1 = expand_nodes_to_equations(nods1[i1], bc.dofs[1], var.dofs)
ok = True
_ok = len(nm.setdiff1d(eq0, var.eq_map.master)) == 0
if not _ok:
self.report('master equations mismatch! (set(%s) == set(%s))'
% (eq0, var.eq_map.master))
ok = ok and _ok
_ok = len(nm.setdiff1d(eq1, var.eq_map.slave)) == 0
if not _ok:
self.report('slave equations mismatch! (set(%s) == set(%s))'
% (eq1, var.eq_map.slave))
ok = ok and _ok
off = variables.di.indx['u'].start
_ok = nm.allclose(vec[off + eq0], vec[off + eq1], atol=1e-14, rtol=0.0)
if not _ok:
self.report('periodicity test failed! (%s == %s)'
                        % (vec[off + eq0], vec[off + eq1]))
ok = ok and _ok
return ok
def test_save_ebc(self):
from sfepy.discrete import (FieldVariable, Integral,
Equation, Equations, Problem)
from sfepy.discrete.conditions import Conditions, EssentialBC
from sfepy.terms import Term
name = op.join(self.options.out_dir,
op.splitext(op.basename(__file__))[0])
integral = Integral('i', order=1)
u = self.variables['u']
v = FieldVariable('v', 'test', u.field, primary_var_name='u')
p = self.variables['p']
q = FieldVariable('q', 'test', p.field, primary_var_name='p')
regions = self.problem.domain.regions
omega = regions['Omega']
        # Problem.save_ebc() requires equations to be defined.
t1 = Term.new('dw_lin_elastic(v, u)',
integral, omega, v=v, u=u)
t2 = Term.new('dw_laplace(q, p)', integral, omega, q=q, p=p)
eq = Equation('aux', t1 + t2)
eqs = Equations([eq])
pb = Problem('test', equations=eqs, auto_solvers=False)
all_ebcs = []
all_ebcs.append(EssentialBC('fix_u1', regions['RightFix'],
{'u.all' : nm.array([0.0, 1.0])}))
all_ebcs.append(EssentialBC('fix_u2', regions['LeftStrip'],
{'u.0' : 0.0, 'u.1' : 1.0}))
all_ebcs.append(EssentialBC('fix_p1', regions['LeftFix'],
{'p.all' : 0.0}))
all_ebcs.append(EssentialBC('fix_p2', regions['RightStrip'],
{'p.0' : 0.0}))
ebcs = Conditions(all_ebcs)
pb.time_update(ebcs=ebcs)
pb.save_ebc(name + '_ebcs_f.vtk', ebcs=ebcs, force=True)
pb.save_ebc(name + '_ebcs.vtk', ebcs=ebcs, default=-1, force=False)
return True
apis:
[
"sfepy.discrete.Equation",
"sfepy.discrete.conditions.PeriodicBC",
"sfepy.discrete.conditions.EssentialBC",
"sfepy.discrete.fem.periodic.match_y_line",
"sfepy.discrete.fem.Mesh.from_file",
"sfepy.discrete.common.dof_info.expand_nodes_to_equations",
"sfepy.discrete.fem.Field.from_args",
"sfepy.discrete.Function",
"sfepy.terms.Term.new",
"sfepy.discrete.Equations",
"sfepy.discrete.Integral",
"sfepy.discrete.fem.FEDomain",
"sfepy.discrete.Functions",
"sfepy.discrete.Problem",
"sfepy.discrete.FieldVariable",
"sfepy.discrete.Variables",
"sfepy.discrete.conditions.Conditions",
"sfepy.discrete.conditions.InitialCondition"
]
from time import sleep
from sqlmodel import select
from icon_governance.config import settings
from icon_governance.log import logger
from icon_governance.models.preps import Prep
from icon_governance.utils.rpc import (
convert_hex_int,
get_preps_cps,
get_sponsors_record,
post_rpc_json,
)
def get_cps(session):
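    # Fetch the CPS sponsor records over RPC and store the grant counts
    # on the corresponding Prep rows.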
sponsors = post_rpc_json(get_sponsors_record())
if sponsors is None:
logger.info("No sponsors found from rpc.")
sleep(1)
return
for k, v in sponsors.items():
prep = session.get(Prep, k)
if prep is None:
logger.info("No preps found in db? Should not ever happen cuz of db_init.")
continue
prep.sponsored_cps_grants = convert_hex_int(v)
session.merge(prep)
session.commit()
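    # Flag the preps that are currently registered with CPS governance.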
result = session.execute(select(Prep))
preps = result.scalars().all()
cps_preps = post_rpc_json(get_preps_cps())
prep_list = [i["address"] for i in cps_preps]
for prep in preps:
if prep.address in prep_list:
prep.cps_governance = True
else:
prep.cps_governance = False
session.merge(prep)
session.commit()
def cps_cron(session):
while True:
logger.info("Starting cps cron")
get_cps(session)
logger.info("CPS cron ran.")
sleep(settings.CRON_SLEEP_SEC * 10)
if __name__ == "__main__":
from icon_governance.db import session_factory
get_cps(session_factory())
apis:
[
"sqlmodel.select"
]
from typing import List, Optional
from fastapi import Depends, FastAPI, HTTPException, Query
from sqlmodel import Field, Relationship, Session, SQLModel, create_engine, select
class TeamBase(SQLModel):
name: str = Field(index=True)
headquarters: str
class Team(TeamBase, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
heroes: List["Hero"] = Relationship(back_populates="team")
class TeamCreate(TeamBase):
pass
class TeamRead(TeamBase):
id: int
class TeamUpdate(SQLModel):
id: Optional[int] = None
name: Optional[str] = None
headquarters: Optional[str] = None
class HeroBase(SQLModel):
name: str = Field(index=True)
secret_name: str
age: Optional[int] = Field(default=None, index=True)
team_id: Optional[int] = Field(default=None, foreign_key="team.id")
class Hero(HeroBase, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
team: Optional[Team] = Relationship(back_populates="heroes")
class HeroRead(HeroBase):
id: int
class HeroCreate(HeroBase):
pass
class HeroUpdate(SQLModel):
name: Optional[str] = None
secret_name: Optional[str] = None
age: Optional[int] = None
team_id: Optional[int] = None
sqlite_file_name = "database.db"
sqlite_url = f"sqlite:///{sqlite_file_name}"
connect_args = {"check_same_thread": False}
engine = create_engine(sqlite_url, echo=True, connect_args=connect_args)
def create_db_and_tables():
SQLModel.metadata.create_all(engine)
def get_session():
with Session(engine) as session:
yield session
app = FastAPI()
@app.on_event("startup")
def on_startup():
create_db_and_tables()
@app.post("/heroes/", response_model=HeroRead)
def create_hero(*, session: Session = Depends(get_session), hero: HeroCreate):
db_hero = Hero.from_orm(hero)
session.add(db_hero)
session.commit()
session.refresh(db_hero)
return db_hero
@app.get("/heroes/", response_model=List[HeroRead])
def read_heroes(
*,
session: Session = Depends(get_session),
offset: int = 0,
    limit: int = Query(default=100, le=100),
):
heroes = session.exec(select(Hero).offset(offset).limit(limit)).all()
return heroes
@app.get("/heroes/{hero_id}", response_model=HeroRead)
def read_hero(*, session: Session = Depends(get_session), hero_id: int):
hero = session.get(Hero, hero_id)
if not hero:
raise HTTPException(status_code=404, detail="Hero not found")
return hero
@app.patch("/heroes/{hero_id}", response_model=HeroRead)
def update_hero(
*, session: Session = Depends(get_session), hero_id: int, hero: HeroUpdate
):
db_hero = session.get(Hero, hero_id)
if not db_hero:
raise HTTPException(status_code=404, detail="Hero not found")
hero_data = hero.dict(exclude_unset=True)
for key, value in hero_data.items():
setattr(db_hero, key, value)
session.add(db_hero)
session.commit()
session.refresh(db_hero)
return db_hero
@app.delete("/heroes/{hero_id}")
def delete_hero(*, session: Session = Depends(get_session), hero_id: int):
hero = session.get(Hero, hero_id)
if not hero:
raise HTTPException(status_code=404, detail="Hero not found")
session.delete(hero)
session.commit()
return {"ok": True}
@app.post("/teams/", response_model=TeamRead)
def create_team(*, session: Session = Depends(get_session), team: TeamCreate):
db_team = Team.from_orm(team)
session.add(db_team)
session.commit()
session.refresh(db_team)
return db_team
@app.get("/teams/", response_model=List[TeamRead])
def read_teams(
*,
session: Session = Depends(get_session),
offset: int = 0,
    limit: int = Query(default=100, le=100),
):
teams = session.exec(select(Team).offset(offset).limit(limit)).all()
return teams
@app.get("/teams/{team_id}", response_model=TeamRead)
def read_team(*, team_id: int, session: Session = Depends(get_session)):
team = session.get(Team, team_id)
if not team:
raise HTTPException(status_code=404, detail="Team not found")
return team
@app.patch("/teams/{team_id}", response_model=TeamRead)
def update_team(
*,
session: Session = Depends(get_session),
team_id: int,
team: TeamUpdate,
):
db_team = session.get(Team, team_id)
if not db_team:
raise HTTPException(status_code=404, detail="Team not found")
team_data = team.dict(exclude_unset=True)
for key, value in team_data.items():
setattr(db_team, key, value)
session.add(db_team)
session.commit()
session.refresh(db_team)
return db_team
@app.delete("/teams/{team_id}")
def delete_team(*, session: Session = Depends(get_session), team_id: int):
team = session.get(Team, team_id)
if not team:
raise HTTPException(status_code=404, detail="Team not found")
session.delete(team)
session.commit()
return {"ok": True}
apis:
[
"sqlmodel.Relationship",
"sqlmodel.create_engine",
"sqlmodel.Session",
"sqlmodel.SQLModel.metadata.create_all",
"sqlmodel.Field",
"sqlmodel.select"
]
import os.path as op
from sfepy.base.base import *
from sfepy.base.conf import transform_variables, transform_fields
from sfepy.base.testing import TestCommon
variables = {
'u' : ('unknown field', 'f', 0),
'v' : ('test field', 'f', 'u'),
}
def in_dir(adir):
return lambda x: op.join(adir, x)
def do_interpolation(m2, m1, data, field_name):
"""Interpolate data from m1 to m2. """
from sfepy.fem import Domain, Field, Variables
fields = {
'scalar_si' : ((1,1), 'Omega', 2),
'vector_si' : ((3,1), 'Omega', 2),
'scalar_tp' : ((1,1), 'Omega', 1),
'vector_tp' : ((3,1), 'Omega', 1),
}
d1 = Domain('d1', m1)
omega1 = d1.create_region('Omega', 'all')
f = fields[field_name]
field1 = Field('f', nm.float64, f[0], d1.regions[f[1]], approx_order=f[2])
ff = {field1.name : field1}
vv = Variables.from_conf(transform_variables(variables), ff)
u1 = vv['u']
u1.set_from_mesh_vertices(data)
d2 = Domain('d2', m2)
omega2 = d2.create_region('Omega', 'all')
field2 = Field('f', nm.float64, f[0], d2.regions[f[1]], approx_order=f[2])
ff2 = {field2.name : field2}
vv2 = Variables.from_conf(transform_variables(variables), ff2)
u2 = vv2['u']
    # Interpolation is performed if the other field differs from self.field,
    # or, in particular, is defined on a different mesh (see the sketch below).
u2.set_from_other(u1, strategy='interpolation', close_limit=0.5)
return u1, u2
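# A conceptual sketch (scipy is an assumption here, not what sfepy uses
# internally) of what the 'interpolation' strategy above amounts to:
# evaluating vertex data of one mesh at the vertex coordinates of another.
def interpolation_sketch(src_coors, src_data, dst_coors):
    from scipy.interpolate import griddata
    # 'linear' mimics P1 interpolation; points outside the source mesh give
    # NaN, which is roughly what close_limit guards against in set_from_other.
    return griddata(src_coors, src_data, dst_coors, method='linear')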
class Test(TestCommon):
@staticmethod
def from_conf(conf, options):
test = Test(conf=conf, options=options)
return test
def test_interpolation(self):
from sfepy import data_dir
from sfepy.fem import Mesh
from sfepy.linalg import make_axis_rotation_matrix
fname = in_dir(self.options.out_dir)
meshes = {
'tp' : Mesh('original mesh', data_dir + '/meshes/3d/block.mesh'),
'si' : Mesh('original mesh', data_dir + '/meshes/3d/cylinder.mesh'),
}
datas = {}
        for key, mesh in meshes.items():
bbox = mesh.get_bounding_box()
nx = bbox[1,0] - bbox[0,0]
centre = 0.5 * bbox.sum(axis=0)
mesh.coors -= centre
data = nm.sin(4.0 * nm.pi * mesh.coors[:,0:1] / nx)
datas['scalar_' + key] = data
data = nm.zeros_like(mesh.coors)
data[:,0] = 0.05 * nx * nm.sin(4.0 * nm.pi * mesh.coors[:,0] / nx)
data[:,2] = 0.05 * nx * nm.cos(4.0 * nm.pi * mesh.coors[:,0] / nx)
datas['vector_' + key] = data
for field_name in ['scalar_si', 'vector_si', 'scalar_tp', 'vector_tp']:
m1 = meshes[field_name[-2:]]
for ia, angle in enumerate(nm.linspace(0.0, nm.pi, 11)):
self.report('%s: %d. angle: %f' % (field_name, ia, angle))
shift = [0.0, 0.0, 0.0]
mtx = make_axis_rotation_matrix([0, 1, 0], angle)
m2 = m1.copy('rotated mesh')
m2.transform_coors(mtx)
data = datas[field_name]
u1, u2 = do_interpolation(m2, m1, data, field_name)
if ia == 0:
u1.save_as_mesh(fname('test_mesh_interp_%s_u1.vtk'
% field_name))
u2.save_as_mesh(fname('test_mesh_interp_%s_u2.%03d.vtk'
% (field_name, ia)))
return True
def test_interpolation_two_meshes(self):
from sfepy import data_dir
from sfepy.fem import Mesh, Domain, Field, Variables
m1 = Mesh('source mesh', data_dir + '/meshes/3d/block.mesh')
m2 = Mesh('target mesh', data_dir + '/meshes/3d/cube_medium_tetra.mesh')
m2.coors *= 2.0
bbox = m1.get_bounding_box()
dd = bbox[1,:] - bbox[0,:]
data = nm.sin(4.0 * nm.pi * m1.coors[:,0:1] / dd[0]) \
* nm.cos(4.0 * nm.pi * m1.coors[:,1:2] / dd[1])
variables1 = {
'u' : ('unknown field', 'scalar_tp', 0),
'v' : ('test field', 'scalar_tp', 'u'),
}
variables2 = {
'u' : ('unknown field', 'scalar_si', 0),
'v' : ('test field', 'scalar_si', 'u'),
}
d1 = Domain('d1', m1)
omega1 = d1.create_region('Omega', 'all')
field1 = Field('scalar_tp', nm.float64, (1,1), omega1, approx_order=1)
ff1 = {field1.name : field1}
d2 = Domain('d2', m2)
omega2 = d2.create_region('Omega', 'all')
field2 = Field('scalar_si', nm.float64, (1,1), omega2, approx_order=0)
ff2 = {field2.name : field2}
vv1 = Variables.from_conf(transform_variables(variables1), ff1)
u1 = vv1['u']
u1.set_from_mesh_vertices(data)
vv2 = Variables.from_conf(transform_variables(variables2), ff2)
u2 = vv2['u']
        # Interpolation is performed if the other field differs from self.field,
        # or, in particular, is defined on a different mesh.
u2.set_from_other(u1, strategy='interpolation', close_limit=0.1)
fname = in_dir(self.options.out_dir)
u1.save_as_mesh(fname('test_mesh_interp_block_scalar.vtk'))
u2.save_as_mesh(fname('test_mesh_interp_cube_scalar.vtk'))
return True
|
[
"sfepy.fem.Domain",
"sfepy.base.conf.transform_variables",
"sfepy.linalg.make_axis_rotation_matrix",
"sfepy.fem.Mesh",
"sfepy.fem.Field"
] |
[((669, 685), 'sfepy.fem.Domain', 'Domain', (['"""d1"""', 'm1'], {}), "('d1', m1)\n", (675, 685), False, 'from sfepy.fem import Mesh, Domain, Field, Variables\n'), ((775, 840), 'sfepy.fem.Field', 'Field', (['"""f"""', 'nm.float64', 'f[0]', 'd1.regions[f[1]]'], {'approx_order': 'f[2]'}), "('f', nm.float64, f[0], d1.regions[f[1]], approx_order=f[2])\n", (780, 840), False, 'from sfepy.fem import Mesh, Domain, Field, Variables\n'), ((1002, 1018), 'sfepy.fem.Domain', 'Domain', (['"""d2"""', 'm2'], {}), "('d2', m2)\n", (1008, 1018), False, 'from sfepy.fem import Mesh, Domain, Field, Variables\n'), ((1079, 1144), 'sfepy.fem.Field', 'Field', (['"""f"""', 'nm.float64', 'f[0]', 'd2.regions[f[1]]'], {'approx_order': 'f[2]'}), "('f', nm.float64, f[0], d2.regions[f[1]], approx_order=f[2])\n", (1084, 1144), False, 'from sfepy.fem import Mesh, Domain, Field, Variables\n'), ((305, 321), 'os.path.join', 'op.join', (['adir', 'x'], {}), '(adir, x)\n', (312, 321), True, 'import os.path as op\n'), ((903, 933), 'sfepy.base.conf.transform_variables', 'transform_variables', (['variables'], {}), '(variables)\n', (922, 933), False, 'from sfepy.base.conf import transform_variables, transform_fields\n'), ((1209, 1239), 'sfepy.base.conf.transform_variables', 'transform_variables', (['variables'], {}), '(variables)\n', (1228, 1239), False, 'from sfepy.base.conf import transform_variables, transform_fields\n'), ((3657, 3712), 'sfepy.fem.Mesh', 'Mesh', (['"""source mesh"""', "(data_dir + '/meshes/3d/block.mesh')"], {}), "('source mesh', data_dir + '/meshes/3d/block.mesh')\n", (3661, 3712), False, 'from sfepy.fem import Mesh, Domain, Field, Variables\n'), ((3727, 3794), 'sfepy.fem.Mesh', 'Mesh', (['"""target mesh"""', "(data_dir + '/meshes/3d/cube_medium_tetra.mesh')"], {}), "('target mesh', data_dir + '/meshes/3d/cube_medium_tetra.mesh')\n", (3731, 3794), False, 'from sfepy.fem import Mesh, Domain, Field, Variables\n'), ((4340, 4356), 'sfepy.fem.Domain', 'Domain', (['"""d1"""', 'm1'], {}), "('d1', m1)\n", (4346, 4356), False, 'from sfepy.fem import Mesh, Domain, Field, Variables\n'), ((4424, 4486), 'sfepy.fem.Field', 'Field', (['"""scalar_tp"""', 'nm.float64', '(1, 1)', 'omega1'], {'approx_order': '(1)'}), "('scalar_tp', nm.float64, (1, 1), omega1, approx_order=1)\n", (4429, 4486), False, 'from sfepy.fem import Mesh, Domain, Field, Variables\n'), ((4537, 4553), 'sfepy.fem.Domain', 'Domain', (['"""d2"""', 'm2'], {}), "('d2', m2)\n", (4543, 4553), False, 'from sfepy.fem import Mesh, Domain, Field, Variables\n'), ((4621, 4683), 'sfepy.fem.Field', 'Field', (['"""scalar_si"""', 'nm.float64', '(1, 1)', 'omega2'], {'approx_order': '(0)'}), "('scalar_si', nm.float64, (1, 1), omega2, approx_order=0)\n", (4626, 4683), False, 'from sfepy.fem import Mesh, Domain, Field, Variables\n'), ((1875, 1932), 'sfepy.fem.Mesh', 'Mesh', (['"""original mesh"""', "(data_dir + '/meshes/3d/block.mesh')"], {}), "('original mesh', data_dir + '/meshes/3d/block.mesh')\n", (1879, 1932), False, 'from sfepy.fem import Mesh, Domain, Field, Variables\n'), ((1953, 2013), 'sfepy.fem.Mesh', 'Mesh', (['"""original mesh"""', "(data_dir + '/meshes/3d/cylinder.mesh')"], {}), "('original mesh', data_dir + '/meshes/3d/cylinder.mesh')\n", (1957, 2013), False, 'from sfepy.fem import Mesh, Domain, Field, Variables\n'), ((4755, 4786), 'sfepy.base.conf.transform_variables', 'transform_variables', (['variables1'], {}), '(variables1)\n', (4774, 4786), False, 'from sfepy.base.conf import transform_variables, transform_fields\n'), ((4890, 4921), 'sfepy.base.conf.transform_variables', 'transform_variables', (['variables2'], {}), '(variables2)\n', (4909, 4921), False, 'from sfepy.base.conf import transform_variables, transform_fields\n'), ((2944, 2987), 'sfepy.linalg.make_axis_rotation_matrix', 'make_axis_rotation_matrix', (['[0, 1, 0]', 'angle'], {}), '([0, 1, 0], angle)\n', (2969, 2987), False, 'from sfepy.linalg import make_axis_rotation_matrix\n')]
|
"""
dayong.models
~~~~~~~~~~~~~
A model maps to a single database table. It contains fields and behaviors of the data
stored in the database.
"""
from typing import Optional
from sqlmodel import Field, SQLModel
class Message(SQLModel):
"""Base model class for message table models."""
message_id: str
class AnonMessage(Message, table=True):
"""Table model for anonymized guild messages."""
# pyright cannot recognize the type of SQLModel.__tablename__
# See: https://github.com/tiangolo/sqlmodel/issues/98
__tablename__ = "anon_messages" # type: ignore
id: Optional[int] = Field(default=None, primary_key=True)
user_id: str
username: str
nickname: str
message: str
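# A minimal usage sketch (the in-memory sqlite URL is an assumption, not part
# of dayong): create the table and persist one anonymized message.
if __name__ == "__main__":
    from sqlmodel import Session, create_engine

    engine = create_engine("sqlite://")
    SQLModel.metadata.create_all(engine)
    with Session(engine) as session:
        session.add(
            AnonMessage(
                message_id="1",
                user_id="42",
                username="user",
                nickname="nick",
                message="hello",
            )
        )
        session.commit()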
|
[
"sqlmodel.Field"
] |
[((610, 647), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (615, 647), False, 'from sqlmodel import Field, SQLModel\n')]
|
import typing as t
if t.TYPE_CHECKING:
from ..core.discussions import DB_Discussion
from ..core.users import DB_User
from datetime import datetime
from sqlmodel import SQLModel, Field, Relationship
class DB_TagUser(SQLModel, table=True):
"""
Represents a tag-to-user relationship in the database.
"""
__tablename__ = 'tag_user'
user_id: t.Optional[int] = Field(default=None, primary_key=True, foreign_key='users.id')
"""The ID of the user."""
tag_id: t.Optional[int] = Field(default=None, primary_key=True, foreign_key='tags.id')
"""The ID of the tag."""
marked_as_read_at: t.Optional[datetime]
"""When the user marked the tag as read?"""
is_hidden: bool = Field(default=False)
"""?"""
class DB_Tag(SQLModel, table=True):
"""
Represents a tag in the database.
"""
__tablename__ = 'tags'
id: t.Optional[int] = Field(default=None, primary_key=True)
"""The ID of the tag. This is handled by the database."""
name: str = Field(max_length=100)
"""The name of the tag."""
slug: str = Field(max_length=100)
"""The tag's slug (will be used in URL)."""
description: t.Optional[t.Text]
"""The description of the tag."""
color: t.Optional[str] = Field(max_length=50)
"""The tag's color."""
background_path: t.Optional[str] = Field(max_length=100)
"""?"""
background_mode: t.Optional[str] = Field(max_length=100)
"""?"""
position: t.Optional[int]
"""The tag's position in the tag tree."""
parent_id: t.Optional[int] = Field(default=None, foreign_key='tags.id')
"""The ID of the parent tag."""
parent_tag: t.Optional['DB_Tag'] = Relationship(back_populates='children')
"""The tag's parent tag."""
default_sort: t.Optional[str]
"""The default sorting behaviour of the tag."""
is_restricted: bool = Field(default=False)
"""Whether or not the tag is restricted."""
is_hidden: bool = Field(default=False)
"""Whether or not the tag is hidden."""
discussion_count: int = Field(default=0)
"""How many discussions are tagged with this tag?"""
last_posted_at: t.Optional[datetime]
"""The datetime when was the last discussion posted in this tag."""
last_posted_discussion_id: t.Optional[int] = Field(default=None, foreign_key='discussions.id')
"""The ID of the last posted discussion in this tag."""
last_posted_discussion: t.Optional['DB_Discussion'] = Relationship(back_populates='tags')
"""The last posted discussion in this tag."""
last_posted_user_id: t.Optional[int] = Field(default=None, foreign_key='users.id')
"""The ID of the user that last posted in this tag."""
last_posted_user: t.Optional['DB_User'] = Relationship(back_populates='tags')
"""The user that last posted in this tag."""
icon: t.Optional[str] = Field(max_length=100)
"""The [FontAwesome](https://fontawesome.com/v5.15/icons?d=gallery&m=free) icon for the tag."""
users: t.List['DB_User'] = Relationship(back_populates='tags', link_model=DB_TagUser)
"""Users that have relationship with this tag."""
|
[
"sqlmodel.Field",
"sqlmodel.Relationship"
] |
[((394, 455), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)', 'foreign_key': '"""users.id"""'}), "(default=None, primary_key=True, foreign_key='users.id')\n", (399, 455), False, 'from sqlmodel import SQLModel, Field, Relationship\n'), ((516, 576), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)', 'foreign_key': '"""tags.id"""'}), "(default=None, primary_key=True, foreign_key='tags.id')\n", (521, 576), False, 'from sqlmodel import SQLModel, Field, Relationship\n'), ((721, 741), 'sqlmodel.Field', 'Field', ([], {'default': '(False)'}), '(default=False)\n', (726, 741), False, 'from sqlmodel import SQLModel, Field, Relationship\n'), ((905, 942), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (910, 942), False, 'from sqlmodel import SQLModel, Field, Relationship\n'), ((1022, 1043), 'sqlmodel.Field', 'Field', ([], {'max_length': '(100)'}), '(max_length=100)\n', (1027, 1043), False, 'from sqlmodel import SQLModel, Field, Relationship\n'), ((1091, 1112), 'sqlmodel.Field', 'Field', ([], {'max_length': '(100)'}), '(max_length=100)\n', (1096, 1112), False, 'from sqlmodel import SQLModel, Field, Relationship\n'), ((1265, 1285), 'sqlmodel.Field', 'Field', ([], {'max_length': '(50)'}), '(max_length=50)\n', (1270, 1285), False, 'from sqlmodel import SQLModel, Field, Relationship\n'), ((1353, 1374), 'sqlmodel.Field', 'Field', ([], {'max_length': '(100)'}), '(max_length=100)\n', (1358, 1374), False, 'from sqlmodel import SQLModel, Field, Relationship\n'), ((1426, 1447), 'sqlmodel.Field', 'Field', ([], {'max_length': '(100)'}), '(max_length=100)\n', (1431, 1447), False, 'from sqlmodel import SQLModel, Field, Relationship\n'), ((1570, 1612), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'foreign_key': '"""tags.id"""'}), "(default=None, foreign_key='tags.id')\n", (1575, 1612), False, 'from sqlmodel import SQLModel, Field, Relationship\n'), ((1688, 1727), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""children"""'}), "(back_populates='children')\n", (1700, 1727), False, 'from sqlmodel import SQLModel, Field, Relationship\n'), ((1874, 1894), 'sqlmodel.Field', 'Field', ([], {'default': '(False)'}), '(default=False)\n', (1879, 1894), False, 'from sqlmodel import SQLModel, Field, Relationship\n'), ((1965, 1985), 'sqlmodel.Field', 'Field', ([], {'default': '(False)'}), '(default=False)\n', (1970, 1985), False, 'from sqlmodel import SQLModel, Field, Relationship\n'), ((2059, 2075), 'sqlmodel.Field', 'Field', ([], {'default': '(0)'}), '(default=0)\n', (2064, 2075), False, 'from sqlmodel import SQLModel, Field, Relationship\n'), ((2296, 2345), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'foreign_key': '"""discussions.id"""'}), "(default=None, foreign_key='discussions.id')\n", (2301, 2345), False, 'from sqlmodel import SQLModel, Field, Relationship\n'), ((2464, 2499), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""tags"""'}), "(back_populates='tags')\n", (2476, 2499), False, 'from sqlmodel import SQLModel, Field, Relationship\n'), ((2593, 2636), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'foreign_key': '"""users.id"""'}), "(default=None, foreign_key='users.id')\n", (2598, 2636), False, 'from sqlmodel import SQLModel, Field, Relationship\n'), ((2742, 2777), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""tags"""'}), "(back_populates='tags')\n", (2754, 2777), False, 'from sqlmodel import SQLModel, Field, Relationship\n'), ((2856, 2877), 'sqlmodel.Field', 'Field', ([], {'max_length': '(100)'}), '(max_length=100)\n', (2861, 2877), False, 'from sqlmodel import SQLModel, Field, Relationship\n'), ((3010, 3068), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""tags"""', 'link_model': 'DB_TagUser'}), "(back_populates='tags', link_model=DB_TagUser)\n", (3022, 3068), False, 'from sqlmodel import SQLModel, Field, Relationship\n')]
|
from typing import Optional, List
from pydantic import BaseModel, validator, ValidationError
from sqlmodel import SQLModel, Field
class User(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
username: str = Field(nullable=False, sa_column_kwargs={"unique": True})
superuser: bool = False
password: str
# Serializers
class ValidatePassword(BaseModel):
@validator('confirm_password', allow_reuse=True, check_fields=False)
def validate_password(cls, v, values, **kwargs):
if v != values['password']:
raise ValueError("Passwords don't match")
return v
class UserIn(ValidatePassword):
username: str
superuser: bool = False
password: str
confirm_password: str
class UserPatch(ValidatePassword):
superuser: Optional[bool]
password: Optional[str]
confirm_password: Optional[str]
class UserOut(BaseModel):
username: str
superuser: bool
UserOutList = List[UserOut]
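# Quick illustration (plain pydantic v1 behavior): mismatched passwords are
# rejected by the validator above.
if __name__ == "__main__":
    try:
        UserIn(username="alice", password="s3cret", confirm_password="oops")
    except ValidationError as exc:
        print(exc)  # reports "Passwords don't match" for confirm_password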
|
[
"sqlmodel.Field"
] |
[((190, 227), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (195, 227), False, 'from sqlmodel import SQLModel, Field\n'), ((248, 304), 'sqlmodel.Field', 'Field', ([], {'nullable': '(False)', 'sa_column_kwargs': "{'unique': True}"}), "(nullable=False, sa_column_kwargs={'unique': True})\n", (253, 304), False, 'from sqlmodel import SQLModel, Field\n'), ((408, 475), 'pydantic.validator', 'validator', (['"""confirm_password"""'], {'allow_reuse': '(True)', 'check_fields': '(False)'}), "('confirm_password', allow_reuse=True, check_fields=False)\n", (417, 475), False, 'from pydantic import BaseModel, validator, ValidationError\n')]
|
from pathlib import Path
from typing import List
import nonebot
import pytest
from nonebug import App
from .utils import make_fake_event, make_fake_message
@pytest.mark.asyncio
async def test_db(app: App):
"""测试数据库"""
from sqlmodel import select
from nonebot_plugin_datastore.db import create_session, init_db
from .example import Example, test
nonebot.load_plugin("tests.example")
await init_db()
async with create_session() as session:
session.add(Example(message="test"))
await session.commit()
async with create_session() as session:
statement = select(Example)
examples: List[Example] = (await session.exec(statement)).all() # type: ignore
assert len(examples) == 1
assert examples[0].message == "test"
message = make_fake_message()("/test")
event = make_fake_event(_message=message)()
async with app.test_matcher(test) as ctx:
bot = ctx.create_bot()
ctx.receive_event(bot, event)
async with create_session() as session:
statement = select(Example)
examples: List[Example] = (await session.exec(statement)).all() # type: ignore
assert len(examples) == 2
assert examples[1].message == "matcher"
@pytest.mark.asyncio
async def test_disable_db(nonebug_init: None, tmp_path: Path):
"""测试禁用数据库"""
import nonebot
config = nonebot.get_driver().config
    # Plugin data directories
config.datastore_cache_dir = tmp_path / "cache"
config.datastore_config_dir = tmp_path / "config"
config.datastore_data_dir = tmp_path / "data"
    # Disable the database
    config.datastore_enable_database = False
    # Load the plugin
nonebot.load_plugin("nonebot_plugin_datastore")
from nonebot_plugin_datastore import create_session
with pytest.raises(ValueError) as e:
async with create_session() as session:
pass
    assert str(e.value) == "数据库未启用"  # i.e., "the database is not enabled"
@pytest.mark.asyncio
async def test_default_db_url(nonebug_init: None):
"""测试默认数据库地址"""
import nonebot
    # Load the plugin
nonebot.load_plugin("nonebot_plugin_datastore")
from nonebot_plugin_datastore.config import BASE_DATA_DIR, plugin_config
assert (
plugin_config.datastore_database_url
== f"sqlite+aiosqlite:///{BASE_DATA_DIR / 'data.db'}"
)
|
[
"sqlmodel.select"
] |
[((372, 408), 'nonebot.load_plugin', 'nonebot.load_plugin', (['"""tests.example"""'], {}), "('tests.example')\n", (391, 408), False, 'import nonebot\n'), ((1667, 1714), 'nonebot.load_plugin', 'nonebot.load_plugin', (['"""nonebot_plugin_datastore"""'], {}), "('nonebot_plugin_datastore')\n", (1686, 1714), False, 'import nonebot\n'), ((2045, 2092), 'nonebot.load_plugin', 'nonebot.load_plugin', (['"""nonebot_plugin_datastore"""'], {}), "('nonebot_plugin_datastore')\n", (2064, 2092), False, 'import nonebot\n'), ((420, 429), 'nonebot_plugin_datastore.db.init_db', 'init_db', ([], {}), '()\n', (427, 429), False, 'from nonebot_plugin_datastore.db import create_session, init_db\n'), ((446, 462), 'nonebot_plugin_datastore.create_session', 'create_session', ([], {}), '()\n', (460, 462), False, 'from nonebot_plugin_datastore import create_session\n'), ((567, 583), 'nonebot_plugin_datastore.create_session', 'create_session', ([], {}), '()\n', (581, 583), False, 'from nonebot_plugin_datastore import create_session\n'), ((616, 631), 'sqlmodel.select', 'select', (['Example'], {}), '(Example)\n', (622, 631), False, 'from sqlmodel import select\n'), ((1024, 1040), 'nonebot_plugin_datastore.create_session', 'create_session', ([], {}), '()\n', (1038, 1040), False, 'from nonebot_plugin_datastore import create_session\n'), ((1073, 1088), 'sqlmodel.select', 'select', (['Example'], {}), '(Example)\n', (1079, 1088), False, 'from sqlmodel import select\n'), ((1396, 1416), 'nonebot.get_driver', 'nonebot.get_driver', ([], {}), '()\n', (1414, 1416), False, 'import nonebot\n'), ((1782, 1807), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1795, 1807), False, 'import pytest\n'), ((1833, 1849), 'nonebot_plugin_datastore.create_session', 'create_session', ([], {}), '()\n', (1847, 1849), False, 'from nonebot_plugin_datastore import create_session\n')]
|
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives.asymmetric.rsa import RSAPrivateKey
from typing import List, Optional
from sqlmodel import SQLModel, Field, Relationship, VARCHAR, Column
from pydantic import EmailStr
class UserGroup(SQLModel, table=True):
id: int = Field(default=None, primary_key=True)
name: str
users: List["User"] = Relationship(back_populates="group")
class User(SQLModel, table=True):
id: int = Field(default=None, primary_key=True)
username: str = Field(sa_column=Column('username', VARCHAR, unique=True))
email: EmailStr
password: str
first_name: str
last_name: str
public_key: str
private_key: str
profile_pic: Optional[str]
is_staff: bool = Field(default=False)
group_id: Optional[int] = Field(foreign_key="usergroup.id")
group: Optional[UserGroup] = Relationship(back_populates="users")
class RemoteUser(SQLModel, table=True):
id: int = Field(default=None, primary_key=True)
    remote_id: str = Field(sa_column=Column('remote_id', VARCHAR, unique=True))
inbox: str
public_key: Optional[str]
def generate_new_pk() -> RSAPrivateKey:
return rsa.generate_private_key(public_exponent=65537, key_size=4096)
def load_pk(pkstr: str) -> RSAPrivateKey:
    # Keys are stored as UTF-8 text, so encode back to bytes before loading.
    return serialization.load_pem_private_key(pkstr.encode("utf-8"), password=None)
def create_user(username: str, password: str, email: str, first_name: str, last_name: str, profile_pic: Optional[str]) -> User:
user = User(
username=username,
password=password,
email=email,
first_name=first_name,
last_name=last_name,
profile_pic=profile_pic,
)
pk = generate_new_pk()
pub_key = pk.public_key()
user.public_key = pub_key.public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo,
).decode('utf-8')
user.private_key = pk.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.PKCS8,
encryption_algorithm=serialization.NoEncryption()
).decode('utf-8')
return user
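# Usage sketch: the PEM keys are stored as text on the model, so they can be
# loaded back with load_pk() (all literal values below are illustrative).
if __name__ == "__main__":
    user = create_user(
        username="alice",
        password="x",
        email="alice@example.com",
        first_name="Alice",
        last_name="A",
        profile_pic=None,
    )
    pk = load_pk(user.private_key)
    assert isinstance(pk, RSAPrivateKey)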
|
[
"sqlmodel.Relationship",
"sqlmodel.Field",
"sqlmodel.Column"
] |
[((374, 411), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (379, 411), False, 'from sqlmodel import SQLModel, Field, Relationship, VARCHAR, Column\n'), ((453, 489), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""group"""'}), "(back_populates='group')\n", (465, 489), False, 'from sqlmodel import SQLModel, Field, Relationship, VARCHAR, Column\n'), ((540, 577), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (545, 577), False, 'from sqlmodel import SQLModel, Field, Relationship, VARCHAR, Column\n'), ((826, 846), 'sqlmodel.Field', 'Field', ([], {'default': '(False)'}), '(default=False)\n', (831, 846), False, 'from sqlmodel import SQLModel, Field, Relationship, VARCHAR, Column\n'), ((878, 911), 'sqlmodel.Field', 'Field', ([], {'foreign_key': '"""usergroup.id"""'}), "(foreign_key='usergroup.id')\n", (883, 911), False, 'from sqlmodel import SQLModel, Field, Relationship, VARCHAR, Column\n'), ((945, 981), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""users"""'}), "(back_populates='users')\n", (957, 981), False, 'from sqlmodel import SQLModel, Field, Relationship, VARCHAR, Column\n'), ((1038, 1075), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (1043, 1075), False, 'from sqlmodel import SQLModel, Field, Relationship, VARCHAR, Column\n'), ((1253, 1315), 'cryptography.hazmat.primitives.asymmetric.rsa.generate_private_key', 'rsa.generate_private_key', ([], {'public_exponent': '(65537)', 'key_size': '(4096)'}), '(public_exponent=65537, key_size=4096)\n', (1277, 1315), False, 'from cryptography.hazmat.primitives.asymmetric import rsa\n'), ((1376, 1432), 'cryptography.hazmat.primitives.serialization.load_pem_private_key', 'serialization.load_pem_private_key', (['pkstr'], {'password': 'None'}), '(pkstr, password=None)\n', (1410, 1432), False, 'from cryptography.hazmat.primitives import serialization\n'), ((614, 654), 'sqlmodel.Column', 'Column', (['"""username"""', 'VARCHAR'], {'unique': '(True)'}), "('username', VARCHAR, unique=True)\n", (620, 654), False, 'from sqlmodel import SQLModel, Field, Relationship, VARCHAR, Column\n'), ((1113, 1153), 'sqlmodel.Column', 'Column', (['"""username"""', 'VARCHAR'], {'unique': '(True)'}), "('username', VARCHAR, unique=True)\n", (1119, 1153), False, 'from sqlmodel import SQLModel, Field, Relationship, VARCHAR, Column\n'), ((2154, 2182), 'cryptography.hazmat.primitives.serialization.NoEncryption', 'serialization.NoEncryption', ([], {}), '()\n', (2180, 2182), False, 'from cryptography.hazmat.primitives import serialization\n')]
|
from typing import List, Union
from fastapi import APIRouter, Request
from fastapi.exceptions import HTTPException
from sqlmodel import Session, or_, select
from ..db import ActiveSession
from ..security import (
AdminUser,
AuthenticatedUser,
HashedPassword,
User,
UserCreate,
UserPasswordPatch,
UserResponse,
get_current_user,
)
router = APIRouter()
@router.get("/", response_model=List[UserResponse], dependencies=[AdminUser])
async def list_users(*, session: Session = ActiveSession):
users = session.exec(select(User)).all()
return users
@router.post("/", response_model=UserResponse, dependencies=[AdminUser])
async def create_user(*, session: Session = ActiveSession, user: UserCreate):
db_user = User.from_orm(user)
session.add(db_user)
session.commit()
session.refresh(db_user)
return db_user
@router.patch(
"/{user_id}/password/",
response_model=UserResponse,
dependencies=[AuthenticatedUser],
)
async def update_user_password(
*,
user_id: int,
session: Session = ActiveSession,
request: Request,
patch: UserPasswordPatch,
):
# Query the content
user = session.get(User, user_id)
if not user:
raise HTTPException(status_code=404, detail="User not found")
# Check the user can update the password
current_user: User = get_current_user(request=request)
if user.id != current_user.id and not current_user.superuser:
raise HTTPException(status_code=403, detail="You can't update this user password")
    if patch.password != patch.password_confirm:
raise HTTPException(status_code=400, detail="Passwords don't match")
# Update the password
user.password = HashedPassword(patch.password)
# Commit the session
session.commit()
session.refresh(user)
return user
@router.get("/me/", response_model=UserResponse)
async def my_profile(current_user: User = AuthenticatedUser):
return current_user
@router.get(
"/{user_id_or_username}/",
response_model=UserResponse,
dependencies=[AuthenticatedUser],
)
async def query_user(*, session: Session = ActiveSession, user_id_or_username: Union[str, int]):
    user = session.query(User).where(
        or_(
            User.id == user_id_or_username,
            User.username == user_id_or_username,
        )
    ).first()
    if not user:
        raise HTTPException(status_code=404, detail="User not found")
    return user
@router.delete("/{user_id}/", dependencies=[AdminUser])
def delete_user(*, session: Session = ActiveSession, request: Request, user_id: int):
user = session.get(User, user_id)
if not user:
raise HTTPException(status_code=404, detail="Content not found")
# Check the user is not deleting himself
current_user = get_current_user(request=request)
if user.id == current_user.id:
raise HTTPException(status_code=403, detail="You can't delete yourself")
session.delete(user)
session.commit()
return {"ok": True}
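# Hedged wiring sketch (the prefix and test flow are assumptions): this
# router is meant to be mounted on a FastAPI app; without credentials the
# AdminUser dependency should reject the call.
if __name__ == "__main__":
    from fastapi import FastAPI
    from fastapi.testclient import TestClient

    app = FastAPI()
    app.include_router(router, prefix="/user")
    client = TestClient(app)
    print(client.get("/user/").status_code)  # expect 401/403 unauthenticated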
|
[
"sqlmodel.select",
"sqlmodel.or_"
] |
[((374, 385), 'fastapi.APIRouter', 'APIRouter', ([], {}), '()\n', (383, 385), False, 'from fastapi import APIRouter, Request\n'), ((1229, 1284), 'fastapi.exceptions.HTTPException', 'HTTPException', ([], {'status_code': '(404)', 'detail': '"""User not found"""'}), "(status_code=404, detail='User not found')\n", (1242, 1284), False, 'from fastapi.exceptions import HTTPException\n'), ((1470, 1546), 'fastapi.exceptions.HTTPException', 'HTTPException', ([], {'status_code': '(403)', 'detail': '"""You can\'t update this user password"""'}), '(status_code=403, detail="You can\'t update this user password")\n', (1483, 1546), False, 'from fastapi.exceptions import HTTPException\n'), ((1615, 1677), 'fastapi.exceptions.HTTPException', 'HTTPException', ([], {'status_code': '(400)', 'detail': '"""Passwords don\'t match"""'}), '(status_code=400, detail="Passwords don\'t match")\n', (1628, 1677), False, 'from fastapi.exceptions import HTTPException\n'), ((2244, 2317), 'sqlmodel.or_', 'or_', (['(User.id == user_id_or_username)', '(User.username == user_id_or_username)'], {}), '(User.id == user_id_or_username, User.username == user_id_or_username)\n', (2247, 2317), False, 'from sqlmodel import Session, or_, select\n'), ((2391, 2446), 'fastapi.exceptions.HTTPException', 'HTTPException', ([], {'status_code': '(404)', 'detail': '"""User not found"""'}), "(status_code=404, detail='User not found')\n", (2404, 2446), False, 'from fastapi.exceptions import HTTPException\n'), ((2684, 2742), 'fastapi.exceptions.HTTPException', 'HTTPException', ([], {'status_code': '(404)', 'detail': '"""Content not found"""'}), "(status_code=404, detail='Content not found')\n", (2697, 2742), False, 'from fastapi.exceptions import HTTPException\n'), ((2890, 2956), 'fastapi.exceptions.HTTPException', 'HTTPException', ([], {'status_code': '(403)', 'detail': '"""You can\'t delete yourself"""'}), '(status_code=403, detail="You can\'t delete yourself")\n', (2903, 2956), False, 'from fastapi.exceptions import HTTPException\n'), ((550, 562), 'sqlmodel.select', 'select', (['User'], {}), '(User)\n', (556, 562), False, 'from sqlmodel import Session, or_, select\n')]
|
from fastapi import FastAPI
from api import router
from settings.datastore import dbengine
from sqlmodel import SQLModel
app = FastAPI()
# Include the app's API routers
router.add_routers(app)
SQLModel.metadata.create_all(dbengine)
@app.get("/")
def main():
return {
"message": "Welcome to pilot"
}
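# To serve this locally (a sketch; the "main:app" module path is an
# assumption about this file's name):
#   uvicorn main:app --reload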
|
[
"sqlmodel.SQLModel.metadata.create_all"
] |
[((127, 136), 'fastapi.FastAPI', 'FastAPI', ([], {}), '()\n', (134, 136), False, 'from fastapi import FastAPI\n'), ((162, 185), 'api.router.add_routers', 'router.add_routers', (['app'], {}), '(app)\n', (180, 185), False, 'from api import router\n'), ((186, 224), 'sqlmodel.SQLModel.metadata.create_all', 'SQLModel.metadata.create_all', (['dbengine'], {}), '(dbengine)\n', (214, 224), False, 'from sqlmodel import SQLModel\n')]
|
# MegFlow is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2019-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#!/usr/bin/env python
# coding=utf-8
from math import log
from loguru import logger
import megengine as mge
import cv2
import megengine.functional as F
import numpy as np
from .model import Model
if __name__ == "__main__":
import sys
if len(sys.argv) < 5:
print("usage: python3 -m reid_alignedreid/demo reid.pkl positive1.png positive2.png negtive.jpg")
sys.exit(0)
model = Model()
sd = mge.load(sys.argv[1])
model.load_state_dict(sd, strict=False)
model.eval()
feat1 = model.inference(cv2.imread(sys.argv[2]))
logger.info(f'{feat1}')
feat2 = model.inference(cv2.imread(sys.argv[3]))
logger.info(f'{feat2}')
feat3 = model.inference(cv2.imread(sys.argv[4]))
logger.info(f'{feat3}')
positive = np.linalg.norm(feat1-feat2)
print(f'distance_positive: {positive}')
    negative = np.linalg.norm(feat3-feat2)
    print(f'distance_negative: {negative}')
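    # A common follow-up (sketch; the 1.2 margin is an arbitrary assumption,
    # not part of the model): turn the distances into a same/different call.
    threshold = 1.2
    print(f'positive pair matches: {positive < threshold}')
    print(f'negative pair matches: {negative < threshold}')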
|
[
"megengine.load"
] |
[((778, 799), 'megengine.load', 'mge.load', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (786, 799), True, 'import megengine as mge\n'), ((918, 941), 'loguru.logger.info', 'logger.info', (['f"""{feat1}"""'], {}), "(f'{feat1}')\n", (929, 941), False, 'from loguru import logger\n'), ((999, 1022), 'loguru.logger.info', 'logger.info', (['f"""{feat2}"""'], {}), "(f'{feat2}')\n", (1010, 1022), False, 'from loguru import logger\n'), ((1081, 1104), 'loguru.logger.info', 'logger.info', (['f"""{feat3}"""'], {}), "(f'{feat3}')\n", (1092, 1104), False, 'from loguru import logger\n'), ((1121, 1150), 'numpy.linalg.norm', 'np.linalg.norm', (['(feat1 - feat2)'], {}), '(feat1 - feat2)\n', (1135, 1150), True, 'import numpy as np\n'), ((1208, 1237), 'numpy.linalg.norm', 'np.linalg.norm', (['(feat3 - feat2)'], {}), '(feat3 - feat2)\n', (1222, 1237), True, 'import numpy as np\n'), ((737, 748), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (745, 748), False, 'import sys\n'), ((889, 912), 'cv2.imread', 'cv2.imread', (['sys.argv[2]'], {}), '(sys.argv[2])\n', (899, 912), False, 'import cv2\n'), ((970, 993), 'cv2.imread', 'cv2.imread', (['sys.argv[3]'], {}), '(sys.argv[3])\n', (980, 993), False, 'import cv2\n'), ((1052, 1075), 'cv2.imread', 'cv2.imread', (['sys.argv[4]'], {}), '(sys.argv[4])\n', (1062, 1075), False, 'import cv2\n')]
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import itertools
import platform
from functools import partial
import numpy as np
import pytest
from utils import opr_test
import megengine.amp as amp
import megengine.config as config
import megengine.core.ops.builtin as builtin
import megengine.core.tensor.dtype as dtype
import megengine.functional as F
import megengine.jit as jit
from megengine import Parameter, Tensor, is_cuda_available, tensor
from megengine.core._trace_option import use_symbolic_shape
from megengine.core.autodiff.grad import Grad
from megengine.core.tensor.utils import make_shape_tuple
from megengine.device import get_device_count
from megengine.module import LayerNorm
def test_where():
maskv0 = np.array([[1, 0], [0, 1]], dtype=np.bool_)
xv0 = np.array([[1, np.inf], [np.nan, 4]], dtype=np.float32)
yv0 = np.array([[5, 6], [7, 8]], dtype=np.float32)
maskv1 = np.array([[1, 0, 1], [1, 0, 0], [1, 1, 0]], dtype=np.bool_)
xv1 = np.array([[1, np.inf, 2], [0, np.nan, 4], [1, 5, 7]], dtype=np.float32)
yv1 = np.array([[5, 6, 9], [2, 7, 8], [2, 1, 9]], dtype=np.float32)
cases = [
{"input": [maskv0, xv0, yv0]},
{"input": [maskv1, xv1, yv1]},
]
opr_test(cases, F.where, ref_fn=np.where, test_trace=False)
maskv2 = np.array([1, 1, 1], dtype=np.bool_)
xv2 = np.array([1, 3, 2], dtype=np.float32)
yv2 = np.array([5, 6, 9], dtype=np.float32)
maskv3 = np.array([0, 0, 0], dtype=np.bool_)
xv3 = np.array([1, 3, 2], dtype=np.float32)
yv3 = np.array([5, 6, 9], dtype=np.float32)
cases = [
{"input": [maskv2, xv2, yv2]},
{"input": [maskv3, xv3, yv3]},
]
opr_test(cases, F.where, ref_fn=np.where, test_trace=False)
def test_dropout():
from megengine.autodiff import GradManager
from megengine.core._imperative_rt.ops import set_global_rng_seed
def test_dropout_with_shape(shape, rate):
data = tensor(np.ones(shape, dtype=np.float32))
gm = GradManager().attach([data])
with gm:
out = F.nn.dropout(data, rate, training=True)
gm.backward(out, tensor(np.ones(shape, dtype=np.float32)))
assert not out.numpy().all()
np.testing.assert_allclose(out.numpy(), data.grad.numpy(), 1e-7, 1e-7)
def test_multiple_dropout(shape, rate):
data = tensor(np.ones(shape, dtype=np.float32))
gm = GradManager().attach([data])
with gm:
out1 = F.nn.dropout(data, rate, training=True)
out2 = F.nn.dropout(out1, rate, training=True)
out3 = F.nn.dropout(out2, rate, training=True)
gm.backward(out3, tensor(np.ones(shape, dtype=np.float32)))
np.testing.assert_allclose(out3.numpy(), data.grad.numpy(), 1e-7, 1e-7)
def test_dropout_seed(shape, rate):
data = tensor(np.random.randn(*shape), dtype="float32")
set_global_rng_seed(111)
out1 = F.nn.dropout(data, rate, training=True)
out2 = F.nn.dropout(data, rate, training=True)
assert not (out1.numpy() == out2.numpy()).all()
set_global_rng_seed(111)
out3 = F.nn.dropout(data, rate, training=True)
assert (out1.numpy() == out3.numpy()).all()
set_global_rng_seed(222)
out4 = F.nn.dropout(data, rate, training=True)
assert not (out1.numpy() == out4.numpy()).all()
test_dropout_with_shape([13, 17, 63, 21], 0.4)
test_dropout_with_shape([16, 32, 64], 0.3)
test_multiple_dropout([1024], 0.2)
test_dropout_seed([16, 32], 0.2)
def test_matinv():
shape1 = (5, 5)
shape2 = (3, 9, 9)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
# make matrix diagonally dominant for numerical stability
data1 += (np.eye(shape1[0]) * shape1[0]).astype("float32")
data2 += np.broadcast_to((np.eye(shape2[1]) * shape2[1]).astype("float32"), shape2)
cases = [
{"input": data1},
{"input": data2},
]
opr_test(
cases,
F.matinv,
compare_fn=lambda x, y: np.testing.assert_allclose(x.numpy(), y, rtol=1e-4),
ref_fn=np.linalg.inv,
)
def test_matmul():
shape1 = 3
shape2 = 3
shape3 = (3, 5)
shape4 = (5, 6)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
data3 = np.random.random(shape3).astype("float32")
data4 = np.random.random(shape4).astype("float32")
cases = [
{"input": [data1, data2]},
{"input": [data2, data3]},
{"input": [data3, data4]},
]
opr_test(cases, F.matmul, ref_fn=np.matmul)
batch_size = 10
shape1 = (2,)
shape2 = (batch_size, 2, 3)
shape3 = (batch_size, 3, 4)
shape4 = (batch_size, 10, 4, 2)
shape5 = (batch_size, 10, 2, 4)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
data3 = np.random.random(shape3).astype("float32")
data4 = np.random.random(shape4).astype("float32")
data5 = np.random.random(shape5).astype("float32")
cases = [
{"input": [data1, data2]},
{"input": [data2, data3]},
{"input": [data3, data4]},
{"input": [data4, data5]},
]
opr_test(cases, F.matmul, ref_fn=np.matmul)
opr_test(
[{"input": [data1, data4]}],
F.matmul,
ref_fn=lambda x, y: np.matmul(x, y.transpose(0, 1, 3, 2)),
transpose_b=True,
)
opr_test(
[{"input": [data3, data2]}],
F.matmul,
ref_fn=lambda x, y: np.matmul(x.transpose(0, 2, 1), y.transpose(0, 2, 1)),
transpose_a=True,
transpose_b=True,
)
@pytest.mark.parametrize(
"shape_a, shape_b", [((0,), (0,)), ((10, 0), (0, 10)), ((3, 10, 0), (3, 0, 10)),],
)
@pytest.mark.parametrize("is_symbolic", [None, True, False])
def test_matmul_empty_tensor(shape_a, shape_b, is_symbolic):
def func(a, b):
return F.matmul(a, b)
if is_symbolic is not None:
func = jit.trace(symbolic=is_symbolic)(func)
a = tensor(np.random.randn(*shape_a))
b = tensor(np.random.randn(*shape_b))
for _ in range(3):
out = func(a, b)
assert np.all(out.numpy() == 0)
if is_symbolic is None:
break
def test_interpolate():
def linear_interpolate():
inp = tensor(np.arange(1, 3, dtype=np.float32).reshape(1, 1, 2))
out = F.vision.interpolate(inp, scale_factor=2.0, mode="linear")
out2 = F.vision.interpolate(inp, 4, mode="linear")
np.testing.assert_allclose(
out.numpy(), np.array([[[1.0, 1.25, 1.75, 2.0]]], dtype=np.float32)
)
np.testing.assert_allclose(
out2.numpy(), np.array([[[1.0, 1.25, 1.75, 2.0]]], dtype=np.float32)
)
def many_batch_interpolate():
inp = tensor(np.arange(1, 9, dtype=np.float32).reshape(2, 1, 2, 2))
out = F.vision.interpolate(inp, [4, 4])
out2 = F.vision.interpolate(inp, scale_factor=2.0)
np.testing.assert_allclose(out.numpy(), out2.numpy())
def assign_corner_interpolate():
inp = tensor(np.arange(1, 5, dtype=np.float32).reshape(1, 1, 2, 2))
out = F.vision.interpolate(inp, [4, 4], align_corners=True)
out2 = F.vision.interpolate(inp, scale_factor=2.0, align_corners=True)
np.testing.assert_allclose(out.numpy(), out2.numpy())
def error_shape_linear_interpolate():
inp = tensor(np.arange(1, 5, dtype=np.float32).reshape(1, 1, 2, 2))
with pytest.raises(ValueError):
F.vision.interpolate(inp, scale_factor=2.0, mode="linear")
def inappropriate_scale_linear_interpolate():
inp = tensor(np.arange(1, 3, dtype=np.float32).reshape(1, 1, 2))
with pytest.raises(ValueError):
F.vision.interpolate(inp, scale_factor=[2.0, 3.0], mode="linear")
linear_interpolate()
many_batch_interpolate()
assign_corner_interpolate()
error_shape_linear_interpolate()
inappropriate_scale_linear_interpolate()
def _save_to(self, name="grad"):
def callback(grad):
setattr(self, name, grad)
return callback
def _gen_roi_inp():
inp_feat = np.random.randn(2, 32, 256, 256)
rois = np.zeros((4, 5))
rois[:, 0] = [0, 0, 1, 1]
rois[:, 1:3] = np.random.rand(4, 2) * 100
rois[:, 3:] = np.random.rand(4, 2) * 100 + 150
inp_feat = tensor(inp_feat)
rois = tensor(rois)
return inp_feat, rois
def test_roi_align():
inp_feat, rois = _gen_roi_inp()
grad = Grad().wrt(inp_feat, callback=_save_to(inp_feat))
output_shape = (7, 7)
out_feat = F.vision.roi_align(
inp_feat,
rois,
output_shape=output_shape,
mode="average",
spatial_scale=1.0 / 4,
sample_points=2,
aligned=True,
)
assert make_shape_tuple(out_feat.shape) == (
rois.shape[0],
inp_feat.shape[1],
*output_shape,
)
grad(out_feat, tensor(F.ones_like(out_feat)))
assert make_shape_tuple(inp_feat.grad.shape) == make_shape_tuple(inp_feat.shape)
def _gen_correlation(random=True, constant=1, image_shape=(2, 1, 160, 160)):
if random:
inp_feat1 = np.random.randn(
image_shape[0], image_shape[1], image_shape[2], image_shape[3]
)
inp_feat2 = np.random.randn(
image_shape[0], image_shape[1], image_shape[2], image_shape[3]
)
else:
inp_feat1 = np.ones(image_shape) * constant
inp_feat2 = np.ones(image_shape) * constant
return tensor(inp_feat1), tensor(inp_feat2)
def test_correlation():
    # test case 0: check the grad shape
data1, data2 = _gen_correlation()
grad = Grad().wrt(data1, callback=_save_to(data1))
out_feat = F.vision.correlation(
data1,
data2,
kernel_size=5,
max_displacement=4,
stride1=2,
stride2=2,
pad_size=2,
is_multiply=True,
)
grad(out_feat, tensor(F.ones_like(out_feat)))
assert make_shape_tuple(data1.grad.shape) == make_shape_tuple(data1.shape)
    # test case 1: from https://github.com/NVIDIA/flownet2-pytorch/issues/194
data1, data2 = _gen_correlation(random=False, image_shape=(1, 1, 3, 3))
out_feat = F.vision.correlation(
data1,
data2,
kernel_size=3,
max_displacement=0,
stride1=1,
stride2=1,
pad_size=0,
is_multiply=True,
)
assert abs(out_feat.sum() - 1) < 1e-9
    # test case 2: check subtraction of the same image
data1, data2 = _gen_correlation(random=False, image_shape=(1, 1, 3, 3))
out_feat = F.vision.correlation(
data1,
data2,
kernel_size=3,
max_displacement=0,
stride1=1,
stride2=1,
pad_size=0,
is_multiply=False,
)
assert out_feat.sum() < 1e-9
    # test case 3: check subtraction of the same image
data1, data2 = _gen_correlation(random=False, image_shape=(1, 1, 3, 3))
out_feat = F.vision.correlation(
data1,
data2,
kernel_size=3,
max_displacement=0,
stride1=1,
stride2=1,
pad_size=0,
is_multiply=False,
)
assert out_feat.sum() < 1e-9
    # test case 4: check correlation
data1, _ = _gen_correlation(
random=False, image_shape=(1, 1, 220, 220), constant=2.0
)
_, data2 = _gen_correlation(
random=False, image_shape=(1, 1, 220, 220), constant=1.0
)
out_feat = F.vision.correlation(
data1,
data2,
kernel_size=3,
max_displacement=2,
stride1=1,
stride2=2,
pad_size=0,
is_multiply=False,
)
assert abs(out_feat.mean() - 1) < 1e-9
def test_roi_pooling():
inp_feat, rois = _gen_roi_inp()
grad = Grad().wrt(inp_feat, callback=_save_to(inp_feat))
output_shape = (7, 7)
out_feat = F.vision.roi_pooling(
inp_feat, rois, output_shape=output_shape, mode="max", scale=1.0 / 4,
)
assert make_shape_tuple(out_feat.shape) == (
rois.shape[0],
inp_feat.shape[1],
*output_shape,
)
grad(out_feat, tensor(F.ones_like(out_feat)))
assert make_shape_tuple(inp_feat.grad.shape) == make_shape_tuple(inp_feat.shape)
def test_adaptive_avg_pool2d():
inp = tensor(np.arange(0, 16, dtype=np.float32).reshape(1, 1, 4, 4))
oshp = (2, 2)
grad = Grad().wrt(inp, callback=_save_to(inp))
outp = F.adaptive_avg_pool2d(inp, oshp,)
assert make_shape_tuple(outp.shape) == (inp.shape[0], inp.shape[1], *oshp,)
np.testing.assert_equal(
outp.numpy(), np.array([[[[2.5, 4.5], [10.5, 12.5]]]], dtype=np.float32)
)
grad(outp, tensor(F.ones_like(outp)))
assert make_shape_tuple(inp.grad.shape) == make_shape_tuple(inp.shape)
np.testing.assert_equal(
inp.grad.numpy(),
np.array(
[
[
[
[0.25, 0.25, 0.25, 0.25],
[0.25, 0.25, 0.25, 0.25],
[0.25, 0.25, 0.25, 0.25],
[0.25, 0.25, 0.25, 0.25],
]
]
],
dtype=np.float32,
),
)
def test_adaptive_max_pool2d():
inp = tensor(np.arange(0, 16, dtype=np.float32).reshape(1, 1, 4, 4))
oshp = (2, 2)
grad = Grad().wrt(inp, callback=_save_to(inp))
outp = F.adaptive_max_pool2d(inp, oshp,)
assert make_shape_tuple(outp.shape) == (inp.shape[0], inp.shape[1], *oshp,)
np.testing.assert_equal(
outp.numpy(), np.array([[[[5, 7], [13, 15]]]], dtype=np.float32)
)
grad(outp, tensor(F.ones_like(outp)))
assert make_shape_tuple(inp.grad.shape) == make_shape_tuple(inp.shape)
np.testing.assert_equal(
inp.grad.numpy(),
np.array(
[
[
[
[0.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 1.0],
[0.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 1.0],
]
]
],
dtype=np.float32,
),
)
def test_one_hot():
def onehot_low_dimension():
inp = tensor(np.arange(1, 4, dtype=np.int32))
out = F.one_hot(inp, num_classes=4)
np.testing.assert_allclose(
out.numpy(), np.eye(4, dtype=np.int32)[np.arange(1, 4, dtype=np.int32)]
)
def onehot_high_dimension():
arr = np.array(
[[3, 2, 4, 4, 2, 4, 0, 4, 4, 1], [4, 1, 1, 3, 2, 2, 4, 2, 4, 3]],
dtype=np.int32,
)
inp = tensor(arr)
out = F.one_hot(inp, 10)
np.testing.assert_allclose(out.numpy(), np.eye(10, dtype=np.int32)[arr])
onehot_low_dimension()
onehot_high_dimension()
def test_interpolate_fastpath():
# check shape
test_cases = [
[(1, 1, 10, 10), (5, 5)],
[(1, 3, 10, 10), (20, 20)],
[(10, 1, 10, 10), (1, 1)],
# [(10, 10, 1, 1), (10, 10)], # FIXME, it causes random CI failure
]
for inp_shape, target_shape in test_cases:
x = tensor(np.random.randn(*inp_shape), dtype=np.float32)
out = F.vision.interpolate(x, target_shape, mode="bilinear")
assert out.shape[0] == x.shape[0] and out.shape[1] == x.shape[1]
assert out.shape[2] == target_shape[0] and out.shape[3] == target_shape[1]
# check value
x = tensor(np.ones((3, 3, 10, 10)), dtype=np.float32)
out = F.vision.interpolate(x, (15, 5), mode="bilinear")
np.testing.assert_equal(out.numpy(), np.ones((3, 3, 15, 5)).astype(np.float32))
np_x = np.arange(32)
x = tensor(np_x).astype(np.float32).reshape(1, 1, 32, 1)
out = F.vision.interpolate(x, (1, 1), mode="bilinear")
np.testing.assert_equal(out.item(), np_x.mean())
@pytest.mark.parametrize("dt", [np.float32, np.int8, np.uint8, np.float16])
def test_warp_perspective(dt):
inp_shape = (1, 1, 4, 4)
x = tensor(np.arange(16, dtype=dt).reshape(inp_shape))
M_shape = (1, 3, 3)
    # M defines a translation: dst(1, 1, h, w) = src(1, 1, h+1, w+1)
M = tensor(
np.array(
[[1.0, 0.0, 1.0], [0.0, 1.0, 1.0], [0.0, 0.0, 1.0]], dtype=np.float32
).reshape(M_shape)
)
outp = F.vision.warp_perspective(x, M, (2, 2))
np.testing.assert_equal(outp.numpy(), np.array([[[[5, 6], [9, 10]]]], dtype=dt))
@pytest.mark.parametrize("dt", [np.float32, np.int8, np.uint8, np.float16])
def test_warp_perspective_mat_idx(dt):
inp_shape = (2, 1, 4, 4)
x = tensor(np.arange(32, dtype=dt).reshape(inp_shape))
M_shape = (1, 3, 3)
    # M defines a translation: dst(1, 1, h, w) = src(1, 1, h+1, w+1)
M = tensor(
np.array(
[[1.0, 0.0, 1.0], [0.0, 1.0, 1.0], [0.0, 0.0, 1.0]], dtype=np.float32
).reshape(M_shape)
)
M = F.concat([M,] * 4, 0)
outp = F.vision.warp_perspective(x, M, (2, 2), mat_idx=[0, 1, 1, 0])
np.testing.assert_equal(
outp.numpy(),
np.array(
[
[[[5, 6], [9, 10]]],
[[[21, 22], [25, 26]]],
[[[21, 22], [25, 26]]],
[[[5, 6], [9, 10]]],
],
dtype=dt,
),
)
def test_warp_affine():
inp_shape = (1, 3, 3, 3)
x = tensor(np.arange(27, dtype=np.float32).reshape(inp_shape))
weightv = [[[1.26666667, 0.6, -83.33333333], [-0.33333333, 1, 66.66666667]]]
outp = F.vision.warp_affine(x, tensor(weightv), (2, 2), border_mode="wrap")
res = np.array(
[
[
[[7.875, 8.875, 9.875], [8.90625, 9.90625, 10.90625]],
[[18.75, 19.75, 20.75], [14.90625, 15.90625, 16.90625]],
]
],
dtype=np.float32,
)
if not is_cuda_available():
np.testing.assert_almost_equal(outp.numpy(), res, 5)
def test_remap():
inp_shape = (1, 1, 4, 4)
inp = tensor(np.arange(16, dtype=np.float32).reshape(inp_shape))
map_xy_shape = (1, 2, 2, 2)
map_xy = tensor(
np.array(
[[[1.0, 0.0], [0.0, 1.0]], [[0.0, 1.0], [0.0, 1.0]]], dtype=np.float32
).reshape(map_xy_shape)
)
outp = F.vision.remap(inp, map_xy)
np.testing.assert_equal(
outp.numpy(), np.array([[[[1.0, 4.0], [4.0, 4.0]]]], dtype=np.float32)
)
def test_binary_cross_entropy():
data1_shape = (2, 2)
label1_shape = (2, 2)
data2_shape = (2, 3)
label2_shape = (2, 3)
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def compare_fn(x, y):
np.testing.assert_allclose(x.numpy(), y, atol=5e-4)
np.random.seed(123)
data1 = np.random.uniform(size=data1_shape).astype(np.float32)
label1 = np.random.uniform(size=label1_shape).astype(np.float32)
expect1 = np.array([0.6361], dtype=np.float32)
np.random.seed(123)
data2 = np.random.uniform(size=data2_shape).astype(np.float32)
label2 = np.random.uniform(size=label2_shape).astype(np.float32)
expect2 = np.array([0.6750], dtype=np.float32)
cases = [
{"input": [data1, label1], "output": expect1,},
{"input": [data2, label2], "output": expect2,},
]
opr_test(cases, F.nn.binary_cross_entropy, compare_fn=compare_fn)
cases = [
{"input": [sigmoid(data1), label1], "output": expect1,},
{"input": [sigmoid(data2), label2], "output": expect2,},
]
opr_test(
cases,
partial(F.nn.binary_cross_entropy, with_logits=False),
compare_fn=compare_fn,
)
def test_hinge_loss():
np.random.seed(123)
# case with L1 norm
cases = []
for shape in [(2, 2), (2, 3)]:
data = np.random.uniform(size=shape).astype(np.float32)
label = 2 * np.random.randint(0, 1, size=shape).astype(np.float32) - 1
expect = np.clip(0, np.inf, 1 - data * label).sum(axis=1).mean()
cases.append({"input": [data, label], "output": expect})
opr_test(cases, F.nn.hinge_loss)
# cases with L2 norm
cases = []
for shape in [(2, 2), (2, 3)]:
data = np.random.uniform(size=shape).astype(np.float32)
label = 2 * np.random.randint(0, 1, size=shape).astype(np.float32) - 1
expect = ((np.clip(0, np.inf, 1 - data * label) ** 2).sum(axis=1)).mean()
cases.append({"input": [data, label], "output": expect})
def hinge_loss_with_l2_norm(pred, label):
return F.nn.hinge_loss(pred, label, "L2")
opr_test(cases, hinge_loss_with_l2_norm)
@pytest.mark.parametrize("is_symbolic", [None, False, True])
def test_nms(is_symbolic):
def fn(inp, scores):
return F.vision.nms(
inp,
scores=scores,
iou_thresh=0.5,
max_output=None if is_symbolic is None else 4,
)
if is_symbolic is not None:
fn = jit.trace(symbolic=is_symbolic)(fn)
x = np.array(
[
[0, 0, 100, 100],
[10, 10, 100, 100],
[50, 50, 100, 100],
[100, 100, 150, 150],
],
dtype=np.float32,
)
inp = tensor(x)
scores = tensor([0.5, 0.8, 0.9, 0.6], dtype=np.float32)
for _ in range(3):
result = fn(inp, scores=scores)
np.testing.assert_equal(result.numpy(), np.array([2, 1, 3], dtype=np.int32))
x = np.array([], dtype=np.float32,).reshape(0, 4)
inp = tensor(x)
scores = tensor([], dtype=np.float32)
for _ in range(3):
result = fn(inp, scores=scores)
np.testing.assert_equal(result.numpy(), np.array([], dtype=np.int32))
@pytest.mark.skipif(
get_device_count("gpu") > 0, reason="cuda does not support nchw int8"
)
def test_conv_bias():
inp_scale = 1.5
w_scale = 2.5
outp_scale = 1.5
inp_dtype = dtype.qint8(inp_scale)
w_dtype = dtype.qint8(w_scale)
b_dtype = dtype.qint32(inp_scale * w_scale)
out_dtype = dtype.qint8(outp_scale)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="identity",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = dtype.get_scale(inp_dtype)
w_scale = dtype.get_scale(w_dtype)
b_scale = dtype.get_scale(b_dtype)
inpv = dtype.convert_to_qint8(inp_v * inp_scale, inp_dtype)
wv = dtype.convert_to_qint8(w_v * w_scale, w_dtype)
bv = dtype.convert_to_qint32(b_v * b_scale, b_dtype)
inp_int8 = tensor(inpv, dtype=inp_dtype)
w_int8 = Parameter(wv, dtype=w_dtype)
b_int32 = Parameter(bv, dtype=b_dtype)
inp_fp32 = inp_int8.astype("float32")
w_fp32 = w_int8.astype("float32")
b_fp32 = b_int32.astype("float32")
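        # NCHW4 packs channels in groups of 4 into a trailing dimension, the
        # layout the cuda backend uses for int8 convolution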
def convert_to_nchw4(var):
var = F.reshape(
var, (var.shape[0], var.shape[1] // 4, 4, var.shape[2], var.shape[3])
)
var = F.transpose(var, (0, 1, 3, 4, 2))
return var
def run_conv2d(inp, w, b):
O = F.conv2d(
inp, w, b if has_bias else None, stride=(SH, SW), padding=(PH, PW),
)
if nonlinear_mode == "relu":
return F.relu(O)
else:
return O
def run_conv_bias(inp, w, b, format="NCHW"):
b = b if has_bias else Parameter(np.zeros_like(b.numpy()))
if format == "NCHW4":
inp = convert_to_nchw4(inp)
w = convert_to_nchw4(w)
b = convert_to_nchw4(b)
return F.quantized.conv_bias_activation(
inp,
w,
b,
stride=(SH, SW),
padding=(PH, PW),
dtype=out_dtype,
nonlinear_mode=nonlinear_mode,
)
format = "NCHW4" if is_cuda_available() else "NCHW"
expected = run_conv2d(inp_fp32, w_fp32, b_fp32)
expected = expected.astype(out_dtype).astype("float32")
result = run_conv_bias(inp_int8, w_int8, b_int32, format=format).astype(
"float32"
)
if format == "NCHW4":
result = F.transpose(result, (0, 1, 4, 2, 3))
expected = F.flatten(expected)
result = F.flatten(result)
np.testing.assert_allclose(result.numpy(), expected.numpy(), atol=outp_scale)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1, False)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1, False)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False, "relu")
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, True, "relu")
@pytest.mark.skipif(get_device_count("gpu") > 0, reason="no int8 algorithm on cuda")
def test_batch_conv_bias():
inp_scale = 1.5
w_scale = 2.5
outp_scale = 1.5
inp_dtype = dtype.qint8(inp_scale)
w_dtype = dtype.qint8(w_scale)
b_dtype = dtype.qint32(inp_scale * w_scale)
out_dtype = dtype.qint8(outp_scale)
def run(
N, IC, OC, IH, IW, KH, KW, PH, PW, SH, SW, has_bias=True,
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(N, OC, IC, KH, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = dtype.get_scale(inp_dtype)
w_scale = dtype.get_scale(w_dtype)
b_scale = dtype.get_scale(b_dtype)
inpv = dtype.convert_to_qint8(inp_v * inp_scale, inp_dtype)
wv = dtype.convert_to_qint8(w_v * w_scale, w_dtype)
bv = dtype.convert_to_qint32(b_v * b_scale, b_dtype)
inp_int8 = tensor(inpv, dtype=inp_dtype)
w_int8 = Parameter(wv, dtype=w_dtype)
b_int32 = Parameter(bv, dtype=b_dtype)
inp_fp32 = inp_int8.astype("float32")
w_fp32 = w_int8.astype("float32")
b_fp32 = b_int32.astype("float32")
def run_batch_conv_bias(inp, w, b):
b = b if has_bias else Parameter(np.zeros_like(b.numpy()))
result = F.quantized.batch_conv_bias_activation(
inp, w, b, stride=(SH, SW), padding=(PH, PW), dtype=out_dtype,
)
return result.astype("float32")
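        # with N=1, a batch conv with per-sample weights of shape (N, OC, IC, KH, KW)
        # must agree with a plain conv2d using w[0]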
expected = F.conv2d(inp_fp32, w_fp32[0], b_fp32 if has_bias else None)[0]
expected = expected.astype(out_dtype).astype("float32")
expected = F.flatten(expected)
result = run_batch_conv_bias(inp_int8, w_int8, b_int32)
result = F.flatten(result)
np.testing.assert_allclose(result.numpy(), expected.numpy(), atol=outp_scale)
run(1, 4, 4, 5, 5, 3, 3, 0, 0, 1, 1, True)
def test_conv2d_autocast():
"""check amp's result is equal to manually converted result"""
amp.enabled = True
inp = tensor(np.random.randn(1, 3, 224, 224), dtype=np.float32)
weight = tensor(np.random.randn(64, 3, 7, 7), dtype=np.float32)
out = F.conv2d(inp, weight, None, (2, 2), (3, 3), (1, 1), 1)
amp.enabled = False
expected = F.conv2d(
inp.astype("float16"),
weight.astype("float16"),
None,
(2, 2),
(3, 3),
(1, 1),
1,
compute_mode="float32",
)
assert out.dtype == np.float16
assert expected.dtype == np.float16
np.testing.assert_allclose(out.numpy(), expected.numpy())
def test_conv2d_zero_stride_numpy_array():
inp = np.random.randn(3, 224, 224).astype(np.float32)
inp = inp[np.newaxis, :]
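    # the size-1 axis added by np.newaxis can carry a zero stride; tensor() must
    # accept such arrays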
inp = tensor(inp, dtype=np.float32)
weight = tensor(np.random.randn(16, 3, 3, 3), dtype=np.float32)
out = F.conv2d(inp, weight, None, (2, 2), (3, 3), (1, 1), 1)
def test_conv3d_zero_stride_numpy_array():
inp = np.random.randn(3, 224, 224, 224).astype(np.float32)
inp = inp[np.newaxis, :]
inp = tensor(inp, dtype=np.float32)
weight = tensor(np.random.randn(16, 3, 3, 3, 3), dtype=np.float32)
out = F.conv3d(inp, weight, None, (2, 2, 2), (3, 3, 3), (1, 1, 1), 1)
out.numpy()
def test_conv1d():
inp = tensor(np.ones((2, 2, 4), dtype=np.float32))
weight = tensor(np.ones((3, 2, 2), dtype=np.float32))
out = F.conv1d(inp, weight, None, 2, 0, 1, 1)
np.testing.assert_equal(
out.numpy(),
np.array(
[[[4, 4], [4, 4], [4, 4]], [[4, 4], [4, 4], [4, 4]]], dtype=np.float32
),
)
def test_batchnorm2d_autocast():
"""check amp's result is equal to manually converted result"""
amp.enabled = True
tshape = (1, 3, 224, 224)
pshape = (1, 3, 1, 1)
inp = tensor(np.random.randn(*tshape), dtype=np.float32)
weight = tensor(np.ones(pshape, dtype=np.float32))
bias = tensor(np.zeros(pshape, dtype=np.float32))
out = F.batch_norm(inp, weight=weight, bias=bias, training=True, inplace=False)
amp.enabled = False
expected = F.batch_norm(
inp.astype("float16"),
weight=weight,
bias=bias,
training=True,
inplace=False,
compute_mode="float32",
)
assert out.dtype == np.float16
assert expected.dtype == np.float16
np.testing.assert_allclose(out.numpy(), expected.numpy())
def test_conv3d():
inp = tensor(np.ones((2, 2, 4, 4, 4), dtype=np.float32))
weight = tensor(np.ones((3, 2, 2, 2, 2), dtype=np.float32))
out = F.conv3d(inp, weight, None, 2, 0, 1, 1)
np.testing.assert_equal(
out.numpy(), np.ones((2, 3, 2, 2, 2), dtype=np.float32) * 16
)
def test_condtake():
x = np.array([[1, 2, 3], [4, 5, 6]])
y = np.array([[True, False, True], [False, True, True]])
xx = tensor(x)
yy = tensor(y)
val, idx = F.cond_take(yy, xx)
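    # cond_take returns the masked values together with their flattened indices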
np.testing.assert_equal(val.numpy(), x[y])
np.testing.assert_equal(idx.numpy(), np.where(y.reshape(-1))[0])
@pytest.mark.parametrize("is_symbolic", [None, False, True])
def test_condtake_symbolic(is_symbolic):
shapes = [
(3, 3, 3),
(0,),
(3, 0, 3),
]
def fn(mask, data):
return F.cond_take(mask, data)
if is_symbolic is not None:
fn = jit.trace(symbolic=is_symbolic)(fn)
for shp in shapes:
x_np = np.random.randn(*shp).astype("float32")
mask_np = x_np > 0
x = tensor(x_np)
mask = tensor(mask_np)
ref_out = x_np[mask_np]
ref_idx = mask_np.flatten().nonzero()[0]
for i in range(3):
out, idx = fn(mask, x)
np.testing.assert_equal(out.numpy(), ref_out)
np.testing.assert_equal(idx.numpy(), ref_idx)
if is_symbolic is None:
break
def test_condtake_is_same():
op1 = builtin.CondTake()
op2 = builtin.CondTake()
assert op1 == op2
def test_nms_is_same():
op1 = builtin.NMSKeep(0.7, 100)
op2 = builtin.NMSKeep(0.7, 100)
op3 = builtin.NMSKeep(0.8, 100)
op4 = builtin.NMSKeep(0.7, 200)
assert op1 == op2
assert op1 != op3
assert op1 != op4
assert op3 != op4
def test_argminmax_on_inf():
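    # argmax/argmin over all-inf inputs must still return valid indices (>= 0)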
def run_argmax():
x = F.zeros((100, 100))
x[:] = -float("inf")
idxs = F.argmax(x, axis=0)
return idxs
def run_argmin():
x = F.zeros((100, 100))
x[:] = float("inf")
idxs = F.argmin(x, axis=0)
return idxs
assert all(run_argmax() >= 0)
assert all(run_argmin() >= 0)
def test_deformable_psroi_pooling():
inp = np.random.random((1, 256, 64, 64)).astype("float32")
rois = np.random.random((1, 5)).astype("float32")
trans = np.random.random((24, 2, 7, 7)).astype("float32")
pooled_h = 7
pooled_w = 7
sample_per_part = 4
no_trans = False
part_size = 7
spatial_scale = 1.0 / 64
trans_std = 0.1
y = F.deformable_psroi_pooling(
tensor(inp),
tensor(rois),
tensor(trans),
no_trans,
part_size,
pooled_h,
pooled_w,
sample_per_part,
spatial_scale,
trans_std,
)
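    # smoke test: only checks that the op executes; the output is not validated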
def test_cvt_color():
def rgb2gray(rgb):
return np.dot(rgb[..., :3], [0.299, 0.587, 0.114])
def bgr2gray(bgr):
return np.dot(bgr[..., :3], [0.114, 0.587, 0.299])
inp = np.random.randn(3, 3, 3, 3).astype(np.float32)
out = np.expand_dims(rgb2gray(inp), 3).astype(np.float32)
x = tensor(inp)
y = F.vision.cvt_color(x, mode="RGB2GRAY")
np.testing.assert_allclose(y.numpy(), out, atol=1e-5)
out1 = np.expand_dims(bgr2gray(inp), 3).astype(np.float32)
y1 = F.vision.cvt_color(x, mode="BGR2GRAY")
np.testing.assert_allclose(y1.numpy(), out1, atol=1e-5)
@pytest.mark.parametrize("val", [2, [2,], [2, 3]])
def test_ones(val):
shp = tensor(val)
np_shp = np.array(val)
np.testing.assert_equal(F.ones(shp), np.ones(np_shp))
def test_assert_equal():
shape = (2, 3, 4, 5)
x = F.ones(shape, dtype=np.float32)
y = F.zeros(shape, dtype=np.float32) + 1.00001
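    # the 1e-5 difference stays below the default error tolerance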
z = F.utils._assert_equal(x, y)
def test_assert_not_equal():
shape = (2, 3, 4, 5)
x = F.ones(shape, dtype=np.float32)
y = F.zeros(shape, dtype=np.float32) + 1.1
with pytest.raises(RuntimeError):
z = F.utils._assert_equal(x, y)
def test_neg_axis():
x = tensor(np.random.normal(0, 1, (32, 5)))
y = F.argmax(x, axis=-1)
yy = F.argmax(x, axis=1)
np.testing.assert_equal(y.numpy(), yy.numpy())
y = F.argmax(x, axis=(-1, -2))
yy = F.argmax(x, axis=(0, 1))
np.testing.assert_equal(y.numpy(), yy.numpy())
y = F.argmin(x, axis=(-1, -2))
yy = F.argmin(x, axis=(0, 1))
np.testing.assert_equal(y.numpy(), yy.numpy())
def test_sliding_window():
N, C, H, W = 2, 3, 7, 8
inp = np.random.normal(size=(N, C, H, W))
ph, pw = 1, 2
sh, sw = 2, 1
wh, ww = 3, 2
dh, dw = 1, 3
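    # output size follows convolution arithmetic:
    # out = (in + 2 * pad - dilation * (kernel - 1) - 1) // stride + 1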
s = lambda i, p, s, d, w: (i + p * 2 - (w - 1) * d - 1) // s + 1
inp_pad = np.zeros((N, C, H + ph * 2, W + pw * 2))
inp_pad[:, :, ph : H + ph, pw : W + pw] = inp
gt_out = np.empty(
(N, C, s(H, ph, sh, dh, wh), s(W, pw, sw, dw, ww), wh, ww), dtype=np.float32
)
for n, c, oh, ow in itertools.product(*map(range, gt_out.shape[:4])):
ih, iw = oh * sh, ow * sw
gt_out[n, c, oh, ow, :] = inp_pad[
n, c, ih : ih + (wh - 1) * dh + 1 : dh, iw : iw + (ww - 1) * dw + 1 : dw
]
out = F.sliding_window(
tensor(inp), (wh, ww), padding=(ph, pw), stride=(sh, sw), dilation=(dh, dw)
)
np.testing.assert_equal(gt_out, out.numpy())
def test_sliding_window_transpose():
N, C, H, W = 2, 3, 7, 8
ph, pw = 1, 2
sh, sw = 2, 1
wh, ww = 3, 2
dh, dw = 1, 3
s = lambda i, p, s, d, w: (i + p * 2 - (w - 1) * d - 1) // s + 1
inp = np.random.normal(
size=(N, C, s(H, ph, sh, dh, wh), s(W, pw, sw, dw, ww), wh, ww)
).astype(np.float32)
gt_out = np.zeros((N, C, H, W), dtype=np.float32)
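    # the transpose op scatter-adds every window element back onto the location
    # it was read from, i.e. the adjoint of sliding_window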
for n, c in itertools.product(*map(range, inp.shape[:2])):
oh = 0
for ih in range(-ph, H + ph - dh * (wh - 1), sh):
ow = 0
for iw in range(-pw, W + pw - dw * (ww - 1), sw):
for kh, kw in itertools.product(*map(range, inp.shape[-2:])):
ih2 = ih + dh * kh
iw2 = iw + dw * kw
if ih2 >= 0 and ih2 < H and iw2 >= 0 and iw2 < W:
gt_out[n, c, ih2, iw2] += inp[n, c, oh, ow, kh, kw]
ow += 1
oh += 1
out = F.sliding_window_transpose(
tensor(inp),
(H, W),
(wh, ww),
padding=(ph, pw),
stride=(sh, sw),
dilation=(dh, dw),
)
np.testing.assert_equal(gt_out, out.numpy())
def test_pad():
src = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32)
dst = np.pad(src, ((2, 2), (2, 2)), "constant")
res = F.nn.pad(tensor(src), ((2, 2), (2, 2)), "CONSTANT")
np.testing.assert_allclose(res, dst, atol=1e-5)
dst = np.pad(src, ((2, 2), (2, 2)), "constant", constant_values=3)
res = F.nn.pad(tensor(src), ((2, 2), (2, 2)), "CONSTANT", constant_value=3)
np.testing.assert_allclose(res, dst, atol=1e-5)
dst = np.pad(src, ((2, 2), (2, 2)), "edge")
res = F.nn.pad(tensor(src), ((2, 2), (2, 2)), "EDGE")
np.testing.assert_allclose(res, dst, atol=1e-5)
dst = np.pad(src, ((2, 2), (2, 2)), "reflect")
res = F.nn.pad(tensor(src), ((2, 2), (2, 2)), "REFLECT")
np.testing.assert_allclose(res, dst, atol=1e-5)
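# numpy reference: rearranges (..., C * r * r, H, W) into (..., C, H * r, W * r)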
def pixel_shuffle(data, r):
high_dim = data.shape[:-3]
data = data.reshape(-1, data.shape[-3], data.shape[-2], data.shape[-1])
inn, ic, ih, iw = data.shape
    res = np.zeros((inn, ic // (r * r), ih * r, iw * r))
for n in range(inn):
for c in range(ic):
for h in range(ih):
for w in range(iw):
                    res[
                        n,
                        c // (r * r),
                        h * r + (c % (r * r)) // r,
                        w * r + c % r,
                    ] = data[n, c, h, w]
if len(high_dim) > 0:
        res = res.reshape((*high_dim, ic // (r * r), ih * r, iw * r))
else:
res = res[0]
return res
def test_pixel_shuffle():
# ndim = 3
inp = np.arange(16 * 3 * 3).reshape(16, 3, 3)
out = F.pixel_shuffle(tensor(inp), upscale_factor=4)
golden = pixel_shuffle(inp, 4)
np.testing.assert_equal(out.numpy(), golden)
# ndim = 4
inp = np.arange(3 * 18 * 3 * 3).reshape(3, 18, 3, 3)
out = F.pixel_shuffle(tensor(inp), upscale_factor=3)
golden = pixel_shuffle(inp, 3)
np.testing.assert_equal(out.numpy(), golden)
# ndim = 5
inp = np.arange(5 * 3 * 20 * 3 * 4).reshape(5, 3, 20, 3, 4)
out = F.pixel_shuffle(tensor(inp), upscale_factor=2)
golden = pixel_shuffle(inp, 2)
np.testing.assert_equal(out.numpy(), golden)
# ndim = 6
inp = np.arange(6 * 5 * 3 * 25 * 3 * 4).reshape(6, 5, 3, 25, 3, 4)
out = F.pixel_shuffle(tensor(inp), upscale_factor=5)
golden = pixel_shuffle(inp, 5)
np.testing.assert_equal(out.numpy(), golden)
# ndim = 7
inp = np.arange(2 * 3 * 5 * 3 * 20 * 3 * 4).reshape(2, 3, 5, 3, 20, 3, 4)
out = F.pixel_shuffle(tensor(inp), upscale_factor=2)
golden = pixel_shuffle(inp, 2)
np.testing.assert_equal(out.numpy(), golden)
@pytest.mark.parametrize("is_symbolic", [False, True])
def test_pixel_shuffle_symbolic(is_symbolic):
def fn(inp, upscale_factor):
return F.pixel_shuffle(inp, upscale_factor=upscale_factor)
if is_symbolic is not None:
fn = jit.trace(symbolic=is_symbolic)(fn)
inp = tensor(np.arange(3 * 4 * 5 * 5).reshape(3, 4, 5, 5))
golden = pixel_shuffle(inp, 2)
for _ in range(3):
out = fn(inp, 2)
np.testing.assert_equal(out.numpy(), golden)
if is_symbolic is None:
break
def test_set_conv2d_config():
"""check setting config by contextmanager is equal to manually converted result"""
config._compute_mode = "float32"
inp = tensor(np.random.randn(1, 3, 224, 224), dtype=np.float16)
weight = tensor(np.random.randn(64, 3, 7, 7), dtype=np.float16)
config_out = F.conv2d(inp, weight, None, (2, 2), (3, 3), (1, 1), 1)
config._compute_mode = "default"
with config._override(compute_mode="float32"):
context_out = F.conv2d(inp, weight, None, (2, 2), (3, 3), (1, 1), 1)
expected = F.conv2d(
inp, weight, None, (2, 2), (3, 3), (1, 1), 1, compute_mode="float32",
)
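    # the global setting, the context-manager override, and the explicit
    # compute_mode argument must all produce the same result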
np.testing.assert_allclose(config_out.numpy(), expected.numpy())
np.testing.assert_allclose(context_out.numpy(), expected.numpy())
def test_set_warp_perspective_config():
config._conv_format = "NHWC"
inp_shape = (1, 1, 4, 4)
inp = Tensor(np.arange(16, dtype=np.float32).reshape(inp_shape))
M_shape = (1, 3, 3)
M = Tensor(np.random.randn(3, 3), dtype=np.float32).reshape(M_shape)
config_out = F.vision.warp_perspective(inp, M, (2, 2))
config._conv_format = "default"
with config._override(conv_format="NHWC"):
context_out = F.vision.warp_perspective(inp, M, (2, 2))
expected = F.vision.warp_perspective(inp, M, (2, 2), format="NHWC")
np.testing.assert_allclose(config_out.numpy(), expected.numpy())
np.testing.assert_allclose(context_out.numpy(), expected.numpy())
@pytest.mark.parametrize("stride", [(1, 1)])
@pytest.mark.parametrize("padding", [(1, 1)])
@pytest.mark.parametrize("dilation", [(1, 1)])
@pytest.mark.parametrize("ksize", [(3, 3)])
@pytest.mark.parametrize("groups", [1, 2])
def test_local_conv2d(stride, padding, dilation, ksize, groups):
batch_size, in_channels, out_channels = 2, 4, 8
input_height, input_width = 10, 10
output_height = (input_height + padding[0] * 2 - ksize[0]) // stride[0] + 1
output_width = (input_width + padding[1] * 2 - ksize[1]) // stride[1] + 1
    def local_conv2d_np(data, weight, stride, padding, dilation):
        # naive reference implementation in numpy
        # only covers the case output_height == input_height, output_width == input_width
        data = np.pad(data, ((0, 0), (0, 0), (padding[0], padding[0]), (padding[1], padding[1])))
expected = np.zeros(
(batch_size, out_channels, output_height, output_width), dtype=np.float32,
)
ic_group_size = in_channels // groups
oc_group_size = out_channels // groups
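        # each output channel convolves only the input channels of its own group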
for n, oc, oh, ow in itertools.product(
*map(range, [batch_size, out_channels, output_height, output_width])
):
ih, iw = oh * stride[0], ow * stride[1]
g_id = oc // oc_group_size
expected[n, oc, ih, iw] = np.sum(
data[
n,
g_id * ic_group_size : (g_id + 1) * ic_group_size,
ih : ih + ksize[0],
iw : iw + ksize[1],
]
* weight[g_id, oh, ow, :, :, :, oc % oc_group_size]
)
return expected
data = np.random.rand(batch_size, in_channels, input_height, input_width).astype(
"float32"
)
weight = np.random.rand(
groups,
output_height,
output_width,
in_channels // groups,
*ksize,
out_channels // groups,
).astype("float32")
output = F.local_conv2d(
tensor(data),
tensor(weight),
None,
stride=stride,
padding=padding,
dilation=dilation,
)
ref = local_conv2d_np(data, weight, stride, padding, dilation)
np.testing.assert_almost_equal(output.numpy(), ref, 5)
|
[
"megengine.functional.conv3d",
"megengine.functional.pixel_shuffle",
"megengine.core.tensor.dtype.convert_to_qint32",
"megengine.config._override",
"megengine.functional.argmax",
"megengine.functional.adaptive_avg_pool2d",
"megengine.functional.argmin",
"megengine.jit.trace",
"megengine.functional.utils._assert_equal",
"megengine.functional.nn.hinge_loss",
"megengine.core.tensor.dtype.get_scale",
"megengine.functional.transpose",
"megengine.functional.conv2d",
"megengine.functional.conv1d",
"megengine.device.get_device_count",
"megengine.core.ops.builtin.NMSKeep",
"megengine.functional.zeros",
"megengine.tensor",
"megengine.functional.flatten",
"megengine.functional.concat",
"megengine.functional.adaptive_max_pool2d",
"megengine.functional.vision.cvt_color",
"megengine.functional.matmul",
"megengine.core._imperative_rt.ops.set_global_rng_seed",
"megengine.core.autodiff.grad.Grad",
"megengine.functional.vision.correlation",
"megengine.functional.vision.remap",
"megengine.functional.cond_take",
"megengine.functional.vision.nms",
"megengine.functional.vision.interpolate",
"megengine.core.tensor.dtype.convert_to_qint8",
"megengine.functional.ones_like",
"megengine.core.tensor.dtype.qint8",
"megengine.functional.batch_norm",
"megengine.is_cuda_available",
"megengine.functional.relu",
"megengine.functional.vision.roi_align",
"megengine.functional.quantized.batch_conv_bias_activation",
"megengine.functional.one_hot",
"megengine.core.tensor.utils.make_shape_tuple",
"megengine.functional.vision.warp_perspective",
"megengine.core.ops.builtin.CondTake",
"megengine.functional.ones",
"megengine.Parameter",
"megengine.autodiff.GradManager",
"megengine.core.tensor.dtype.qint32",
"megengine.functional.nn.dropout",
"megengine.functional.quantized.conv_bias_activation",
"megengine.functional.reshape",
"megengine.functional.vision.roi_pooling"
] |
[((6039, 6149), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""shape_a, shape_b"""', '[((0,), (0,)), ((10, 0), (0, 10)), ((3, 10, 0), (3, 0, 10))]'], {}), "('shape_a, shape_b', [((0,), (0,)), ((10, 0), (0, 10\n )), ((3, 10, 0), (3, 0, 10))])\n", (6062, 6149), False, 'import pytest\n'), ((6154, 6213), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""is_symbolic"""', '[None, True, False]'], {}), "('is_symbolic', [None, True, False])\n", (6177, 6213), False, 'import pytest\n'), ((16189, 16263), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dt"""', '[np.float32, np.int8, np.uint8, np.float16]'], {}), "('dt', [np.float32, np.int8, np.uint8, np.float16])\n", (16212, 16263), False, 'import pytest\n'), ((16764, 16838), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dt"""', '[np.float32, np.int8, np.uint8, np.float16]'], {}), "('dt', [np.float32, np.int8, np.uint8, np.float16])\n", (16787, 16838), False, 'import pytest\n'), ((20828, 20887), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""is_symbolic"""', '[None, False, True]'], {}), "('is_symbolic', [None, False, True])\n", (20851, 20887), False, 'import pytest\n'), ((30215, 30274), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""is_symbolic"""', '[None, False, True]'], {}), "('is_symbolic', [None, False, True])\n", (30238, 30274), False, 'import pytest\n'), ((32970, 33018), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""val"""', '[2, [2], [2, 3]]'], {}), "('val', [2, [2], [2, 3]])\n", (32993, 33018), False, 'import pytest\n'), ((38668, 38721), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""is_symbolic"""', '[False, True]'], {}), "('is_symbolic', [False, True])\n", (38691, 38721), False, 'import pytest\n'), ((40667, 40710), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""stride"""', '[(1, 1)]'], {}), "('stride', [(1, 1)])\n", (40690, 40710), False, 'import pytest\n'), ((40712, 40756), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""padding"""', '[(1, 1)]'], {}), "('padding', [(1, 1)])\n", (40735, 40756), False, 'import pytest\n'), ((40758, 40803), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dilation"""', '[(1, 1)]'], {}), "('dilation', [(1, 1)])\n", (40781, 40803), False, 'import pytest\n'), ((40805, 40847), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""ksize"""', '[(3, 3)]'], {}), "('ksize', [(3, 3)])\n", (40828, 40847), False, 'import pytest\n'), ((40849, 40890), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""groups"""', '[1, 2]'], {}), "('groups', [1, 2])\n", (40872, 40890), False, 'import pytest\n'), ((1062, 1104), 'numpy.array', 'np.array', (['[[1, 0], [0, 1]]'], {'dtype': 'np.bool_'}), '([[1, 0], [0, 1]], dtype=np.bool_)\n', (1070, 1104), True, 'import numpy as np\n'), ((1115, 1169), 'numpy.array', 'np.array', (['[[1, np.inf], [np.nan, 4]]'], {'dtype': 'np.float32'}), '([[1, np.inf], [np.nan, 4]], dtype=np.float32)\n', (1123, 1169), True, 'import numpy as np\n'), ((1180, 1224), 'numpy.array', 'np.array', (['[[5, 6], [7, 8]]'], {'dtype': 'np.float32'}), '([[5, 6], [7, 8]], dtype=np.float32)\n', (1188, 1224), True, 'import numpy as np\n'), ((1239, 1298), 'numpy.array', 'np.array', (['[[1, 0, 1], [1, 0, 0], [1, 1, 0]]'], {'dtype': 'np.bool_'}), '([[1, 0, 1], [1, 0, 0], [1, 1, 0]], dtype=np.bool_)\n', (1247, 1298), True, 'import numpy as np\n'), ((1309, 1380), 'numpy.array', 'np.array', (['[[1, np.inf, 2], [0, np.nan, 4], [1, 5, 7]]'], {'dtype': 'np.float32'}), '([[1, np.inf, 2], 
[0, np.nan, 4], [1, 5, 7]], dtype=np.float32)\n', (1317, 1380), True, 'import numpy as np\n'), ((1391, 1452), 'numpy.array', 'np.array', (['[[5, 6, 9], [2, 7, 8], [2, 1, 9]]'], {'dtype': 'np.float32'}), '([[5, 6, 9], [2, 7, 8], [2, 1, 9]], dtype=np.float32)\n', (1399, 1452), True, 'import numpy as np\n'), ((1556, 1615), 'utils.opr_test', 'opr_test', (['cases', 'F.where'], {'ref_fn': 'np.where', 'test_trace': '(False)'}), '(cases, F.where, ref_fn=np.where, test_trace=False)\n', (1564, 1615), False, 'from utils import opr_test\n'), ((1630, 1665), 'numpy.array', 'np.array', (['[1, 1, 1]'], {'dtype': 'np.bool_'}), '([1, 1, 1], dtype=np.bool_)\n', (1638, 1665), True, 'import numpy as np\n'), ((1676, 1713), 'numpy.array', 'np.array', (['[1, 3, 2]'], {'dtype': 'np.float32'}), '([1, 3, 2], dtype=np.float32)\n', (1684, 1713), True, 'import numpy as np\n'), ((1724, 1761), 'numpy.array', 'np.array', (['[5, 6, 9]'], {'dtype': 'np.float32'}), '([5, 6, 9], dtype=np.float32)\n', (1732, 1761), True, 'import numpy as np\n'), ((1776, 1811), 'numpy.array', 'np.array', (['[0, 0, 0]'], {'dtype': 'np.bool_'}), '([0, 0, 0], dtype=np.bool_)\n', (1784, 1811), True, 'import numpy as np\n'), ((1822, 1859), 'numpy.array', 'np.array', (['[1, 3, 2]'], {'dtype': 'np.float32'}), '([1, 3, 2], dtype=np.float32)\n', (1830, 1859), True, 'import numpy as np\n'), ((1870, 1907), 'numpy.array', 'np.array', (['[5, 6, 9]'], {'dtype': 'np.float32'}), '([5, 6, 9], dtype=np.float32)\n', (1878, 1907), True, 'import numpy as np\n'), ((2011, 2070), 'utils.opr_test', 'opr_test', (['cases', 'F.where'], {'ref_fn': 'np.where', 'test_trace': '(False)'}), '(cases, F.where, ref_fn=np.where, test_trace=False)\n', (2019, 2070), False, 'from utils import opr_test\n'), ((4953, 4996), 'utils.opr_test', 'opr_test', (['cases', 'F.matmul'], {'ref_fn': 'np.matmul'}), '(cases, F.matmul, ref_fn=np.matmul)\n', (4961, 4996), False, 'from utils import opr_test\n'), ((5612, 5655), 'utils.opr_test', 'opr_test', (['cases', 'F.matmul'], {'ref_fn': 'np.matmul'}), '(cases, F.matmul, ref_fn=np.matmul)\n', (5620, 5655), False, 'from utils import opr_test\n'), ((8551, 8583), 'numpy.random.randn', 'np.random.randn', (['(2)', '(32)', '(256)', '(256)'], {}), '(2, 32, 256, 256)\n', (8566, 8583), True, 'import numpy as np\n'), ((8595, 8611), 'numpy.zeros', 'np.zeros', (['(4, 5)'], {}), '((4, 5))\n', (8603, 8611), True, 'import numpy as np\n'), ((8755, 8771), 'megengine.tensor', 'tensor', (['inp_feat'], {}), '(inp_feat)\n', (8761, 8771), False, 'from megengine import Parameter, Tensor, is_cuda_available, tensor\n'), ((8783, 8795), 'megengine.tensor', 'tensor', (['rois'], {}), '(rois)\n', (8789, 8795), False, 'from megengine import Parameter, Tensor, is_cuda_available, tensor\n'), ((8985, 9121), 'megengine.functional.vision.roi_align', 'F.vision.roi_align', (['inp_feat', 'rois'], {'output_shape': 'output_shape', 'mode': '"""average"""', 'spatial_scale': '(1.0 / 4)', 'sample_points': '(2)', 'aligned': '(True)'}), "(inp_feat, rois, output_shape=output_shape, mode=\n 'average', spatial_scale=1.0 / 4, sample_points=2, aligned=True)\n", (9003, 9121), True, 'import megengine.functional as F\n'), ((10119, 10244), 'megengine.functional.vision.correlation', 'F.vision.correlation', (['data1', 'data2'], {'kernel_size': '(5)', 'max_displacement': '(4)', 'stride1': '(2)', 'stride2': '(2)', 'pad_size': '(2)', 'is_multiply': '(True)'}), '(data1, data2, kernel_size=5, max_displacement=4,\n stride1=2, stride2=2, pad_size=2, is_multiply=True)\n', (10139, 10244), True, 'import 
megengine.functional as F\n'), ((10612, 10737), 'megengine.functional.vision.correlation', 'F.vision.correlation', (['data1', 'data2'], {'kernel_size': '(3)', 'max_displacement': '(0)', 'stride1': '(1)', 'stride2': '(1)', 'pad_size': '(0)', 'is_multiply': '(True)'}), '(data1, data2, kernel_size=3, max_displacement=0,\n stride1=1, stride2=1, pad_size=0, is_multiply=True)\n', (10632, 10737), True, 'import megengine.functional as F\n'), ((10986, 11112), 'megengine.functional.vision.correlation', 'F.vision.correlation', (['data1', 'data2'], {'kernel_size': '(3)', 'max_displacement': '(0)', 'stride1': '(1)', 'stride2': '(1)', 'pad_size': '(0)', 'is_multiply': '(False)'}), '(data1, data2, kernel_size=3, max_displacement=0,\n stride1=1, stride2=1, pad_size=0, is_multiply=False)\n', (11006, 11112), True, 'import megengine.functional as F\n'), ((11352, 11478), 'megengine.functional.vision.correlation', 'F.vision.correlation', (['data1', 'data2'], {'kernel_size': '(3)', 'max_displacement': '(0)', 'stride1': '(1)', 'stride2': '(1)', 'pad_size': '(0)', 'is_multiply': '(False)'}), '(data1, data2, kernel_size=3, max_displacement=0,\n stride1=1, stride2=1, pad_size=0, is_multiply=False)\n', (11372, 11478), True, 'import megengine.functional as F\n'), ((11840, 11966), 'megengine.functional.vision.correlation', 'F.vision.correlation', (['data1', 'data2'], {'kernel_size': '(3)', 'max_displacement': '(2)', 'stride1': '(1)', 'stride2': '(2)', 'pad_size': '(0)', 'is_multiply': '(False)'}), '(data1, data2, kernel_size=3, max_displacement=2,\n stride1=1, stride2=2, pad_size=0, is_multiply=False)\n', (11860, 11966), True, 'import megengine.functional as F\n'), ((12241, 12335), 'megengine.functional.vision.roi_pooling', 'F.vision.roi_pooling', (['inp_feat', 'rois'], {'output_shape': 'output_shape', 'mode': '"""max"""', 'scale': '(1.0 / 4)'}), "(inp_feat, rois, output_shape=output_shape, mode='max',\n scale=1.0 / 4)\n", (12261, 12335), True, 'import megengine.functional as F\n'), ((12798, 12830), 'megengine.functional.adaptive_avg_pool2d', 'F.adaptive_avg_pool2d', (['inp', 'oshp'], {}), '(inp, oshp)\n', (12819, 12830), True, 'import megengine.functional as F\n'), ((13762, 13794), 'megengine.functional.adaptive_max_pool2d', 'F.adaptive_max_pool2d', (['inp', 'oshp'], {}), '(inp, oshp)\n', (13783, 13794), True, 'import megengine.functional as F\n'), ((15853, 15902), 'megengine.functional.vision.interpolate', 'F.vision.interpolate', (['x', '(15, 5)'], {'mode': '"""bilinear"""'}), "(x, (15, 5), mode='bilinear')\n", (15873, 15902), True, 'import megengine.functional as F\n'), ((15999, 16012), 'numpy.arange', 'np.arange', (['(32)'], {}), '(32)\n', (16008, 16012), True, 'import numpy as np\n'), ((16084, 16132), 'megengine.functional.vision.interpolate', 'F.vision.interpolate', (['x', '(1, 1)'], {'mode': '"""bilinear"""'}), "(x, (1, 1), mode='bilinear')\n", (16104, 16132), True, 'import megengine.functional as F\n'), ((16636, 16675), 'megengine.functional.vision.warp_perspective', 'F.vision.warp_perspective', (['x', 'M', '(2, 2)'], {}), '(x, M, (2, 2))\n', (16661, 16675), True, 'import megengine.functional as F\n'), ((17216, 17236), 'megengine.functional.concat', 'F.concat', (['([M] * 4)', '(0)'], {}), '([M] * 4, 0)\n', (17224, 17236), True, 'import megengine.functional as F\n'), ((17249, 17310), 'megengine.functional.vision.warp_perspective', 'F.vision.warp_perspective', (['x', 'M', '(2, 2)'], {'mat_idx': '[0, 1, 1, 0]'}), '(x, M, (2, 2), mat_idx=[0, 1, 1, 0])\n', (17274, 17310), True, 'import megengine.functional as 
F\n'), ((17895, 18042), 'numpy.array', 'np.array', (['[[[[7.875, 8.875, 9.875], [8.90625, 9.90625, 10.90625]], [[18.75, 19.75, \n 20.75], [14.90625, 15.90625, 16.90625]]]]'], {'dtype': 'np.float32'}), '([[[[7.875, 8.875, 9.875], [8.90625, 9.90625, 10.90625]], [[18.75, \n 19.75, 20.75], [14.90625, 15.90625, 16.90625]]]], dtype=np.float32)\n', (17903, 18042), True, 'import numpy as np\n'), ((18544, 18571), 'megengine.functional.vision.remap', 'F.vision.remap', (['inp', 'map_xy'], {}), '(inp, map_xy)\n', (18558, 18571), True, 'import megengine.functional as F\n'), ((18972, 18991), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (18986, 18991), True, 'import numpy as np\n'), ((19142, 19178), 'numpy.array', 'np.array', (['[0.6361]'], {'dtype': 'np.float32'}), '([0.6361], dtype=np.float32)\n', (19150, 19178), True, 'import numpy as np\n'), ((19184, 19203), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (19198, 19203), True, 'import numpy as np\n'), ((19354, 19389), 'numpy.array', 'np.array', (['[0.675]'], {'dtype': 'np.float32'}), '([0.675], dtype=np.float32)\n', (19362, 19389), True, 'import numpy as np\n'), ((19528, 19593), 'utils.opr_test', 'opr_test', (['cases', 'F.nn.binary_cross_entropy'], {'compare_fn': 'compare_fn'}), '(cases, F.nn.binary_cross_entropy, compare_fn=compare_fn)\n', (19536, 19593), False, 'from utils import opr_test\n'), ((19903, 19922), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (19917, 19922), True, 'import numpy as np\n'), ((20283, 20315), 'utils.opr_test', 'opr_test', (['cases', 'F.nn.hinge_loss'], {}), '(cases, F.nn.hinge_loss)\n', (20291, 20315), False, 'from utils import opr_test\n'), ((20784, 20824), 'utils.opr_test', 'opr_test', (['cases', 'hinge_loss_with_l2_norm'], {}), '(cases, hinge_loss_with_l2_norm)\n', (20792, 20824), False, 'from utils import opr_test\n'), ((21201, 21314), 'numpy.array', 'np.array', (['[[0, 0, 100, 100], [10, 10, 100, 100], [50, 50, 100, 100], [100, 100, 150, 150]\n ]'], {'dtype': 'np.float32'}), '([[0, 0, 100, 100], [10, 10, 100, 100], [50, 50, 100, 100], [100, \n 100, 150, 150]], dtype=np.float32)\n', (21209, 21314), True, 'import numpy as np\n'), ((21402, 21411), 'megengine.tensor', 'tensor', (['x'], {}), '(x)\n', (21408, 21411), False, 'from megengine import Parameter, Tensor, is_cuda_available, tensor\n'), ((21425, 21471), 'megengine.tensor', 'tensor', (['[0.5, 0.8, 0.9, 0.6]'], {'dtype': 'np.float32'}), '([0.5, 0.8, 0.9, 0.6], dtype=np.float32)\n', (21431, 21471), False, 'from megengine import Parameter, Tensor, is_cuda_available, tensor\n'), ((21685, 21694), 'megengine.tensor', 'tensor', (['x'], {}), '(x)\n', (21691, 21694), False, 'from megengine import Parameter, Tensor, is_cuda_available, tensor\n'), ((21708, 21736), 'megengine.tensor', 'tensor', (['[]'], {'dtype': 'np.float32'}), '([], dtype=np.float32)\n', (21714, 21736), False, 'from megengine import Parameter, Tensor, is_cuda_available, tensor\n'), ((22074, 22096), 'megengine.core.tensor.dtype.qint8', 'dtype.qint8', (['inp_scale'], {}), '(inp_scale)\n', (22085, 22096), True, 'import megengine.core.tensor.dtype as dtype\n'), ((22111, 22131), 'megengine.core.tensor.dtype.qint8', 'dtype.qint8', (['w_scale'], {}), '(w_scale)\n', (22122, 22131), True, 'import megengine.core.tensor.dtype as dtype\n'), ((22146, 22179), 'megengine.core.tensor.dtype.qint32', 'dtype.qint32', (['(inp_scale * w_scale)'], {}), '(inp_scale * w_scale)\n', (22158, 22179), True, 'import megengine.core.tensor.dtype as dtype\n'), ((22196, 22219), 
'megengine.core.tensor.dtype.qint8', 'dtype.qint8', (['outp_scale'], {}), '(outp_scale)\n', (22207, 22219), True, 'import megengine.core.tensor.dtype as dtype\n'), ((25405, 25427), 'megengine.core.tensor.dtype.qint8', 'dtype.qint8', (['inp_scale'], {}), '(inp_scale)\n', (25416, 25427), True, 'import megengine.core.tensor.dtype as dtype\n'), ((25442, 25462), 'megengine.core.tensor.dtype.qint8', 'dtype.qint8', (['w_scale'], {}), '(w_scale)\n', (25453, 25462), True, 'import megengine.core.tensor.dtype as dtype\n'), ((25477, 25510), 'megengine.core.tensor.dtype.qint32', 'dtype.qint32', (['(inp_scale * w_scale)'], {}), '(inp_scale * w_scale)\n', (25489, 25510), True, 'import megengine.core.tensor.dtype as dtype\n'), ((25527, 25550), 'megengine.core.tensor.dtype.qint8', 'dtype.qint8', (['outp_scale'], {}), '(outp_scale)\n', (25538, 25550), True, 'import megengine.core.tensor.dtype as dtype\n'), ((27400, 27454), 'megengine.functional.conv2d', 'F.conv2d', (['inp', 'weight', 'None', '(2, 2)', '(3, 3)', '(1, 1)', '(1)'], {}), '(inp, weight, None, (2, 2), (3, 3), (1, 1), 1)\n', (27408, 27454), True, 'import megengine.functional as F\n'), ((27960, 27989), 'megengine.tensor', 'tensor', (['inp'], {'dtype': 'np.float32'}), '(inp, dtype=np.float32)\n', (27966, 27989), False, 'from megengine import Parameter, Tensor, is_cuda_available, tensor\n'), ((28068, 28122), 'megengine.functional.conv2d', 'F.conv2d', (['inp', 'weight', 'None', '(2, 2)', '(3, 3)', '(1, 1)', '(1)'], {}), '(inp, weight, None, (2, 2), (3, 3), (1, 1), 1)\n', (28076, 28122), True, 'import megengine.functional as F\n'), ((28271, 28300), 'megengine.tensor', 'tensor', (['inp'], {'dtype': 'np.float32'}), '(inp, dtype=np.float32)\n', (28277, 28300), False, 'from megengine import Parameter, Tensor, is_cuda_available, tensor\n'), ((28382, 28445), 'megengine.functional.conv3d', 'F.conv3d', (['inp', 'weight', 'None', '(2, 2, 2)', '(3, 3, 3)', '(1, 1, 1)', '(1)'], {}), '(inp, weight, None, (2, 2, 2), (3, 3, 3), (1, 1, 1), 1)\n', (28390, 28445), True, 'import megengine.functional as F\n'), ((28606, 28645), 'megengine.functional.conv1d', 'F.conv1d', (['inp', 'weight', 'None', '(2)', '(0)', '(1)', '(1)'], {}), '(inp, weight, None, 2, 0, 1, 1)\n', (28614, 28645), True, 'import megengine.functional as F\n'), ((29176, 29249), 'megengine.functional.batch_norm', 'F.batch_norm', (['inp'], {'weight': 'weight', 'bias': 'bias', 'training': '(True)', 'inplace': '(False)'}), '(inp, weight=weight, bias=bias, training=True, inplace=False)\n', (29188, 29249), True, 'import megengine.functional as F\n'), ((29754, 29793), 'megengine.functional.conv3d', 'F.conv3d', (['inp', 'weight', 'None', '(2)', '(0)', '(1)', '(1)'], {}), '(inp, weight, None, 2, 0, 1, 1)\n', (29762, 29793), True, 'import megengine.functional as F\n'), ((29929, 29961), 'numpy.array', 'np.array', (['[[1, 2, 3], [4, 5, 6]]'], {}), '([[1, 2, 3], [4, 5, 6]])\n', (29937, 29961), True, 'import numpy as np\n'), ((29970, 30022), 'numpy.array', 'np.array', (['[[True, False, True], [False, True, True]]'], {}), '([[True, False, True], [False, True, True]])\n', (29978, 30022), True, 'import numpy as np\n'), ((30032, 30041), 'megengine.tensor', 'tensor', (['x'], {}), '(x)\n', (30038, 30041), False, 'from megengine import Parameter, Tensor, is_cuda_available, tensor\n'), ((30051, 30060), 'megengine.tensor', 'tensor', (['y'], {}), '(y)\n', (30057, 30060), False, 'from megengine import Parameter, Tensor, is_cuda_available, tensor\n'), ((30076, 30095), 'megengine.functional.cond_take', 'F.cond_take', (['yy', 'xx'], 
{}), '(yy, xx)\n', (30087, 30095), True, 'import megengine.functional as F\n'), ((31046, 31064), 'megengine.core.ops.builtin.CondTake', 'builtin.CondTake', ([], {}), '()\n', (31062, 31064), True, 'import megengine.core.ops.builtin as builtin\n'), ((31075, 31093), 'megengine.core.ops.builtin.CondTake', 'builtin.CondTake', ([], {}), '()\n', (31091, 31093), True, 'import megengine.core.ops.builtin as builtin\n'), ((31152, 31177), 'megengine.core.ops.builtin.NMSKeep', 'builtin.NMSKeep', (['(0.7)', '(100)'], {}), '(0.7, 100)\n', (31167, 31177), True, 'import megengine.core.ops.builtin as builtin\n'), ((31188, 31213), 'megengine.core.ops.builtin.NMSKeep', 'builtin.NMSKeep', (['(0.7)', '(100)'], {}), '(0.7, 100)\n', (31203, 31213), True, 'import megengine.core.ops.builtin as builtin\n'), ((31224, 31249), 'megengine.core.ops.builtin.NMSKeep', 'builtin.NMSKeep', (['(0.8)', '(100)'], {}), '(0.8, 100)\n', (31239, 31249), True, 'import megengine.core.ops.builtin as builtin\n'), ((31260, 31285), 'megengine.core.ops.builtin.NMSKeep', 'builtin.NMSKeep', (['(0.7)', '(200)'], {}), '(0.7, 200)\n', (31275, 31285), True, 'import megengine.core.ops.builtin as builtin\n'), ((32678, 32689), 'megengine.tensor', 'tensor', (['inp'], {}), '(inp)\n', (32684, 32689), False, 'from megengine import Parameter, Tensor, is_cuda_available, tensor\n'), ((32698, 32736), 'megengine.functional.vision.cvt_color', 'F.vision.cvt_color', (['x'], {'mode': '"""RGB2GRAY"""'}), "(x, mode='RGB2GRAY')\n", (32716, 32736), True, 'import megengine.functional as F\n'), ((32868, 32906), 'megengine.functional.vision.cvt_color', 'F.vision.cvt_color', (['x'], {'mode': '"""BGR2GRAY"""'}), "(x, mode='BGR2GRAY')\n", (32886, 32906), True, 'import megengine.functional as F\n'), ((33050, 33061), 'megengine.tensor', 'tensor', (['val'], {}), '(val)\n', (33056, 33061), False, 'from megengine import Parameter, Tensor, is_cuda_available, tensor\n'), ((33075, 33088), 'numpy.array', 'np.array', (['val'], {}), '(val)\n', (33083, 33088), True, 'import numpy as np\n'), ((33207, 33238), 'megengine.functional.ones', 'F.ones', (['shape'], {'dtype': 'np.float32'}), '(shape, dtype=np.float32)\n', (33213, 33238), True, 'import megengine.functional as F\n'), ((33298, 33325), 'megengine.functional.utils._assert_equal', 'F.utils._assert_equal', (['x', 'y'], {}), '(x, y)\n', (33319, 33325), True, 'import megengine.functional as F\n'), ((33390, 33421), 'megengine.functional.ones', 'F.ones', (['shape'], {'dtype': 'np.float32'}), '(shape, dtype=np.float32)\n', (33396, 33421), True, 'import megengine.functional as F\n'), ((33627, 33647), 'megengine.functional.argmax', 'F.argmax', (['x'], {'axis': '(-1)'}), '(x, axis=-1)\n', (33635, 33647), True, 'import megengine.functional as F\n'), ((33657, 33676), 'megengine.functional.argmax', 'F.argmax', (['x'], {'axis': '(1)'}), '(x, axis=1)\n', (33665, 33676), True, 'import megengine.functional as F\n'), ((33737, 33763), 'megengine.functional.argmax', 'F.argmax', (['x'], {'axis': '(-1, -2)'}), '(x, axis=(-1, -2))\n', (33745, 33763), True, 'import megengine.functional as F\n'), ((33773, 33797), 'megengine.functional.argmax', 'F.argmax', (['x'], {'axis': '(0, 1)'}), '(x, axis=(0, 1))\n', (33781, 33797), True, 'import megengine.functional as F\n'), ((33858, 33884), 'megengine.functional.argmin', 'F.argmin', (['x'], {'axis': '(-1, -2)'}), '(x, axis=(-1, -2))\n', (33866, 33884), True, 'import megengine.functional as F\n'), ((33894, 33918), 'megengine.functional.argmin', 'F.argmin', (['x'], {'axis': '(0, 1)'}), '(x, axis=(0, 1))\n', (33902, 
33918), True, 'import megengine.functional as F\n'), ((34037, 34072), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(N, C, H, W)'}), '(size=(N, C, H, W))\n', (34053, 34072), True, 'import numpy as np\n'), ((34228, 34268), 'numpy.zeros', 'np.zeros', (['(N, C, H + ph * 2, W + pw * 2)'], {}), '((N, C, H + ph * 2, W + pw * 2))\n', (34236, 34268), True, 'import numpy as np\n'), ((35193, 35233), 'numpy.zeros', 'np.zeros', (['(N, C, H, W)'], {'dtype': 'np.float32'}), '((N, C, H, W), dtype=np.float32)\n', (35201, 35233), True, 'import numpy as np\n'), ((36053, 36114), 'numpy.array', 'np.array', (['[[1, 2, 3], [4, 5, 6], [7, 8, 9]]'], {'dtype': 'np.float32'}), '([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32)\n', (36061, 36114), True, 'import numpy as np\n'), ((36125, 36166), 'numpy.pad', 'np.pad', (['src', '((2, 2), (2, 2))', '"""constant"""'], {}), "(src, ((2, 2), (2, 2)), 'constant')\n", (36131, 36166), True, 'import numpy as np\n'), ((36233, 36281), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['res', 'dst'], {'atol': '(1e-05)'}), '(res, dst, atol=1e-05)\n', (36259, 36281), True, 'import numpy as np\n'), ((36292, 36352), 'numpy.pad', 'np.pad', (['src', '((2, 2), (2, 2))', '"""constant"""'], {'constant_values': '(3)'}), "(src, ((2, 2), (2, 2)), 'constant', constant_values=3)\n", (36298, 36352), True, 'import numpy as np\n'), ((36437, 36485), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['res', 'dst'], {'atol': '(1e-05)'}), '(res, dst, atol=1e-05)\n', (36463, 36485), True, 'import numpy as np\n'), ((36496, 36533), 'numpy.pad', 'np.pad', (['src', '((2, 2), (2, 2))', '"""edge"""'], {}), "(src, ((2, 2), (2, 2)), 'edge')\n", (36502, 36533), True, 'import numpy as np\n'), ((36596, 36644), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['res', 'dst'], {'atol': '(1e-05)'}), '(res, dst, atol=1e-05)\n', (36622, 36644), True, 'import numpy as np\n'), ((36655, 36695), 'numpy.pad', 'np.pad', (['src', '((2, 2), (2, 2))', '"""reflect"""'], {}), "(src, ((2, 2), (2, 2)), 'reflect')\n", (36661, 36695), True, 'import numpy as np\n'), ((36761, 36809), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['res', 'dst'], {'atol': '(1e-05)'}), '(res, dst, atol=1e-05)\n', (36787, 36809), True, 'import numpy as np\n'), ((39509, 39563), 'megengine.functional.conv2d', 'F.conv2d', (['inp', 'weight', 'None', '(2, 2)', '(3, 3)', '(1, 1)', '(1)'], {}), '(inp, weight, None, (2, 2), (3, 3), (1, 1), 1)\n', (39517, 39563), True, 'import megengine.functional as F\n'), ((39744, 39822), 'megengine.functional.conv2d', 'F.conv2d', (['inp', 'weight', 'None', '(2, 2)', '(3, 3)', '(1, 1)', '(1)'], {'compute_mode': '"""float32"""'}), "(inp, weight, None, (2, 2), (3, 3), (1, 1), 1, compute_mode='float32')\n", (39752, 39822), True, 'import megengine.functional as F\n'), ((40264, 40305), 'megengine.functional.vision.warp_perspective', 'F.vision.warp_perspective', (['inp', 'M', '(2, 2)'], {}), '(inp, M, (2, 2))\n', (40289, 40305), True, 'import megengine.functional as F\n'), ((40468, 40524), 'megengine.functional.vision.warp_perspective', 'F.vision.warp_perspective', (['inp', 'M', '(2, 2)'], {'format': '"""NHWC"""'}), "(inp, M, (2, 2), format='NHWC')\n", (40493, 40524), True, 'import megengine.functional as F\n'), ((3231, 3255), 'megengine.core._imperative_rt.ops.set_global_rng_seed', 'set_global_rng_seed', (['(111)'], {}), '(111)\n', (3250, 3255), False, 'from megengine.core._imperative_rt.ops import set_global_rng_seed\n'), ((3271, 3310), 
'megengine.functional.nn.dropout', 'F.nn.dropout', (['data', 'rate'], {'training': '(True)'}), '(data, rate, training=True)\n', (3283, 3310), True, 'import megengine.functional as F\n'), ((3326, 3365), 'megengine.functional.nn.dropout', 'F.nn.dropout', (['data', 'rate'], {'training': '(True)'}), '(data, rate, training=True)\n', (3338, 3365), True, 'import megengine.functional as F\n'), ((3431, 3455), 'megengine.core._imperative_rt.ops.set_global_rng_seed', 'set_global_rng_seed', (['(111)'], {}), '(111)\n', (3450, 3455), False, 'from megengine.core._imperative_rt.ops import set_global_rng_seed\n'), ((3471, 3510), 'megengine.functional.nn.dropout', 'F.nn.dropout', (['data', 'rate'], {'training': '(True)'}), '(data, rate, training=True)\n', (3483, 3510), True, 'import megengine.functional as F\n'), ((3572, 3596), 'megengine.core._imperative_rt.ops.set_global_rng_seed', 'set_global_rng_seed', (['(222)'], {}), '(222)\n', (3591, 3596), False, 'from megengine.core._imperative_rt.ops import set_global_rng_seed\n'), ((3612, 3651), 'megengine.functional.nn.dropout', 'F.nn.dropout', (['data', 'rate'], {'training': '(True)'}), '(data, rate, training=True)\n', (3624, 3651), True, 'import megengine.functional as F\n'), ((6310, 6324), 'megengine.functional.matmul', 'F.matmul', (['a', 'b'], {}), '(a, b)\n', (6318, 6324), True, 'import megengine.functional as F\n'), ((6427, 6452), 'numpy.random.randn', 'np.random.randn', (['*shape_a'], {}), '(*shape_a)\n', (6442, 6452), True, 'import numpy as np\n'), ((6469, 6494), 'numpy.random.randn', 'np.random.randn', (['*shape_b'], {}), '(*shape_b)\n', (6484, 6494), True, 'import numpy as np\n'), ((6778, 6836), 'megengine.functional.vision.interpolate', 'F.vision.interpolate', (['inp'], {'scale_factor': '(2.0)', 'mode': '"""linear"""'}), "(inp, scale_factor=2.0, mode='linear')\n", (6798, 6836), True, 'import megengine.functional as F\n'), ((6852, 6895), 'megengine.functional.vision.interpolate', 'F.vision.interpolate', (['inp', '(4)'], {'mode': '"""linear"""'}), "(inp, 4, mode='linear')\n", (6872, 6895), True, 'import megengine.functional as F\n'), ((7276, 7309), 'megengine.functional.vision.interpolate', 'F.vision.interpolate', (['inp', '[4, 4]'], {}), '(inp, [4, 4])\n', (7296, 7309), True, 'import megengine.functional as F\n'), ((7325, 7368), 'megengine.functional.vision.interpolate', 'F.vision.interpolate', (['inp'], {'scale_factor': '(2.0)'}), '(inp, scale_factor=2.0)\n', (7345, 7368), True, 'import megengine.functional as F\n'), ((7561, 7614), 'megengine.functional.vision.interpolate', 'F.vision.interpolate', (['inp', '[4, 4]'], {'align_corners': '(True)'}), '(inp, [4, 4], align_corners=True)\n', (7581, 7614), True, 'import megengine.functional as F\n'), ((7630, 7693), 'megengine.functional.vision.interpolate', 'F.vision.interpolate', (['inp'], {'scale_factor': '(2.0)', 'align_corners': '(True)'}), '(inp, scale_factor=2.0, align_corners=True)\n', (7650, 7693), True, 'import megengine.functional as F\n'), ((8661, 8681), 'numpy.random.rand', 'np.random.rand', (['(4)', '(2)'], {}), '(4, 2)\n', (8675, 8681), True, 'import numpy as np\n'), ((9191, 9223), 'megengine.core.tensor.utils.make_shape_tuple', 'make_shape_tuple', (['out_feat.shape'], {}), '(out_feat.shape)\n', (9207, 9223), False, 'from megengine.core.tensor.utils import make_shape_tuple\n'), ((9370, 9407), 'megengine.core.tensor.utils.make_shape_tuple', 'make_shape_tuple', (['inp_feat.grad.shape'], {}), '(inp_feat.grad.shape)\n', (9386, 9407), False, 'from megengine.core.tensor.utils import 
make_shape_tuple\n'), ((9411, 9443), 'megengine.core.tensor.utils.make_shape_tuple', 'make_shape_tuple', (['inp_feat.shape'], {}), '(inp_feat.shape)\n', (9427, 9443), False, 'from megengine.core.tensor.utils import make_shape_tuple\n'), ((9558, 9637), 'numpy.random.randn', 'np.random.randn', (['image_shape[0]', 'image_shape[1]', 'image_shape[2]', 'image_shape[3]'], {}), '(image_shape[0], image_shape[1], image_shape[2], image_shape[3])\n', (9573, 9637), True, 'import numpy as np\n'), ((9680, 9759), 'numpy.random.randn', 'np.random.randn', (['image_shape[0]', 'image_shape[1]', 'image_shape[2]', 'image_shape[3]'], {}), '(image_shape[0], image_shape[1], image_shape[2], image_shape[3])\n', (9695, 9759), True, 'import numpy as np\n'), ((9908, 9925), 'megengine.tensor', 'tensor', (['inp_feat1'], {}), '(inp_feat1)\n', (9914, 9925), False, 'from megengine import Parameter, Tensor, is_cuda_available, tensor\n'), ((9927, 9944), 'megengine.tensor', 'tensor', (['inp_feat2'], {}), '(inp_feat2)\n', (9933, 9944), False, 'from megengine import Parameter, Tensor, is_cuda_available, tensor\n'), ((10374, 10408), 'megengine.core.tensor.utils.make_shape_tuple', 'make_shape_tuple', (['data1.grad.shape'], {}), '(data1.grad.shape)\n', (10390, 10408), False, 'from megengine.core.tensor.utils import make_shape_tuple\n'), ((10412, 10441), 'megengine.core.tensor.utils.make_shape_tuple', 'make_shape_tuple', (['data1.shape'], {}), '(data1.shape)\n', (10428, 10441), False, 'from megengine.core.tensor.utils import make_shape_tuple\n'), ((12358, 12390), 'megengine.core.tensor.utils.make_shape_tuple', 'make_shape_tuple', (['out_feat.shape'], {}), '(out_feat.shape)\n', (12374, 12390), False, 'from megengine.core.tensor.utils import make_shape_tuple\n'), ((12537, 12574), 'megengine.core.tensor.utils.make_shape_tuple', 'make_shape_tuple', (['inp_feat.grad.shape'], {}), '(inp_feat.grad.shape)\n', (12553, 12574), False, 'from megengine.core.tensor.utils import make_shape_tuple\n'), ((12578, 12610), 'megengine.core.tensor.utils.make_shape_tuple', 'make_shape_tuple', (['inp_feat.shape'], {}), '(inp_feat.shape)\n', (12594, 12610), False, 'from megengine.core.tensor.utils import make_shape_tuple\n'), ((12843, 12871), 'megengine.core.tensor.utils.make_shape_tuple', 'make_shape_tuple', (['outp.shape'], {}), '(outp.shape)\n', (12859, 12871), False, 'from megengine.core.tensor.utils import make_shape_tuple\n'), ((12963, 13021), 'numpy.array', 'np.array', (['[[[[2.5, 4.5], [10.5, 12.5]]]]'], {'dtype': 'np.float32'}), '([[[[2.5, 4.5], [10.5, 12.5]]]], dtype=np.float32)\n', (12971, 13021), True, 'import numpy as np\n'), ((13082, 13114), 'megengine.core.tensor.utils.make_shape_tuple', 'make_shape_tuple', (['inp.grad.shape'], {}), '(inp.grad.shape)\n', (13098, 13114), False, 'from megengine.core.tensor.utils import make_shape_tuple\n'), ((13118, 13145), 'megengine.core.tensor.utils.make_shape_tuple', 'make_shape_tuple', (['inp.shape'], {}), '(inp.shape)\n', (13134, 13145), False, 'from megengine.core.tensor.utils import make_shape_tuple\n'), ((13209, 13349), 'numpy.array', 'np.array', (['[[[[0.25, 0.25, 0.25, 0.25], [0.25, 0.25, 0.25, 0.25], [0.25, 0.25, 0.25, \n 0.25], [0.25, 0.25, 0.25, 0.25]]]]'], {'dtype': 'np.float32'}), '([[[[0.25, 0.25, 0.25, 0.25], [0.25, 0.25, 0.25, 0.25], [0.25, 0.25,\n 0.25, 0.25], [0.25, 0.25, 0.25, 0.25]]]], dtype=np.float32)\n', (13217, 13349), True, 'import numpy as np\n'), ((13807, 13835), 'megengine.core.tensor.utils.make_shape_tuple', 'make_shape_tuple', (['outp.shape'], {}), '(outp.shape)\n', (13823, 
13835), False, 'from megengine.core.tensor.utils import make_shape_tuple\n'), ((13927, 13977), 'numpy.array', 'np.array', (['[[[[5, 7], [13, 15]]]]'], {'dtype': 'np.float32'}), '([[[[5, 7], [13, 15]]]], dtype=np.float32)\n', (13935, 13977), True, 'import numpy as np\n'), ((14038, 14070), 'megengine.core.tensor.utils.make_shape_tuple', 'make_shape_tuple', (['inp.grad.shape'], {}), '(inp.grad.shape)\n', (14054, 14070), False, 'from megengine.core.tensor.utils import make_shape_tuple\n'), ((14074, 14101), 'megengine.core.tensor.utils.make_shape_tuple', 'make_shape_tuple', (['inp.shape'], {}), '(inp.shape)\n', (14090, 14101), False, 'from megengine.core.tensor.utils import make_shape_tuple\n'), ((14165, 14290), 'numpy.array', 'np.array', (['[[[[0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 1.0], [0.0, 0.0, 0.0, 0.0], [0.0, \n 1.0, 0.0, 1.0]]]]'], {'dtype': 'np.float32'}), '([[[[0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 1.0], [0.0, 0.0, 0.0, 0.0\n ], [0.0, 1.0, 0.0, 1.0]]]], dtype=np.float32)\n', (14173, 14290), True, 'import numpy as np\n'), ((14637, 14666), 'megengine.functional.one_hot', 'F.one_hot', (['inp'], {'num_classes': '(4)'}), '(inp, num_classes=4)\n', (14646, 14666), True, 'import megengine.functional as F\n'), ((14846, 14940), 'numpy.array', 'np.array', (['[[3, 2, 4, 4, 2, 4, 0, 4, 4, 1], [4, 1, 1, 3, 2, 2, 4, 2, 4, 3]]'], {'dtype': 'np.int32'}), '([[3, 2, 4, 4, 2, 4, 0, 4, 4, 1], [4, 1, 1, 3, 2, 2, 4, 2, 4, 3]],\n dtype=np.int32)\n', (14854, 14940), True, 'import numpy as np\n'), ((14987, 14998), 'megengine.tensor', 'tensor', (['arr'], {}), '(arr)\n', (14993, 14998), False, 'from megengine import Parameter, Tensor, is_cuda_available, tensor\n'), ((15013, 15031), 'megengine.functional.one_hot', 'F.one_hot', (['inp', '(10)'], {}), '(inp, 10)\n', (15022, 15031), True, 'import megengine.functional as F\n'), ((15555, 15609), 'megengine.functional.vision.interpolate', 'F.vision.interpolate', (['x', 'target_shape'], {'mode': '"""bilinear"""'}), "(x, target_shape, mode='bilinear')\n", (15575, 15609), True, 'import megengine.functional as F\n'), ((15800, 15823), 'numpy.ones', 'np.ones', (['(3, 3, 10, 10)'], {}), '((3, 3, 10, 10))\n', (15807, 15823), True, 'import numpy as np\n'), ((16718, 16759), 'numpy.array', 'np.array', (['[[[[5, 6], [9, 10]]]]'], {'dtype': 'dt'}), '([[[[5, 6], [9, 10]]]], dtype=dt)\n', (16726, 16759), True, 'import numpy as np\n'), ((17370, 17485), 'numpy.array', 'np.array', (['[[[[5, 6], [9, 10]]], [[[21, 22], [25, 26]]], [[[21, 22], [25, 26]]], [[[5,\n 6], [9, 10]]]]'], {'dtype': 'dt'}), '([[[[5, 6], [9, 10]]], [[[21, 22], [25, 26]]], [[[21, 22], [25, 26]\n ]], [[[5, 6], [9, 10]]]], dtype=dt)\n', (17378, 17485), True, 'import numpy as np\n'), ((17840, 17855), 'megengine.tensor', 'tensor', (['weightv'], {}), '(weightv)\n', (17846, 17855), False, 'from megengine import Parameter, Tensor, is_cuda_available, tensor\n'), ((18141, 18160), 'megengine.is_cuda_available', 'is_cuda_available', ([], {}), '()\n', (18158, 18160), False, 'from megengine import Parameter, Tensor, is_cuda_available, tensor\n'), ((18623, 18679), 'numpy.array', 'np.array', (['[[[[1.0, 4.0], [4.0, 4.0]]]]'], {'dtype': 'np.float32'}), '([[[[1.0, 4.0], [4.0, 4.0]]]], dtype=np.float32)\n', (18631, 18679), True, 'import numpy as np\n'), ((19782, 19835), 'functools.partial', 'partial', (['F.nn.binary_cross_entropy'], {'with_logits': '(False)'}), '(F.nn.binary_cross_entropy, with_logits=False)\n', (19789, 19835), False, 'from functools import partial\n'), ((20744, 20778), 'megengine.functional.nn.hinge_loss', 
'F.nn.hinge_loss', (['pred', 'label', '"""L2"""'], {}), "(pred, label, 'L2')\n", (20759, 20778), True, 'import megengine.functional as F\n'), ((20955, 21055), 'megengine.functional.vision.nms', 'F.vision.nms', (['inp'], {'scores': 'scores', 'iou_thresh': '(0.5)', 'max_output': '(None if is_symbolic is None else 4)'}), '(inp, scores=scores, iou_thresh=0.5, max_output=None if \n is_symbolic is None else 4)\n', (20967, 21055), True, 'import megengine.functional as F\n'), ((22446, 22484), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(N, IC, IH, IW)'}), '(size=(N, IC, IH, IW))\n', (22462, 22484), True, 'import numpy as np\n'), ((22499, 22538), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(OC, IC, KH, KW)'}), '(size=(OC, IC, KH, KW))\n', (22515, 22538), True, 'import numpy as np\n'), ((22553, 22589), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(1, OC, 1, 1)'}), '(size=(1, OC, 1, 1))\n', (22569, 22589), True, 'import numpy as np\n'), ((22610, 22636), 'megengine.core.tensor.dtype.get_scale', 'dtype.get_scale', (['inp_dtype'], {}), '(inp_dtype)\n', (22625, 22636), True, 'import megengine.core.tensor.dtype as dtype\n'), ((22655, 22679), 'megengine.core.tensor.dtype.get_scale', 'dtype.get_scale', (['w_dtype'], {}), '(w_dtype)\n', (22670, 22679), True, 'import megengine.core.tensor.dtype as dtype\n'), ((22698, 22722), 'megengine.core.tensor.dtype.get_scale', 'dtype.get_scale', (['b_dtype'], {}), '(b_dtype)\n', (22713, 22722), True, 'import megengine.core.tensor.dtype as dtype\n'), ((22739, 22791), 'megengine.core.tensor.dtype.convert_to_qint8', 'dtype.convert_to_qint8', (['(inp_v * inp_scale)', 'inp_dtype'], {}), '(inp_v * inp_scale, inp_dtype)\n', (22761, 22791), True, 'import megengine.core.tensor.dtype as dtype\n'), ((22805, 22851), 'megengine.core.tensor.dtype.convert_to_qint8', 'dtype.convert_to_qint8', (['(w_v * w_scale)', 'w_dtype'], {}), '(w_v * w_scale, w_dtype)\n', (22827, 22851), True, 'import megengine.core.tensor.dtype as dtype\n'), ((22865, 22912), 'megengine.core.tensor.dtype.convert_to_qint32', 'dtype.convert_to_qint32', (['(b_v * b_scale)', 'b_dtype'], {}), '(b_v * b_scale, b_dtype)\n', (22888, 22912), True, 'import megengine.core.tensor.dtype as dtype\n'), ((22933, 22962), 'megengine.tensor', 'tensor', (['inpv'], {'dtype': 'inp_dtype'}), '(inpv, dtype=inp_dtype)\n', (22939, 22962), False, 'from megengine import Parameter, Tensor, is_cuda_available, tensor\n'), ((22980, 23008), 'megengine.Parameter', 'Parameter', (['wv'], {'dtype': 'w_dtype'}), '(wv, dtype=w_dtype)\n', (22989, 23008), False, 'from megengine import Parameter, Tensor, is_cuda_available, tensor\n'), ((23027, 23055), 'megengine.Parameter', 'Parameter', (['bv'], {'dtype': 'b_dtype'}), '(bv, dtype=b_dtype)\n', (23036, 23055), False, 'from megengine import Parameter, Tensor, is_cuda_available, tensor\n'), ((24663, 24682), 'megengine.functional.flatten', 'F.flatten', (['expected'], {}), '(expected)\n', (24672, 24682), True, 'import megengine.functional as F\n'), ((24700, 24717), 'megengine.functional.flatten', 'F.flatten', (['result'], {}), '(result)\n', (24709, 24717), True, 'import megengine.functional as F\n'), ((21905, 21928), 'megengine.device.get_device_count', 'get_device_count', (['"""gpu"""'], {}), "('gpu')\n", (21921, 21928), False, 'from megengine.device import get_device_count\n'), ((25654, 25692), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(N, IC, IH, IW)'}), '(size=(N, IC, IH, IW))\n', (25670, 25692), True, 'import numpy as np\n'), ((25707, 25749), 
'numpy.random.normal', 'np.random.normal', ([], {'size': '(N, OC, IC, KH, KW)'}), '(size=(N, OC, IC, KH, KW))\n', (25723, 25749), True, 'import numpy as np\n'), ((25764, 25800), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(1, OC, 1, 1)'}), '(size=(1, OC, 1, 1))\n', (25780, 25800), True, 'import numpy as np\n'), ((25821, 25847), 'megengine.core.tensor.dtype.get_scale', 'dtype.get_scale', (['inp_dtype'], {}), '(inp_dtype)\n', (25836, 25847), True, 'import megengine.core.tensor.dtype as dtype\n'), ((25866, 25890), 'megengine.core.tensor.dtype.get_scale', 'dtype.get_scale', (['w_dtype'], {}), '(w_dtype)\n', (25881, 25890), True, 'import megengine.core.tensor.dtype as dtype\n'), ((25909, 25933), 'megengine.core.tensor.dtype.get_scale', 'dtype.get_scale', (['b_dtype'], {}), '(b_dtype)\n', (25924, 25933), True, 'import megengine.core.tensor.dtype as dtype\n'), ((25950, 26002), 'megengine.core.tensor.dtype.convert_to_qint8', 'dtype.convert_to_qint8', (['(inp_v * inp_scale)', 'inp_dtype'], {}), '(inp_v * inp_scale, inp_dtype)\n', (25972, 26002), True, 'import megengine.core.tensor.dtype as dtype\n'), ((26016, 26062), 'megengine.core.tensor.dtype.convert_to_qint8', 'dtype.convert_to_qint8', (['(w_v * w_scale)', 'w_dtype'], {}), '(w_v * w_scale, w_dtype)\n', (26038, 26062), True, 'import megengine.core.tensor.dtype as dtype\n'), ((26076, 26123), 'megengine.core.tensor.dtype.convert_to_qint32', 'dtype.convert_to_qint32', (['(b_v * b_scale)', 'b_dtype'], {}), '(b_v * b_scale, b_dtype)\n', (26099, 26123), True, 'import megengine.core.tensor.dtype as dtype\n'), ((26144, 26173), 'megengine.tensor', 'tensor', (['inpv'], {'dtype': 'inp_dtype'}), '(inpv, dtype=inp_dtype)\n', (26150, 26173), False, 'from megengine import Parameter, Tensor, is_cuda_available, tensor\n'), ((26191, 26219), 'megengine.Parameter', 'Parameter', (['wv'], {'dtype': 'w_dtype'}), '(wv, dtype=w_dtype)\n', (26200, 26219), False, 'from megengine import Parameter, Tensor, is_cuda_available, tensor\n'), ((26238, 26266), 'megengine.Parameter', 'Parameter', (['bv'], {'dtype': 'b_dtype'}), '(bv, dtype=b_dtype)\n', (26247, 26266), False, 'from megengine import Parameter, Tensor, is_cuda_available, tensor\n'), ((26879, 26898), 'megengine.functional.flatten', 'F.flatten', (['expected'], {}), '(expected)\n', (26888, 26898), True, 'import megengine.functional as F\n'), ((26981, 26998), 'megengine.functional.flatten', 'F.flatten', (['result'], {}), '(result)\n', (26990, 26998), True, 'import megengine.functional as F\n'), ((25237, 25260), 'megengine.device.get_device_count', 'get_device_count', (['"""gpu"""'], {}), "('gpu')\n", (25253, 25260), False, 'from megengine.device import get_device_count\n'), ((27271, 27302), 'numpy.random.randn', 'np.random.randn', (['(1)', '(3)', '(224)', '(224)'], {}), '(1, 3, 224, 224)\n', (27286, 27302), True, 'import numpy as np\n'), ((27342, 27370), 'numpy.random.randn', 'np.random.randn', (['(64)', '(3)', '(7)', '(7)'], {}), '(64, 3, 7, 7)\n', (27357, 27370), True, 'import numpy as np\n'), ((28010, 28038), 'numpy.random.randn', 'np.random.randn', (['(16)', '(3)', '(3)', '(3)'], {}), '(16, 3, 3, 3)\n', (28025, 28038), True, 'import numpy as np\n'), ((28321, 28352), 'numpy.random.randn', 'np.random.randn', (['(16)', '(3)', '(3)', '(3)', '(3)'], {}), '(16, 3, 3, 3, 3)\n', (28336, 28352), True, 'import numpy as np\n'), ((28500, 28536), 'numpy.ones', 'np.ones', (['(2, 2, 4)'], {'dtype': 'np.float32'}), '((2, 2, 4), dtype=np.float32)\n', (28507, 28536), True, 'import numpy as np\n'), ((28558, 28594), 
'numpy.ones', 'np.ones', (['(3, 2, 2)'], {'dtype': 'np.float32'}), '((3, 2, 2), dtype=np.float32)\n', (28565, 28594), True, 'import numpy as np\n'), ((28704, 28789), 'numpy.array', 'np.array', (['[[[4, 4], [4, 4], [4, 4]], [[4, 4], [4, 4], [4, 4]]]'], {'dtype': 'np.float32'}), '([[[4, 4], [4, 4], [4, 4]], [[4, 4], [4, 4], [4, 4]]], dtype=np.float32\n )\n', (28712, 28789), True, 'import numpy as np\n'), ((29012, 29036), 'numpy.random.randn', 'np.random.randn', (['*tshape'], {}), '(*tshape)\n', (29027, 29036), True, 'import numpy as np\n'), ((29076, 29109), 'numpy.ones', 'np.ones', (['pshape'], {'dtype': 'np.float32'}), '(pshape, dtype=np.float32)\n', (29083, 29109), True, 'import numpy as np\n'), ((29129, 29163), 'numpy.zeros', 'np.zeros', (['pshape'], {'dtype': 'np.float32'}), '(pshape, dtype=np.float32)\n', (29137, 29163), True, 'import numpy as np\n'), ((29636, 29678), 'numpy.ones', 'np.ones', (['(2, 2, 4, 4, 4)'], {'dtype': 'np.float32'}), '((2, 2, 4, 4, 4), dtype=np.float32)\n', (29643, 29678), True, 'import numpy as np\n'), ((29700, 29742), 'numpy.ones', 'np.ones', (['(3, 2, 2, 2, 2)'], {'dtype': 'np.float32'}), '((3, 2, 2, 2, 2), dtype=np.float32)\n', (29707, 29742), True, 'import numpy as np\n'), ((30420, 30443), 'megengine.functional.cond_take', 'F.cond_take', (['mask', 'data'], {}), '(mask, data)\n', (30431, 30443), True, 'import megengine.functional as F\n'), ((30644, 30656), 'megengine.tensor', 'tensor', (['x_np'], {}), '(x_np)\n', (30650, 30656), False, 'from megengine import Parameter, Tensor, is_cuda_available, tensor\n'), ((30672, 30687), 'megengine.tensor', 'tensor', (['mask_np'], {}), '(mask_np)\n', (30678, 30687), False, 'from megengine import Parameter, Tensor, is_cuda_available, tensor\n'), ((31436, 31455), 'megengine.functional.zeros', 'F.zeros', (['(100, 100)'], {}), '((100, 100))\n', (31443, 31455), True, 'import megengine.functional as F\n'), ((31500, 31519), 'megengine.functional.argmax', 'F.argmax', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (31508, 31519), True, 'import megengine.functional as F\n'), ((31575, 31594), 'megengine.functional.zeros', 'F.zeros', (['(100, 100)'], {}), '((100, 100))\n', (31582, 31594), True, 'import megengine.functional as F\n'), ((31638, 31657), 'megengine.functional.argmin', 'F.argmin', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (31646, 31657), True, 'import megengine.functional as F\n'), ((32157, 32168), 'megengine.tensor', 'tensor', (['inp'], {}), '(inp)\n', (32163, 32168), False, 'from megengine import Parameter, Tensor, is_cuda_available, tensor\n'), ((32178, 32190), 'megengine.tensor', 'tensor', (['rois'], {}), '(rois)\n', (32184, 32190), False, 'from megengine import Parameter, Tensor, is_cuda_available, tensor\n'), ((32200, 32213), 'megengine.tensor', 'tensor', (['trans'], {}), '(trans)\n', (32206, 32213), False, 'from megengine import Parameter, Tensor, is_cuda_available, tensor\n'), ((32423, 32466), 'numpy.dot', 'np.dot', (['rgb[..., :3]', '[0.299, 0.587, 0.114]'], {}), '(rgb[..., :3], [0.299, 0.587, 0.114])\n', (32429, 32466), True, 'import numpy as np\n'), ((32506, 32549), 'numpy.dot', 'np.dot', (['bgr[..., :3]', '[0.114, 0.587, 0.299]'], {}), '(bgr[..., :3], [0.114, 0.587, 0.299])\n', (32512, 32549), True, 'import numpy as np\n'), ((33117, 33128), 'megengine.functional.ones', 'F.ones', (['shp'], {}), '(shp)\n', (33123, 33128), True, 'import megengine.functional as F\n'), ((33130, 33145), 'numpy.ones', 'np.ones', (['np_shp'], {}), '(np_shp)\n', (33137, 33145), True, 'import numpy as np\n'), ((33247, 33279), 
'megengine.functional.zeros', 'F.zeros', (['shape'], {'dtype': 'np.float32'}), '(shape, dtype=np.float32)\n', (33254, 33279), True, 'import megengine.functional as F\n'), ((33430, 33462), 'megengine.functional.zeros', 'F.zeros', (['shape'], {'dtype': 'np.float32'}), '(shape, dtype=np.float32)\n', (33437, 33462), True, 'import megengine.functional as F\n'), ((33478, 33505), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (33491, 33505), False, 'import pytest\n'), ((33519, 33546), 'megengine.functional.utils._assert_equal', 'F.utils._assert_equal', (['x', 'y'], {}), '(x, y)\n', (33540, 33546), True, 'import megengine.functional as F\n'), ((33585, 33616), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(32, 5)'], {}), '(0, 1, (32, 5))\n', (33601, 33616), True, 'import numpy as np\n'), ((34716, 34727), 'megengine.tensor', 'tensor', (['inp'], {}), '(inp)\n', (34722, 34727), False, 'from megengine import Parameter, Tensor, is_cuda_available, tensor\n'), ((35845, 35856), 'megengine.tensor', 'tensor', (['inp'], {}), '(inp)\n', (35851, 35856), False, 'from megengine import Parameter, Tensor, is_cuda_available, tensor\n'), ((36186, 36197), 'megengine.tensor', 'tensor', (['src'], {}), '(src)\n', (36192, 36197), False, 'from megengine import Parameter, Tensor, is_cuda_available, tensor\n'), ((36372, 36383), 'megengine.tensor', 'tensor', (['src'], {}), '(src)\n', (36378, 36383), False, 'from megengine import Parameter, Tensor, is_cuda_available, tensor\n'), ((36553, 36564), 'megengine.tensor', 'tensor', (['src'], {}), '(src)\n', (36559, 36564), False, 'from megengine import Parameter, Tensor, is_cuda_available, tensor\n'), ((36715, 36726), 'megengine.tensor', 'tensor', (['src'], {}), '(src)\n', (36721, 36726), False, 'from megengine import Parameter, Tensor, is_cuda_available, tensor\n'), ((37652, 37663), 'megengine.tensor', 'tensor', (['inp'], {}), '(inp)\n', (37658, 37663), False, 'from megengine import Parameter, Tensor, is_cuda_available, tensor\n'), ((37866, 37877), 'megengine.tensor', 'tensor', (['inp'], {}), '(inp)\n', (37872, 37877), False, 'from megengine import Parameter, Tensor, is_cuda_available, tensor\n'), ((38087, 38098), 'megengine.tensor', 'tensor', (['inp'], {}), '(inp)\n', (38093, 38098), False, 'from megengine import Parameter, Tensor, is_cuda_available, tensor\n'), ((38315, 38326), 'megengine.tensor', 'tensor', (['inp'], {}), '(inp)\n', (38321, 38326), False, 'from megengine import Parameter, Tensor, is_cuda_available, tensor\n'), ((38550, 38561), 'megengine.tensor', 'tensor', (['inp'], {}), '(inp)\n', (38556, 38561), False, 'from megengine import Parameter, Tensor, is_cuda_available, tensor\n'), ((38816, 38867), 'megengine.functional.pixel_shuffle', 'F.pixel_shuffle', (['inp'], {'upscale_factor': 'upscale_factor'}), '(inp, upscale_factor=upscale_factor)\n', (38831, 38867), True, 'import megengine.functional as F\n'), ((39373, 39404), 'numpy.random.randn', 'np.random.randn', (['(1)', '(3)', '(224)', '(224)'], {}), '(1, 3, 224, 224)\n', (39388, 39404), True, 'import numpy as np\n'), ((39444, 39472), 'numpy.random.randn', 'np.random.randn', (['(64)', '(3)', '(7)', '(7)'], {}), '(64, 3, 7, 7)\n', (39459, 39472), True, 'import numpy as np\n'), ((39610, 39650), 'megengine.config._override', 'config._override', ([], {'compute_mode': '"""float32"""'}), "(compute_mode='float32')\n", (39626, 39650), True, 'import megengine.config as config\n'), ((39674, 39728), 'megengine.functional.conv2d', 'F.conv2d', (['inp', 'weight', 'None', '(2, 2)', '(3, 3)', 
'(1, 1)', '(1)'], {}), '(inp, weight, None, (2, 2), (3, 3), (1, 1), 1)\n', (39682, 39728), True, 'import megengine.functional as F\n'), ((40351, 40387), 'megengine.config._override', 'config._override', ([], {'conv_format': '"""NHWC"""'}), "(conv_format='NHWC')\n", (40367, 40387), True, 'import megengine.config as config\n'), ((40411, 40452), 'megengine.functional.vision.warp_perspective', 'F.vision.warp_perspective', (['inp', 'M', '(2, 2)'], {}), '(inp, M, (2, 2))\n', (40436, 40452), True, 'import megengine.functional as F\n'), ((41404, 41450), 'numpy.pad', 'np.pad', (['data', '((0, 0), (0, 0), (1, 1), (1, 1))'], {}), '(data, ((0, 0), (0, 0), (1, 1), (1, 1)))\n', (41410, 41450), True, 'import numpy as np\n'), ((41470, 41558), 'numpy.zeros', 'np.zeros', (['(batch_size, out_channels, output_height, output_width)'], {'dtype': 'np.float32'}), '((batch_size, out_channels, output_height, output_width), dtype=np.\n float32)\n', (41478, 41558), True, 'import numpy as np\n'), ((42608, 42620), 'megengine.tensor', 'tensor', (['data'], {}), '(data)\n', (42614, 42620), False, 'from megengine import Parameter, Tensor, is_cuda_available, tensor\n'), ((42630, 42644), 'megengine.tensor', 'tensor', (['weight'], {}), '(weight)\n', (42636, 42644), False, 'from megengine import Parameter, Tensor, is_cuda_available, tensor\n'), ((2279, 2311), 'numpy.ones', 'np.ones', (['shape'], {'dtype': 'np.float32'}), '(shape, dtype=np.float32)\n', (2286, 2311), True, 'import numpy as np\n'), ((2390, 2429), 'megengine.functional.nn.dropout', 'F.nn.dropout', (['data', 'rate'], {'training': '(True)'}), '(data, rate, training=True)\n', (2402, 2429), True, 'import megengine.functional as F\n'), ((2692, 2724), 'numpy.ones', 'np.ones', (['shape'], {'dtype': 'np.float32'}), '(shape, dtype=np.float32)\n', (2699, 2724), True, 'import numpy as np\n'), ((2804, 2843), 'megengine.functional.nn.dropout', 'F.nn.dropout', (['data', 'rate'], {'training': '(True)'}), '(data, rate, training=True)\n', (2816, 2843), True, 'import megengine.functional as F\n'), ((2863, 2902), 'megengine.functional.nn.dropout', 'F.nn.dropout', (['out1', 'rate'], {'training': '(True)'}), '(out1, rate, training=True)\n', (2875, 2902), True, 'import megengine.functional as F\n'), ((2922, 2961), 'megengine.functional.nn.dropout', 'F.nn.dropout', (['out2', 'rate'], {'training': '(True)'}), '(out2, rate, training=True)\n', (2934, 2961), True, 'import megengine.functional as F\n'), ((3181, 3204), 'numpy.random.randn', 'np.random.randn', (['*shape'], {}), '(*shape)\n', (3196, 3204), True, 'import numpy as np\n'), ((3959, 3983), 'numpy.random.random', 'np.random.random', (['shape1'], {}), '(shape1)\n', (3975, 3983), True, 'import numpy as np\n'), ((4014, 4038), 'numpy.random.random', 'np.random.random', (['shape2'], {}), '(shape2)\n', (4030, 4038), True, 'import numpy as np\n'), ((4615, 4639), 'numpy.random.random', 'np.random.random', (['shape1'], {}), '(shape1)\n', (4631, 4639), True, 'import numpy as np\n'), ((4670, 4694), 'numpy.random.random', 'np.random.random', (['shape2'], {}), '(shape2)\n', (4686, 4694), True, 'import numpy as np\n'), ((4725, 4749), 'numpy.random.random', 'np.random.random', (['shape3'], {}), '(shape3)\n', (4741, 4749), True, 'import numpy as np\n'), ((4780, 4804), 'numpy.random.random', 'np.random.random', (['shape4'], {}), '(shape4)\n', (4796, 4804), True, 'import numpy as np\n'), ((5184, 5208), 'numpy.random.random', 'np.random.random', (['shape1'], {}), '(shape1)\n', (5200, 5208), True, 'import numpy as np\n'), ((5239, 5263), 
'numpy.random.random', 'np.random.random', (['shape2'], {}), '(shape2)\n', (5255, 5263), True, 'import numpy as np\n'), ((5294, 5318), 'numpy.random.random', 'np.random.random', (['shape3'], {}), '(shape3)\n', (5310, 5318), True, 'import numpy as np\n'), ((5349, 5373), 'numpy.random.random', 'np.random.random', (['shape4'], {}), '(shape4)\n', (5365, 5373), True, 'import numpy as np\n'), ((5404, 5428), 'numpy.random.random', 'np.random.random', (['shape5'], {}), '(shape5)\n', (5420, 5428), True, 'import numpy as np\n'), ((6373, 6404), 'megengine.jit.trace', 'jit.trace', ([], {'symbolic': 'is_symbolic'}), '(symbolic=is_symbolic)\n', (6382, 6404), True, 'import megengine.jit as jit\n'), ((6958, 7012), 'numpy.array', 'np.array', (['[[[1.0, 1.25, 1.75, 2.0]]]'], {'dtype': 'np.float32'}), '([[[1.0, 1.25, 1.75, 2.0]]], dtype=np.float32)\n', (6966, 7012), True, 'import numpy as np\n'), ((7085, 7139), 'numpy.array', 'np.array', (['[[[1.0, 1.25, 1.75, 2.0]]]'], {'dtype': 'np.float32'}), '([[[1.0, 1.25, 1.75, 2.0]]], dtype=np.float32)\n', (7093, 7139), True, 'import numpy as np\n'), ((7890, 7915), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (7903, 7915), False, 'import pytest\n'), ((7929, 7987), 'megengine.functional.vision.interpolate', 'F.vision.interpolate', (['inp'], {'scale_factor': '(2.0)', 'mode': '"""linear"""'}), "(inp, scale_factor=2.0, mode='linear')\n", (7949, 7987), True, 'import megengine.functional as F\n'), ((8126, 8151), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (8139, 8151), False, 'import pytest\n'), ((8165, 8230), 'megengine.functional.vision.interpolate', 'F.vision.interpolate', (['inp'], {'scale_factor': '[2.0, 3.0]', 'mode': '"""linear"""'}), "(inp, scale_factor=[2.0, 3.0], mode='linear')\n", (8185, 8230), True, 'import megengine.functional as F\n'), ((8706, 8726), 'numpy.random.rand', 'np.random.rand', (['(4)', '(2)'], {}), '(4, 2)\n', (8720, 8726), True, 'import numpy as np\n'), ((8893, 8899), 'megengine.core.autodiff.grad.Grad', 'Grad', ([], {}), '()\n', (8897, 8899), False, 'from megengine.core.autodiff.grad import Grad\n'), ((9335, 9356), 'megengine.functional.ones_like', 'F.ones_like', (['out_feat'], {}), '(out_feat)\n', (9346, 9356), True, 'import megengine.functional as F\n'), ((9812, 9832), 'numpy.ones', 'np.ones', (['image_shape'], {}), '(image_shape)\n', (9819, 9832), True, 'import numpy as np\n'), ((9864, 9884), 'numpy.ones', 'np.ones', (['image_shape'], {}), '(image_shape)\n', (9871, 9884), True, 'import numpy as np\n'), ((10059, 10065), 'megengine.core.autodiff.grad.Grad', 'Grad', ([], {}), '()\n', (10063, 10065), False, 'from megengine.core.autodiff.grad import Grad\n'), ((10339, 10360), 'megengine.functional.ones_like', 'F.ones_like', (['out_feat'], {}), '(out_feat)\n', (10350, 10360), True, 'import megengine.functional as F\n'), ((12150, 12156), 'megengine.core.autodiff.grad.Grad', 'Grad', ([], {}), '()\n', (12154, 12156), False, 'from megengine.core.autodiff.grad import Grad\n'), ((12502, 12523), 'megengine.functional.ones_like', 'F.ones_like', (['out_feat'], {}), '(out_feat)\n', (12513, 12523), True, 'import megengine.functional as F\n'), ((12747, 12753), 'megengine.core.autodiff.grad.Grad', 'Grad', ([], {}), '()\n', (12751, 12753), False, 'from megengine.core.autodiff.grad import Grad\n'), ((13051, 13068), 'megengine.functional.ones_like', 'F.ones_like', (['outp'], {}), '(outp)\n', (13062, 13068), True, 'import megengine.functional as F\n'), ((13711, 13717), 'megengine.core.autodiff.grad.Grad', 
'Grad', ([], {}), '()\n', (13715, 13717), False, 'from megengine.core.autodiff.grad import Grad\n'), ((14007, 14024), 'megengine.functional.ones_like', 'F.ones_like', (['outp'], {}), '(outp)\n', (14018, 14024), True, 'import megengine.functional as F\n'), ((14590, 14621), 'numpy.arange', 'np.arange', (['(1)', '(4)'], {'dtype': 'np.int32'}), '(1, 4, dtype=np.int32)\n', (14599, 14621), True, 'import numpy as np\n'), ((15494, 15521), 'numpy.random.randn', 'np.random.randn', (['*inp_shape'], {}), '(*inp_shape)\n', (15509, 15521), True, 'import numpy as np\n'), ((19004, 19039), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'data1_shape'}), '(size=data1_shape)\n', (19021, 19039), True, 'import numpy as np\n'), ((19072, 19108), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'label1_shape'}), '(size=label1_shape)\n', (19089, 19108), True, 'import numpy as np\n'), ((19216, 19251), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'data2_shape'}), '(size=data2_shape)\n', (19233, 19251), True, 'import numpy as np\n'), ((19284, 19320), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'label2_shape'}), '(size=label2_shape)\n', (19301, 19320), True, 'import numpy as np\n'), ((21156, 21187), 'megengine.jit.trace', 'jit.trace', ([], {'symbolic': 'is_symbolic'}), '(symbolic=is_symbolic)\n', (21165, 21187), True, 'import megengine.jit as jit\n'), ((21583, 21618), 'numpy.array', 'np.array', (['[2, 1, 3]'], {'dtype': 'np.int32'}), '([2, 1, 3], dtype=np.int32)\n', (21591, 21618), True, 'import numpy as np\n'), ((21629, 21659), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.float32'}), '([], dtype=np.float32)\n', (21637, 21659), True, 'import numpy as np\n'), ((21848, 21876), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.int32'}), '([], dtype=np.int32)\n', (21856, 21876), True, 'import numpy as np\n'), ((23242, 23327), 'megengine.functional.reshape', 'F.reshape', (['var', '(var.shape[0], var.shape[1] // 4, 4, var.shape[2], var.shape[3])'], {}), '(var, (var.shape[0], var.shape[1] // 4, 4, var.shape[2], var.shape[3])\n )\n', (23251, 23327), True, 'import megengine.functional as F\n'), ((23371, 23404), 'megengine.functional.transpose', 'F.transpose', (['var', '(0, 1, 3, 4, 2)'], {}), '(var, (0, 1, 3, 4, 2))\n', (23382, 23404), True, 'import megengine.functional as F\n'), ((23480, 23556), 'megengine.functional.conv2d', 'F.conv2d', (['inp', 'w', '(b if has_bias else None)'], {'stride': '(SH, SW)', 'padding': '(PH, PW)'}), '(inp, w, b if has_bias else None, stride=(SH, SW), padding=(PH, PW))\n', (23488, 23556), True, 'import megengine.functional as F\n'), ((24007, 24137), 'megengine.functional.quantized.conv_bias_activation', 'F.quantized.conv_bias_activation', (['inp', 'w', 'b'], {'stride': '(SH, SW)', 'padding': '(PH, PW)', 'dtype': 'out_dtype', 'nonlinear_mode': 'nonlinear_mode'}), '(inp, w, b, stride=(SH, SW), padding=(PH,\n PW), dtype=out_dtype, nonlinear_mode=nonlinear_mode)\n', (24039, 24137), True, 'import megengine.functional as F\n'), ((24290, 24309), 'megengine.is_cuda_available', 'is_cuda_available', ([], {}), '()\n', (24307, 24309), False, 'from megengine import Parameter, Tensor, is_cuda_available, tensor\n'), ((24607, 24643), 'megengine.functional.transpose', 'F.transpose', (['result', '(0, 1, 4, 2, 3)'], {}), '(result, (0, 1, 4, 2, 3))\n', (24618, 24643), True, 'import megengine.functional as F\n'), ((26536, 26642), 'megengine.functional.quantized.batch_conv_bias_activation', 'F.quantized.batch_conv_bias_activation', (['inp', 'w', 'b'], {'stride': 
'(SH, SW)', 'padding': '(PH, PW)', 'dtype': 'out_dtype'}), '(inp, w, b, stride=(SH, SW), padding=\n (PH, PW), dtype=out_dtype)\n', (26574, 26642), True, 'import megengine.functional as F\n'), ((26733, 26792), 'megengine.functional.conv2d', 'F.conv2d', (['inp_fp32', 'w_fp32[0]', '(b_fp32 if has_bias else None)'], {}), '(inp_fp32, w_fp32[0], b_fp32 if has_bias else None)\n', (26741, 26792), True, 'import megengine.functional as F\n'), ((27872, 27900), 'numpy.random.randn', 'np.random.randn', (['(3)', '(224)', '(224)'], {}), '(3, 224, 224)\n', (27887, 27900), True, 'import numpy as np\n'), ((28178, 28211), 'numpy.random.randn', 'np.random.randn', (['(3)', '(224)', '(224)', '(224)'], {}), '(3, 224, 224, 224)\n', (28193, 28211), True, 'import numpy as np\n'), ((29844, 29886), 'numpy.ones', 'np.ones', (['(2, 3, 2, 2, 2)'], {'dtype': 'np.float32'}), '((2, 3, 2, 2, 2), dtype=np.float32)\n', (29851, 29886), True, 'import numpy as np\n'), ((30490, 30521), 'megengine.jit.trace', 'jit.trace', ([], {'symbolic': 'is_symbolic'}), '(symbolic=is_symbolic)\n', (30499, 30521), True, 'import megengine.jit as jit\n'), ((31796, 31830), 'numpy.random.random', 'np.random.random', (['(1, 256, 64, 64)'], {}), '((1, 256, 64, 64))\n', (31812, 31830), True, 'import numpy as np\n'), ((31860, 31884), 'numpy.random.random', 'np.random.random', (['(1, 5)'], {}), '((1, 5))\n', (31876, 31884), True, 'import numpy as np\n'), ((31915, 31946), 'numpy.random.random', 'np.random.random', (['(24, 2, 7, 7)'], {}), '((24, 2, 7, 7))\n', (31931, 31946), True, 'import numpy as np\n'), ((32561, 32588), 'numpy.random.randn', 'np.random.randn', (['(3)', '(3)', '(3)', '(3)'], {}), '(3, 3, 3, 3)\n', (32576, 32588), True, 'import numpy as np\n'), ((37586, 37607), 'numpy.arange', 'np.arange', (['(16 * 3 * 3)'], {}), '(16 * 3 * 3)\n', (37595, 37607), True, 'import numpy as np\n'), ((37793, 37818), 'numpy.arange', 'np.arange', (['(3 * 18 * 3 * 3)'], {}), '(3 * 18 * 3 * 3)\n', (37802, 37818), True, 'import numpy as np\n'), ((38007, 38036), 'numpy.arange', 'np.arange', (['(5 * 3 * 20 * 3 * 4)'], {}), '(5 * 3 * 20 * 3 * 4)\n', (38016, 38036), True, 'import numpy as np\n'), ((38228, 38261), 'numpy.arange', 'np.arange', (['(6 * 5 * 3 * 25 * 3 * 4)'], {}), '(6 * 5 * 3 * 25 * 3 * 4)\n', (38237, 38261), True, 'import numpy as np\n'), ((38456, 38493), 'numpy.arange', 'np.arange', (['(2 * 3 * 5 * 3 * 20 * 3 * 4)'], {}), '(2 * 3 * 5 * 3 * 20 * 3 * 4)\n', (38465, 38493), True, 'import numpy as np\n'), ((38914, 38945), 'megengine.jit.trace', 'jit.trace', ([], {'symbolic': 'is_symbolic'}), '(symbolic=is_symbolic)\n', (38923, 38945), True, 'import megengine.jit as jit\n'), ((41939, 42099), 'numpy.sum', 'np.sum', (['(data[n, g_id * ic_group_size:(g_id + 1) * ic_group_size, ih:ih + ksize[0],\n iw:iw + ksize[1]] * weight[g_id, oh, ow, :, :, :, oc % oc_group_size])'], {}), '(data[n, g_id * ic_group_size:(g_id + 1) * ic_group_size, ih:ih +\n ksize[0], iw:iw + ksize[1]] * weight[g_id, oh, ow, :, :, :, oc %\n oc_group_size])\n', (41945, 42099), True, 'import numpy as np\n'), ((42279, 42345), 'numpy.random.rand', 'np.random.rand', (['batch_size', 'in_channels', 'input_height', 'input_width'], {}), '(batch_size, in_channels, input_height, input_width)\n', (42293, 42345), True, 'import numpy as np\n'), ((42391, 42501), 'numpy.random.rand', 'np.random.rand', (['groups', 'output_height', 'output_width', '(in_channels // groups)', '*ksize', '(out_channels // groups)'], {}), '(groups, output_height, output_width, in_channels // groups,\n *ksize, out_channels // 
groups)\n', (42405, 42501), True, 'import numpy as np\n'), ((2326, 2339), 'megengine.autodiff.GradManager', 'GradManager', ([], {}), '()\n', (2337, 2339), False, 'from megengine.autodiff import GradManager\n'), ((2739, 2752), 'megengine.autodiff.GradManager', 'GradManager', ([], {}), '()\n', (2750, 2752), False, 'from megengine.autodiff import GradManager\n'), ((4133, 4150), 'numpy.eye', 'np.eye', (['shape1[0]'], {}), '(shape1[0])\n', (4139, 4150), True, 'import numpy as np\n'), ((12662, 12696), 'numpy.arange', 'np.arange', (['(0)', '(16)'], {'dtype': 'np.float32'}), '(0, 16, dtype=np.float32)\n', (12671, 12696), True, 'import numpy as np\n'), ((13626, 13660), 'numpy.arange', 'np.arange', (['(0)', '(16)'], {'dtype': 'np.float32'}), '(0, 16, dtype=np.float32)\n', (13635, 13660), True, 'import numpy as np\n'), ((14729, 14754), 'numpy.eye', 'np.eye', (['(4)'], {'dtype': 'np.int32'}), '(4, dtype=np.int32)\n', (14735, 14754), True, 'import numpy as np\n'), ((14755, 14786), 'numpy.arange', 'np.arange', (['(1)', '(4)'], {'dtype': 'np.int32'}), '(1, 4, dtype=np.int32)\n', (14764, 14786), True, 'import numpy as np\n'), ((15081, 15107), 'numpy.eye', 'np.eye', (['(10)'], {'dtype': 'np.int32'}), '(10, dtype=np.int32)\n', (15087, 15107), True, 'import numpy as np\n'), ((15944, 15966), 'numpy.ones', 'np.ones', (['(3, 3, 15, 5)'], {}), '((3, 3, 15, 5))\n', (15951, 15966), True, 'import numpy as np\n'), ((16339, 16362), 'numpy.arange', 'np.arange', (['(16)'], {'dtype': 'dt'}), '(16, dtype=dt)\n', (16348, 16362), True, 'import numpy as np\n'), ((16500, 16579), 'numpy.array', 'np.array', (['[[1.0, 0.0, 1.0], [0.0, 1.0, 1.0], [0.0, 0.0, 1.0]]'], {'dtype': 'np.float32'}), '([[1.0, 0.0, 1.0], [0.0, 1.0, 1.0], [0.0, 0.0, 1.0]], dtype=np.float32)\n', (16508, 16579), True, 'import numpy as np\n'), ((16922, 16945), 'numpy.arange', 'np.arange', (['(32)'], {'dtype': 'dt'}), '(32, dtype=dt)\n', (16931, 16945), True, 'import numpy as np\n'), ((17083, 17162), 'numpy.array', 'np.array', (['[[1.0, 0.0, 1.0], [0.0, 1.0, 1.0], [0.0, 0.0, 1.0]]'], {'dtype': 'np.float32'}), '([[1.0, 0.0, 1.0], [0.0, 1.0, 1.0], [0.0, 0.0, 1.0]], dtype=np.float32)\n', (17091, 17162), True, 'import numpy as np\n'), ((17672, 17703), 'numpy.arange', 'np.arange', (['(27)'], {'dtype': 'np.float32'}), '(27, dtype=np.float32)\n', (17681, 17703), True, 'import numpy as np\n'), ((18289, 18320), 'numpy.arange', 'np.arange', (['(16)'], {'dtype': 'np.float32'}), '(16, dtype=np.float32)\n', (18298, 18320), True, 'import numpy as np\n'), ((18402, 18487), 'numpy.array', 'np.array', (['[[[1.0, 0.0], [0.0, 1.0]], [[0.0, 1.0], [0.0, 1.0]]]'], {'dtype': 'np.float32'}), '([[[1.0, 0.0], [0.0, 1.0]], [[0.0, 1.0], [0.0, 1.0]]], dtype=np.float32\n )\n', (18410, 18487), True, 'import numpy as np\n'), ((18868, 18878), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (18874, 18878), True, 'import numpy as np\n'), ((20012, 20041), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'shape'}), '(size=shape)\n', (20029, 20041), True, 'import numpy as np\n'), ((20407, 20436), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'shape'}), '(size=shape)\n', (20424, 20436), True, 'import numpy as np\n'), ((23652, 23661), 'megengine.functional.relu', 'F.relu', (['O'], {}), '(O)\n', (23658, 23661), True, 'import megengine.functional as F\n'), ((30565, 30586), 'numpy.random.randn', 'np.random.randn', (['*shp'], {}), '(*shp)\n', (30580, 30586), True, 'import numpy as np\n'), ((38968, 38992), 'numpy.arange', 'np.arange', (['(3 * 4 * 5 * 5)'], {}), '(3 * 4 * 5 * 5)\n', 
(38977, 38992), True, 'import numpy as np\n'), ((40098, 40129), 'numpy.arange', 'np.arange', (['(16)'], {'dtype': 'np.float32'}), '(16, dtype=np.float32)\n', (40107, 40129), True, 'import numpy as np\n'), ((40189, 40210), 'numpy.random.randn', 'np.random.randn', (['(3)', '(3)'], {}), '(3, 3)\n', (40204, 40210), True, 'import numpy as np\n'), ((2466, 2498), 'numpy.ones', 'np.ones', (['shape'], {'dtype': 'np.float32'}), '(shape, dtype=np.float32)\n', (2473, 2498), True, 'import numpy as np\n'), ((2999, 3031), 'numpy.ones', 'np.ones', (['shape'], {'dtype': 'np.float32'}), '(shape, dtype=np.float32)\n', (3006, 3031), True, 'import numpy as np\n'), ((4212, 4229), 'numpy.eye', 'np.eye', (['shape2[1]'], {}), '(shape2[1])\n', (4218, 4229), True, 'import numpy as np\n'), ((6711, 6744), 'numpy.arange', 'np.arange', (['(1)', '(3)'], {'dtype': 'np.float32'}), '(1, 3, dtype=np.float32)\n', (6720, 6744), True, 'import numpy as np\n'), ((7206, 7239), 'numpy.arange', 'np.arange', (['(1)', '(9)'], {'dtype': 'np.float32'}), '(1, 9, dtype=np.float32)\n', (7215, 7239), True, 'import numpy as np\n'), ((7491, 7524), 'numpy.arange', 'np.arange', (['(1)', '(5)'], {'dtype': 'np.float32'}), '(1, 5, dtype=np.float32)\n', (7500, 7524), True, 'import numpy as np\n'), ((7821, 7854), 'numpy.arange', 'np.arange', (['(1)', '(5)'], {'dtype': 'np.float32'}), '(1, 5, dtype=np.float32)\n', (7830, 7854), True, 'import numpy as np\n'), ((8060, 8093), 'numpy.arange', 'np.arange', (['(1)', '(3)'], {'dtype': 'np.float32'}), '(1, 3, dtype=np.float32)\n', (8069, 8093), True, 'import numpy as np\n'), ((16021, 16033), 'megengine.tensor', 'tensor', (['np_x'], {}), '(np_x)\n', (16027, 16033), False, 'from megengine import Parameter, Tensor, is_cuda_available, tensor\n'), ((20081, 20116), 'numpy.random.randint', 'np.random.randint', (['(0)', '(1)'], {'size': 'shape'}), '(0, 1, size=shape)\n', (20098, 20116), True, 'import numpy as np\n'), ((20157, 20193), 'numpy.clip', 'np.clip', (['(0)', 'np.inf', '(1 - data * label)'], {}), '(0, np.inf, 1 - data * label)\n', (20164, 20193), True, 'import numpy as np\n'), ((20476, 20511), 'numpy.random.randint', 'np.random.randint', (['(0)', '(1)'], {'size': 'shape'}), '(0, 1, size=shape)\n', (20493, 20511), True, 'import numpy as np\n'), ((20554, 20590), 'numpy.clip', 'np.clip', (['(0)', 'np.inf', '(1 - data * label)'], {}), '(0, np.inf, 1 - data * label)\n', (20561, 20590), True, 'import numpy as np\n')]
|
from __future__ import absolute_import
import numpy as nm
from sfepy.base.base import load_classes, Struct
from sfepy import get_paths
def transform_basis(transform, bf):
"""
Transform a basis `bf` using `transform` array of matrices.
"""
if bf.ndim == 3:
nbf = nm.einsum('cij,qdj->cqdi', transform, bf, order='C')
elif bf.ndim == 4:
if bf.shape[0] == 1:
nbf = nm.einsum('cij,qdj->cqdi', transform, bf[0], order='C')
else:
nbf = nm.einsum('cij,cqdj->cqdi', transform, bf, order='C')
# Note: the 2nd derivatives are not supported here.
# Workaround for NumPy 1.14.0 - order is ignored(?)
nbf = nm.ascontiguousarray(nbf)
return nbf
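# A minimal usage sketch of transform_basis() with hypothetical shapes (not part
# of the original module): an identity transform for two cells applied to a
# basis evaluated in four quadrature points with three basis functions.
#
#   tr = nm.tile(nm.eye(3)[None, ...], (2, 1, 1))  # (n_cell, 3, 3)
#   bf = nm.random.rand(4, 1, 3)                   # (n_qp, dim, n_base)
#   nbf = transform_basis(tr, bf)                  # nbf.shape == (2, 4, 1, 3)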
class PolySpace(Struct):
"""Abstract polynomial space class."""
_all = None
keys = {
(0, 1) : 'simplex',
(1, 2) : 'simplex',
(2, 3) : 'simplex',
(3, 4) : 'simplex',
(2, 4) : 'tensor_product',
(3, 8) : 'tensor_product',
}
@staticmethod
def any_from_args(name, geometry, order, base='lagrange',
force_bubble=False):
"""
Construct a particular polynomial space classes according to the
arguments passed in.
"""
if name is None:
name = PolySpace.suggest_name(geometry, order, base, force_bubble)
if PolySpace._all is None:
ps_files = get_paths('sfepy/discrete/fem/poly_spaces.py')
ps_files += get_paths('sfepy/discrete/dg/poly_spaces.py')
PolySpace._all = load_classes(ps_files, [PolySpace],
ignore_errors=True,
name_attr='name')
table = PolySpace._all
key = '%s_%s' % (base, PolySpace.keys[(geometry.dim,
geometry.n_vertex)])
if (geometry.name == '1_2') and (key not in table):
key = '%s_%s' % (base, 'tensor_product')
if force_bubble:
key += '_bubble'
return table[key](name, geometry, order)
@staticmethod
def suggest_name(geometry, order, base='lagrange',
force_bubble=False):
"""
Suggest the polynomial space name given its constructor parameters.
"""
aux = geometry.get_interpolation_name()[:-1]
if force_bubble:
return aux + ('%dB' % order)
else:
return aux + ('%d' % order)
def __init__(self, name, geometry, order):
self.name = name
self.geometry = geometry
self.order = order
self.bbox = nm.vstack((geometry.coors.min(0), geometry.coors.max(0)))
def eval_base(self, coors, diff=0, ori=None, force_axis=False,
transform=None, suppress_errors=False, eps=1e-15):
"""
Evaluate the basis or its first or second derivatives in points given
by coordinates. The real work is done in _eval_base() implemented in
subclasses.
Note that the second derivative code is a work-in-progress and only
`coors` and `transform` arguments are used.
Parameters
----------
coors : array_like
The coordinates of points where the basis is evaluated. See Notes.
diff : 0, 1 or 2
If nonzero, return the given derivative.
ori : array_like, optional
Optional orientation of element facets for per element basis.
force_axis : bool
If True, force the resulting array shape to have one more axis even
when `ori` is None.
transform : array_like, optional
The basis transform array.
suppress_errors : bool
If True, do not report points outside the reference domain.
eps : float
Accuracy for comparing coordinates.
Returns
-------
base : array
The basis (shape (n_coor, 1, n_base)) or its first derivative
(shape (n_coor, dim, n_base)) or its second derivative (shape
            (n_coor, dim, dim, n_base)) evaluated in the given points. An
            additional axis of length n_cell is prepended if `ori` is given,
            or of length 1 if `force_axis` is True.
Notes
-----
If coors.ndim == 3, several point sets are assumed, with equal number
of points in each of them. This is the case, for example, of the
values of the volume base functions on the element facets. The indexing
(of bf_b(g)) is then (ifa,iqp,:,n_ep), so that the facet can be set in
C using FMF_SetCell.
"""
coors = nm.asarray(coors)
        if coors.ndim not in (2, 3):
raise ValueError('coordinates must have 2 or 3 dimensions! (%d)'
% coors.ndim)
if (coors.ndim == 2):
base = self._eval_base(coors, diff=diff, ori=ori,
suppress_errors=suppress_errors,
eps=eps)
if (base.ndim == 3) and force_axis:
base = base[None, ...]
if not base.flags['C_CONTIGUOUS']:
base = nm.ascontiguousarray(base)
else: # Several point sets.
if diff:
bdim = self.geometry.dim
else:
bdim = 1
base = nm.empty((coors.shape[0], coors.shape[1],
bdim, self.n_nod), dtype=nm.float64)
for ii, _coors in enumerate(coors):
base[ii] = self._eval_base(_coors, diff=diff, ori=ori,
suppress_errors=suppress_errors,
eps=eps)
if transform is not None:
base = transform_basis(transform, base)
return base
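# Usage sketch (illustrative; `geometry' and `coors' stand for a geometry
# description and reference coordinates): evaluate a basis and its gradient.
#
#   ps = PolySpace.any_from_args(None, geometry, order=1)
#   bf = ps.eval_base(coors)           # shape (n_coor, 1, n_base)
#   dbf = ps.eval_base(coors, diff=1)  # shape (n_coor, dim, n_base)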
|
[
"sfepy.base.base.load_classes",
"sfepy.get_paths"
] |
[((678, 703), 'numpy.ascontiguousarray', 'nm.ascontiguousarray', (['nbf'], {}), '(nbf)\n', (698, 703), True, 'import numpy as nm\n'), ((288, 340), 'numpy.einsum', 'nm.einsum', (['"""cij,qdj->cqdi"""', 'transform', 'bf'], {'order': '"""C"""'}), "('cij,qdj->cqdi', transform, bf, order='C')\n", (297, 340), True, 'import numpy as nm\n'), ((4682, 4699), 'numpy.asarray', 'nm.asarray', (['coors'], {}), '(coors)\n', (4692, 4699), True, 'import numpy as nm\n'), ((1420, 1466), 'sfepy.get_paths', 'get_paths', (['"""sfepy/discrete/fem/poly_spaces.py"""'], {}), "('sfepy/discrete/fem/poly_spaces.py')\n", (1429, 1466), False, 'from sfepy import get_paths\n'), ((1491, 1536), 'sfepy.get_paths', 'get_paths', (['"""sfepy/discrete/dg/poly_spaces.py"""'], {}), "('sfepy/discrete/dg/poly_spaces.py')\n", (1500, 1536), False, 'from sfepy import get_paths\n'), ((1566, 1639), 'sfepy.base.base.load_classes', 'load_classes', (['ps_files', '[PolySpace]'], {'ignore_errors': '(True)', 'name_attr': '"""name"""'}), "(ps_files, [PolySpace], ignore_errors=True, name_attr='name')\n", (1578, 1639), False, 'from sfepy.base.base import load_classes, Struct\n'), ((5410, 5488), 'numpy.empty', 'nm.empty', (['(coors.shape[0], coors.shape[1], bdim, self.n_nod)'], {'dtype': 'nm.float64'}), '((coors.shape[0], coors.shape[1], bdim, self.n_nod), dtype=nm.float64)\n', (5418, 5488), True, 'import numpy as nm\n'), ((412, 467), 'numpy.einsum', 'nm.einsum', (['"""cij,qdj->cqdi"""', 'transform', 'bf[0]'], {'order': '"""C"""'}), "('cij,qdj->cqdi', transform, bf[0], order='C')\n", (421, 467), True, 'import numpy as nm\n'), ((501, 554), 'numpy.einsum', 'nm.einsum', (['"""cij,cqdj->cqdi"""', 'transform', 'bf'], {'order': '"""C"""'}), "('cij,cqdj->cqdi', transform, bf, order='C')\n", (510, 554), True, 'import numpy as nm\n'), ((5221, 5247), 'numpy.ascontiguousarray', 'nm.ascontiguousarray', (['base'], {}), '(base)\n', (5241, 5247), True, 'import numpy as nm\n')]
|
r"""
Incompressible Stokes flow with Navier (slip) boundary conditions, flow driven
by a moving wall and a small diffusion for stabilization.
This example demonstrates the use of `no-penetration` boundary conditions as
well as `edge direction` boundary conditions together with Navier or slip
boundary conditions.
Find :math:`\ul{u}`, :math:`p` such that:
.. math::
\int_{\Omega} \nu\ \nabla \ul{v} : \nabla \ul{u}
- \int_{\Omega} p\ \nabla \cdot \ul{v}
+ \int_{\Gamma_1} \beta \ul{v} \cdot (\ul{u} - \ul{u}_d)
+ \int_{\Gamma_2} \beta \ul{v} \cdot \ul{u}
= 0
\;, \quad \forall \ul{v} \;,
\int_{\Omega} \mu \nabla q \cdot \nabla p
+ \int_{\Omega} q\ \nabla \cdot \ul{u}
= 0
\;, \quad \forall q \;,
where :math:`\nu` is the fluid viscosity, :math:`\beta` is the slip
coefficient, :math:`\mu` is the (small) numerical diffusion coefficient,
:math:`\Gamma_1` is the top wall that moves with the given driving velocity
:math:`\ul{u}_d` and :math:`\Gamma_2` are the remaining walls. The Navier
conditions are in effect on both :math:`\Gamma_1` and :math:`\Gamma_2` and are
expressed by the corresponding integrals in the equations above.
The `no-penetration` boundary conditions are applied on :math:`\Gamma_1`,
:math:`\Gamma_2`, except the vertices of the block edges, where the `edge
direction` boundary conditions are applied. Optionally, Dirichlet boundary
conditions can be applied on the inlet, see the code below.
The mesh is created by the ``gen_block_mesh()`` function - try different mesh
dimensions and resolutions below. For large meshes use the ``'ls_i'`` linear
solver - PETSc + petsc4py is needed in that case.
"""
import numpy as nm
from sfepy.discrete.fem.meshio import UserMeshIO
from sfepy.mesh.mesh_generators import gen_block_mesh
from sfepy.homogenization.utils import define_box_regions
# Mesh dimensions.
dims = nm.array([3, 1, 0.5])
# Mesh resolution: increase to improve accuracy.
shape = [11, 15, 15]
def mesh_hook(mesh, mode):
"""
Generate the block mesh.
"""
if mode == 'read':
mesh = gen_block_mesh(dims, shape, [0, 0, 0], name='user_block',
verbose=False)
return mesh
elif mode == 'write':
pass
filename_mesh = UserMeshIO(mesh_hook)
regions = define_box_regions(3, 0.5 * dims)
regions.update({
'Omega' : 'all',
'Edges_v' : ("""(r.Near *v r.Bottom) +v
(r.Bottom *v r.Far) +v
(r.Far *v r.Top) +v
(r.Top *v r.Near)""", 'edge'),
'Gamma1_f' : ('copy r.Top', 'face'),
'Gamma2_f' : ('r.Near +v r.Bottom +v r.Far', 'face'),
'Gamma_f' : ('r.Gamma1_f +v r.Gamma2_f', 'face'),
'Gamma_v' : ('r.Gamma_f -v r.Edges_v', 'face'),
'Inlet_f' : ('r.Left -v r.Gamma_f', 'face'),
})
fields = {
'velocity' : ('real', 3, 'Omega', 1),
'pressure' : ('real', 1, 'Omega', 1),
}
def get_u_d(ts, coors, region=None):
"""
Given stator velocity.
"""
out = nm.zeros_like(coors)
out[:] = [1.0, 1.0, 0.0]
return out
functions = {
'get_u_d' : (get_u_d,),
}
variables = {
'u' : ('unknown field', 'velocity', 0),
'v' : ('test field', 'velocity', 'u'),
'u_d' : ('parameter field', 'velocity',
{'setter' : 'get_u_d'}),
'p' : ('unknown field', 'pressure', 1),
'q' : ('test field', 'pressure', 'p'),
}
# Try setting the inlet velocity by un-commenting the ebcs.
ebcs = {
## 'inlet' : ('Inlet_f', {'u.0' : 1.0, 'u.[1, 2]' : 0.0}),
}
lcbcs = {
'walls' : ('Gamma_v', {'u.all' : 'no_penetration'},
'normals_Gamma.vtk'),
'edges' : ('Edges_v', {'u.all' : 'edge_direction'},
'edges_Edges.vtk'),
}
materials = {
'm' : ({
'nu' : 1e-3,
'beta' : 1e-2,
'mu' : 1e-10,
},),
}
equations = {
'balance' :
"""dw_div_grad.5.Omega(m.nu, v, u)
- dw_stokes.5.Omega(v, p)
+ dw_surface_dot.5.Gamma1_f(m.beta, v, u)
+ dw_surface_dot.5.Gamma2_f(m.beta, v, u)
=
+ dw_surface_dot.5.Gamma1_f(m.beta, v, u_d)""",
'incompressibility' :
"""dw_laplace.5.Omega(m.mu, q, p)
+ dw_stokes.5.Omega(u, q) = 0""",
}
solvers = {
'ls_d' : ('ls.scipy_direct', {}),
## 'ls_i' : ('ls.petsc', {
## 'method' : 'bcgsl', # ksp_type
## 'precond' : 'ilu', # pc_type
## 'eps_a' : 0.0, # abstol
## 'eps_r' : 1e-12, # rtol
## 'eps_d' : 1e10, # Divergence tolerance.
## 'i_max' : 2500, # maxits
## }),
'newton' : ('nls.newton', {
'i_max' : 1,
'eps_a' : 1e-10,
}),
}
options = {
'nls' : 'newton',
'ls' : 'ls_d',
}
|
[
"sfepy.mesh.mesh_generators.gen_block_mesh",
"sfepy.discrete.fem.meshio.UserMeshIO",
"sfepy.homogenization.utils.define_box_regions"
] |
[((1872, 1893), 'numpy.array', 'nm.array', (['[3, 1, 0.5]'], {}), '([3, 1, 0.5])\n', (1880, 1893), True, 'import numpy as nm\n'), ((2256, 2277), 'sfepy.discrete.fem.meshio.UserMeshIO', 'UserMeshIO', (['mesh_hook'], {}), '(mesh_hook)\n', (2266, 2277), False, 'from sfepy.discrete.fem.meshio import UserMeshIO\n'), ((2289, 2322), 'sfepy.homogenization.utils.define_box_regions', 'define_box_regions', (['(3)', '(0.5 * dims)'], {}), '(3, 0.5 * dims)\n', (2307, 2322), False, 'from sfepy.homogenization.utils import define_box_regions\n'), ((2985, 3005), 'numpy.zeros_like', 'nm.zeros_like', (['coors'], {}), '(coors)\n', (2998, 3005), True, 'import numpy as nm\n'), ((2076, 2148), 'sfepy.mesh.mesh_generators.gen_block_mesh', 'gen_block_mesh', (['dims', 'shape', '[0, 0, 0]'], {'name': '"""user_block"""', 'verbose': '(False)'}), "(dims, shape, [0, 0, 0], name='user_block', verbose=False)\n", (2090, 2148), False, 'from sfepy.mesh.mesh_generators import gen_block_mesh\n')]
|
# AtrialFibrePlugin
# Copyright (C) 2018 <NAME>, King's College London, all rights reserved, see LICENSE file
'''
Atrial fibre generation plugin.
'''
import os
import stat
import ast
import shutil
import datetime
import zipfile
import warnings
from itertools import starmap
from collections import defaultdict
try:
import configparser
except ImportError:
import ConfigParser as configparser
try:
from sfepy.base.conf import ProblemConf
from sfepy.applications import solve_pde
from sfepy.base.base import output
except ImportError:
warnings.warn('SfePy needs to be installed or in PYTHONPATH to generate fiber directions.')
from eidolon import (ui,
    ScenePlugin, Project, avg, vec3, successive, first, RealMatrix, IndexMatrix, StdProps, timing, ReprType,
    listToMatrix, MeshSceneObject, BoundBox, ElemType, reduceMesh, PyDataSet, taskmethod, taskroutine, printFlush
)
import eidolon
import numpy as np
from scipy.spatial import cKDTree
plugindir=os.path.dirname(os.path.abspath(__file__)) # this file's directory
# directory/file names
uifile=os.path.join(plugindir,'AtrialFibrePlugin.ui')
deformDir=os.path.join(plugindir,'deformetricaC')
deformExe=os.path.join(deformDir,'deformetrica')
architecture=os.path.join(plugindir,'architecture.ini')
problemFile=os.path.join(plugindir,'problemfile.py')
# registration file names
decimatedFile='subject.vtk'
targetFile='target.vtk'
datasetFile='data_set.xml'
modelFile='model.xml'
optimFile='optimization_parameters.xml'
registeredFile='Registration_subject_to_subject_0__t_9.vtk'
decimate='decimate-surface'
# deformetrica parameters
kernelWidthSub=5000
kernelWidthDef=8000
kernelType='cudaexact'
dataSigma=0.1
stepSize=0.000001
# field names
#regionField='regions'
#landmarkField='landmarks'
#directionField='directions'
#gradientDirField='gradientDirs'
#elemDirField='elemDirs'
#elemRegionField='elemRegions'
#elemthickness='elemThickness'
#elemGradient='elemGradient'
fieldNames=eidolon.enum(
'regions','landmarks',
'directions','gradientDirs',
'elemDirs','elemRegions','elemThickness',
'nodeGradient'
)
objNames=eidolon.enum(
'atlasmesh',
'origmesh',
'epimesh','epinodes',
'endomesh','endonodes',
'architecture'
)
regTypes=eidolon.enum('endo','epi')
# load the UI file into the ui namespace, this is subtyped below
ui.loadUI(open(uifile).read())
def showLines(nodes,lines,name='Lines',matname='Default'):
mgr=eidolon.getSceneMgr()
lineds=eidolon.LineDataSet(name+'DS',nodes,lines)
obj=MeshSceneObject(name,lineds)
mgr.addSceneObject(obj)
rep=obj.createRepr(eidolon.ReprType._line,matname=matname)
mgr.addSceneObjectRepr(rep)
return obj,rep
def showElemDirs(obj,glyphscale,mgr):
ds=obj.datasets[0]
nodes=ds.getNodes()
tets=first(i for i in ds.enumIndexSets() if i.getType()==ElemType._Tet1NL)
elemdirfield=ds.getDataField(fieldNames._elemDirs)
mid=ElemType.Tet1NL.basis(0.25,0.25,0.25)
elemnodes=[ElemType.Tet1NL.applyCoeffs([nodes[e] for e in elem],mid) for elem in tets]
elemobj=MeshSceneObject('elemobj',PyDataSet('elemobjds',elemnodes,[],[elemdirfield]))
mgr.addSceneObject(elemobj)
rep=elemobj.createRepr(eidolon.ReprType._glyph,0,externalOnly=False,drawInternal=True,glyphname='arrow',
glyphscale=glyphscale,dfield=elemdirfield.getName(),vecfunc=eidolon.VecFunc._Linear)
mgr.addSceneObjectRepr(rep)
class initdict(defaultdict):
def __init__(self,initfunc,*args,**kwargs):
defaultdict.__init__(self,None,*args,**kwargs)
self.initfunc=initfunc
def __missing__(self,key):
value=self.initfunc(key)
self.__setitem__(key,value)
return value
class plane(object):
def __init__(self,center,norm):
self.center=center
self.norm=norm.norm()
def dist(self,pt):
return pt.planeDist(self.center,self.norm)
def moveUp(self,dist):
self.center+=self.norm*dist
def numPointsAbove(self,nodes):
return sum(1 for n in nodes if self.dist(n)>=0)
def between(self,nodes,otherplane):
numnodes=len(nodes)
return self.numPointsAbove(nodes)==numnodes and otherplane.numPointsAbove(nodes)==numnodes
def findIntersects(self,nodes,inds):
numnodes=inds.m()
result=[]
for n in range(inds.n()):
if 0<self.numPointsAbove(nodes.mapIndexRow(inds,n))<numnodes:
result.append(n)
return result
class TriMeshGraph(object):
def __init__(self,nodes,tris,ocdepth=3):
self.nodes=nodes if isinstance(nodes,eidolon.Vec3Matrix) else listToMatrix(nodes,'nodes')
self.tris=tris if isinstance(tris,eidolon.IndexMatrix) else listToMatrix(tris,'tris')
self.tricenters=[avg(self.getTriNodes(r),vec3()) for r in range(self.tris.n())]
self.adj,self.ragged=generateTriAdj(self.tris) # elem -> elems
self.nodeelem=generateNodeElemMap(self.nodes.n(),self.tris) # node -> elems
self.edges=generateSimplexEdgeMap(self.nodes.n(),self.tris) # node -> nodes
self.boundbox=BoundBox(nodes)
# self.octree=eidolon.Octree(ocdepth,self.boundbox.getDimensions(),self.boundbox.center)
# self.octree.addMesh(self.nodes,self.tris)
def computeDist(key):
i,j=key
return self.tricenters[i].distTo(self.tricenters[j])
self.tridists=initdict(computeDist)
# def getIntersectedTri(self,start,end):
# '''
# Returns the triangle index and the (t,u,v) triple if the line from `start' to `end' intersects the indexed triangle
# at a distance of `t' from `start' with xi coord (u,v). Returns None if no triangle intersected.
# '''
# startoc=self.octree.getLeaf(start)
# endoc=self.octree.getLeaf(end)
# inds=(startoc.leafdata if startoc is not None else []) + (endoc.leafdata if endoc is not None else [])
#
# r=eidolon.Ray(start,end-start)
#
# for tri in inds:
# d=r.intersectsTri(*self.getTriNodes(tri))
# if d:# and d[0]<=start.distTo(end):
# return tri,d
#
# return None
def getPathSubgraph(self,starttri,endtri):
return getAdjTo(self.adj,starttri,endtri)
def getTriNodes(self,triindex):
return self.nodes.mapIndexRow(self.tris,triindex)
def getTriNorm(self,triindex):
a,b,c=self.getTriNodes(triindex)
return a.planeNorm(b,c)
def getSharedNodeTris(self,triindex):
tris=set()
for n in self.tris[triindex]:
tris.update(self.nodeelem[n])
tris.remove(triindex)
return list(sorted(tris))
def getNearestTri(self,pt):
def triDist(tri):
            norm=self.getTriNorm(tri)
            center=self.tricenters[tri]
            return abs(pt.planeDist(center,norm)) # distance from query point `pt' to the plane of `tri'
nearestnode=min([n for n in range(self.nodes.n()) if self.nodeelem[n]],key=lambda n:self.nodes[n].distToSq(pt))
tris=self.nodeelem[nearestnode]
return min(tris,key=triDist)
def getPath(self,starttri,endtri,acceptTri=None):
return dijkstra(self.adj,starttri,endtri,lambda i,j:self.tridists[(i,j)],acceptTri)
def loadArchitecture(path,section):
'''
Load the architecture from the given file `path' and return values from the given section (endo or epi). The
return value is a tuple containing:
landmarks: 0-based indices of landmark nodes in the atlas
lmlines : 0-based index pairs defining lines between indices in landmarks
        lmregions: a list of maps, each defining a region as a mapping from 0-based indices into lmlines to
            the line's landmark index pair
lmstim : a per-region specifier list stating which lines (L# for index #) or atlas node (N#) defines stimulation
lground : a per-region specifier list stating which lines (L# for index #) or atlas node (N#) defines ground
appendageregion: region number for the appendage
appendagenode: node index for the appendage's division node which must be generated
'''
c=configparser.SafeConfigParser()
assert len(c.read(path))>0
landmarks=ast.literal_eval(c.get(section,'landmarks')) # 0-based node indices
lines=ast.literal_eval(c.get(section,'lines')) # 1-based landmark indices
regions=ast.literal_eval(c.get(section,'regions')) # 1-based landmark indices
stimulus=ast.literal_eval(c.get(section,'stimulus')) # per region
ground=ast.literal_eval(c.get(section,'ground')) # per region
appendageregion=ast.literal_eval(c.get(section,'appendageregion'))
appendagenode=ast.literal_eval(c.get(section,'appendagenode'))
appendagelmindex=ast.literal_eval(c.get(section,'appendagelmindex'))
# types=ast.literal_eval(c.get(section,'type')) # per region
# indices that don't exist are for landmarks that need to be calculated
    lmlines=[subone(l) for l in lines] #if max(l)<=len(landmarks)] # filter for lines with existing node indices
lmregions=[subone(r) for r in regions]
# lmregions=[subone(r) for r in regions if all(i<=len(landmarks) for i in r)]
lmstim=stimulus#[:len(lmregions)]
lmground=ground#[:len(lmregions)]
allregions=[]
for r in lmregions:
lr={i:(a,b) for i,(a,b) in enumerate(lmlines) if a in r and b in r}
if len(lr)>2:
allregions.append(lr)
return landmarks,lmlines,allregions,lmstim,lmground, appendageregion,appendagenode,appendagelmindex
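# Usage sketch (illustrative): `architecture' is the module-level ini path
# defined above and the section name is one of regTypes ('endo' or 'epi'):
#
#   (lmarks, lmlines, lmregions, lmstim, lmground,
#    apregion, apnode, aplmindex) = loadArchitecture(architecture, 'endo')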
def writeMeshFile(filename,nodes,inds,nodegroup,indgroup,dim):
'''Write a medit format mesh file to `filename'.'''
with open(filename,'w') as o:
print('MeshVersionFormatted 1',file=o)
print('Dimension %i'%dim,file=o)
print('Vertices',file=o)
print(len(nodes),file=o)
for n in range(len(nodes)):
for v in tuple(nodes[n])[:dim]:
print('%20.10f'%v,end=' ',file=o)
group=0 if nodegroup is None else nodegroup[n]
print(group,file=o)
print('Triangles' if len(inds[0])==3 else 'Tetrahedra',file=o)
print(len(inds),file=o)
for n in range(len(inds)):
print(*['%10i'%(t+1) for t in inds[n]],file=o,end=' ')
group=0 if indgroup is None else indgroup[n]
print(group,file=o)
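# The output follows the medit ASCII layout written above, roughly:
#
#   MeshVersionFormatted 1
#   Dimension 3
#   Vertices
#   <num nodes>
#   <x y z group> per node
#   Triangles (or Tetrahedra)
#   <num elems>
#   <1-based node indices, group> per element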
def createNodeQuery(nodes):
'''
Create a cKDTree object from `nodes' and return a query function which accepts a position and radius value. The
function will return the nearest point index to the given position if radius<=0 and a list of indices of points
within the given radius of the position otherwise. The node list is also returned as a second return value.
'''
tree=cKDTree(np.asarray(list(map(tuple,nodes))))
def _query(pos,radius=0):
'''Query `nodes' for the nearest node to `pos' if `radius'<=0 or a list of those within `radius' otherwise.'''
pos=tuple(pos)
if radius<=0:
return tree.query(pos)[1],tree.data
else:
return tree.query_ball_point(pos,radius),tree.data
return _query
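# Usage sketch (toy data):
#
#   query = createNodeQuery([vec3(0, 0, 0), vec3(1, 0, 0), vec3(0, 1, 0)])
#   nearest, data = query(vec3(0.1, 0, 0))    # index of the nearest node
#   within, data = query(vec3(0, 0, 0), 1.5)  # list of indices within radius 1.5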
def registerSubjectToTarget(subjectObj,targetObj,targetTrans,outdir,decimpath,VTK):
'''
Register the `subjectObj' mesh object to `targetObj' mesh object putting data into directory `outdir'. The subject
will be decimated to have roughly the same number of nodes as the target and then stored as subject.vtk in `outdir'.
Registration is done with Deformetrica and result stored as 'Registration_subject_to_subject_0__t_9.vtk' in `outdir'.
    `targetTrans' must be None or a transform which is applied to the nodes of `targetObj' before registration.
'''
dpath=os.path.join(outdir,decimatedFile)
tmpfile=os.path.join(outdir,'tmp.vtk')
# if a transform is given, apply that transform to the target mesh when saving otherwise do no transformation
vecfunc=(lambda i: tuple(targetTrans*i)) if targetTrans else None
shutil.copy(os.path.join(deformDir,datasetFile),os.path.join(outdir,datasetFile)) # copy dataset file unchanged
model=open(os.path.join(deformDir,modelFile)).read()
model=model.replace('%1',str(dataSigma))
model=model.replace('%2',str(kernelWidthSub))
model=model.replace('%3',str(kernelType))
model=model.replace('%4',str(kernelWidthDef))
with open(os.path.join(outdir,modelFile),'w') as o: # save modified model file
o.write(model)
optim=open(os.path.join(deformDir,optimFile)).read()
optim=optim.replace('%1',str(stepSize))
with open(os.path.join(outdir,optimFile),'w') as o: # save modified optimization file
o.write(optim)
VTK.saveLegacyFile(tmpfile,subjectObj,datasettype='POLYDATA')
VTK.saveLegacyFile(os.path.join(outdir,targetFile),targetObj,datasettype='POLYDATA',vecfunc=vecfunc)
snodes=subjectObj.datasets[0].getNodes()
tnodes=targetObj.datasets[0].getNodes()
sizeratio=float(tnodes.n())/snodes.n()
sizepercent=str(100*(1-sizeratio))[:6] # percent to decimate by
# decimate the mesh most of the way towards having the same number of nodes as the target
ret,output=eidolon.execBatchProgram(decimpath,tmpfile,dpath,'-reduceby',sizepercent,'-ascii',logcmd=True)
assert ret==0,output
env={'LD_LIBRARY_PATH':deformDir}
ret,output=eidolon.execBatchProgram(deformExe,"registration", "3D", modelFile, datasetFile, optimFile, "--output-dir=.",cwd=outdir,env=env,logcmd=True)
assert ret==0,output
return output
def transferLandmarks(archFilename,fieldname,targetObj,targetTrans,subjectObj,outdir,VTK):
'''
Register the landmarks defined as node indices on `targetObj' to equivalent node indices on `subjectObj' via the
decimated and registered intermediary stored in `outdir'. The result is a list of index pairs associating a node
index in `subjectObj' for every landmark index in `targetObj'.
'''
decimated=os.path.join(outdir,decimatedFile)
registered=os.path.join(outdir,registeredFile)
arch=loadArchitecture(archFilename,fieldname)
lmarks,lines=arch[:2]
appendagelmindex=arch[-1]
# append the index for the estimated appendage node, this will have to be adjusted manually after registration
lmarks.append(appendagelmindex)
reg=VTK.loadObject(registered) # mesh registered to target
dec=VTK.loadObject(decimated) # decimated unregistered mesh
tnodes=targetObj.datasets[0].getNodes() # target points
rnodes=reg.datasets[0].getNodes() # registered decimated points
dnodes=dec.datasets[0].getNodes() # unregistered decimated points
snodes=subjectObj.datasets[0].getNodes() # original subject points
targetTrans=targetTrans or eidolon.transform()
lmpoints=[(targetTrans*tnodes[m],m) for m in lmarks] # (transformed landmark node, index) pairs
# find the points in the registered mesh closest to the landmark points in the target object
query=createNodeQuery(rnodes)
rpoints=[(query(pt)[0],m) for pt,m in lmpoints]
    # find the subject nodes closest to landmark points in the decimated mesh (which are at the same indices as in the registered mesh)
query=createNodeQuery(snodes)
spoints=[(query(dnodes[i])[0],m) for i,m in rpoints]
assert len(spoints)==len(lmpoints)
assert all(p[0] is not None for p in spoints)
slines=[l for l in lines if max(l)<len(spoints)]
return spoints,slines # return list (i,m) pairs where node index i in the subject mesh is landmark m
def generateTriAdj(tris):
'''
Generates a table (n,3) giving the indices of adjacent triangles for each triangle, with a value of `n' indicating a
free edge. The indices in each row are in sorted order rather than per triangle edge. The result is the dual of the
triangle mesh represented as the (n,3) array and a map relating the mesh's ragged edges to their triangle.
'''
edgemap = {} # maps edges to the first triangle having that edge
result=IndexMatrix(tris.getName()+'Adj',tris.n(),3)
result.fill(tris.n())
# Find adjacent triangles by constructing a map from each edge, keyed as a sorted point pair (a,b), to the triangle
# having that edge; when an edge is encountered a second time, the current triangle is adjacent to the one that first added it.
for t1,tri in enumerate(tris): # iterate over each triangle t1
for a,b in successive(tri,2,True): # iterate over each edge (a,b) of t1
k=(min(a,b),max(a,b)) # key has uniform edge order
t2=edgemap.pop(k,None) # attempt to find edge k in the map, None indicates edge not found
if t2 is not None: # an edge is shared if already encountered, thus t1 is adjacent to t2
result[t1]=sorted(set(result[t1]+(t2,)))
result[t2]=sorted(set(result[t2]+(t1,)))
else:
edgemap[k]=t1 # first time edge is encountered, associate this triangle with it
return result,edgemap
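# Minimal sketch of generateTriAdj on a two-triangle quad: the shared edge (1,2) makes the triangles mutually
# adjacent, free edges are padded with tris.n()==2, and edgemap retains only the ragged (unshared) edges.
def _exampleTriAdj():
    tris=listToMatrix([(0,1,2),(1,3,2)],'tris')
    adj,edgemap=generateTriAdj(tris)
    assert 1 in adj[0] and 0 in adj[1] # the two triangles are adjacent to each other
    assert set(edgemap)=={(0,1),(0,2),(1,3),(2,3)} # only boundary edges remain; shared edge (1,2) was popped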
@timing
def getAdjTo(adj,start,end):
'''
Returns a subgraph of `adj', represented as a node->[neighbours] dict, which includes nodes `start' and `end'.
If `end' is None or an index not appearing in the mesh, the result will be the submesh contiguous with `start'.
'''
visiting=set([start])
found={}
numnodes=adj.n()
while visiting and end not in found:
visit=visiting.pop()
neighbours=[n for n in adj[visit] if n<numnodes]
found[visit]=neighbours
visiting.update(n for n in neighbours if n not in found)
return found
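# Sketch: on two disconnected triangles the search in getAdjTo never crosses the gap, so only the component
# containing `start' is returned (here with no in-range neighbours, since every edge is free).
def _exampleGetAdjTo():
    tris=listToMatrix([(0,1,2),(3,4,5)],'tris')
    adj,_=generateTriAdj(tris)
    assert getAdjTo(adj,0,None)=={0:[]} # triangle 1 is unreachable from triangle 0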
def generateNodeElemMap(numnodes,tris):
'''Returns a list relating each node index to the set of element indices using that node.'''
nodemap=[set() for _ in range(numnodes)]
for i,tri in enumerate(tris):
for n in tri:
nodemap[n].add(i)
#assert all(len(s)>0 for s in nodemap), 'Unused nodes in triangle topology'
return nodemap
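# Sketch: for two triangles sharing edge (1,2), the shared nodes map to both elements.
def _exampleNodeElemMap():
    assert generateNodeElemMap(4,[(0,1,2),(1,3,2)])==[{0},{0,1},{0,1},{1}]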
def generateSimplexEdgeMap(numnodes,simplices):
'''
Returns a list relating each node index to the set of node indices joined to it by graph edges. This assumes the mesh
has `numnodes' nodes and simplex topology `simplices'.
'''
nodemap=[set() for _ in range(numnodes)]
for simplex in simplices:
simplex=set(simplex)
for s in simplex:
nodemap[s].update(simplex.difference((s,)))
return nodemap
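# Sketch: edge map of a single triangle; every node is joined to the other two.
def _exampleSimplexEdgeMap():
    assert generateSimplexEdgeMap(3,[(0,1,2)])==[{1,2},{0,2},{0,1}]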
@timing
def dijkstra(adj, start, end,distFunc,acceptTri=None):
#http://benalexkeen.com/implementing-djikstras-shortest-path-algorithm-with-python/
# shortest paths is a dict of nodes to previous node and distance
paths = {start: (None,0)}
curnode = start
visited = set()
# consider only the subgraph containing start and end; this expands outward geometrically so it should contain the minimal path
adj=getAdjTo(adj,start,end)
eidolon.printFlush(len(adj))
if acceptTri is not None:
accept=lambda a: (a in adj and acceptTri(a))
else:
accept=lambda a: a in adj
while curnode != end:
visited.add(curnode)
destinations = list(filter(accept,adj[curnode]))
curweight = paths[curnode][1]
for dest in destinations:
weight = curweight+distFunc(curnode,dest)
if dest not in paths or weight < paths[dest][1]:
paths[dest] = (curnode, weight)
nextnodes = {node: paths[node] for node in paths if node not in visited}
if not nextnodes:
raise ValueError("Route %i -> %i not possible"%(start,end))
# next node is the destination with the lowest weight
curnode = min(nextnodes, key=lambda k:nextnodes[k][1])
# collect path from end node back to the start
path = []
while curnode is not None:
path.insert(0,curnode)
curnode = paths[curnode][0]
return path
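# Sketch: shortest path over the dual graph of a three-triangle strip with a unit hop cost; the only route from
# triangle 0 to triangle 2 passes through triangle 1.
def _exampleDijkstra():
    tris=listToMatrix([(0,1,2),(1,3,2),(2,3,4)],'tris')
    adj,_=generateTriAdj(tris)
    assert dijkstra(adj,0,2,lambda a,b:1)==[0,1,2]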
def subone(v):
return tuple(i-1 for i in v)
def findNearestIndex(pt,nodelist):
return min(range(len(nodelist)),key=lambda i:pt.distToSq(nodelist[i]))
def findFarthestIndex(pt,nodelist):
return max(range(len(nodelist)),key=lambda i:pt.distToSq(nodelist[i]))
def getContiguousTris(graph,starttri,acceptTri):
accepted=[starttri]
adjacent=first(i for i in graph.getSharedNodeTris(starttri) if i not in accepted and acceptTri(i))
while adjacent is not None:
accepted.append(adjacent)
for a in accepted[::-1]:
allneighbours=graph.getSharedNodeTris(a)
adjacent=first(i for i in allneighbours if i not in accepted and acceptTri(i))
if adjacent:
break
return accepted
@timing
def findTrisBetweenNodes(start,end,landmarks,graph):
eidolon.printFlush('Nodes:',start,end)
start=landmarks[start]
end=landmarks[end]
assert 0<=start<len(graph.nodeelem)
assert 0<=end<len(graph.nodeelem)
starttri=first(graph.nodeelem[start])
endtri=first(graph.nodeelem[end])
assert starttri is not None
assert endtri is not None
nodes=graph.nodes
startnode=nodes[start]
endnode=nodes[end]
easypath= graph.getPath(starttri,endtri)
midnode=graph.tricenters[easypath[len(easypath)//2]]
# define planes bounding the triangle search area to the space between the line's endpoints
splane=plane(startnode,midnode-startnode)
eplane=plane(endnode,midnode-endnode)
# adjust the planes' positions to account for numeric error
adjustdist=1e1
splane.moveUp(-adjustdist)
eplane.moveUp(-adjustdist)
assert starttri is not None
assert endtri is not None
# TODO: plane normal determination still needs work
#linenorm=midnode.planeNorm(startnode,endnode)
#linenorm=graph.getTriNorm(easypath[len(easypath)//2]).cross(midnode-startnode)
linenorm=eidolon.avg(graph.getTriNorm(e) for e in easypath).cross(midnode-startnode)
lineplane=plane(splane.center,linenorm)
indices=set([starttri,endtri]) # list of element indices on lineplane between splane and eplane
for i in range(graph.tris.n()):
trinodes=graph.getTriNodes(i)
numabove=lineplane.numPointsAbove(trinodes)
if numabove in (1,2) and splane.between(trinodes,eplane):
indices.add(i)
accepted=getContiguousTris(graph,starttri,lambda i:i in indices)
if endtri not in accepted or len(easypath)<len(accepted):
eidolon.printFlush('---Resorting to easypath')
accepted=easypath
return accepted
@timing
def assignRegion(region,index,assignmat,landmarks,linemap,graph):
def getEnclosedGraph(adj,excludes,start):
visiting=set([start])
found=set()
numnodes=adj.n()
assert start is not None
while visiting:
visit=visiting.pop()
neighbours=[n for n in adj.getRow(visit) if n<numnodes and n not in excludes]
found.add(visit)
visiting.update(n for n in neighbours if n not in found)
return found
# collect all tri indices on the border of this region
bordertris=set()
for lineindex,(a,b) in region.items():
if (a,b) in linemap:
line=linemap[(a,b)]
else:
line=findTrisBetweenNodes(a,b,landmarks,graph)
linemap[(a,b)]=line
linemap[(b,a)]=line
# assign line ID to triangles on the line
for tri in line:
assignmat[tri,0]=lineindex
bordertris.update(line)
bordertri=graph.tricenters[first(bordertris)]
farthest=max(range(len(graph.tris)),key=lambda i:graph.tricenters[i].distToSq(bordertri))
maxgraph=getEnclosedGraph(graph.adj,bordertris,farthest)
for tri in range(len(graph.tris)):
if tri in bordertris or tri not in maxgraph:
if assignmat[tri,1]<0:
assignmat[tri,1]=index
elif assignmat[tri,2]<0:
assignmat[tri,2]=index
elif assignmat[tri,3]<0:
assignmat[tri,3]=index
@timing
def generateRegionField(obj,landmarkObj,regions,appendageregion,appendagenode,task=None):
ds=obj.datasets[0]
nodes=ds.getNodes()
tris=first(ind for ind in ds.enumIndexSets() if ind.m()==3 and bool(ind.meta(StdProps._isspatial)))
lmnodes=landmarkObj.datasets[0].getNodes()
linemap={}
landmarks={i:nodes.indexOf(lm)[0] for i,lm in enumerate(lmnodes)}
assert all(0<=l<nodes.n() for l in landmarks)
graph=TriMeshGraph(nodes,tris)
edgenodeinds=set(eidolon.listSum(graph.ragged)) # list of all node indices on the ragged edge
filledregions=RealMatrix(fieldNames._regions,tris.n(),4)
filledregions.meta(StdProps._elemdata,'True')
filledregions.fill(-10)
#landmarks[appendagenode]=0 # TODO: skipping appendage node for now
for region in regions:
for a,b in region.values():
if appendagenode not in (a,b):
if a in landmarks and b not in landmarks:
oldlmnode=nodes[landmarks[a]]
newlm=b
elif b in landmarks and a not in landmarks:
oldlmnode=nodes[landmarks[b]]
newlm=a
else:
continue
newlmnode=min(edgenodeinds,key=lambda i:nodes[i].distToSq(oldlmnode)) # ragged edge node closest to landmark
landmarks[newlm]=newlmnode
# eidolon.printFlush(newlm,newlmnode,graph.getPath(min(a,b),newlmnode),'\n')
# line=findTrisBetweenNodes(a,b,landmarks,graph)
# for tri in line:
# filledregions[tri,0]=max(a,b)
if task:
task.setMaxProgress(len(regions))
for rindex,region in enumerate(regions):
eidolon.printFlush('Region',rindex,'of',len(regions),region)
allnodes=set(eidolon.listSum(region.values()))
if all(a in landmarks for a in allnodes):
assignRegion(region,rindex,filledregions,landmarks,linemap,graph)
else:
eidolon.printFlush('Skipping',rindex,[a for a in allnodes if a not in landmarks])
if task:
task.setProgress(rindex+1)
return filledregions,linemap
def extractTriRegion(nodes,tris,acceptFunc):
'''
Extract the region from the mesh (nodes,tris) as defined by the triangle acceptance function `acceptFunc'. The return
value is a tuple containing the list of new nodes, a list of new tris, a map from old node indices in `nodes' to new
indices in the returned node list, and a map from triangle indices in `tris' to new ones in the returned triangle list.
'''
#old -> new
newnodes=[] # new node set
newtris=[] # new triangle set
nodemap={} # maps old node indices to new
trimap={} # maps old triangle indices to new
for tri in range(len(tris)):
if acceptFunc(tri):
newtri=list(tris[tri])
for i,n in enumerate(newtri):
if n not in nodemap:
nodemap[n]=len(newnodes)
newnodes.append(nodes[n])
newtri[i]=nodemap[n]
trimap[tri]=len(newtris)
newtris.append(newtri)
return newnodes,newtris,nodemap,trimap
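# Sketch: extracting the second triangle of a two-triangle mesh renumbers its nodes to 0..2 and records both
# index mappings (old node/tri index -> new).
def _exampleExtractTriRegion():
    nodes=[vec3(0,0,0),vec3(1,0,0),vec3(0,1,0),vec3(1,1,0)]
    newnodes,newtris,nodemap,trimap=extractTriRegion(nodes,[(0,1,2),(1,3,2)],lambda i:i==1)
    assert newtris==[[0,1,2]] and trimap=={1:0}
    assert nodemap=={1:0,3:1,2:2} and newnodes==[nodes[1],nodes[3],nodes[2]]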
def calculateMeshGradient(prefix,nodes,elems,groups,VTK):
'''Calculate the Laplace gradient for the mesh given as (nodes,elems,groups) using sfepy.'''
tempdir=os.path.dirname(prefix)
infile=prefix+'.mesh'
logfile=prefix+'.log'
outfile=prefix+'.vtk'
probfile=prefix+'.py'
writeMeshFile(infile,nodes,elems,groups,None,3)
with open(problemFile) as p:
with open(probfile,'w') as o:
o.write(p.read()%{'inputfile':infile,'outdir':tempdir})
p=ProblemConf.from_file(probfile)
output.set_output(logfile,True,True)
solve_pde(p)
robj=VTK.loadObject(outfile)
return robj.datasets[0].getDataField('t')
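# The templated problem file itself is not shown here; a minimal declarative sfepy Laplace problem of the kind it
# likely contains is sketched below. This is a hypothetical illustration only: the region/field/solver names are
# assumptions, and just the %(inputfile)s / %(outdir)s substitution keys are taken from the call above.
_exampleProblemFile='''
filename_mesh = '%(inputfile)s'
options = {'output_dir': '%(outdir)s'}
regions = {
    'Omega': 'all',
    'Gamma1': ('vertices of group 1', 'vertex'), # stim nodes (group 1 as assigned by the caller)
    'Gamma2': ('vertices of group 2', 'vertex'), # ground nodes (group 2)
}
fields = {'potential': ('real', 1, 'Omega', 1)}
variables = {
    't': ('unknown field', 'potential', 0),
    's': ('test field', 'potential', 't'),
}
ebcs = {'stim': ('Gamma1', {'t.0': 1.0}), 'ground': ('Gamma2', {'t.0': 0.0})}
integrals = {'i': 2}
equations = {'Laplace': 'dw_laplace.i.Omega(s, t) = 0'}
solvers = {'ls': ('ls.scipy_direct', {}), 'newton': ('nls.newton', {'i_max': 1})}
'''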
@timing
def calculateGradientDirs(nodes,edges,gradientField):
'''
Returns a list of vec3 directions, one per node of `nodes', each pointing in the gradient direction of the per-node
scalar field `gradientField'. The `edges' argument is a map relating each node index to the set of node indices
sharing an edge with that index.
'''
#https://math.stackexchange.com/questions/2627946/how-to-approximate-numerically-the-gradient-of-the-function-on-a-triangular-mesh/2632616#2632616
numnodes=len(nodes)
nodedirs=[]#eidolon.RealMatrix(gradientDirField,numnodes,3)
for n in range(numnodes):
edgenodes=edges[n]
if edgenodes:
ngrad=gradientField[n]
nnode=nodes[n]
edgegrads=[gradientField[i]-ngrad for i in edgenodes] # field gradient in edge directions
edgedirs=[nodes[i]-nnode for i in edgenodes] # edge directional vectors
# minlen=min(e.lenSq() for e in edgedirs)**0.5
edgedirs=[list(e) for e in edgedirs]
# node direction is solution for x in Ax=b where A is edge directions and b edge gradients
nodedir=np.linalg.lstsq(np.asarray(edgedirs),np.asarray(edgegrads),rcond=None)
#nodedirs[n]=vec3(*nodedir[0]).norm()*minlen
nodedirs.append(vec3(*nodedir[0]).norm())
else:
nodedirs.append(vec3())
return nodedirs
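# Sketch of the least-squares step above on a single tetrahedron carrying the linear field t=x: solving
# edge_dirs @ g = edge_grads at every node recovers the unit gradient along x.
def _exampleGradientDirs():
    nodes=[vec3(0,0,0),vec3(1,0,0),vec3(0,1,0),vec3(0,0,1)]
    field=[0.0,1.0,0.0,0.0] # t=x sampled at the nodes
    dirs=calculateGradientDirs(nodes,generateSimplexEdgeMap(4,[(0,1,2,3)]),field)
    assert all(d.distTo(vec3(1,0,0))<1e-6 for d in dirs)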
@timing
def calculateDirectionField(obj,landmarkObj,regions,regtype,tempdir,VTK):
lmlines,allregions,lmstim,lmground=loadArchitecture(architecture,regtype)[1:5]
regions=regions or list(range(len(allregions)))
ds=obj.datasets[0]
nodes=ds.getNodes()
tris=first(ind for ind in ds.enumIndexSets() if ind.m()==3 and bool(ind.meta(StdProps._isspatial)))
regionfield=ds.getDataField(fieldNames._regions)
regionfield=np.asarray(regionfield,np.int32)
lmnodes=landmarkObj.datasets[0].getNodes()
landmarks=[nodes.indexOf(lm)[0] for lm in lmnodes]
directionfield=RealMatrix(fieldNames._directions,nodes.n(),3)
directionfield.fill(0)
gradientfield=RealMatrix('gradient',nodes.n(),1)
gradientfield.fill(-1)
obj.datasets[0].setDataField(gradientfield)
obj.datasets[0].setDataField(directionfield)
def selectRegion(region,triregions):
return region==int(triregions[0]) or region==int(triregions[1]) or region==int(triregions[2])
def collectNodes(nodemap,trimap,components):
'''Collect the nodes for the given components, being lines or landmark points.'''
nodeinds=set()
for comp in components:
if comp[0]=='L':
lind=int(comp[1:])-1
for tri in trimap:
if int(regionfield[tri,0])==lind:
nodeinds.update(nodemap[t] for t in tris[tri])
else:
nodeinds.add(nodemap[landmarks[int(comp[1:])-1]])
return nodeinds
# for each region calculate the laplace gradient and fill in the direction field
for r in regions:
eidolon.printFlush('Region',r,lmstim[r],lmground[r])
try:
newnodes,newtris,nodemap,trimap=extractTriRegion(nodes,tris,lambda i:selectRegion(r,regionfield[i,1:]))
assert len(newtris)>0, 'Empty region selected'
stimnodes=collectNodes(nodemap,trimap,lmstim[r])
groundnodes=collectNodes(nodemap,trimap,lmground[r])
if len(stimnodes)==0:
raise ValueError('Region %i has no stim nodes'%r)
elif not all(0<=s<len(newnodes) for s in stimnodes):
raise ValueError('Region %i has invalid stim nodes: %r'%(r,stimnodes))
if len(groundnodes)==0:
raise ValueError('Region %i has no ground nodes'%r)
elif not all(0<=s<len(newnodes) for s in groundnodes):
raise ValueError('Region %i has invalid ground nodes: %r'%(r,groundnodes))
# convert triangles to tets
for t in range(len(newtris)):
a,b,c=[newnodes[i] for i in newtris[t]]
norm=a.planeNorm(b,c)
newtris[t].append(len(newnodes))
newnodes.append(avg((a,b,c))+norm)
nodegroup=[1 if n in stimnodes else (2 if n in groundnodes else 0) for n in range(len(newnodes))]
assert 1 in nodegroup, 'Region %i does not assign stim nodes (%r)'%(r,stimnodes)
assert 2 in nodegroup, 'Region %i does not assign ground nodes (%r)'%(r,groundnodes)
gfield=calculateMeshGradient(os.path.join(tempdir,'region%.2i'%r),newnodes,newtris,nodegroup,VTK)
for oldn,newn in nodemap.items():
gradientfield[oldn,0]=gfield[newn]
graddirs=calculateGradientDirs(newnodes,generateSimplexEdgeMap(len(newnodes),newtris),gfield)
for oldn,newn in nodemap.items():
directionfield[oldn]=graddirs[newn]+vec3(*directionfield[oldn])
except Exception as e:
eidolon.printFlush(e)
return gradientfield,directionfield
@timing
def getElemDirectionAdj(nodes,elems,adj,dirField):
'''
Generate an index matrix with a row for each element of `elems' storing which face is the forward direction of the
directional field `dirField', which adjacent element is in the forward direction, which face is in the backward
direction, and which adjacent element is in the backward direction.
'''
assert len(nodes)==len(dirField)
et=ElemType[elems.getType()]
result=IndexMatrix('diradj',elems.n(),4)
result.meta(StdProps._elemdata,'True')
result.fill(elems.n())
def getFaceInDirection(start,direction,enodes):
dray=eidolon.Ray(start,direction)
for f,face in enumerate(et.faces):
fnodes=[enodes[i] for i in face[:3]]
if dray.intersectsTri(*fnodes):
return f
return None
for e,elem in enumerate(elems):
edirs=[vec3(*dirField[n]) for n in elem] # elem directions
enodes=[nodes[n] for n in elem] # elem nodes
edir=et.applyBasis(edirs,0.25,0.25,0.25) # direction at elem center
center=et.applyBasis(enodes,0.25,0.25,0.25) # elem center
forward=getFaceInDirection(center,edir,enodes)
result[e,0]=forward
result[e,1]=adj[e,forward]
backward=getFaceInDirection(center,-edir,enodes)
result[e,2]=backward
result[e,3]=adj[e,backward]
assert result[e,0]<elems.n()
return result
@timing
def followElemDirAdj(elemdiradj,task=None):
'''
Follow the direction adjacency matrix `elemdiradj', storing a row for each element stating the final forward element,
final forward face, final backward element, and final backward face. The resulting element/face pairs lie on the mesh
surface.
'''
result=IndexMatrix('diradj',elemdiradj.n(),4)
result.fill(elemdiradj.n())
result.meta(StdProps._elemdata,'True')
def followElem(start,isForward):
'''From the starting element, follow the adjacency hops until a surface element is found.'''
curelem=start
index=1 if isForward else 3
visited=set()
while curelem not in visited and curelem>=start and elemdiradj[curelem,index]<elemdiradj.n():
visited.add(curelem)
curelem=elemdiradj[curelem,index]
if curelem<start: # previously assigned value, use this since the path from here on is the same
return result[curelem,index-1]
else:
return curelem
if task:
task.setMaxProgress(elemdiradj.n())
for e in range(elemdiradj.n()):
forward=followElem(e,True)
result[e,0]=forward
result[e,1]=elemdiradj[forward,0]
backward=followElem(e,False)
result[e,2]=backward
result[e,3]=elemdiradj[backward,2]
if task:
task.setProgress(e+1)
return result
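# Sketch: following a hand-built three-element chain 0->1->2, where the fill value 3==n marks 'no adjacent
# element' (i.e. the surface); the face columns are left at the fill value since only columns 1 and 3 drive the walk.
def _exampleFollowDirAdj():
    m=IndexMatrix('diradj',3,4)
    m.fill(3)
    m[0,1]=1; m[1,1]=2 # forward hops 0->1->2; element 2 has no forward neighbour
    m[1,3]=0; m[2,3]=1 # backward hops 2->1->0; element 0 has no backward neighbour
    res=followElemDirAdj(m)
    assert res[0,0]==2 and res[2,2]==0 # forward/backward surface elements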
@timing
def estimateThickness(nodes,tets,elemdiradj,task=None):
'''
Follow the direction adjacency matrix `elemdiradj', estimating thickness as the path length from each tet to the
forward and backward surfaces. The assigned value in the returned field is the maximal estimated thickness for
each element.
'''
result=RealMatrix(fieldNames._elemThickness,tets.n(),1)
result.meta(StdProps._elemdata,'True')
result.fill(-1) # -1 marks elements with no assigned thickness
def getTetCenter(ind):
return avg(nodes.mapIndexRow(tets,ind))
def followElem(start,isForward):
curelem=start
index=1 if isForward else 3
visited=set()
curpos=getTetCenter(start)
dist=0
while elemdiradj[curelem,index]<elemdiradj.n():
visited.add(curelem)
curelem=elemdiradj[curelem,index]
if curelem in visited: # circular path, assign no value to this element
dist=0
break
else:
nextpos=getTetCenter(curelem)
dist+=nextpos.distTo(curpos)
curpos=nextpos
return dist
if task:
task.setMaxProgress(elemdiradj.n())
for e in range(elemdiradj.n()):
result[e]=max(result[e],followElem(e,True)+followElem(e,False))
if task:
task.setProgress(e+1)
return result
@timing
def calculateTetDirections(tetmesh,endomesh,epimesh,tempdir,interpFunc,VTK,task=None):
def getTriCenterOctree(obj,ocdepth=2):
ds=obj.datasets[0]
nodes=ds.getNodes()
tris=first(i for i in ds.enumIndexSets() if i.getType()==ElemType._Tri1NL)
graph=TriMeshGraph(nodes,tris)
centeroc=eidolon.Octree(ocdepth,graph.boundbox.getDimensions(),graph.boundbox.center)
for i,c in enumerate(graph.tricenters):
centeroc.addNode(c,i)
return graph,centeroc,ds.getDataField(fieldNames._directions),ds.getDataField(fieldNames._regions)
if interpFunc is None:
interpFunc=lambda dir1,dir2,grad:tuple(dir1*grad+dir2*(1-grad))
et=ElemType.Tet1NL
faces=[f[:3] for f in et.faces]
ds=tetmesh.datasets[0]
eidolon.calculateElemExtAdj(ds)
nodes=ds.getNodes()
tets=first(i for i in ds.enumIndexSets() if i.getType()==ElemType._Tet1NL)
adj=ds.getIndexSet(tets.getName()+eidolon.MatrixType.adj[1])
numElems=tets.n()
elemdirfield=RealMatrix(fieldNames._elemDirs,tets.n(),3)
elemdirfield.fill(0)
elemdirfield.meta(StdProps._elemdata,'True')
ds.setDataField(elemdirfield)
elemregions=RealMatrix(fieldNames._regions,tets.n(),4)
elemregions.meta(StdProps._elemdata,'True')
elemregions.fill(-1)
ds.setDataField(elemregions)
endograph,endooc,endodirs,endoregions=getTriCenterOctree(endomesh)
epigraph,epioc,epidirs,epiregions=getTriCenterOctree(epimesh)
# set of nodes from the tet mesh on each surface
endonodes=set()
epinodes=set()
def calculateTriDir(graph,tri,dirs):
inds=graph.tris[tri]
trinodes=[graph.nodes[i] for i in inds]
trinorm=trinodes[0].planeNorm(trinodes[1],trinodes[2])
tridir=avg(vec3(*dirs[i]).norm() for i in inds)
return tridir.planeProject(vec3(),trinorm).norm()
# iterate over each element, filling in elemdirfield and the endonodes, epinodes, and elemregions values
for elem in range(numElems):
externs=adj[elem,:4]
extfaces=[f for i,f in enumerate(faces) if externs[i]==numElems]
for face in extfaces:
faceinds=[tets[elem,i] for i in face]
mid=avg(nodes[i] for i in faceinds)
tridir=None
if mid in endooc:
tri=endooc.getNode(mid)
tridir=calculateTriDir(endograph,tri,endodirs)
endonodes.update(faceinds)
elemregions[elem]=endoregions[tri]
elif mid in epioc:
tri=epioc.getNode(mid)
tridir=calculateTriDir(epigraph,tri,epidirs)
epinodes.update(faceinds)
elemregions[elem]=epiregions[tri]
# set the direction for the equivalent element
if tridir is not None:
elemdirfield[elem]=tuple(tridir)
assert endonodes
assert epinodes
nodegroup=[1 if n in endonodes else (2 if n in epinodes else 0) for n in range(len(nodes))]
gfield=calculateMeshGradient(os.path.join(tempdir,'tetmesh'),nodes,tets,nodegroup,VTK)
gfield.setName(fieldNames._nodeGradient)
ds.setDataField(gfield)
# convert gradient into node directions
dirs=calculateGradientDirs(nodes,generateSimplexEdgeMap(nodes.n(),tets),gfield)
# follow gradient and determine which elements/faces are the forward and backward endpoints of each element's gradient line
elemdiradj=getElemDirectionAdj(nodes,tets,adj,dirs)
elemFollow=followElemDirAdj(elemdiradj,task)
elemThickness=estimateThickness(nodes,tets,elemdiradj,task)
ds.setDataField(elemThickness)
for e in range(tets.n()):
elem1,face1,elem2,face2=elemFollow[e]
dir1=elemdirfield[elem1]
dir2=elemdirfield[elem2]
grad=avg(gfield[i] for i in tets[e])
elemdirfield[e]=interpFunc(vec3(*dir1),vec3(*dir2),grad)
return elemdirfield
### Project objects
class AtrialFibrePropWidget(ui.QtWidgets.QWidget,ui.Ui_AtrialFibre):
def __init__(self,parent=None):
super(AtrialFibrePropWidget,self).__init__(parent)
self.setupUi(self)
self.endoDoneButton.setVisible(False)
self.endoCancelButton.setVisible(False)
self.epiDoneButton.setVisible(False)
self.epiCancelButton.setVisible(False)
def startEdit(self,regtype):
'''Adjust button visibility and connected slots when starting to edit endo or epi nodes.'''
if regtype==regTypes._endo:
edit,other=self.endoEdit,self.epiEdit
done,cancel=self.endoDoneButton,self.endoCancelButton
else:
edit,other=self.epiEdit,self.endoEdit
done,cancel=self.epiDoneButton,self.epiCancelButton
# adjust button visibility
other.setEnabled(False)
edit.setVisible(False)
done.setVisible(True)
cancel.setVisible(True)
try: # if the edit button has already been clicked, disconnect existing slots
done.clicked.disconnect()
cancel.clicked.disconnect()
except:
pass
return done,cancel
def stopEdit(self):
'''Set UI back to default when done editing.'''
self.endoEdit.setVisible(True)
self.epiEdit.setVisible(True)
self.endoEdit.setEnabled(True)
self.epiEdit.setEnabled(True)
self.endoDoneButton.setVisible(False)
self.endoCancelButton.setVisible(False)
self.epiDoneButton.setVisible(False)
self.epiCancelButton.setVisible(False)
class AtrialFibreProject(Project):
def __init__(self,name,parentdir,mgr):
Project.__init__(self,name,parentdir,mgr)
self.header='AtrialFibre.createProject(%r,scriptdir+"/..")\n' %(self.name)
self.AtrialFibre=mgr.getPlugin('AtrialFibre')
self.VTK=self.mgr.getPlugin('VTK')
self.AtrialFibre.project=self # associate project with plugin
self.backDir=self.logDir=self.getProjectFile('logs')
self.editRep=None # node representation being edited
self.addHandlers()
def create(self):
Project.create(self)
if not os.path.isdir(self.logDir):
os.mkdir(self.logDir)
def getPropBox(self):
prop=Project.getPropBox(self)
# remove the UI for changing the project location
eidolon.cppdel(prop.chooseLocLayout)
eidolon.cppdel(prop.dirButton)
eidolon.cppdel(prop.chooseLocLabel)
self.afprop=AtrialFibrePropWidget()
prop.verticalLayout.insertWidget(prop.verticalLayout.count()-1,self.afprop)
def setConfigMap(combo,name):
@combo.currentIndexChanged.connect
def _set(i):
self.configMap[name]=str(combo.itemText(i))
setConfigMap(self.afprop.atlasBox,objNames._atlasmesh)
setConfigMap(self.afprop.origBox,objNames._origmesh)
setConfigMap(self.afprop.endoBox,objNames._endomesh)
setConfigMap(self.afprop.epiBox,objNames._epimesh)
self.afprop.importShellButton.clicked.connect(self._importShell)
self.afprop.endoReg.clicked.connect(lambda:self._registerLandmarks(objNames._endomesh,regTypes._endo))
self.afprop.endoDiv.clicked.connect(lambda:self._divideRegions(objNames._endomesh,regTypes._endo))
self.afprop.endoEdit.clicked.connect(lambda:self._editLandmarks(objNames._endomesh,regTypes._endo))
self.afprop.epiReg.clicked.connect(lambda:self._registerLandmarks(objNames._epimesh,regTypes._epi))
self.afprop.epiDiv.clicked.connect(lambda:self._divideRegions(objNames._epimesh,regTypes._epi))
self.afprop.epiEdit.clicked.connect(lambda:self._editLandmarks(objNames._epimesh,regTypes._epi))
self.afprop.genButton.clicked.connect(self._generate)
return prop
def updatePropBox(self,proj,prop):
Project.updatePropBox(self,proj,prop)
scenemeshes=[o for o in self.memberObjs if isinstance(o,eidolon.MeshSceneObject)]
names=sorted(o.getName() for o in scenemeshes)
def _fillList(combo,name):
# ensure the config value is actually set; when filling a previously empty list, currentIndexChanged isn't emitted
if not self.configMap.get(name,None):
self.configMap[name]=first(names)
eidolon.fillList(combo,names,self.configMap[name])
_fillList(self.afprop.atlasBox,objNames._atlasmesh)
_fillList(self.afprop.origBox,objNames._origmesh)
_fillList(self.afprop.endoBox,objNames._endomesh)
_fillList(self.afprop.epiBox,objNames._epimesh)
@taskmethod('Adding Object to Project')
def checkIncludeObject(self,obj,task):
'''Check whether the given object should be added to the project or not.'''
if not isinstance(obj,eidolon.MeshSceneObject) or obj in self.memberObjs or obj.getObjFiles() is None:
return
@timing
def _copy():
self.mgr.removeSceneObject(obj)
self.addMesh(obj)
pdir=self.getProjectDir()
files=list(map(os.path.abspath,obj.getObjFiles() or []))
if not files or any(not f.startswith(pdir) for f in files):
msg="Do you want to add %r to the project? This requires saving/copying the object's file data into the project directory."%(obj.getName())
self.mgr.win.chooseYesNoDialog(msg,'Adding Object',_copy)
def addMesh(self,obj):
filename=self.getProjectFile(obj.getName())
self.VTK.saveObject(obj,filename,setFilenames=True)
self.addObject(obj)
self.mgr.addSceneObject(obj)
self.save()
def createTempDir(self,prefix='tmp'):
path=self.getProjectFile(prefix+datetime.datetime.now().strftime('%Y%m%d%H%M%S'))
os.mkdir(path)
return path
def _importShell(self):
filename=self.mgr.win.chooseFileDialog('Choose Endo/Epi Shell filename',filterstr='VTK Files (*.vtk *.vtu *.vtp)')
if filename:
f=self.AtrialFibre.importShell(filename)
self.mgr.checkFutureResult(f)
@taskroutine('Add meshes')
def _add(task):
endo,epi=f()
self.addMesh(endo)
self.addMesh(epi)
self.mgr.runTasks(_add())
def _registerLandmarks(self,meshname,regtype):
atlas=self.getProjectObj(self.configMap.get(objNames._atlasmesh,''))
subj=self.getProjectObj(self.configMap.get(meshname,''))
assert atlas is not None
assert subj is not None
endo=self.getProjectObj(regtype)
if endo is not None:
self.mgr.removeSceneObject(endo)
tempdir=self.createTempDir('reg')
result=self.AtrialFibre.registerLandmarks(subj,atlas,regtype,tempdir)
self.mgr.checkFutureResult(result)
@taskroutine('Add points')
def _add(task):
name=regtype+'nodes'
oldobj=self.getProjectObj(name)
if oldobj is not None:
self.mgr.removeSceneObject(oldobj)
obj=eidolon.Future.get(result)
obj.setName(name)
self.addMesh(obj)
registered=os.path.join(tempdir,registeredFile)
regobj=self.VTK.loadObject(registered,regtype+'_RegMesh')
self.addMesh(regobj)
self.mgr.runTasks(_add())
def _editLandmarks(self,meshname,regtype):
surface=self.getProjectObj(self.configMap[meshname])
landmarks=self.getProjectObj(regtype+'nodes')
landmarkMap={} # maps landmark index to surface node index
if surface is None:
self.mgr.showMsg('Cannot find surface object %r'%self.configMap[meshname])
return
elif landmarks is None:
self.mgr.showMsg('Cannot find landmark object %r'%(regtype+'nodes'))
return
f=self._startEditLandmarks(surface,landmarks,landmarkMap)
self.mgr.checkFutureResult(f)
done,cancel=self.afprop.startEdit(regtype) # adjust UI and get done and cancel buttons
@cancel.clicked.connect
def _cancel():
'''Resets UI when the cancel button is pressed.'''
self.afprop.stopEdit()
self.mgr.removeSceneObjectRepr(self.editRep)
self.editRep=None
@done.clicked.connect
def _done():
'''Transfer data from moved repr to landmark object, save, and reset UI.'''
lmnodes=landmarks.datasets[0].getNodes()
surfacenodes=surface.datasets[0].getNodes()
for i,j in landmarkMap.items():
lmnodes[i]=surfacenodes[j]
f=landmarks.saveObject(landmarks.getObjFiles()[0])
self.mgr.checkFutureResult(f)
cancel.clicked.emit() # do cancel's cleanup
@taskmethod('Starting to edit landmarks')
def _startEditLandmarks(self,surface,landmarks,landmarkMap,task):
if not surface.reprs:
rep=surface.createRepr(ReprType._volume,0)
self.mgr.addSceneObjectRepr(rep)
noderep=landmarks.createRepr(ReprType._line,matname='Red')
self.mgr.addSceneObjectRepr(noderep)
self.editRep=noderep
landmarknodes=landmarks.datasets[0].getNodes()
editnodes=surface.datasets[0].getNodes()
query=createNodeQuery(editnodes)
handlecol=eidolon.color(1,0,0,1)
def _select(handle,index,release):
'''Handle-selection callback: updates landmarkMap and node positions in noderep.'''
if release: # on mouse release update the repr
f=self.mgr.updateSceneObjectRepr(noderep)
self.mgr.checkFutureResult(f)
else:
oldpos=handle.positionOffset
newpos=editnodes[index]
landmarkMap[handle.value]=index
for n in range(noderep.nodes.n()): # replace every old position with the new
if noderep.nodes[n,0]==oldpos:
noderep.nodes[n,0]=newpos
@eidolon.setmethod(noderep)
def createHandles():
'''Overrides the default node creation method to create selection handles instead.'''
handles=[]
for ind in range(landmarknodes.n()):
pos=landmarknodes[ind]
h=eidolon.NodeSelectHandle(pos,ind,query,_select,' '+str(ind),handlecol)
handles.append(h)
return handles
self.mgr.showHandle(noderep,True)
self.mgr.setCameraSeeAll()
def _divideRegions(self,meshname,regtype):
mesh=self.getProjectObj(self.configMap.get(meshname,''))
points=self.getProjectObj(regtype+'nodes')
assert mesh is not None
assert points is not None
result=self.AtrialFibre.divideRegions(mesh,points,regtype)
self.mgr.checkFutureResult(result)
@taskroutine('Save mesh')
def _save(task):
self.VTK.saveObject(mesh,mesh.getObjFiles()[0])
rep=mesh.createRepr(eidolon.ReprType._volume,0)
self.mgr.addSceneObjectRepr(rep)
rep.applyMaterial('Rainbow',field=fieldNames._regions,valfunc='Column 1')
self.mgr.setCameraSeeAll()
# lobj,lrep=showLines(points.datasets[0].getNodes(),lmlines,'AllLines','Red')
self.mgr.runTasks(_save())
def _generate(self):
tetmesh=self.getProjectObj(self.configMap.get(objNames._origmesh,''))
endomesh=self.getProjectObj(self.configMap.get(objNames._endomesh,''))
epimesh=self.getProjectObj(self.configMap.get(objNames._epimesh,''))
if tetmesh is None:
self.mgr.showMsg('Cannot find original tet mesh %r'%self.configMap.get(objNames._endomesh,''))
elif endomesh is None:
self.mgr.showMsg('Cannot find endo mesh %r'%self.configMap.get(objNames._endomesh,''))
elif epimesh is None:
self.mgr.showMsg('Cannot find epi mesh %r'%self.configMap.get(objNames._epimesh,''))
elif endomesh.datasets[0].getDataField('regions') is None:
self.mgr.showMsg('Endo mesh does not have region field assigned!')
elif epimesh.datasets[0].getDataField('regions') is None:
self.mgr.showMsg('Epi mesh does not have region field assigned!')
else:
tempdir=self.createTempDir('dirs')
endopoints=self.getProjectObj('endonodes')
epipoints=self.getProjectObj('epinodes')
regions=[]
result=self.AtrialFibre.generateMesh(endomesh,epimesh,tetmesh,endopoints,epipoints,tempdir,regions)
self.mgr.checkFutureResult(result)
@taskroutine('Save')
def _save(task):
self.VTK.saveObject(tetmesh,tetmesh.getObjFiles()[0])
@taskroutine('Load Rep')
def _load(task):
#rep=endomesh.createRepr(ReprType._volume,0)
#self.mgr.addSceneObjectRepr(rep)
#rep.applyMaterial('Rainbow',field='gradient',valfunc='Column 1')
rep=tetmesh.createRepr(ReprType._volume,0)
self.mgr.addSceneObjectRepr(rep)
rep.applyMaterial('Rainbow',field=fieldNames._elemDirs,valfunc='Magnitude')
self.mgr.setCameraSeeAll()
showElemDirs(tetmesh,(50,50,100),self.mgr) # TODO: replace with createRepr call
self.mgr.runTasks([_save(),_load()])
class AtrialFibrePlugin(ScenePlugin):
def __init__(self):
ScenePlugin.__init__(self,'AtrialFibre')
self.project=None
def init(self,plugid,win,mgr):
ScenePlugin.init(self,plugid,win,mgr)
self.VTK=self.mgr.getPlugin('VTK')
assert self.VTK is not None, 'Cannot find VTK plugin!'
if self.win!=None:
self.win.addMenuItem('Project','AtrialFibreProj'+str(plugid),'&Atrial Fibre Project',self._newProjDialog)
# extract the deformetrica zip file if not present
if not os.path.isdir(deformDir):
z=zipfile.ZipFile(deformDir+'.zip')
z.extractall(plugindir)
os.chmod(deformExe,stat.S_IRUSR|stat.S_IXUSR|stat.S_IWUSR)
self.mirtkdir=os.path.join(eidolon.getAppDir(),eidolon.LIBSDIR,'MIRTK','Linux')
eidolon.addPathVariable('LD_LIBRARY_PATH',self.mirtkdir)
self.decimate=os.path.join(self.mirtkdir,decimate)
def _newProjDialog(self):
def chooseProjDir(name):
newdir=self.win.chooseDirDialog('Choose Project Root Directory')
if len(newdir)>0:
self.createProject(name,newdir)
self.win.chooseStrDialog('Choose Project Name','Project',chooseProjDir)
def createProject(self,name,parentdir):
if self.project==None:
self.mgr.createProjectObj(name,parentdir,AtrialFibreProject)
def getArchitecture(self,regtype=regTypes._endo):
return loadArchitecture(architecture,regtype)
@taskmethod('Import endo/epi shell')
def importShell(self,filename,task=None):
shells=self.VTK.loadObject(filename)
ds=shells.datasets[0]
nodes=ds.getNodes()
tris=first(ds.enumIndexSets())
adj,_=generateTriAdj(tris)
center=avg(nodes) #BoundBox(nodes).center
findex=findFarthestIndex(center,nodes)
outerinds=getAdjTo(adj,findex,None)
outertris=listToMatrix([tris[i] for i in outerinds],'tris',ElemType._Tri1NL)
innertris=listToMatrix([tris[i] for i in range(tris.n()) if i not in outerinds],'tris',ElemType._Tri1NL)
assert outertris.n()>0
assert innertris.n()>0
outermesh=reduceMesh(nodes,[outertris],marginSq=1e-1)
innermesh=reduceMesh(nodes,[innertris],marginSq=1e-1)
generateNodeElemMap(outermesh[0].n(),outermesh[1][0])
generateNodeElemMap(innermesh[0].n(),innermesh[1][0])
# TODO: not reliably telling inner from outer shell; until that's improved, use ambiguous mesh names and have the user choose
outer=MeshSceneObject('shell1',PyDataSet('ds',outermesh[0],outermesh[1]))
inner=MeshSceneObject('shell2',PyDataSet('ds',innermesh[0],innermesh[1]))
return inner,outer
@taskmethod('Registering landmarks')
def registerLandmarks(self,meshObj,atlasObj,regtype,outdir,task=None):
if atlasObj.reprs:
atlasTrans=atlasObj.reprs[0].getTransform()
else:
atlasTrans=None
output=registerSubjectToTarget(meshObj,atlasObj,atlasTrans,outdir,self.decimate,self.VTK)
eidolon.printFlush(output)
points,lines=transferLandmarks(architecture,regtype,atlasObj,atlasTrans,meshObj,outdir,self.VTK)
subjnodes=meshObj.datasets[0].getNodes()
ptds=eidolon.PyDataSet('pts',[subjnodes[n[0]] for n in points],[('lines',ElemType._Line1NL,lines)])
return eidolon.MeshSceneObject('LM',ptds)
@taskmethod('Dividing mesh into regions')
def divideRegions(self,mesh,points,regtype,task=None):
_,_,lmregions,_,_,appendageregion,appendagenode,_=loadArchitecture(architecture,regtype)
filledregions,linemap=generateRegionField(mesh,points,lmregions,appendageregion,appendagenode,task)
mesh.datasets[0].setDataField(filledregions)
@taskmethod('Generating mesh')
def generateMesh(self,endomesh,epimesh,tetmesh,endopoints,epipoints,outdir,regions=[],task=None):
endograd,endodir=calculateDirectionField(endomesh,endopoints,regions,regTypes._endo,outdir,self.VTK)
epigrad,epidir=calculateDirectionField(epimesh,epipoints,regions,regTypes._epi,outdir,self.VTK)
return calculateTetDirections(tetmesh,endomesh,epimesh,outdir,None,self.VTK,task)
### Add the project
eidolon.addPlugin(AtrialFibrePlugin()) # note this occurs after other projects are loaded and is not in the subprocesses' namespaces
|
[
"sfepy.base.base.output.set_output",
"sfepy.base.conf.ProblemConf.from_file",
"sfepy.applications.solve_pde"
] |
[((1102, 1149), 'os.path.join', 'os.path.join', (['plugindir', '"""AtrialFibrePlugin.ui"""'], {}), "(plugindir, 'AtrialFibrePlugin.ui')\n", (1114, 1149), False, 'import os\n'), ((1160, 1200), 'os.path.join', 'os.path.join', (['plugindir', '"""deformetricaC"""'], {}), "(plugindir, 'deformetricaC')\n", (1172, 1200), False, 'import os\n'), ((1210, 1249), 'os.path.join', 'os.path.join', (['deformDir', '"""deformetrica"""'], {}), "(deformDir, 'deformetrica')\n", (1222, 1249), False, 'import os\n'), ((1262, 1305), 'os.path.join', 'os.path.join', (['plugindir', '"""architecture.ini"""'], {}), "(plugindir, 'architecture.ini')\n", (1274, 1305), False, 'import os\n'), ((1318, 1359), 'os.path.join', 'os.path.join', (['plugindir', '"""problemfile.py"""'], {}), "(plugindir, 'problemfile.py')\n", (1330, 1359), False, 'import os\n'), ((1993, 2123), 'eidolon.enum', 'eidolon.enum', (['"""regions"""', '"""landmarks"""', '"""directions"""', '"""gradientDirs"""', '"""elemDirs"""', '"""elemRegions"""', '"""elemThickness"""', '"""nodeGradient"""'], {}), "('regions', 'landmarks', 'directions', 'gradientDirs',\n 'elemDirs', 'elemRegions', 'elemThickness', 'nodeGradient')\n", (2005, 2123), False, 'import eidolon\n'), ((2145, 2250), 'eidolon.enum', 'eidolon.enum', (['"""atlasmesh"""', '"""origmesh"""', '"""epimesh"""', '"""epinodes"""', '"""endomesh"""', '"""endonodes"""', '"""architecture"""'], {}), "('atlasmesh', 'origmesh', 'epimesh', 'epinodes', 'endomesh',\n 'endonodes', 'architecture')\n", (2157, 2250), False, 'import eidolon\n'), ((2277, 2304), 'eidolon.enum', 'eidolon.enum', (['"""endo"""', '"""epi"""'], {}), "('endo', 'epi')\n", (2289, 2304), False, 'import eidolon\n'), ((1020, 1045), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (1035, 1045), False, 'import os\n'), ((2471, 2492), 'eidolon.getSceneMgr', 'eidolon.getSceneMgr', ([], {}), '()\n', (2490, 2492), False, 'import eidolon\n'), ((2504, 2550), 'eidolon.LineDataSet', 'eidolon.LineDataSet', (["(name + 'DS')", 'nodes', 'lines'], {}), "(name + 'DS', nodes, lines)\n", (2523, 2550), False, 'import eidolon\n'), ((2555, 2584), 'eidolon.MeshSceneObject', 'MeshSceneObject', (['name', 'lineds'], {}), '(name, lineds)\n', (2570, 2584), False, 'from eidolon import ui, ScenePlugin, Project, avg, vec3, successive, first, RealMatrix, IndexMatrix, StdProps, timing, ReprType, listToMatrix, MeshSceneObject, BoundBox, ElemType, reduceMesh, PyDataSet, taskmethod, taskroutine, printFlush\n'), ((2970, 3009), 'eidolon.ElemType.Tet1NL.basis', 'ElemType.Tet1NL.basis', (['(0.25)', '(0.25)', '(0.25)'], {}), '(0.25, 0.25, 0.25)\n', (2991, 3009), False, 'from eidolon import ui, ScenePlugin, Project, avg, vec3, successive, first, RealMatrix, IndexMatrix, StdProps, timing, ReprType, listToMatrix, MeshSceneObject, BoundBox, ElemType, reduceMesh, PyDataSet, taskmethod, taskroutine, printFlush\n'), ((8402, 8433), 'ConfigParser.SafeConfigParser', 'configparser.SafeConfigParser', ([], {}), '()\n', (8431, 8433), True, 'import ConfigParser as configparser\n'), ((12107, 12142), 'os.path.join', 'os.path.join', (['outdir', 'decimatedFile'], {}), '(outdir, decimatedFile)\n', (12119, 12142), False, 'import os\n'), ((12154, 12185), 'os.path.join', 'os.path.join', (['outdir', '"""tmp.vtk"""'], {}), "(outdir, 'tmp.vtk')\n", (12166, 12185), False, 'import os\n'), ((13576, 13680), 'eidolon.execBatchProgram', 'eidolon.execBatchProgram', (['decimpath', 'tmpfile', 'dpath', '"""-reduceby"""', 'sizepercent', '"""-ascii"""'], {'logcmd': '(True)'}), "(decimpath, tmpfile, dpath, 
'-reduceby',\n sizepercent, '-ascii', logcmd=True)\n", (13600, 13680), False, 'import eidolon\n'), ((13754, 13902), 'eidolon.execBatchProgram', 'eidolon.execBatchProgram', (['deformExe', '"""registration"""', '"""3D"""', 'modelFile', 'datasetFile', 'optimFile', '"""--output-dir=."""'], {'cwd': 'outdir', 'env': 'env', 'logcmd': '(True)'}), "(deformExe, 'registration', '3D', modelFile,\n datasetFile, optimFile, '--output-dir=.', cwd=outdir, env=env, logcmd=True)\n", (13778, 13902), False, 'import eidolon\n'), ((14367, 14402), 'os.path.join', 'os.path.join', (['outdir', 'decimatedFile'], {}), '(outdir, decimatedFile)\n', (14379, 14402), False, 'import os\n'), ((14417, 14453), 'os.path.join', 'os.path.join', (['outdir', 'registeredFile'], {}), '(outdir, registeredFile)\n', (14429, 14453), False, 'import os\n'), ((21333, 21373), 'eidolon.printFlush', 'eidolon.printFlush', (['"""Nodes:"""', 'start', 'end'], {}), "('Nodes:', start, end)\n", (21351, 21373), False, 'import eidolon\n'), ((21515, 21543), 'eidolon.first', 'first', (['graph.nodeelem[start]'], {}), '(graph.nodeelem[start])\n', (21520, 21543), False, 'from eidolon import ui, ScenePlugin, Project, avg, vec3, successive, first, RealMatrix, IndexMatrix, StdProps, timing, ReprType, listToMatrix, MeshSceneObject, BoundBox, ElemType, reduceMesh, PyDataSet, taskmethod, taskroutine, printFlush\n'), ((21555, 21581), 'eidolon.first', 'first', (['graph.nodeelem[end]'], {}), '(graph.nodeelem[end])\n', (21560, 21581), False, 'from eidolon import ui, ScenePlugin, Project, avg, vec3, successive, first, RealMatrix, IndexMatrix, StdProps, timing, ReprType, listToMatrix, MeshSceneObject, BoundBox, ElemType, reduceMesh, PyDataSet, taskmethod, taskroutine, printFlush\n'), ((28371, 28394), 'os.path.dirname', 'os.path.dirname', (['prefix'], {}), '(prefix)\n', (28386, 28394), False, 'import os\n'), ((28715, 28746), 'sfepy.base.conf.ProblemConf.from_file', 'ProblemConf.from_file', (['probfile'], {}), '(probfile)\n', (28736, 28746), False, 'from sfepy.base.conf import ProblemConf\n'), ((28751, 28789), 'sfepy.base.base.output.set_output', 'output.set_output', (['logfile', '(True)', '(True)'], {}), '(logfile, True, True)\n', (28768, 28789), False, 'from sfepy.base.base import output\n'), ((28792, 28804), 'sfepy.applications.solve_pde', 'solve_pde', (['p'], {}), '(p)\n', (28801, 28804), False, 'from sfepy.applications import solve_pde\n'), ((30819, 30852), 'numpy.asarray', 'np.asarray', (['regionfield', 'np.int32'], {}), '(regionfield, np.int32)\n', (30829, 30852), True, 'import numpy as np\n'), ((39575, 39606), 'eidolon.calculateElemExtAdj', 'eidolon.calculateElemExtAdj', (['ds'], {}), '(ds)\n', (39602, 39606), False, 'import eidolon\n'), ((47719, 47757), 'eidolon.taskmethod', 'taskmethod', (['"""Adding Object to Project"""'], {}), "('Adding Object to Project')\n", (47729, 47757), False, 'from eidolon import ui, ScenePlugin, Project, avg, vec3, successive, first, RealMatrix, IndexMatrix, StdProps, timing, ReprType, listToMatrix, MeshSceneObject, BoundBox, ElemType, reduceMesh, PyDataSet, taskmethod, taskroutine, printFlush\n'), ((52180, 52220), 'eidolon.taskmethod', 'taskmethod', (['"""Starting to edit landmarks"""'], {}), "('Starting to edit landmarks')\n", (52190, 52220), False, 'from eidolon import ui, ScenePlugin, Project, avg, vec3, successive, first, RealMatrix, IndexMatrix, StdProps, timing, ReprType, listToMatrix, MeshSceneObject, BoundBox, ElemType, reduceMesh, PyDataSet, taskmethod, taskroutine, printFlush\n'), ((58708, 58743), 'eidolon.taskmethod', 
'taskmethod', (['"""Import endo/epi shell"""'], {}), "('Import endo/epi shell')\n", (58718, 58743), False, 'from eidolon import ui, ScenePlugin, Project, avg, vec3, successive, first, RealMatrix, IndexMatrix, StdProps, timing, ReprType, listToMatrix, MeshSceneObject, BoundBox, ElemType, reduceMesh, PyDataSet, taskmethod, taskroutine, printFlush\n'), ((60004, 60039), 'eidolon.taskmethod', 'taskmethod', (['"""Registering landmarks"""'], {}), "('Registering landmarks')\n", (60014, 60039), False, 'from eidolon import ui, ScenePlugin, Project, avg, vec3, successive, first, RealMatrix, IndexMatrix, StdProps, timing, ReprType, listToMatrix, MeshSceneObject, BoundBox, ElemType, reduceMesh, PyDataSet, taskmethod, taskroutine, printFlush\n'), ((60731, 60771), 'eidolon.taskmethod', 'taskmethod', (['"""Dividing mesh into regions"""'], {}), "('Dividing mesh into regions')\n", (60741, 60771), False, 'from eidolon import ui, ScenePlugin, Project, avg, vec3, successive, first, RealMatrix, IndexMatrix, StdProps, timing, ReprType, listToMatrix, MeshSceneObject, BoundBox, ElemType, reduceMesh, PyDataSet, taskmethod, taskroutine, printFlush\n'), ((61112, 61141), 'eidolon.taskmethod', 'taskmethod', (['"""Generating mesh"""'], {}), "('Generating mesh')\n", (61122, 61141), False, 'from eidolon import ui, ScenePlugin, Project, avg, vec3, successive, first, RealMatrix, IndexMatrix, StdProps, timing, ReprType, listToMatrix, MeshSceneObject, BoundBox, ElemType, reduceMesh, PyDataSet, taskmethod, taskroutine, printFlush\n'), ((564, 665), 'warnings.warn', 'warnings.warn', (['"""SfePy needs to be installed or in PYTHONPATH to generate fiber directions."""'], {}), "(\n 'SfePy needs to be installed or in PYTHONPATH to generate fiber directions.'\n )\n", (577, 665), False, 'import warnings\n'), ((3028, 3086), 'eidolon.ElemType.Tet1NL.applyCoeffs', 'ElemType.Tet1NL.applyCoeffs', (['[nodes[e] for e in elem]', 'mid'], {}), '([nodes[e] for e in elem], mid)\n', (3055, 3086), False, 'from eidolon import ui, ScenePlugin, Project, avg, vec3, successive, first, RealMatrix, IndexMatrix, StdProps, timing, ReprType, listToMatrix, MeshSceneObject, BoundBox, ElemType, reduceMesh, PyDataSet, taskmethod, taskroutine, printFlush\n'), ((3142, 3195), 'eidolon.PyDataSet', 'PyDataSet', (['"""elemobjds"""', 'elemnodes', '[]', '[elemdirfield]'], {}), "('elemobjds', elemnodes, [], [elemdirfield])\n", (3151, 3195), False, 'from eidolon import ui, ScenePlugin, Project, avg, vec3, successive, first, RealMatrix, IndexMatrix, StdProps, timing, ReprType, listToMatrix, MeshSceneObject, BoundBox, ElemType, reduceMesh, PyDataSet, taskmethod, taskroutine, printFlush\n'), ((3576, 3625), 'collections.defaultdict.__init__', 'defaultdict.__init__', (['self', 'None', '*args'], {}), '(self, None, *args, **kwargs)\n', (3596, 3625), False, 'from collections import defaultdict\n'), ((5258, 5273), 'eidolon.BoundBox', 'BoundBox', (['nodes'], {}), '(nodes)\n', (5266, 5273), False, 'from eidolon import ui, ScenePlugin, Project, avg, vec3, successive, first, RealMatrix, IndexMatrix, StdProps, timing, ReprType, listToMatrix, MeshSceneObject, BoundBox, ElemType, reduceMesh, PyDataSet, taskmethod, taskroutine, printFlush\n'), ((12391, 12427), 'os.path.join', 'os.path.join', (['deformDir', 'datasetFile'], {}), '(deformDir, datasetFile)\n', (12403, 12427), False, 'import os\n'), ((12427, 12460), 'os.path.join', 'os.path.join', (['outdir', 'datasetFile'], {}), '(outdir, datasetFile)\n', (12439, 12460), False, 'import os\n'), ((13166, 13198), 'os.path.join', 'os.path.join', 
(['outdir', 'targetFile'], {}), '(outdir, targetFile)\n', (13178, 13198), False, 'import os\n'), ((15163, 15182), 'eidolon.transform', 'eidolon.transform', ([], {}), '()\n', (15180, 15182), False, 'import eidolon\n'), ((16853, 16877), 'eidolon.successive', 'successive', (['tri', '(2)', '(True)'], {}), '(tri, 2, True)\n', (16863, 16877), False, 'from eidolon import ui, ScenePlugin, Project, avg, vec3, successive, first, RealMatrix, IndexMatrix, StdProps, timing, ReprType, listToMatrix, MeshSceneObject, BoundBox, ElemType, reduceMesh, PyDataSet, taskmethod, taskroutine, printFlush\n'), ((23070, 23116), 'eidolon.printFlush', 'eidolon.printFlush', (['"""---Resorting to easypath"""'], {}), "('---Resorting to easypath')\n", (23088, 23116), False, 'import eidolon\n'), ((24271, 24288), 'eidolon.first', 'first', (['bordertris'], {}), '(bordertris)\n', (24276, 24288), False, 'from eidolon import ui, ScenePlugin, Project, avg, vec3, successive, first, RealMatrix, IndexMatrix, StdProps, timing, ReprType, listToMatrix, MeshSceneObject, BoundBox, ElemType, reduceMesh, PyDataSet, taskmethod, taskroutine, printFlush\n'), ((25295, 25324), 'eidolon.listSum', 'eidolon.listSum', (['graph.ragged'], {}), '(graph.ragged)\n', (25310, 25324), False, 'import eidolon\n'), ((32079, 32134), 'eidolon.printFlush', 'eidolon.printFlush', (['"""Region"""', 'r', 'lmstim[r]', 'lmground[r]'], {}), "('Region', r, lmstim[r], lmground[r])\n", (32097, 32134), False, 'import eidolon\n'), ((34914, 34943), 'eidolon.Ray', 'eidolon.Ray', (['start', 'direction'], {}), '(start, direction)\n', (34925, 34943), False, 'import eidolon\n'), ((41922, 41954), 'os.path.join', 'os.path.join', (['tempdir', '"""tetmesh"""'], {}), "(tempdir, 'tetmesh')\n", (41934, 41954), False, 'import os\n'), ((42697, 42728), 'eidolon.avg', 'avg', (['(gfield[i] for i in tets[e])'], {}), '(gfield[i] for i in tets[e])\n', (42700, 42728), False, 'from eidolon import ui, ScenePlugin, Project, avg, vec3, successive, first, RealMatrix, IndexMatrix, StdProps, timing, ReprType, listToMatrix, MeshSceneObject, BoundBox, ElemType, reduceMesh, PyDataSet, taskmethod, taskroutine, printFlush\n'), ((44600, 44644), 'eidolon.Project.__init__', 'Project.__init__', (['self', 'name', 'parentdir', 'mgr'], {}), '(self, name, parentdir, mgr)\n', (44616, 44644), False, 'from eidolon import ui, ScenePlugin, Project, avg, vec3, successive, first, RealMatrix, IndexMatrix, StdProps, timing, ReprType, listToMatrix, MeshSceneObject, BoundBox, ElemType, reduceMesh, PyDataSet, taskmethod, taskroutine, printFlush\n'), ((45125, 45145), 'eidolon.Project.create', 'Project.create', (['self'], {}), '(self)\n', (45139, 45145), False, 'from eidolon import ui, ScenePlugin, Project, avg, vec3, successive, first, RealMatrix, IndexMatrix, StdProps, timing, ReprType, listToMatrix, MeshSceneObject, BoundBox, ElemType, reduceMesh, PyDataSet, taskmethod, taskroutine, printFlush\n'), ((45271, 45295), 'eidolon.Project.getPropBox', 'Project.getPropBox', (['self'], {}), '(self)\n', (45289, 45295), False, 'from eidolon import ui, ScenePlugin, Project, avg, vec3, successive, first, RealMatrix, IndexMatrix, StdProps, timing, ReprType, listToMatrix, MeshSceneObject, BoundBox, ElemType, reduceMesh, PyDataSet, taskmethod, taskroutine, printFlush\n'), ((45363, 45399), 'eidolon.cppdel', 'eidolon.cppdel', (['prop.chooseLocLayout'], {}), '(prop.chooseLocLayout)\n', (45377, 45399), False, 'import eidolon\n'), ((45408, 45438), 'eidolon.cppdel', 'eidolon.cppdel', (['prop.dirButton'], {}), '(prop.dirButton)\n', (45422, 45438), 
False, 'import eidolon\n'), ((45447, 45482), 'eidolon.cppdel', 'eidolon.cppdel', (['prop.chooseLocLabel'], {}), '(prop.chooseLocLabel)\n', (45461, 45482), False, 'import eidolon\n'), ((46935, 46974), 'eidolon.Project.updatePropBox', 'Project.updatePropBox', (['self', 'proj', 'prop'], {}), '(self, proj, prop)\n', (46956, 46974), False, 'from eidolon import ui, ScenePlugin, Project, avg, vec3, successive, first, RealMatrix, IndexMatrix, StdProps, timing, ReprType, listToMatrix, MeshSceneObject, BoundBox, ElemType, reduceMesh, PyDataSet, taskmethod, taskroutine, printFlush\n'), ((48909, 48923), 'os.mkdir', 'os.mkdir', (['path'], {}), '(path)\n', (48917, 48923), False, 'import os\n'), ((50058, 50083), 'eidolon.taskroutine', 'taskroutine', (['"""Add points"""'], {}), "('Add points')\n", (50069, 50083), False, 'from eidolon import ui, ScenePlugin, Project, avg, vec3, successive, first, RealMatrix, IndexMatrix, StdProps, timing, ReprType, listToMatrix, MeshSceneObject, BoundBox, ElemType, reduceMesh, PyDataSet, taskmethod, taskroutine, printFlush\n'), ((52756, 52781), 'eidolon.color', 'eidolon.color', (['(1)', '(0)', '(0)', '(1)'], {}), '(1, 0, 0, 1)\n', (52769, 52781), False, 'import eidolon\n'), ((53496, 53522), 'eidolon.setmethod', 'eidolon.setmethod', (['noderep'], {}), '(noderep)\n', (53513, 53522), False, 'import eidolon\n'), ((54425, 54449), 'eidolon.taskroutine', 'taskroutine', (['"""Save mesh"""'], {}), "('Save mesh')\n", (54436, 54449), False, 'from eidolon import ui, ScenePlugin, Project, avg, vec3, successive, first, RealMatrix, IndexMatrix, StdProps, timing, ReprType, listToMatrix, MeshSceneObject, BoundBox, ElemType, reduceMesh, PyDataSet, taskmethod, taskroutine, printFlush\n'), ((57201, 57242), 'eidolon.ScenePlugin.__init__', 'ScenePlugin.__init__', (['self', '"""AtrialFibre"""'], {}), "(self, 'AtrialFibre')\n", (57221, 57242), False, 'from eidolon import ui, ScenePlugin, Project, avg, vec3, successive, first, RealMatrix, IndexMatrix, StdProps, timing, ReprType, listToMatrix, MeshSceneObject, BoundBox, ElemType, reduceMesh, PyDataSet, taskmethod, taskroutine, printFlush\n'), ((57312, 57352), 'eidolon.ScenePlugin.init', 'ScenePlugin.init', (['self', 'plugid', 'win', 'mgr'], {}), '(self, plugid, win, mgr)\n', (57328, 57352), False, 'from eidolon import ui, ScenePlugin, Project, avg, vec3, successive, first, RealMatrix, IndexMatrix, StdProps, timing, ReprType, listToMatrix, MeshSceneObject, BoundBox, ElemType, reduceMesh, PyDataSet, taskmethod, taskroutine, printFlush\n'), ((57996, 58053), 'eidolon.addPathVariable', 'eidolon.addPathVariable', (['"""LD_LIBRARY_PATH"""', 'self.mirtkdir'], {}), "('LD_LIBRARY_PATH', self.mirtkdir)\n", (58019, 58053), False, 'import eidolon\n'), ((58075, 58112), 'os.path.join', 'os.path.join', (['self.mirtkdir', 'decimate'], {}), '(self.mirtkdir, decimate)\n', (58087, 58112), False, 'import os\n'), ((58991, 59001), 'eidolon.avg', 'avg', (['nodes'], {}), '(nodes)\n', (58994, 59001), False, 'from eidolon import ui, ScenePlugin, Project, avg, vec3, successive, first, RealMatrix, IndexMatrix, StdProps, timing, ReprType, listToMatrix, MeshSceneObject, BoundBox, ElemType, reduceMesh, PyDataSet, taskmethod, taskroutine, printFlush\n'), ((59153, 59221), 'eidolon.listToMatrix', 'listToMatrix', (['[tris[i] for i in outerinds]', '"""tris"""', 'ElemType._Tri1NL'], {}), "([tris[i] for i in outerinds], 'tris', ElemType._Tri1NL)\n", (59165, 59221), False, 'from eidolon import ui, ScenePlugin, Project, avg, vec3, successive, first, RealMatrix, IndexMatrix, StdProps, timing, ReprType, listToMatrix, MeshSceneObject, BoundBox, ElemType, reduceMesh, PyDataSet, taskmethod, taskroutine, printFlush\n'), ((59431, 59475), 'eidolon.reduceMesh', 'reduceMesh', (['nodes', '[outertris]'], {'marginSq': '(0.1)'}), '(nodes, [outertris], marginSq=0.1)\n', (59441, 59475), False, 'from eidolon import ui, ScenePlugin, Project, avg, vec3, successive, first, RealMatrix, IndexMatrix, StdProps, timing, ReprType, listToMatrix, MeshSceneObject, BoundBox, ElemType, reduceMesh, PyDataSet, taskmethod, taskroutine, printFlush\n'), ((59493, 59537), 'eidolon.reduceMesh', 'reduceMesh', (['nodes', '[innertris]'], {'marginSq': '(0.1)'}), '(nodes, [innertris], marginSq=0.1)\n', (59503, 59537), False, 'from eidolon import ui, ScenePlugin, Project, avg, vec3, successive, first, RealMatrix, IndexMatrix, StdProps, timing, ReprType, listToMatrix, MeshSceneObject, BoundBox, ElemType, reduceMesh, PyDataSet, taskmethod, taskroutine, printFlush\n'), ((60355, 60381), 'eidolon.printFlush', 'eidolon.printFlush', (['output'], {}), '(output)\n', (60373, 60381), False, 'import eidolon\n'), ((60567, 60669), 'eidolon.PyDataSet', 'eidolon.PyDataSet', (['"""pts"""', '[subjnodes[n[0]] for n in points]', "[('lines', ElemType._Line1NL, lines)]"], {}), "('pts', [subjnodes[n[0]] for n in points], [('lines',\n ElemType._Line1NL, lines)])\n", (60584, 60669), False, 'import eidolon\n'), ((60686, 60721), 'eidolon.MeshSceneObject', 'eidolon.MeshSceneObject', (['"""LM"""', 'ptds'], {}), "('LM', ptds)\n", (60709, 60721), False, 'import eidolon\n'), ((4778, 4806), 'eidolon.listToMatrix', 'listToMatrix', (['nodes', '"""nodes"""'], {}), "(nodes, 'nodes')\n", (4790, 4806), False, 'from eidolon import ui, ScenePlugin, Project, avg, vec3, successive, first, RealMatrix, IndexMatrix, StdProps, timing, ReprType, listToMatrix, MeshSceneObject, BoundBox, ElemType, reduceMesh, PyDataSet, taskmethod, taskroutine, printFlush\n'), ((4874, 4900), 'eidolon.listToMatrix', 'listToMatrix', (['tris', '"""tris"""'], {}), "(tris, 'tris')\n", (4886, 4900), False, 'from eidolon import ui, ScenePlugin, Project, avg, vec3, successive, first, RealMatrix, IndexMatrix, StdProps, timing, ReprType, listToMatrix, MeshSceneObject, BoundBox, ElemType, reduceMesh, PyDataSet, taskmethod, taskroutine, printFlush\n'), ((12756, 12787), 'os.path.join', 'os.path.join', (['outdir', 'modelFile'], {}), '(outdir, modelFile)\n', (12768, 12787), False, 'import os\n'), ((12977, 13008), 'os.path.join', 'os.path.join', (['outdir', 'optimFile'], {}), '(outdir, optimFile)\n', (12989, 13008), False, 'import os\n'), ((26907, 26994), 'eidolon.printFlush', 'eidolon.printFlush', (['"""Skipping"""', 'rindex', '[a for a in allnodes if a not in landmarks]'], {}), "('Skipping', rindex, [a for a in allnodes if a not in\n landmarks])\n", (26925, 26994), False, 'import eidolon\n'), ((35202, 35220), 'eidolon.vec3', 'vec3', (['*dirField[n]'], {}), '(*dirField[n])\n', (35206, 35220), False, 'from eidolon import ui, ScenePlugin, Project, avg, vec3, successive, first, RealMatrix, IndexMatrix, StdProps, timing, ReprType, listToMatrix, MeshSceneObject, BoundBox, ElemType, reduceMesh, PyDataSet, taskmethod, taskroutine, printFlush\n'), ((41059, 41090), 'eidolon.avg', 'avg', (['(nodes[i] for i in faceinds)'], {}), '(nodes[i] for i in faceinds)\n', (41062, 41090), False, 'from eidolon import ui, ScenePlugin, Project, avg, vec3, successive, first, RealMatrix, IndexMatrix, StdProps, timing, ReprType, listToMatrix, MeshSceneObject, BoundBox, ElemType, reduceMesh, PyDataSet, taskmethod, taskroutine, printFlush\n'), ((42773, 42784), 'eidolon.vec3', 'vec3', (['*dir1'], {}), '(*dir1)\n', (42777, 42784), False, 'from eidolon import ui, ScenePlugin, Project, avg, vec3, successive, first, RealMatrix, IndexMatrix, StdProps, timing, ReprType, listToMatrix, MeshSceneObject, BoundBox, ElemType, reduceMesh, PyDataSet, taskmethod, taskroutine, printFlush\n'), ((42785, 42796), 'eidolon.vec3', 'vec3', (['*dir2'], {}), '(*dir2)\n', (42789, 42796), False, 'from eidolon import ui, ScenePlugin, Project, avg, vec3, successive, first, RealMatrix, IndexMatrix, StdProps, timing, ReprType, listToMatrix, MeshSceneObject, BoundBox, ElemType, reduceMesh, PyDataSet, taskmethod, taskroutine, printFlush\n'), ((45161, 45187), 'os.path.isdir', 'os.path.isdir', (['self.logDir'], {}), '(self.logDir)\n', (45174, 45187), False, 'import os\n'), ((45201, 45222), 'os.mkdir', 'os.mkdir', (['self.logDir'], {}), '(self.logDir)\n', (45209, 45222), False, 'import os\n'), ((47421, 47473), 'eidolon.fillList', 'eidolon.fillList', (['combo', 'names', 'self.configMap[name]'], {}), '(combo, names, self.configMap[name])\n', (47437, 47473), False, 'import eidolon\n'), ((49242, 49267), 'eidolon.taskroutine', 'taskroutine', (['"""Add meshes"""'], {}), "('Add meshes')\n", (49253, 49267), False, 'from eidolon import ui, ScenePlugin, Project, avg, vec3, successive, first, RealMatrix, IndexMatrix, StdProps, timing, ReprType, listToMatrix, MeshSceneObject, BoundBox, ElemType, reduceMesh, PyDataSet, taskmethod, taskroutine, printFlush\n'), ((50313, 50339), 'eidolon.Future.get', 'eidolon.Future.get', (['result'], {}), '(result)\n', (50331, 50339), False, 'import eidolon\n'), ((50436, 50473), 'os.path.join', 'os.path.join', (['tempdir', 'registeredFile'], {}), '(tempdir, registeredFile)\n', (50448, 50473), False, 'import os\n'), ((57706, 57730), 'os.path.isdir', 'os.path.isdir', (['deformDir'], {}), '(deformDir)\n', (57719, 57730), False, 'import os\n'), ((57746, 57781), 'zipfile.ZipFile', 'zipfile.ZipFile', (["(deformDir + '.zip')"], {}), "(deformDir + '.zip')\n", (57761, 57781), False, 'import zipfile\n'), ((57828, 57891), 'os.chmod', 'os.chmod', (['deformExe', '(stat.S_IRUSR | stat.S_IXUSR | stat.S_IWUSR)'], {}), '(deformExe, stat.S_IRUSR | stat.S_IXUSR | stat.S_IWUSR)\n', (57836, 57891), False, 'import os\n'), ((57935, 57954), 'eidolon.getAppDir', 'eidolon.getAppDir', ([], {}), '()\n', (57952, 57954), False, 'import eidolon\n'), ((59845, 59888), 'eidolon.PyDataSet', 'PyDataSet', (['"""ds"""', 'outermesh[0]', 'outermesh[1]'], {}), "('ds', outermesh[0], outermesh[1])\n", (59854, 59888), False, 'from eidolon import ui, ScenePlugin, Project, avg, vec3, successive, first, RealMatrix, IndexMatrix, StdProps, timing, ReprType, listToMatrix, MeshSceneObject, BoundBox, ElemType, reduceMesh, PyDataSet, taskmethod, taskroutine, printFlush\n'), ((59927, 59970), 'eidolon.PyDataSet', 'PyDataSet', (['"""ds"""', 'innermesh[0]', 'innermesh[1]'], {}), "('ds', innermesh[0], innermesh[1])\n", (59936, 59970), False, 'from eidolon import ui, ScenePlugin, Project, avg, vec3, successive, first, RealMatrix, IndexMatrix, StdProps, timing, ReprType, listToMatrix, MeshSceneObject, BoundBox, ElemType, reduceMesh, PyDataSet, taskmethod, taskroutine, printFlush\n'), ((4958, 4964), 'eidolon.vec3', 'vec3', ([], {}), '()\n', (4962, 4964), False, 'from eidolon import ui, ScenePlugin, Project, avg, vec3, successive, first, RealMatrix, IndexMatrix, StdProps, timing, ReprType, listToMatrix, MeshSceneObject, BoundBox, ElemType, reduceMesh, PyDataSet, taskmethod, taskroutine, printFlush\n'), ((12508, 12542), 'os.path.join', 'os.path.join', (['deformDir', 'modelFile'], {}), '(deformDir, modelFile)\n', (12520, 12542), False, 'import os\n'), ((12872, 12906), 'os.path.join', 'os.path.join', (['deformDir', 'optimFile'], {}), '(deformDir, optimFile)\n', (12884, 12906), False, 'import os\n'), ((30112, 30132), 'numpy.asarray', 'np.asarray', (['edgedirs'], {}), '(edgedirs)\n', (30122, 30132), True, 'import numpy as np\n'), ((30133, 30154), 'numpy.asarray', 'np.asarray', (['edgegrads'], {}), '(edgegrads)\n', (30143, 30154), True, 'import numpy as np\n'), ((30337, 30343), 'eidolon.vec3', 'vec3', ([], {}), '()\n', (30341, 30343), False, 'from eidolon import ui, ScenePlugin, Project, avg, vec3, successive, first, RealMatrix, IndexMatrix, StdProps, timing, ReprType, listToMatrix, MeshSceneObject, BoundBox, ElemType, reduceMesh, PyDataSet, taskmethod, taskroutine, printFlush\n'), ((33694, 33733), 'os.path.join', 'os.path.join', (['tempdir', "('region%.2i' % r)"], {}), "(tempdir, 'region%.2i' % r)\n", (33706, 33733), False, 'import os\n'), ((34195, 34216), 'eidolon.printFlush', 'eidolon.printFlush', (['e'], {}), '(e)\n', (34213, 34216), False, 'import eidolon\n'), ((47379, 47391), 'eidolon.first', 'first', (['names'], {}), '(names)\n', (47384, 47391), False, 'from eidolon import ui, ScenePlugin, Project, avg, vec3, successive, first, RealMatrix, IndexMatrix, StdProps, timing, ReprType, listToMatrix, MeshSceneObject, BoundBox, ElemType, reduceMesh, PyDataSet, taskmethod, taskroutine, printFlush\n'), ((34107, 34134), 'eidolon.vec3', 'vec3', (['*directionfield[oldn]'], {}), '(*directionfield[oldn])\n', (34111, 34134), False, 'from eidolon import ui, ScenePlugin, Project, avg, vec3, successive, first, RealMatrix, IndexMatrix, StdProps, timing, ReprType, listToMatrix, MeshSceneObject, BoundBox, ElemType, reduceMesh, PyDataSet, taskmethod, taskroutine, printFlush\n'), ((40677, 40683), 'eidolon.vec3', 'vec3', ([], {}), '()\n', (40681, 40683), False, 'from eidolon import ui, ScenePlugin, Project, avg, vec3, successive, first, RealMatrix, IndexMatrix, StdProps, timing, ReprType, listToMatrix, MeshSceneObject, BoundBox, ElemType, reduceMesh, PyDataSet, taskmethod, taskroutine, printFlush\n'), ((30269, 30286), 'eidolon.vec3', 'vec3', (['*nodedir[0]'], {}), '(*nodedir[0])\n', (30273, 30286), False, 'from eidolon import ui, ScenePlugin, Project, avg, vec3, successive, first, RealMatrix, IndexMatrix, StdProps, timing, ReprType, listToMatrix, MeshSceneObject, BoundBox, ElemType, reduceMesh, PyDataSet, taskmethod, taskroutine, printFlush\n'), ((33295, 33309), 'eidolon.avg', 'avg', (['(a, b, c)'], {}), '((a, b, c))\n', (33298, 33309), False, 'from eidolon import ui, ScenePlugin, Project, avg, vec3, successive, first, RealMatrix, IndexMatrix, StdProps, timing, ReprType, listToMatrix, MeshSceneObject, BoundBox, ElemType, reduceMesh, PyDataSet, taskmethod, taskroutine, printFlush\n'), ((40596, 40610), 'eidolon.vec3', 'vec3', (['*dirs[i]'], {}), '(*dirs[i])\n', (40600, 40610), False, 'from eidolon import ui, ScenePlugin, Project, avg, vec3, successive, first, RealMatrix, IndexMatrix, StdProps, timing, ReprType, listToMatrix, MeshSceneObject, BoundBox, ElemType, reduceMesh, PyDataSet, taskmethod, taskroutine, printFlush\n'), ((48851, 48874), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (48872, 48874), False, 'import datetime\n'), ((56282, 56301), 'eidolon.taskroutine', 'taskroutine', (['"""Save"""'], {}), "('Save')\n", (56293, 56301), False, 'from eidolon import ui, ScenePlugin, Project, avg, vec3, successive, first, RealMatrix, IndexMatrix, StdProps, timing, ReprType, listToMatrix, MeshSceneObject, BoundBox, ElemType, reduceMesh, PyDataSet, taskmethod, taskroutine, printFlush\n'), ((56427, 56450), 'eidolon.taskroutine', 'taskroutine', (['"""Load Rep"""'], {}), "('Load Rep')\n", (56438, 56450), False, 'from eidolon import ui, ScenePlugin, Project, avg, vec3, successive, first, RealMatrix, IndexMatrix, StdProps, timing, ReprType, listToMatrix, MeshSceneObject, BoundBox, ElemType, reduceMesh, PyDataSet, taskmethod, taskroutine, printFlush\n')]
|
from __future__ import absolute_import
from copy import copy
import numpy as nm
from sfepy.base.testing import TestCommon
from sfepy.base.base import ordered_iteritems
from sfepy import data_dir
filename_meshes = [data_dir + '/meshes/elements/%s_2.mesh' % geom
for geom in ['1_2', '2_3', '2_4', '3_4', '3_8', '3_2_4']]
def make_term_args(arg_shapes, arg_kinds, arg_types, ats_mode, domain,
material_value=None, poly_space_base=None):
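    # Build the arguments needed to instantiate a term under test: field
    # variables, materials, a time stepper or user data, according to the
    # declared argument shapes, kinds and types.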
from sfepy.base.base import basestr
from sfepy.discrete import FieldVariable, Material, Variables, Materials
from sfepy.discrete.fem import Field
from sfepy.solvers.ts import TimeStepper
from sfepy.mechanics.tensors import dim2sym
omega = domain.regions['Omega']
dim = domain.shape.dim
sym = dim2sym(dim)
def _parse_scalar_shape(sh):
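        # Translate symbolic shape codes into integers: 'D' -> space dimension,
        # 'D2' -> dim**2, 'S' -> number of components of a symmetric tensor.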
if isinstance(sh, basestr):
if sh == 'D':
return dim
elif sh == 'D2':
return dim**2
elif sh == 'S':
return sym
elif sh == 'N': # General number ;)
return 1
else:
return int(sh)
else:
return sh
def _parse_tuple_shape(sh):
if isinstance(sh, basestr):
return [_parse_scalar_shape(ii.strip()) for ii in sh.split(',')]
else:
return (int(sh),)
args = {}
str_args = []
materials = []
variables = []
for ii, arg_kind in enumerate(arg_kinds):
if arg_kind != 'ts':
if ats_mode is not None:
extended_ats = arg_types[ii] + ('/%s' % ats_mode)
else:
extended_ats = arg_types[ii]
try:
sh = arg_shapes[arg_types[ii]]
except KeyError:
sh = arg_shapes[extended_ats]
if arg_kind.endswith('variable'):
shape = _parse_scalar_shape(sh[0] if isinstance(sh, tuple) else sh)
field = Field.from_args('f%d' % ii, nm.float64, shape, omega,
approx_order=1,
poly_space_base=poly_space_base)
if arg_kind == 'virtual_variable':
if sh[1] is not None:
istate = arg_types.index(sh[1])
else:
# Only virtual variable in arguments.
istate = -1
# -> Make fake variable.
var = FieldVariable('u-1', 'unknown', field)
var.set_constant(0.0)
variables.append(var)
var = FieldVariable('v', 'test', field,
primary_var_name='u%d' % istate)
elif arg_kind == 'state_variable':
var = FieldVariable('u%d' % ii, 'unknown', field)
var.set_constant(0.0)
elif arg_kind == 'parameter_variable':
var = FieldVariable('p%d' % ii, 'parameter', field,
primary_var_name='(set-to-None)')
var.set_constant(0.0)
variables.append(var)
str_args.append(var.name)
args[var.name] = var
elif arg_kind.endswith('material'):
if sh is None: # Switched-off opt_material.
continue
prefix = ''
if isinstance(sh, basestr):
aux = sh.split(':')
if len(aux) == 2:
prefix, sh = aux
if material_value is None:
material_value = 1.0
shape = _parse_tuple_shape(sh)
if (len(shape) > 1) or (shape[0] > 1):
if ((len(shape) == 2) and (shape[0] == shape[1])
and (material_value != 0.0)):
# Identity matrix.
val = nm.eye(shape[0], dtype=nm.float64)
else:
# Array.
val = nm.empty(shape, dtype=nm.float64)
val.fill(material_value)
                values = {'%sc%d' % (prefix, ii) : val}
elif (len(shape) == 1) and (shape[0] == 1):
# Single scalar as a special value.
values = {'.c%d' % ii : material_value}
else:
raise ValueError('wrong material shape! (%s)' % shape)
mat = Material('m%d' % ii, values=values)
materials.append(mat)
str_args.append(mat.name + '.' + 'c%d' % ii)
args[mat.name] = mat
elif arg_kind == 'ts':
ts = TimeStepper(0.0, 1.0, 1.0, 5)
str_args.append('ts')
args['ts'] = ts
else:
str_args.append('user%d' % ii)
args[str_args[-1]] = None
materials = Materials(materials)
variables = Variables(variables)
return args, str_args, materials, variables
class Test(TestCommon):
@staticmethod
def from_conf(conf, options):
from sfepy.discrete import Integral
from sfepy.discrete.fem import Mesh, FEDomain
domains = []
for filename in filename_meshes:
mesh = Mesh.from_file(filename)
domain = FEDomain('domain_%s' % mesh.name.replace(data_dir, ''),
mesh)
domain.create_region('Omega', 'all')
domain.create_region('Gamma', 'vertices of surface', 'facet')
domains.append(domain)
integral = Integral('i', order=3)
qp_coors, qp_weights = integral.get_qp('3_8')
custom_integral = Integral('i', coors=qp_coors, weights=qp_weights,
order='custom')
test = Test(domains=domains, integral=integral,
custom_integral=custom_integral,
conf=conf, options=options)
return test
def test_term_call_modes(self):
from sfepy.terms import term_table
ok = True
failed = []
for domain in self.domains:
self.report('domain: %s' % domain.name)
domain_geometry = list(domain.geom_els.values())[0].name
if domain.shape.dim != domain.shape.tdim:
domain_geometry = '%d_%s' % (domain.shape.dim, domain_geometry)
for _, term_cls in ordered_iteritems(term_table):
if (domain_geometry not in term_cls.geometries) \
or ("dg" in term_cls.name) \
or (term_cls.name == "dw_ns_dot_grad_s"):
continue
vint = ('volume', 'point', 'custom')
rname = 'Omega' if term_cls.integration in vint else 'Gamma'
self.report('<-- %s ...' % term_cls.name)
if rname == 'Gamma' and domain.mesh.dim == 1:
self.report('--> 1D Gamma region: not tested!')
elif term_cls.arg_shapes:
try:
_ok = self._test_single_term(term_cls, domain, rname)
except:
_ok = False
if not _ok:
failed.append((domain.name, term_cls.name))
ok = ok and _ok
self.report('--> ok: %s' % _ok)
else:
self.report('--> not tested!')
self.report('failed:', failed)
return ok
def _test_single_term(self, term_cls, domain, rname):
from sfepy.terms import Term
from sfepy.terms.terms import get_arg_kinds
ok = True
term_call = term_cls.name + '(%s)'
arg_shapes_list = term_cls.arg_shapes
if not isinstance(arg_shapes_list, list):
arg_shapes_list = [arg_shapes_list]
if term_cls.integration != 'custom':
integral = self.integral
else:
integral = self.custom_integral
poly_space_base = getattr(term_cls, 'poly_space_base', 'lagrange')
prev_shapes = {}
for _arg_shapes in arg_shapes_list:
# Unset shapes are taken from the previous iteration.
arg_shapes = copy(prev_shapes)
arg_shapes.update(_arg_shapes)
prev_shapes = arg_shapes
self.report('arg_shapes:', arg_shapes)
arg_types = term_cls.arg_types
if not isinstance(arg_types[0], tuple):
arg_types = (arg_types,)
for iat, ats in enumerate(arg_types):
self.report('arg_types:', ats)
arg_kinds = get_arg_kinds(ats)
modes = getattr(term_cls, 'modes', None)
mode = modes[iat] if modes is not None else None
if 'dw_s_dot_grad_i_s' in term_cls.name:
material_value = 0.0
else:
material_value = 1.0
aux = make_term_args(arg_shapes, arg_kinds, ats, mode, domain,
material_value=material_value,
poly_space_base=poly_space_base)
args, str_args, materials, variables = aux
self.report('args:', str_args)
name = term_call % (', '.join(str_args))
term = Term.new(name, integral, domain.regions[rname], **args)
term.setup()
call_mode = 'weak' if term.names.virtual else 'eval'
self.report('call mode:', call_mode)
out = term.evaluate(mode=call_mode, ret_status=True)
if call_mode == 'eval':
vals, status = out
vals = nm.array(vals)
else:
vals, iels, status = out
if isinstance(vals, tuple):
# Dynamic connectivity terms.
vals = vals[0]
_ok = nm.isfinite(vals).all()
ok = ok and _ok
self.report('values shape: %s' % (vals.shape,))
if not _ok:
self.report('values are not finite!')
self.report(vals)
_ok = status == 0
if not _ok:
self.report('status is %s!' % status)
ok = ok and _ok
if term.names.virtual:
# Test differentiation w.r.t. state variables in the weak
# mode.
svars = term.get_state_variables(unknown_only=True)
for svar in svars:
vals, iels, status = term.evaluate(mode=call_mode,
diff_var=svar.name,
ret_status=True)
if isinstance(vals, tuple):
# Dynamic connectivity terms.
vals = vals[0]
_ok = status == 0
ok = ok and _ok
self.report('diff: %s' % svar.name)
if not _ok:
self.report('status is %s!' % status)
_ok = nm.isfinite(vals).all()
ok = ok and _ok
self.report('values shape: %s' % (vals.shape,))
if not _ok:
self.report('values are not finite!')
self.report(vals)
return ok
|
[
"sfepy.discrete.Variables",
"sfepy.discrete.fem.Mesh.from_file",
"sfepy.discrete.Integral",
"sfepy.discrete.fem.Field.from_args",
"sfepy.base.base.ordered_iteritems",
"sfepy.terms.Term.new",
"sfepy.solvers.ts.TimeStepper",
"sfepy.discrete.FieldVariable",
"sfepy.mechanics.tensors.dim2sym",
"sfepy.discrete.Material",
"sfepy.discrete.Materials",
"sfepy.terms.terms.get_arg_kinds"
] |
[((801, 813), 'sfepy.mechanics.tensors.dim2sym', 'dim2sym', (['dim'], {}), '(dim)\n', (808, 813), False, 'from sfepy.mechanics.tensors import dim2sym\n'), ((4849, 4869), 'sfepy.discrete.Materials', 'Materials', (['materials'], {}), '(materials)\n', (4858, 4869), False, 'from sfepy.discrete import FieldVariable, Material, Variables, Materials\n'), ((4886, 4906), 'sfepy.discrete.Variables', 'Variables', (['variables'], {}), '(variables)\n', (4895, 4906), False, 'from sfepy.discrete import FieldVariable, Material, Variables, Materials\n'), ((5531, 5553), 'sfepy.discrete.Integral', 'Integral', (['"""i"""'], {'order': '(3)'}), "('i', order=3)\n", (5539, 5553), False, 'from sfepy.discrete import Integral\n'), ((5634, 5699), 'sfepy.discrete.Integral', 'Integral', (['"""i"""'], {'coors': 'qp_coors', 'weights': 'qp_weights', 'order': '"""custom"""'}), "('i', coors=qp_coors, weights=qp_weights, order='custom')\n", (5642, 5699), False, 'from sfepy.discrete import Integral\n'), ((2002, 2108), 'sfepy.discrete.fem.Field.from_args', 'Field.from_args', (["('f%d' % ii)", 'nm.float64', 'shape', 'omega'], {'approx_order': '(1)', 'poly_space_base': 'poly_space_base'}), "('f%d' % ii, nm.float64, shape, omega, approx_order=1,\n poly_space_base=poly_space_base)\n", (2017, 2108), False, 'from sfepy.discrete.fem import Field\n'), ((5214, 5238), 'sfepy.discrete.fem.Mesh.from_file', 'Mesh.from_file', (['filename'], {}), '(filename)\n', (5228, 5238), False, 'from sfepy.discrete.fem import Mesh, FEDomain\n'), ((6356, 6385), 'sfepy.base.base.ordered_iteritems', 'ordered_iteritems', (['term_table'], {}), '(term_table)\n', (6373, 6385), False, 'from sfepy.base.base import ordered_iteritems\n'), ((8181, 8198), 'copy.copy', 'copy', (['prev_shapes'], {}), '(prev_shapes)\n', (8185, 8198), False, 'from copy import copy\n'), ((2645, 2711), 'sfepy.discrete.FieldVariable', 'FieldVariable', (['"""v"""', '"""test"""', 'field'], {'primary_var_name': "('u%d' % istate)"}), "('v', 'test', field, primary_var_name='u%d' % istate)\n", (2658, 2711), False, 'from sfepy.discrete import FieldVariable, Material, Variables, Materials\n'), ((4434, 4469), 'sfepy.discrete.Material', 'Material', (["('m%d' % ii)"], {'values': 'values'}), "('m%d' % ii, values=values)\n", (4442, 4469), False, 'from sfepy.discrete import FieldVariable, Material, Variables, Materials\n'), ((8594, 8612), 'sfepy.terms.terms.get_arg_kinds', 'get_arg_kinds', (['ats'], {}), '(ats)\n', (8607, 8612), False, 'from sfepy.terms.terms import get_arg_kinds\n'), ((9303, 9358), 'sfepy.terms.Term.new', 'Term.new', (['name', 'integral', 'domain.regions[rname]'], {}), '(name, integral, domain.regions[rname], **args)\n', (9311, 9358), False, 'from sfepy.terms import Term\n'), ((2499, 2537), 'sfepy.discrete.FieldVariable', 'FieldVariable', (['"""u-1"""', '"""unknown"""', 'field'], {}), "('u-1', 'unknown', field)\n", (2512, 2537), False, 'from sfepy.discrete import FieldVariable, Material, Variables, Materials\n'), ((2818, 2861), 'sfepy.discrete.FieldVariable', 'FieldVariable', (["('u%d' % ii)", '"""unknown"""', 'field'], {}), "('u%d' % ii, 'unknown', field)\n", (2831, 2861), False, 'from sfepy.discrete import FieldVariable, Material, Variables, Materials\n'), ((4644, 4673), 'sfepy.solvers.ts.TimeStepper', 'TimeStepper', (['(0.0)', '(1.0)', '(1.0)', '(5)'], {}), '(0.0, 1.0, 1.0, 5)\n', (4655, 4673), False, 'from sfepy.solvers.ts import TimeStepper\n'), ((9688, 9702), 'numpy.array', 'nm.array', (['vals'], {}), '(vals)\n', (9696, 9702), True, 'import numpy as nm\n'), ((2974, 3053), 'sfepy.discrete.FieldVariable', 'FieldVariable', (["('p%d' % ii)", '"""parameter"""', 'field'], {'primary_var_name': '"""(set-to-None)"""'}), "('p%d' % ii, 'parameter', field, primary_var_name='(set-to-None)')\n", (2987, 3053), False, 'from sfepy.discrete import FieldVariable, Material, Variables, Materials\n'), ((3885, 3919), 'numpy.eye', 'nm.eye', (['shape[0]'], {'dtype': 'nm.float64'}), '(shape[0], dtype=nm.float64)\n', (3891, 3919), True, 'import numpy as nm\n'), ((3998, 4031), 'numpy.empty', 'nm.empty', (['shape'], {'dtype': 'nm.float64'}), '(shape, dtype=nm.float64)\n', (4006, 4031), True, 'import numpy as nm\n'), ((9924, 9941), 'numpy.isfinite', 'nm.isfinite', (['vals'], {}), '(vals)\n', (9935, 9941), True, 'import numpy as nm\n'), ((11238, 11255), 'numpy.isfinite', 'nm.isfinite', (['vals'], {}), '(vals)\n', (11249, 11255), True, 'import numpy as nm\n')]
|
from sqlmodel import create_engine
engine = create_engine("sqlite:///database.db")
del create_engine
from .models import DuelData
from .session import Session, SessionManager
|
[
"sqlmodel.create_engine"
] |
[((45, 83), 'sqlmodel.create_engine', 'create_engine', (['"""sqlite:///database.db"""'], {}), "('sqlite:///database.db')\n", (58, 83), False, 'from sqlmodel import create_engine\n')]
|
import megengine as mge
import megengine.functional as F
from megengine import tensor
import numpy as np
from megengine.functional.nn import nms
from config import config
from det_opr.bbox_opr import bbox_transform_inv_opr, clip_boxes_opr, \
filter_boxes_opr, box_overlap_opr
# from bbox_opr import box_overlap_opr
import pdb
def find_top_rpn_proposals(is_train, rpn_bbox_offsets_list, rpn_cls_prob_list,
all_anchors_list, im_info):
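    # For each image in the batch: decode anchor offsets into proposals, drop
    # tiny boxes, keep the top-scoring candidates, then run NMS and collect
    # the surviving RoIs together with their objectness scores.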
prev_nms_top_n = config.train_prev_nms_top_n \
if is_train else config.test_prev_nms_top_n
post_nms_top_n = config.train_post_nms_top_n \
if is_train else config.test_post_nms_top_n
batch_per_gpu = config.batch_per_gpu if is_train else 1
nms_threshold = config.rpn_nms_threshold
box_min_size = config.rpn_min_box_size
bbox_normalize_targets = config.rpn_bbox_normalize_targets
bbox_normalize_means = config.bbox_normalize_means
bbox_normalize_stds = config.bbox_normalize_stds
list_size = len(rpn_bbox_offsets_list)
return_rois, return_probs = [], []
batch_per_gpu = rpn_cls_prob_list[0].shape[0]
for bid in range(batch_per_gpu):
batch_proposals_list = []
batch_probs_list = []
for l in range(list_size):
# get proposals and probs
offsets = rpn_bbox_offsets_list[l][bid] \
.transpose(1, 2, 0).reshape(-1, 4)
if bbox_normalize_targets:
std_opr = tensor(config.bbox_normalize_stds[None, :])
mean_opr = tensor(config.bbox_normalize_means[None, :])
                # De-normalize the regressed offsets back into raw box deltas.
                offsets = offsets * std_opr
                offsets = offsets + mean_opr
all_anchors = all_anchors_list[l]
proposals = bbox_transform_inv_opr(all_anchors, offsets)
if config.anchor_within_border:
proposals = clip_boxes_opr(proposals, im_info[bid, :])
probs = rpn_cls_prob_list[l][bid] \
                .transpose(1, 2, 0).reshape(-1, 2)
probs = F.softmax(probs)[:, 1]
# gather the proposals and probs
batch_proposals_list.append(proposals)
batch_probs_list.append(probs)
batch_proposals = F.concat(batch_proposals_list, axis=0)
batch_probs = F.concat(batch_probs_list, axis=0)
# filter the boxes with small size.
wh = batch_proposals[:, 2:4] - batch_proposals[:, :2] + 1
thresh = box_min_size * im_info[bid, 2]
keep_mask = F.prod((wh >= thresh), axis=1)
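        # If no box survives the size filter, keep all of them to avoid an
        # empty selection in the cond_take below.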
keep_mask = keep_mask + F.equal(keep_mask.sum(), 0)
keep_mask, inds = F.cond_take(keep_mask > 0, keep_mask)
inds = inds.astype(np.int32)
# batch_proposals = F.nn.indexing_one_hot(batch_proposals, inds, 0)
# batch_probs = F.nn.indexing_one_hot(batch_probs, inds, 0)
batch_proposals, batch_probs = batch_proposals[inds], batch_probs[inds]
# prev_nms_top_n
num_proposals = F.minimum(prev_nms_top_n, batch_proposals.shape[0])
idx = F.argsort(batch_probs, descending=True)
topk_idx = idx[:num_proposals].reshape(-1)
batch_proposals = batch_proposals[topk_idx].detach()
batch_probs = batch_probs[topk_idx].detach()
# For each image, run a total-level NMS, and choose topk results.
keep_inds = nms(batch_proposals, batch_probs, nms_threshold, max_output = 2000)
# num = F.minimum(post_nms_top_n, keep_inds.shape[0])
# keep_inds = keep_inds[:num]
batch_rois, batch_probs = batch_proposals[keep_inds], batch_probs[keep_inds]
# cons the rois
batch_inds = F.ones((batch_rois.shape[0], 1)) * bid
batch_rois = F.concat([batch_inds, batch_rois[:, :4]], axis=1)
return_rois.append(batch_rois)
return_probs.append(batch_probs)
if batch_per_gpu == 1:
return batch_rois, batch_probs
else:
concated_rois = F.concat(return_rois, axis=0)
concated_probs = F.concat(return_probs, axis=0)
return concated_rois, concated_probs
|
[
"megengine.functional.nn.nms",
"megengine.functional.prod",
"megengine.functional.minimum",
"megengine.functional.argsort",
"megengine.tensor",
"megengine.functional.cond_take",
"megengine.functional.concat",
"megengine.functional.ones",
"megengine.functional.softmax"
] |
[((2223, 2261), 'megengine.functional.concat', 'F.concat', (['batch_proposals_list'], {'axis': '(0)'}), '(batch_proposals_list, axis=0)\n', (2231, 2261), True, 'import megengine.functional as F\n'), ((2284, 2318), 'megengine.functional.concat', 'F.concat', (['batch_probs_list'], {'axis': '(0)'}), '(batch_probs_list, axis=0)\n', (2292, 2318), True, 'import megengine.functional as F\n'), ((2497, 2525), 'megengine.functional.prod', 'F.prod', (['(wh >= thresh)'], {'axis': '(1)'}), '(wh >= thresh, axis=1)\n', (2503, 2525), True, 'import megengine.functional as F\n'), ((2614, 2651), 'megengine.functional.cond_take', 'F.cond_take', (['(keep_mask > 0)', 'keep_mask'], {}), '(keep_mask > 0, keep_mask)\n', (2625, 2651), True, 'import megengine.functional as F\n'), ((2964, 3015), 'megengine.functional.minimum', 'F.minimum', (['prev_nms_top_n', 'batch_proposals.shape[0]'], {}), '(prev_nms_top_n, batch_proposals.shape[0])\n', (2973, 3015), True, 'import megengine.functional as F\n'), ((3030, 3069), 'megengine.functional.argsort', 'F.argsort', (['batch_probs'], {'descending': '(True)'}), '(batch_probs, descending=True)\n', (3039, 3069), True, 'import megengine.functional as F\n'), ((3338, 3403), 'megengine.functional.nn.nms', 'nms', (['batch_proposals', 'batch_probs', 'nms_threshold'], {'max_output': '(2000)'}), '(batch_proposals, batch_probs, nms_threshold, max_output=2000)\n', (3341, 3403), False, 'from megengine.functional.nn import nms\n'), ((3698, 3747), 'megengine.functional.concat', 'F.concat', (['[batch_inds, batch_rois[:, :4]]'], {'axis': '(1)'}), '([batch_inds, batch_rois[:, :4]], axis=1)\n', (3706, 3747), True, 'import megengine.functional as F\n'), ((3929, 3958), 'megengine.functional.concat', 'F.concat', (['return_rois'], {'axis': '(0)'}), '(return_rois, axis=0)\n', (3937, 3958), True, 'import megengine.functional as F\n'), ((3984, 4014), 'megengine.functional.concat', 'F.concat', (['return_probs'], {'axis': '(0)'}), '(return_probs, axis=0)\n', (3992, 4014), True, 'import megengine.functional as F\n'), ((1744, 1788), 'det_opr.bbox_opr.bbox_transform_inv_opr', 'bbox_transform_inv_opr', (['all_anchors', 'offsets'], {}), '(all_anchors, offsets)\n', (1766, 1788), False, 'from det_opr.bbox_opr import bbox_transform_inv_opr, clip_boxes_opr, filter_boxes_opr, box_overlap_opr\n'), ((3638, 3670), 'megengine.functional.ones', 'F.ones', (['(batch_rois.shape[0], 1)'], {}), '((batch_rois.shape[0], 1))\n', (3644, 3670), True, 'import megengine.functional as F\n'), ((1448, 1491), 'megengine.tensor', 'tensor', (['config.bbox_normalize_stds[None, :]'], {}), '(config.bbox_normalize_stds[None, :])\n', (1454, 1491), False, 'from megengine import tensor\n'), ((1519, 1563), 'megengine.tensor', 'tensor', (['config.bbox_normalize_means[None, :]'], {}), '(config.bbox_normalize_means[None, :])\n', (1525, 1563), False, 'from megengine import tensor\n'), ((1861, 1903), 'det_opr.bbox_opr.clip_boxes_opr', 'clip_boxes_opr', (['proposals', 'im_info[bid, :]'], {}), '(proposals, im_info[bid, :])\n', (1875, 1903), False, 'from det_opr.bbox_opr import bbox_transform_inv_opr, clip_boxes_opr, filter_boxes_opr, box_overlap_opr\n'), ((2025, 2041), 'megengine.functional.softmax', 'F.softmax', (['probs'], {}), '(probs)\n', (2034, 2041), True, 'import megengine.functional as F\n')]
|
#!/usr/bin/env python
# This code was adapted from http://sfepy.org/doc-devel/mat_optim.html.
"""
Compare various elastic materials w.r.t. uniaxial tension/compression test.
Requires Matplotlib.
"""
from __future__ import absolute_import
from argparse import ArgumentParser, RawDescriptionHelpFormatter
import sys
import six
sys.path.append('.')
from sfepy.base.base import output
from sfepy.base.conf import ProblemConf, get_standard_keywords
from sfepy.discrete import Problem
from sfepy.base.plotutils import plt
from matplotlib.collections import PolyCollection
from mpl_toolkits.mplot3d.art3d import Poly3DCollection, Line3DCollection
import numpy as np
from functools import partial
def define(K=8.333, mu_nh=3.846, mu_mr=1.923, kappa=1.923, lam=5.769, mu=3.846):
"""Define the problem to solve."""
from sfepy.discrete.fem.meshio import UserMeshIO
from sfepy.mesh.mesh_generators import gen_block_mesh
from sfepy.mechanics.matcoefs import stiffness_from_lame
def mesh_hook(mesh, mode):
"""
Generate the block mesh.
"""
if mode == 'read':
mesh = gen_block_mesh([2, 2, 3], [2, 2, 4], [0, 0, 1.5], name='el3',
verbose=False)
return mesh
elif mode == 'write':
pass
filename_mesh = UserMeshIO(mesh_hook)
options = {
'nls' : 'newton',
'ls' : 'ls',
'ts' : 'ts',
'save_times' : 'all',
}
functions = {
'linear_pressure' : (linear_pressure,),
'empty' : (lambda ts, coor, mode, region, ig: None,),
}
fields = {
'displacement' : ('real', 3, 'Omega', 1),
}
# Coefficients are chosen so that the tangent stiffness is the same for all
# material for zero strains.
materials = {
'solid' : ({
'K' : K, # bulk modulus
'mu_nh' : mu_nh, # shear modulus of neoHookean term
'mu_mr' : mu_mr, # shear modulus of Mooney-Rivlin term
'kappa' : kappa, # second modulus of Mooney-Rivlin term
# elasticity for LE term
'D' : stiffness_from_lame(dim=3, lam=lam, mu=mu),
},),
'load' : 'empty',
}
variables = {
'u' : ('unknown field', 'displacement', 0),
'v' : ('test field', 'displacement', 'u'),
}
regions = {
'Omega' : 'all',
'Bottom' : ('vertices in (z < 0.1)', 'facet'),
'Top' : ('vertices in (z > 2.9)', 'facet'),
}
ebcs = {
'fixb' : ('Bottom', {'u.all' : 0.0}),
'fixt' : ('Top', {'u.[0,1]' : 0.0}),
}
integrals = {
'i' : 1,
'isurf' : 2,
}
equations = {
'linear' : """dw_lin_elastic.i.Omega(solid.D, v, u)
= dw_surface_ltr.isurf.Top(load.val, v)""",
'neo-Hookean' : """dw_tl_he_neohook.i.Omega(solid.mu_nh, v, u)
+ dw_tl_bulk_penalty.i.Omega(solid.K, v, u)
= dw_surface_ltr.isurf.Top(load.val, v)""",
'Mooney-Rivlin' : """dw_tl_he_neohook.i.Omega(solid.mu_mr, v, u)
+ dw_tl_he_mooney_rivlin.i.Omega(solid.kappa, v, u)
+ dw_tl_bulk_penalty.i.Omega(solid.K, v, u)
= dw_surface_ltr.isurf.Top(load.val, v)""",
}
solvers = {
'ls' : ('ls.scipy_direct', {}),
'newton' : ('nls.newton', {
'i_max' : 5,
'eps_a' : 1e-10,
'eps_r' : 1.0,
}),
'ts' : ('ts.simple', {
't0' : 0,
't1' : 1,
'dt' : None,
'n_step' : 26, # has precedence over dt!
'verbose' : 1,
}),
}
return locals()
##
# Pressure tractions.
def linear_pressure(ts, coor, mode=None, coef=1, **kwargs):
if mode == 'qp':
val = np.tile(coef * ts.step, (coor.shape[0], 1, 1))
return {'val' : val}
def store_top_u(displacements):
"""Function _store() will be called at the end of each loading step. Top
displacements will be stored into `displacements`."""
def _store(problem, ts, state):
top = problem.domain.regions['Top']
top_u = problem.get_variables()['u'].get_state_in_region(top)
displacements.append(np.mean(top_u[:,-1]))
return _store
def solve_branch(problem, branch_function, material_type):
eq = problem.conf.equations[material_type]
problem.set_equations({material_type : eq})
load = problem.get_materials()['load']
load.set_function(branch_function)
out = []
problem.solve(save_results=False, step_hook=store_top_u(out))
displacements = np.array(out, dtype=np.float64)
return displacements
helps = {
'no_plot' : 'do not show plot window',
}
def plot_mesh(pb):
# plot mesh for macro problem
coors = pb.domain.mesh.coors
graph = pb.domain.mesh.get_conn(pb.domain.mesh.descs[0])
fig2 = plt.figure(figsize=(5,6))
ax = fig2.add_subplot(111, projection='3d')
for e in range(graph.shape[0]):
tupleList = coors[graph[e,:],:]
vertices = [[0, 1, 2, 3], [4, 5, 6, 7],
[0, 1, 5, 4], [1, 2, 6, 5], [2, 3, 7, 6], [3, 0, 4, 7]]
verts = [[tupleList[vertices[ix][iy]] for iy in range(len(vertices[0]))]
for ix in range(len(vertices))]
pc3d = Poly3DCollection(verts=verts, facecolors='white',
edgecolors='black', linewidths=1, alpha=0.5)
ax.add_collection3d(pc3d)
ax.set_xlim3d(-1.2, 1.2)
ax.set_ylim3d(-1.2, 1.2)
ax.set_zlim3d(-0.01, 3.2)
ax.set_title('3D plot of macro system')
plt.show(fig2)
return None
def one_simulation(material_type, define_args, coef_tension=0.25, coef_compression=-0.25,
plot_mesh_bool=False, return_load=False):
#parser = ArgumentParser(description=__doc__,
# formatter_class=RawDescriptionHelpFormatter)
#parser.add_argument('--version', action='version', version='%(prog)s')
#options = parser.parse_args()
output.set_output(filename='sfepy_log.txt', quiet=True)
required, other = get_standard_keywords()
# Use this file as the input file.
conf = ProblemConf.from_file(__file__, required, other,
define_args=define_args)
# Create problem instance, but do not set equations.
problem = Problem.from_conf(conf, init_equations=False)
if plot_mesh_bool:
plot_mesh(problem)
# Solve the problem. Output is ignored, results stored by using the
# step_hook.
linear_tension = partial(linear_pressure, coef=coef_tension)
u_t = solve_branch(problem, linear_tension, material_type)
linear_compression = partial(linear_pressure, coef=coef_compression)
u_c = solve_branch(problem, linear_compression, material_type)
# Get pressure load by calling linear_*() for each time step.
ts = problem.get_timestepper()
load_t = np.array([linear_tension(ts, np.array([[0.0]]), 'qp')['val']
for aux in ts.iter_from(0)],
dtype=np.float64).squeeze()
load_c = np.array([linear_compression(ts, np.array([[0.0]]), 'qp')['val']
for aux in ts.iter_from(0)],
dtype=np.float64).squeeze()
# Join the branches.
displacements = np.r_[u_c[::-1], u_t]
load = np.r_[load_c[::-1], load_t]
if return_load:
return displacements, load
else:
return displacements
def one_simulation_linear(theta, plot_mesh_bool=False, return_load=False):
material_type = 'linear'
theta = np.array(theta).reshape((-1, ))
    define_args = {'lam': theta[0], 'mu': theta[1]}  # Lamé parameters
return one_simulation(material_type=material_type, plot_mesh_bool=plot_mesh_bool,
define_args=define_args, return_load=return_load)
def one_simulation_neo_hookean(theta, plot_mesh_bool=False, return_load=False):
material_type = 'neo-Hookean'
theta = np.array(theta).reshape((-1, ))
    define_args = {'mu_nh': theta[0]}  # neo-Hookean shear modulus
return one_simulation(material_type=material_type, plot_mesh_bool=plot_mesh_bool,
define_args=define_args, return_load=return_load)
def one_simulation_mooney_rivlin(theta, plot_mesh_bool=False, return_load=False):
material_type = 'Mooney-Rivlin'
theta = np.array(theta).reshape((-1, ))
    define_args = {'mu_mr': theta[0], 'kappa': theta[1]}  # Mooney-Rivlin moduli
return one_simulation(material_type=material_type, plot_mesh_bool=plot_mesh_bool,
define_args=define_args, return_load=return_load)
def one_simulation_linear_v2(theta, plot_mesh_bool=False, return_load=False):
material_type = 'linear'
theta = np.array(theta).reshape((-1, ))
    define_args = {'lam': theta[0], 'mu': theta[1]}  # Lamé parameters
return one_simulation(material_type=material_type, plot_mesh_bool=plot_mesh_bool,
define_args=define_args, return_load=return_load, coef_tension=0.15/5)
def one_simulation_neo_hookean_v2(theta, plot_mesh_bool=False, return_load=False):
material_type = 'neo-Hookean'
theta = np.array(theta).reshape((-1, ))
    define_args = {'mu_nh': theta[0]}  # neo-Hookean shear modulus
return one_simulation(material_type=material_type, plot_mesh_bool=plot_mesh_bool,
define_args=define_args, return_load=return_load, coef_tension=0.15/5)
def one_simulation_mooney_rivlin_v2(theta, plot_mesh_bool=False, return_load=False):
material_type = 'Mooney-Rivlin'
theta = np.array(theta).reshape((-1, ))
    define_args = {'mu_mr': theta[0], 'kappa': theta[1]}  # Mooney-Rivlin moduli
return one_simulation(material_type=material_type, plot_mesh_bool=plot_mesh_bool,
define_args=define_args, return_load=return_load, coef_tension=0.15/5)
|
[
"sfepy.base.conf.get_standard_keywords",
"sfepy.base.base.output.set_output",
"sfepy.discrete.Problem.from_conf",
"sfepy.base.conf.ProblemConf.from_file",
"sfepy.discrete.fem.meshio.UserMeshIO",
"sfepy.mesh.mesh_generators.gen_block_mesh",
"sfepy.base.plotutils.plt.show",
"sfepy.mechanics.matcoefs.stiffness_from_lame",
"sfepy.base.plotutils.plt.figure"
] |
[((327, 347), 'sys.path.append', 'sys.path.append', (['"""."""'], {}), "('.')\n", (342, 347), False, 'import sys\n'), ((1326, 1347), 'sfepy.discrete.fem.meshio.UserMeshIO', 'UserMeshIO', (['mesh_hook'], {}), '(mesh_hook)\n', (1336, 1347), False, 'from sfepy.discrete.fem.meshio import UserMeshIO\n'), ((4671, 4702), 'numpy.array', 'np.array', (['out'], {'dtype': 'np.float64'}), '(out, dtype=np.float64)\n', (4679, 4702), True, 'import numpy as np\n'), ((4960, 4986), 'sfepy.base.plotutils.plt.figure', 'plt.figure', ([], {'figsize': '(5, 6)'}), '(figsize=(5, 6))\n', (4970, 4986), False, 'from sfepy.base.plotutils import plt\n'), ((5706, 5720), 'sfepy.base.plotutils.plt.show', 'plt.show', (['fig2'], {}), '(fig2)\n', (5714, 5720), False, 'from sfepy.base.plotutils import plt\n'), ((6123, 6178), 'sfepy.base.base.output.set_output', 'output.set_output', ([], {'filename': '"""sfepy_log.txt"""', 'quiet': '(True)'}), "(filename='sfepy_log.txt', quiet=True)\n", (6140, 6178), False, 'from sfepy.base.base import output\n'), ((6202, 6225), 'sfepy.base.conf.get_standard_keywords', 'get_standard_keywords', ([], {}), '()\n', (6223, 6225), False, 'from sfepy.base.conf import ProblemConf, get_standard_keywords\n'), ((6276, 6349), 'sfepy.base.conf.ProblemConf.from_file', 'ProblemConf.from_file', (['__file__', 'required', 'other'], {'define_args': 'define_args'}), '(__file__, required, other, define_args=define_args)\n', (6297, 6349), False, 'from sfepy.base.conf import ProblemConf, get_standard_keywords\n'), ((6431, 6476), 'sfepy.discrete.Problem.from_conf', 'Problem.from_conf', (['conf'], {'init_equations': '(False)'}), '(conf, init_equations=False)\n', (6448, 6476), False, 'from sfepy.discrete import Problem\n'), ((6638, 6681), 'functools.partial', 'partial', (['linear_pressure'], {'coef': 'coef_tension'}), '(linear_pressure, coef=coef_tension)\n', (6645, 6681), False, 'from functools import partial\n'), ((6770, 6817), 'functools.partial', 'partial', (['linear_pressure'], {'coef': 'coef_compression'}), '(linear_pressure, coef=coef_compression)\n', (6777, 6817), False, 'from functools import partial\n'), ((3867, 3913), 'numpy.tile', 'np.tile', (['(coef * ts.step)', '(coor.shape[0], 1, 1)'], {}), '(coef * ts.step, (coor.shape[0], 1, 1))\n', (3874, 3913), True, 'import numpy as np\n'), ((5400, 5498), 'mpl_toolkits.mplot3d.art3d.Poly3DCollection', 'Poly3DCollection', ([], {'verts': 'verts', 'facecolors': '"""white"""', 'edgecolors': '"""black"""', 'linewidths': '(1)', 'alpha': '(0.5)'}), "(verts=verts, facecolors='white', edgecolors='black',\n linewidths=1, alpha=0.5)\n", (5416, 5498), False, 'from mpl_toolkits.mplot3d.art3d import Poly3DCollection, Line3DCollection\n'), ((1122, 1198), 'sfepy.mesh.mesh_generators.gen_block_mesh', 'gen_block_mesh', (['[2, 2, 3]', '[2, 2, 4]', '[0, 0, 1.5]'], {'name': '"""el3"""', 'verbose': '(False)'}), "([2, 2, 3], [2, 2, 4], [0, 0, 1.5], name='el3', verbose=False)\n", (1136, 1198), False, 'from sfepy.mesh.mesh_generators import gen_block_mesh\n'), ((4291, 4312), 'numpy.mean', 'np.mean', (['top_u[:, -1]'], {}), '(top_u[:, -1])\n', (4298, 4312), True, 'import numpy as np\n'), ((7652, 7667), 'numpy.array', 'np.array', (['theta'], {}), '(theta)\n', (7660, 7667), True, 'import numpy as np\n'), ((8020, 8035), 'numpy.array', 'np.array', (['theta'], {}), '(theta)\n', (8028, 8035), True, 'import numpy as np\n'), ((8379, 8394), 'numpy.array', 'np.array', (['theta'], {}), '(theta)\n', (8387, 8394), True, 'import numpy as np\n'), ((8745, 8760), 'numpy.array', 'np.array', (['theta'], {}), '(theta)\n', (8753, 8760), True, 'import numpy as np\n'), ((9137, 9152), 'numpy.array', 'np.array', (['theta'], {}), '(theta)\n', (9145, 9152), True, 'import numpy as np\n'), ((9520, 9535), 'numpy.array', 'np.array', (['theta'], {}), '(theta)\n', (9528, 9535), True, 'import numpy as np\n'), ((2120, 2162), 'sfepy.mechanics.matcoefs.stiffness_from_lame', 'stiffness_from_lame', ([], {'dim': '(3)', 'lam': 'lam', 'mu': 'mu'}), '(dim=3, lam=lam, mu=mu)\n', (2139, 2162), False, 'from sfepy.mechanics.matcoefs import stiffness_from_lame\n'), ((7029, 7046), 'numpy.array', 'np.array', (['[[0.0]]'], {}), '([[0.0]])\n', (7037, 7046), True, 'import numpy as np\n'), ((7204, 7221), 'numpy.array', 'np.array', (['[[0.0]]'], {}), '([[0.0]])\n', (7212, 7221), True, 'import numpy as np\n')]
|
import uuid
from typing import List
from typing import Optional
from sqlalchemy.engine import Engine
from sqlmodel import Field
from sqlmodel import Relationship
from sqlmodel import Session
from sqlmodel import SQLModel
from sqlmodel import create_engine
_engine: Optional[Engine] = None
class FileTagAssociation(SQLModel, table=True):
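    """Link table realizing the many-to-many File <-> Tag relationship."""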
file_id: Optional[uuid.UUID] = Field(
default=None, foreign_key="file.id", primary_key=True
)
tag_id: Optional[uuid.UUID] = Field(
default=None, foreign_key="tag.id", primary_key=True
)
class File(SQLModel, table=True):
id: Optional[uuid.UUID] = Field(default_factory=uuid.uuid4, primary_key=True)
name: str
path: str
tags: List["Tag"] = Relationship(
back_populates="files", link_model=FileTagAssociation
)
class Tag(SQLModel, table=True):
id: Optional[uuid.UUID] = Field(default_factory=uuid.uuid4, primary_key=True)
category: Optional[str]
value: str
files: List[File] = Relationship(
back_populates="tags", link_model=FileTagAssociation
)
def init(url: str) -> None:
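    """Initialize the global engine and create all tables."""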
global _engine
_engine = create_engine(url)
SQLModel.metadata.create_all(_engine)
def get_engine() -> Engine:
"""Get the global database engine."""
if _engine is None: # pragma: no cover
raise ValueError("Engine must be initialized with `db.init()`")
return _engine
def get_session() -> Session:
"""Create a new database session to use as a context manager."""
return Session(get_engine())
|
[
"sqlmodel.SQLModel.metadata.create_all",
"sqlmodel.Relationship",
"sqlmodel.Field",
"sqlmodel.create_engine"
] |
[((376, 436), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'foreign_key': '"""file.id"""', 'primary_key': '(True)'}), "(default=None, foreign_key='file.id', primary_key=True)\n", (381, 436), False, 'from sqlmodel import Field\n'), ((485, 544), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'foreign_key': '"""tag.id"""', 'primary_key': '(True)'}), "(default=None, foreign_key='tag.id', primary_key=True)\n", (490, 544), False, 'from sqlmodel import Field\n'), ((625, 676), 'sqlmodel.Field', 'Field', ([], {'default_factory': 'uuid.uuid4', 'primary_key': '(True)'}), '(default_factory=uuid.uuid4, primary_key=True)\n', (630, 676), False, 'from sqlmodel import Field\n'), ((730, 797), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""files"""', 'link_model': 'FileTagAssociation'}), "(back_populates='files', link_model=FileTagAssociation)\n", (742, 797), False, 'from sqlmodel import Relationship\n'), ((877, 928), 'sqlmodel.Field', 'Field', ([], {'default_factory': 'uuid.uuid4', 'primary_key': '(True)'}), '(default_factory=uuid.uuid4, primary_key=True)\n', (882, 928), False, 'from sqlmodel import Field\n'), ((997, 1063), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""tags"""', 'link_model': 'FileTagAssociation'}), "(back_populates='tags', link_model=FileTagAssociation)\n", (1009, 1063), False, 'from sqlmodel import Relationship\n'), ((1141, 1159), 'sqlmodel.create_engine', 'create_engine', (['url'], {}), '(url)\n', (1154, 1159), False, 'from sqlmodel import create_engine\n'), ((1165, 1202), 'sqlmodel.SQLModel.metadata.create_all', 'SQLModel.metadata.create_all', (['_engine'], {}), '(_engine)\n', (1193, 1202), False, 'from sqlmodel import SQLModel\n')]
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine
import megengine.autodiff as ad
import megengine.optimizer as optimizer
from megengine import Parameter, tensor
from megengine.module import BatchNorm2d
def test_frozen_bn():
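    # A frozen BatchNorm layer must keep its running statistics and affine
    # parameters unchanged by a training step.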
nchannel = 3
m = BatchNorm2d(nchannel, freeze=True)
saved_var = m.running_var.numpy()
saved_mean = m.running_mean.numpy()
saved_wt = m.weight.numpy()
saved_bias = m.bias.numpy()
gm = ad.GradManager().attach(m.parameters())
optim = optimizer.SGD(m.parameters(), lr=1.0)
optim.clear_grad()
data = np.random.random((6, nchannel, 2, 2)).astype("float32")
with gm:
loss = m(data).mean()
gm.backward(loss)
optim.step()
np.testing.assert_equal(m.running_var.numpy(), saved_var)
np.testing.assert_equal(m.running_mean.numpy(), saved_mean)
np.testing.assert_equal(m.weight.numpy(), saved_wt)
np.testing.assert_equal(m.bias.numpy(), saved_bias)
np.testing.assert_almost_equal(loss.numpy(), data.mean(), 5)
def test_bn_no_track_stat():
nchannel = 3
m = BatchNorm2d(nchannel, track_running_stats=False)
gm = ad.GradManager().attach(m.parameters())
optim = optimizer.SGD(m.parameters(), lr=1.0)
optim.clear_grad()
data = np.random.random((6, nchannel, 2, 2)).astype("float32")
with gm:
loss = m(data).sum()
gm.backward(loss)
optim.step()
def test_bn_no_track_stat2():
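    # Disabling statistics tracking after construction must leave the
    # already-created running buffers untouched by training.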
nchannel = 3
    m = BatchNorm2d(nchannel)  # Init with track_running_stats=True
m.track_running_stats = False
# m.running_var and m.running_mean created during init time
saved_var = m.running_var.numpy()
assert saved_var is not None
saved_mean = m.running_mean.numpy()
assert saved_mean is not None
gm = ad.GradManager().attach(m.parameters())
optim = optimizer.SGD(m.parameters(), lr=1.0)
optim.clear_grad()
data = np.random.random((6, nchannel, 2, 2)).astype("float32")
with gm:
loss = m(data).sum()
gm.backward(loss)
optim.step()
np.testing.assert_equal(m.running_var.numpy(), saved_var)
np.testing.assert_equal(m.running_mean.numpy(), saved_mean)
def test_bn_no_track_stat3():
nchannel = 3
m = BatchNorm2d(nchannel, track_running_stats=False)
m.track_running_stats = True
data = np.random.random((6, nchannel, 2, 2)).astype("float32")
with pytest.raises(Exception):
m(data)
|
[
"megengine.module.BatchNorm2d",
"megengine.autodiff.GradManager"
] |
[((630, 664), 'megengine.module.BatchNorm2d', 'BatchNorm2d', (['nchannel'], {'freeze': '(True)'}), '(nchannel, freeze=True)\n', (641, 664), False, 'from megengine.module import BatchNorm2d\n'), ((1445, 1493), 'megengine.module.BatchNorm2d', 'BatchNorm2d', (['nchannel'], {'track_running_stats': '(False)'}), '(nchannel, track_running_stats=False)\n', (1456, 1493), False, 'from megengine.module import BatchNorm2d\n'), ((1827, 1848), 'megengine.module.BatchNorm2d', 'BatchNorm2d', (['nchannel'], {}), '(nchannel)\n', (1838, 1848), False, 'from megengine.module import BatchNorm2d\n'), ((2592, 2640), 'megengine.module.BatchNorm2d', 'BatchNorm2d', (['nchannel'], {'track_running_stats': '(False)'}), '(nchannel, track_running_stats=False)\n', (2603, 2640), False, 'from megengine.module import BatchNorm2d\n'), ((2750, 2774), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (2763, 2774), False, 'import pytest\n'), ((818, 834), 'megengine.autodiff.GradManager', 'ad.GradManager', ([], {}), '()\n', (832, 834), True, 'import megengine.autodiff as ad\n'), ((943, 980), 'numpy.random.random', 'np.random.random', (['(6, nchannel, 2, 2)'], {}), '((6, nchannel, 2, 2))\n', (959, 980), True, 'import numpy as np\n'), ((1504, 1520), 'megengine.autodiff.GradManager', 'ad.GradManager', ([], {}), '()\n', (1518, 1520), True, 'import megengine.autodiff as ad\n'), ((1629, 1666), 'numpy.random.random', 'np.random.random', (['(6, nchannel, 2, 2)'], {}), '((6, nchannel, 2, 2))\n', (1645, 1666), True, 'import numpy as np\n'), ((2142, 2158), 'megengine.autodiff.GradManager', 'ad.GradManager', ([], {}), '()\n', (2156, 2158), True, 'import megengine.autodiff as ad\n'), ((2267, 2304), 'numpy.random.random', 'np.random.random', (['(6, nchannel, 2, 2)'], {}), '((6, nchannel, 2, 2))\n', (2283, 2304), True, 'import numpy as np\n'), ((2685, 2722), 'numpy.random.random', 'np.random.random', (['(6, nchannel, 2, 2)'], {}), '((6, nchannel, 2, 2))\n', (2701, 2722), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""Finetune a pretrained fp32 with int8 post train quantization(calibration)"""
import argparse
import collections
import numbers
import os
import time
# pylint: disable=import-error
import models
import megengine as mge
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.quantization as Q
from megengine.quantization.quantize import enable_observer, quantize, quantize_qat
logger = mge.get_logger(__name__)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-a", "--arch", default="resnet18", type=str)
parser.add_argument("-d", "--data", default=None, type=str)
parser.add_argument("-s", "--save", default="/data/models", type=str)
parser.add_argument(
"-c",
"--checkpoint",
default=None,
type=str,
help="pretrained model to finetune",
)
parser.add_argument("-n", "--ngpus", default=None, type=int)
parser.add_argument("-w", "--workers", default=4, type=int)
parser.add_argument("--report-freq", default=50, type=int)
args = parser.parse_args()
world_size = (
dist.helper.get_device_count_by_fork("gpu")
if args.ngpus is None
else args.ngpus
)
world_size = 1 if world_size == 0 else world_size
if world_size != 1:
logger.warning(
"Calibration only supports single GPU now, %d provided", world_size
)
proc_func = dist.launcher(worker) if world_size > 1 else worker
proc_func(world_size, args)
def get_parameters(model, cfg):
if isinstance(cfg.WEIGHT_DECAY, numbers.Number):
return {
"params": model.parameters(requires_grad=True),
"weight_decay": cfg.WEIGHT_DECAY,
}
groups = collections.defaultdict(list) # weight_decay -> List[param]
for pname, p in model.named_parameters(requires_grad=True):
wd = cfg.WEIGHT_DECAY(pname, p)
groups[wd].append(p)
groups = [
{"params": params, "weight_decay": wd} for wd, params in groups.items()
] # List[{param, weight_decay}]
return groups
def worker(world_size, args):
# pylint: disable=too-many-statements
rank = dist.get_rank()
if world_size > 1:
# Initialize distributed process group
logger.info("init distributed process group {} / {}".format(rank, world_size))
save_dir = os.path.join(args.save, args.arch + "." + "calibration")
if not os.path.exists(save_dir):
os.makedirs(save_dir, exist_ok=True)
mge.set_log_file(os.path.join(save_dir, "log.txt"))
model = models.__dict__[args.arch]()
# load calibration model
assert args.checkpoint
logger.info("Load pretrained weights from %s", args.checkpoint)
ckpt = mge.load(args.checkpoint)
ckpt = ckpt["state_dict"] if "state_dict" in ckpt else ckpt
model.load_state_dict(ckpt, strict=False)
# Build valid datasets
valid_dataset = data.dataset.ImageNet(args.data, train=False)
valid_sampler = data.SequentialSampler(
valid_dataset, batch_size=100, drop_last=False
)
valid_queue = data.DataLoader(
valid_dataset,
sampler=valid_sampler,
transform=T.Compose(
[T.Resize(256), T.CenterCrop(224), T.Normalize(mean=128), T.ToMode("CHW")]
),
num_workers=args.workers,
)
# calibration
model.fc.disable_quantize()
model = quantize_qat(model, qconfig=Q.calibration_qconfig)
# calculate scale
def calculate_scale(image, label):
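        # Run forward passes with observers enabled so they collect the
        # activation statistics used to derive the quantization scales.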
model.eval()
enable_observer(model)
logits = model(image)
loss = F.loss.cross_entropy(logits, label, label_smooth=0.1)
acc1, acc5 = F.topk_accuracy(logits, label, (1, 5))
if dist.is_distributed(): # all_reduce_mean
loss = dist.functional.all_reduce_sum(loss) / dist.get_world_size()
acc1 = dist.functional.all_reduce_sum(acc1) / dist.get_world_size()
acc5 = dist.functional.all_reduce_sum(acc5) / dist.get_world_size()
return loss, acc1, acc5
infer(calculate_scale, valid_queue, args)
# quantized
model = quantize(model)
# eval quantized model
def eval_func(image, label):
model.eval()
logits = model(image)
loss = F.loss.cross_entropy(logits, label, label_smooth=0.1)
acc1, acc5 = F.topk_accuracy(logits, label, (1, 5))
if dist.is_distributed(): # all_reduce_mean
loss = dist.functional.all_reduce_sum(loss) / dist.get_world_size()
acc1 = dist.functional.all_reduce_sum(acc1) / dist.get_world_size()
acc5 = dist.functional.all_reduce_sum(acc5) / dist.get_world_size()
return loss, acc1, acc5
_, valid_acc, valid_acc5 = infer(eval_func, valid_queue, args)
logger.info("TEST %f, %f", valid_acc, valid_acc5)
# save quantized model
mge.save(
{"step": -1, "state_dict": model.state_dict()},
os.path.join(save_dir, "checkpoint-calibration.pkl"),
)
logger.info(
"save in {}".format(os.path.join(save_dir, "checkpoint-calibration.pkl"))
)
def infer(model, data_queue, args):
objs = AverageMeter("Loss")
top1 = AverageMeter("Acc@1")
top5 = AverageMeter("Acc@5")
total_time = AverageMeter("Time")
t = time.time()
for step, (image, label) in enumerate(data_queue):
n = image.shape[0]
image = mge.tensor(image, dtype="float32")
label = mge.tensor(label, dtype="int32")
loss, acc1, acc5 = model(image, label)
objs.update(loss.numpy()[0], n)
top1.update(100 * acc1.numpy()[0], n)
top5.update(100 * acc5.numpy()[0], n)
total_time.update(time.time() - t)
t = time.time()
if step % args.report_freq == 0 and dist.get_rank() == 0:
logger.info("Step %d, %s %s %s %s", step, objs, top1, top5, total_time)
        # Stop early after report_freq steps (quick evaluation run).
if step == args.report_freq:
break
return objs.avg, top1.avg, top5.avg
class AverageMeter:
"""Computes and stores the average and current value"""
def __init__(self, name, fmt=":.3f"):
self.name = name
self.fmt = fmt
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __str__(self):
fmtstr = "{name} {val" + self.fmt + "} ({avg" + self.fmt + "})"
return fmtstr.format(**self.__dict__)
if __name__ == "__main__":
main()
|
[
"megengine.distributed.is_distributed",
"megengine.data.transform.ToMode",
"megengine.data.transform.CenterCrop",
"megengine.data.transform.Normalize",
"megengine.data.SequentialSampler",
"megengine.distributed.get_rank",
"megengine.distributed.get_world_size",
"megengine.get_logger",
"megengine.data.dataset.ImageNet",
"megengine.data.transform.Resize",
"megengine.tensor",
"megengine.distributed.helper.get_device_count_by_fork",
"megengine.functional.loss.cross_entropy",
"megengine.quantization.quantize.quantize",
"megengine.quantization.quantize.quantize_qat",
"megengine.distributed.functional.all_reduce_sum",
"megengine.functional.topk_accuracy",
"megengine.quantization.quantize.enable_observer",
"megengine.load",
"megengine.distributed.launcher"
] |
[((866, 890), 'megengine.get_logger', 'mge.get_logger', (['__name__'], {}), '(__name__)\n', (880, 890), True, 'import megengine as mge\n'), ((918, 943), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (941, 943), False, 'import argparse\n'), ((2188, 2217), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (2211, 2217), False, 'import collections\n'), ((2618, 2633), 'megengine.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (2631, 2633), True, 'import megengine.distributed as dist\n'), ((2807, 2863), 'os.path.join', 'os.path.join', (['args.save', "(args.arch + '.' + 'calibration')"], {}), "(args.save, args.arch + '.' + 'calibration')\n", (2819, 2863), False, 'import os\n'), ((3180, 3205), 'megengine.load', 'mge.load', (['args.checkpoint'], {}), '(args.checkpoint)\n', (3188, 3205), True, 'import megengine as mge\n'), ((3364, 3409), 'megengine.data.dataset.ImageNet', 'data.dataset.ImageNet', (['args.data'], {'train': '(False)'}), '(args.data, train=False)\n', (3385, 3409), True, 'import megengine.data as data\n'), ((3430, 3500), 'megengine.data.SequentialSampler', 'data.SequentialSampler', (['valid_dataset'], {'batch_size': '(100)', 'drop_last': '(False)'}), '(valid_dataset, batch_size=100, drop_last=False)\n', (3452, 3500), True, 'import megengine.data as data\n'), ((3834, 3884), 'megengine.quantization.quantize.quantize_qat', 'quantize_qat', (['model'], {'qconfig': 'Q.calibration_qconfig'}), '(model, qconfig=Q.calibration_qconfig)\n', (3846, 3884), False, 'from megengine.quantization.quantize import enable_observer, quantize, quantize_qat\n'), ((4559, 4574), 'megengine.quantization.quantize.quantize', 'quantize', (['model'], {}), '(model)\n', (4567, 4574), False, 'from megengine.quantization.quantize import enable_observer, quantize, quantize_qat\n'), ((5717, 5728), 'time.time', 'time.time', ([], {}), '()\n', (5726, 5728), False, 'import time\n'), ((1558, 1601), 'megengine.distributed.helper.get_device_count_by_fork', 'dist.helper.get_device_count_by_fork', (['"""gpu"""'], {}), "('gpu')\n", (1594, 1601), True, 'import megengine.distributed as dist\n'), ((1870, 1891), 'megengine.distributed.launcher', 'dist.launcher', (['worker'], {}), '(worker)\n', (1883, 1891), True, 'import megengine.distributed as dist\n'), ((2875, 2899), 'os.path.exists', 'os.path.exists', (['save_dir'], {}), '(save_dir)\n', (2889, 2899), False, 'import os\n'), ((2909, 2945), 'os.makedirs', 'os.makedirs', (['save_dir'], {'exist_ok': '(True)'}), '(save_dir, exist_ok=True)\n', (2920, 2945), False, 'import os\n'), ((2967, 3000), 'os.path.join', 'os.path.join', (['save_dir', '"""log.txt"""'], {}), "(save_dir, 'log.txt')\n", (2979, 3000), False, 'import os\n'), ((3976, 3998), 'megengine.quantization.quantize.enable_observer', 'enable_observer', (['model'], {}), '(model)\n', (3991, 3998), False, 'from megengine.quantization.quantize import enable_observer, quantize, quantize_qat\n'), ((4044, 4097), 'megengine.functional.loss.cross_entropy', 'F.loss.cross_entropy', (['logits', 'label'], {'label_smooth': '(0.1)'}), '(logits, label, label_smooth=0.1)\n', (4064, 4097), True, 'import megengine.functional as F\n'), ((4119, 4157), 'megengine.functional.topk_accuracy', 'F.topk_accuracy', (['logits', 'label', '(1, 5)'], {}), '(logits, label, (1, 5))\n', (4134, 4157), True, 'import megengine.functional as F\n'), ((4169, 4190), 'megengine.distributed.is_distributed', 'dist.is_distributed', ([], {}), '()\n', (4188, 4190), True, 'import megengine.distributed as dist\n'), ((4702, 4755), 'megengine.functional.loss.cross_entropy', 'F.loss.cross_entropy', (['logits', 'label'], {'label_smooth': '(0.1)'}), '(logits, label, label_smooth=0.1)\n', (4722, 4755), True, 'import megengine.functional as F\n'), ((4777, 4815), 'megengine.functional.topk_accuracy', 'F.topk_accuracy', (['logits', 'label', '(1, 5)'], {}), '(logits, label, (1, 5))\n', (4792, 4815), True, 'import megengine.functional as F\n'), ((4827, 4848), 'megengine.distributed.is_distributed', 'dist.is_distributed', ([], {}), '()\n', (4846, 4848), True, 'import megengine.distributed as dist\n'), ((5369, 5421), 'os.path.join', 'os.path.join', (['save_dir', '"""checkpoint-calibration.pkl"""'], {}), "(save_dir, 'checkpoint-calibration.pkl')\n", (5381, 5421), False, 'import os\n'), ((5827, 5861), 'megengine.tensor', 'mge.tensor', (['image'], {'dtype': '"""float32"""'}), "(image, dtype='float32')\n", (5837, 5861), True, 'import megengine as mge\n'), ((5878, 5910), 'megengine.tensor', 'mge.tensor', (['label'], {'dtype': '"""int32"""'}), "(label, dtype='int32')\n", (5888, 5910), True, 'import megengine as mge\n'), ((6147, 6158), 'time.time', 'time.time', ([], {}), '()\n', (6156, 6158), False, 'import time\n'), ((5474, 5526), 'os.path.join', 'os.path.join', (['save_dir', '"""checkpoint-calibration.pkl"""'], {}), "(save_dir, 'checkpoint-calibration.pkl')\n", (5486, 5526), False, 'import os\n'), ((4230, 4266), 'megengine.distributed.functional.all_reduce_sum', 'dist.functional.all_reduce_sum', (['loss'], {}), '(loss)\n', (4260, 4266), True, 'import megengine.distributed as dist\n'), ((4269, 4290), 'megengine.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (4288, 4290), True, 'import megengine.distributed as dist\n'), ((4310, 4346), 'megengine.distributed.functional.all_reduce_sum', 'dist.functional.all_reduce_sum', (['acc1'], {}), '(acc1)\n', (4340, 4346), True, 'import megengine.distributed as dist\n'), ((4349, 4370), 'megengine.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (4368, 4370), True, 'import megengine.distributed as dist\n'), ((4390, 4426), 'megengine.distributed.functional.all_reduce_sum', 'dist.functional.all_reduce_sum', (['acc5'], {}), '(acc5)\n', (4420, 4426), True, 'import megengine.distributed as dist\n'), ((4429, 4450), 'megengine.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (4448, 4450), True, 'import megengine.distributed as dist\n'), ((4888, 4924), 'megengine.distributed.functional.all_reduce_sum', 'dist.functional.all_reduce_sum', (['loss'], {}), '(loss)\n', (4918, 4924), True, 'import megengine.distributed as dist\n'), ((4927, 4948), 'megengine.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (4946, 4948), True, 'import megengine.distributed as dist\n'), ((4968, 5004), 'megengine.distributed.functional.all_reduce_sum', 'dist.functional.all_reduce_sum', (['acc1'], {}), '(acc1)\n', (4998, 5004), True, 'import megengine.distributed as dist\n'), ((5007, 5028), 'megengine.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (5026, 5028), True, 'import megengine.distributed as dist\n'), ((5048, 5084), 'megengine.distributed.functional.all_reduce_sum', 'dist.functional.all_reduce_sum', (['acc5'], {}), '(acc5)\n', (5078, 5084), True, 'import megengine.distributed as dist\n'), ((5087, 5108), 'megengine.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (5106, 5108), True, 'import megengine.distributed as dist\n'), ((6118, 6129), 'time.time', 'time.time', ([], {}), '()\n', (6127, 6129), False, 'import time\n'), ((6204, 6219), 'megengine.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (6217, 6219), True, 'import megengine.distributed as dist\n'), ((3646, 3659), 'megengine.data.transform.Resize', 'T.Resize', (['(256)'], {}), '(256)\n', (3654, 3659), True, 'import megengine.data.transform as T\n'), ((3661, 3678), 'megengine.data.transform.CenterCrop', 'T.CenterCrop', (['(224)'], {}), '(224)\n', (3673, 3678), True, 'import megengine.data.transform as T\n'), ((3680, 3701), 'megengine.data.transform.Normalize', 'T.Normalize', ([], {'mean': '(128)'}), '(mean=128)\n', (3691, 3701), True, 'import megengine.data.transform as T\n'), ((3703, 3718), 'megengine.data.transform.ToMode', 'T.ToMode', (['"""CHW"""'], {}), "('CHW')\n", (3711, 3718), True, 'import megengine.data.transform as T\n')]
|
import megengine as mge
import megengine.module as M
import megengine.functional as F
from megengine.core import Parameter
from utils import *
def addLeakyRelu(x):
return M.Sequential(x, M.LeakyReLU(0.1))
def addSig(x):
return M.Sequential(x, M.Sigmoid())
def up_block(x, ic, oc):
return M.ConvTranspose2d(ic, oc, 4, stride=2, padding=1)
def down_block(x, ic, oc):
return M.Conv2d(ic, oc, 3, padding=1, stride=2)
class BasicBlock(M.Module):
expansion = 1
def __init__(
self,
in_channels,
channels,
stride=1,
groups=1,
base_width=64,
dilation=1,
norm=M.BatchNorm2d,
):
super().__init__()
if groups != 1 or base_width != 64:
raise ValueError("BasicBlock only supports groups=1 and base_width=64")
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
self.conv1 = M.Conv2d(
in_channels, channels, 3, stride, padding=dilation, bias=True
)
self.conv2 = M.Conv2d(channels, channels, 3, 1, padding=1, bias=True)
if in_channels == channels and stride == 1:
self.downsample = M.Identity()
elif stride == 1:
self.downsample = M.Conv2d(in_channels, channels, 1, stride, bias=False)
else:
self.downsample = M.Sequential(
M.AvgPool2d(kernel_size=stride, stride=stride),
M.Conv2d(in_channels, channels, 1, 1, bias=False)
)
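        # Squeeze-and-excitation style channel attention: global-average-pooled
        # features pass through a 16-channel bottleneck and gate the main branch.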
self.fc1 = M.Conv2d(channels, 16, kernel_size=1)
self.fc2 = M.Conv2d(16, channels, kernel_size=1)
self.relu1 = M.LeakyReLU(0.1)
self.relu2 = M.LeakyReLU(0.1)
self.relu3 = M.LeakyReLU(0.1)
def forward(self, x):
identity = x
x = self.conv1(x)
x = self.relu1(x)
x = self.conv2(x)
identity = self.downsample(identity)
w = x.mean(3, True).mean(2, True)
w = self.relu2(self.fc1(w))
w = F.sigmoid(self.fc2(w))
x = x * w + identity
x = self.relu3(x)
return x
def subpixel(x):
shape = x.shape
x = x.reshape(shape[0], shape[1] // 4, 2, 2, shape[2], shape[3])
x = F.dimshuffle(x, (0, 1, 4, 2, 5, 3))
return x.reshape(shape[0], shape[1] // 4, shape[2]*2, shape[3]*2)
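# Shape sketch (comment only; numbers are illustrative): subpixel is a 2x pixel
# shuffle mapping (N, 4*C, H, W) -> (N, C, 2*H, 2*W), e.g. (1, 8, 4, 4) -> (1, 2, 8, 8).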
c = 64
class SimpleUNet(M.Module):
def __init__(self):
super().__init__()
self.conv0_ = (BasicBlock(3, 32, stride=2))
self.conv1_ = (BasicBlock(32, c, stride=2))
self.conv0 = (BasicBlock(15, 32, stride=2))
self.conv1 = (BasicBlock(32, c, stride=2))
self.conv2 = (BasicBlock(c, 2*c, stride=1))
self.conv3 = (BasicBlock(2*c, 2*c, stride=1))
self.conv4 = (BasicBlock(4*c, 2*c, stride=1))
self.conv5 = (BasicBlock(4*c, 2*c, stride=1))
self.conv6 = (BasicBlock(6*c, 2*c, stride=1))
self.conv7 = (BasicBlock(6*c, 2*c, stride=1))
self.conv8 = (BasicBlock(6*c, 2*c, stride=1))
self.conv9 = (BasicBlock(6*c, 2*c, stride=1))
self.conv10 = (BasicBlock(3*c, 4*c, stride=1))
self.conv11 = addSig(M.Conv2d(c+32, 12, 1))
def forward(self, x):
size = x.shape
x = x.reshape((size[0] * 5, 3) + size[2:])
conv0 = tsm(self.conv0_(x))
conv1 = tsm(self.conv1_(conv0))
#
x = (x.reshape((size[0], 15) + x.shape[2:]))
conv0_ = (conv0.reshape((size[0], 5) + conv0.shape[1:]))[:, 2]
conv1_ = (conv1.reshape((size[0], 5) + conv1.shape[1:]))[:, 2]
conv0 = self.conv0(x)
conv1 = self.conv1(conv0)
conv0 += conv0_
conv1 += conv1_
conv2 = (self.conv2(conv1))
conv3 = (self.conv3(conv2))
conv4 = (self.conv4(F.concat((conv3, conv2), 1)))
conv5 = (self.conv5(F.concat((conv4, conv3), 1)))
conv6 = (self.conv6(F.concat((conv5, conv4, conv2), 1)))
conv7 = (self.conv7(F.concat((conv6, conv5, conv3), 1)))
conv8 = (self.conv8(F.concat((conv7, conv6, conv4), 1)))
conv9 = (self.conv9(F.concat((conv8, conv7, conv5), 1)))
conv10 = subpixel(self.conv10(F.concat((conv9, conv1), 1)))
conv11 = subpixel(self.conv11(F.concat((conv10, conv0), 1)))
        conv11 = conv11 * 2 - 1 # map the sigmoid output from [0, 1] to [-1, 1]
return F.minimum(F.maximum(conv11 + x[:, 6:9], 0), 1)
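# Usage sketch (assumptions: `tsm` is the temporal-shift helper pulled in by
# `from utils import *`, and the input stacks 5 consecutive RGB frames along
# the channel axis):
#
#   import numpy as np
#   net = SimpleUNet()
#   frames = mge.tensor(np.random.rand(1, 15, 64, 64).astype("float32"))
#   out = net(frames)  # restored middle frame in [0, 1], shape (1, 3, 64, 64)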
|
[
"megengine.module.ConvTranspose2d",
"megengine.functional.maximum",
"megengine.module.LeakyReLU",
"megengine.module.Conv2d",
"megengine.module.AvgPool2d",
"megengine.module.Sigmoid",
"megengine.module.Identity",
"megengine.functional.dimshuffle",
"megengine.functional.concat"
] |
[((303, 352), 'megengine.module.ConvTranspose2d', 'M.ConvTranspose2d', (['ic', 'oc', '(4)'], {'stride': '(2)', 'padding': '(1)'}), '(ic, oc, 4, stride=2, padding=1)\n', (320, 352), True, 'import megengine.module as M\n'), ((392, 432), 'megengine.module.Conv2d', 'M.Conv2d', (['ic', 'oc', '(3)'], {'padding': '(1)', 'stride': '(2)'}), '(ic, oc, 3, padding=1, stride=2)\n', (400, 432), True, 'import megengine.module as M\n'), ((2241, 2276), 'megengine.functional.dimshuffle', 'F.dimshuffle', (['x', '(0, 1, 4, 2, 5, 3)'], {}), '(x, (0, 1, 4, 2, 5, 3))\n', (2253, 2276), True, 'import megengine.functional as F\n'), ((192, 208), 'megengine.module.LeakyReLU', 'M.LeakyReLU', (['(0.1)'], {}), '(0.1)\n', (203, 208), True, 'import megengine.module as M\n'), ((253, 264), 'megengine.module.Sigmoid', 'M.Sigmoid', ([], {}), '()\n', (262, 264), True, 'import megengine.module as M\n'), ((949, 1020), 'megengine.module.Conv2d', 'M.Conv2d', (['in_channels', 'channels', '(3)', 'stride'], {'padding': 'dilation', 'bias': '(True)'}), '(in_channels, channels, 3, stride, padding=dilation, bias=True)\n', (957, 1020), True, 'import megengine.module as M\n'), ((1064, 1120), 'megengine.module.Conv2d', 'M.Conv2d', (['channels', 'channels', '(3)', '(1)'], {'padding': '(1)', 'bias': '(True)'}), '(channels, channels, 3, 1, padding=1, bias=True)\n', (1072, 1120), True, 'import megengine.module as M\n'), ((1560, 1597), 'megengine.module.Conv2d', 'M.Conv2d', (['channels', '(16)'], {'kernel_size': '(1)'}), '(channels, 16, kernel_size=1)\n', (1568, 1597), True, 'import megengine.module as M\n'), ((1618, 1655), 'megengine.module.Conv2d', 'M.Conv2d', (['(16)', 'channels'], {'kernel_size': '(1)'}), '(16, channels, kernel_size=1)\n', (1626, 1655), True, 'import megengine.module as M\n'), ((1677, 1693), 'megengine.module.LeakyReLU', 'M.LeakyReLU', (['(0.1)'], {}), '(0.1)\n', (1688, 1693), True, 'import megengine.module as M\n'), ((1715, 1731), 'megengine.module.LeakyReLU', 'M.LeakyReLU', (['(0.1)'], {}), '(0.1)\n', (1726, 1731), True, 'import megengine.module as M\n'), ((1753, 1769), 'megengine.module.LeakyReLU', 'M.LeakyReLU', (['(0.1)'], {}), '(0.1)\n', (1764, 1769), True, 'import megengine.module as M\n'), ((1203, 1215), 'megengine.module.Identity', 'M.Identity', ([], {}), '()\n', (1213, 1215), True, 'import megengine.module as M\n'), ((3156, 3179), 'megengine.module.Conv2d', 'M.Conv2d', (['(c + 32)', '(12)', '(1)'], {}), '(c + 32, 12, 1)\n', (3164, 3179), True, 'import megengine.module as M\n'), ((3782, 3809), 'megengine.functional.concat', 'F.concat', (['(conv3, conv2)', '(1)'], {}), '((conv3, conv2), 1)\n', (3790, 3809), True, 'import megengine.functional as F\n'), ((3840, 3867), 'megengine.functional.concat', 'F.concat', (['(conv4, conv3)', '(1)'], {}), '((conv4, conv3), 1)\n', (3848, 3867), True, 'import megengine.functional as F\n'), ((3898, 3932), 'megengine.functional.concat', 'F.concat', (['(conv5, conv4, conv2)', '(1)'], {}), '((conv5, conv4, conv2), 1)\n', (3906, 3932), True, 'import megengine.functional as F\n'), ((3963, 3997), 'megengine.functional.concat', 'F.concat', (['(conv6, conv5, conv3)', '(1)'], {}), '((conv6, conv5, conv3), 1)\n', (3971, 3997), True, 'import megengine.functional as F\n'), ((4028, 4062), 'megengine.functional.concat', 'F.concat', (['(conv7, conv6, conv4)', '(1)'], {}), '((conv7, conv6, conv4), 1)\n', (4036, 4062), True, 'import megengine.functional as F\n'), ((4093, 4127), 'megengine.functional.concat', 'F.concat', (['(conv8, conv7, conv5)', '(1)'], {}), '((conv8, conv7, conv5), 1)\n', (4101, 
4127), True, 'import megengine.functional as F\n'), ((4347, 4379), 'megengine.functional.maximum', 'F.maximum', (['(conv11 + x[:, 6:9])', '(0)'], {}), '(conv11 + x[:, 6:9], 0)\n', (4356, 4379), True, 'import megengine.functional as F\n'), ((1272, 1326), 'megengine.module.Conv2d', 'M.Conv2d', (['in_channels', 'channels', '(1)', 'stride'], {'bias': '(False)'}), '(in_channels, channels, 1, stride, bias=False)\n', (1280, 1326), True, 'import megengine.module as M\n'), ((4168, 4195), 'megengine.functional.concat', 'F.concat', (['(conv9, conv1)', '(1)'], {}), '((conv9, conv1), 1)\n', (4176, 4195), True, 'import megengine.functional as F\n'), ((4236, 4264), 'megengine.functional.concat', 'F.concat', (['(conv10, conv0)', '(1)'], {}), '((conv10, conv0), 1)\n', (4244, 4264), True, 'import megengine.functional as F\n'), ((1405, 1451), 'megengine.module.AvgPool2d', 'M.AvgPool2d', ([], {'kernel_size': 'stride', 'stride': 'stride'}), '(kernel_size=stride, stride=stride)\n', (1416, 1451), True, 'import megengine.module as M\n'), ((1473, 1522), 'megengine.module.Conv2d', 'M.Conv2d', (['in_channels', 'channels', '(1)', '(1)'], {'bias': '(False)'}), '(in_channels, channels, 1, 1, bias=False)\n', (1481, 1522), True, 'import megengine.module as M\n')]
|
from sfepy.base.testing import TestCommon
class Test(TestCommon):
@staticmethod
def from_conf(conf, options):
return Test(conf=conf, options=options)
def test_elastic_constants(self):
import numpy as nm
from sfepy.mechanics.matcoefs import ElasticConstants
ok = True
names = ['bulk', 'lam', 'mu', 'young', 'poisson', 'p_wave']
ec = ElasticConstants(lam=1.0, mu=1.5)
vals = ec.get(names)
self.report('using values:', vals)
for i1 in range(len(names)):
for i2 in range(i1+1, len(names)):
kwargs = {names[i1] : vals[i1], names[i2] : vals[i2]}
try:
ec.init(**kwargs)
                except Exception:
_ok = False
else:
_ok = True
ec_vals = ec.get(names)
_ok = _ok and nm.allclose(ec_vals, vals)
self.report(names[i1], names[i2], '->', _ok)
if not _ok:
self.report('correct:', vals)
self.report(' got:', ec_vals)
ok = ok and _ok
return ok
def test_conversion_functions(self):
import numpy as nm
import sfepy.mechanics.matcoefs as mc
ok = True
lam = 1.0
mu = 1.5
ec = mc.ElasticConstants(lam=lam, mu=mu)
young, poisson, bulk = ec.get(['young', 'poisson', 'bulk'])
lam = nm.array([lam] * 3)
mu = nm.array([mu] * 3)
young = nm.array([young] * 3)
poisson = nm.array([poisson] * 3)
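        # Round-trip check of the standard relations
        # lam = E*nu / ((1 + nu)*(1 - 2*nu)), mu = E / (2*(1 + nu)).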
_lam, _mu = mc.lame_from_youngpoisson(young, poisson)
_ok = (nm.allclose(lam, _lam, rtol=0.0, atol=1e-14) and
nm.allclose(mu, _mu, rtol=0.0, atol=1e-14))
self.report('lame_from_youngpoisson():', _ok)
if not _ok:
self.report('correct:', lam, mu)
self.report(' got:', _lam, _mu)
ok = ok and _ok
_bulk = mc.bulk_from_youngpoisson(young, poisson)
_ok = nm.allclose(bulk, _bulk, rtol=0.0, atol=1e-14)
self.report('bulk_from_youngpoisson():', _ok)
if not _ok:
self.report('correct:', bulk)
self.report(' got:', _bulk)
ok = ok and _ok
_bulk = mc.bulk_from_lame(lam, mu)
_ok = nm.allclose(bulk, _bulk, rtol=0.0, atol=1e-14)
self.report('bulk_from_lame():', _ok)
if not _ok:
self.report('correct:', bulk)
self.report(' got:', _bulk)
ok = ok and _ok
return ok
def test_stiffness_tensors(self):
import numpy as nm
from sfepy.base.base import assert_
import sfepy.mechanics.matcoefs as mc
ok = True
lam = 1.0
mu = 4.0
lam = nm.array([lam] * 3)
mu = nm.array([mu] * 3)
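        # Expected isotropic stiffness in Voigt notation for lam=1, mu=4:
        # lam + 2*mu = 9 on the diagonal, lam = 1 off-diagonal, mu = 4 in shear.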
d = nm.array([[ 9., 1., 1., 0., 0., 0.],
[ 1., 9., 1., 0., 0., 0.],
[ 1., 1., 9., 0., 0., 0.],
[ 0., 0., 0., 4., 0., 0.],
[ 0., 0., 0., 0., 4., 0.],
[ 0., 0., 0., 0., 0., 4.]])
_ds = mc.stiffness_from_lame(3, lam, mu)
assert_(_ds.shape == (3, 6, 6))
_ok = True
for _d in _ds:
__ok = nm.allclose(_d, d, rtol=0.0, atol=1e-14)
_ok = _ok and __ok
self.report('stiffness_from_lame():', _ok)
ok = ok and _ok
d = 4.0 / 3.0 * nm.array([[ 4., -2., -2., 0., 0., 0.],
[-2., 4., -2., 0., 0., 0.],
[-2., -2., 4., 0., 0., 0.],
[ 0., 0., 0., 3., 0., 0.],
[ 0., 0., 0., 0., 3., 0.],
[ 0., 0., 0., 0., 0., 3.]])
_ds = mc.stiffness_from_lame_mixed(3, lam, mu)
assert_(_ds.shape == (3, 6, 6))
_ok = True
for _d in _ds:
__ok = nm.allclose(_d, d, rtol=0.0, atol=1e-14)
_ok = _ok and __ok
self.report('stiffness_from_lame_mixed():', _ok)
ok = ok and _ok
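        # stiffness_from_lame_mixed() corresponds to stiffness_from_lame() with
        # lambda replaced by -2*mu/3, as verified below.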
        blam = -mu * 2.0 / 3.0
_ds = mc.stiffness_from_lame(3, blam, mu)
assert_(_ds.shape == (3, 6, 6))
_ok = True
for _d in _ds:
__ok = nm.allclose(_d, d, rtol=0.0, atol=1e-14)
_ok = _ok and __ok
self.report('stiffness_from_lame() with modified lambda:', _ok)
ok = ok and _ok
return ok
|
[
"sfepy.mechanics.matcoefs.bulk_from_lame",
"sfepy.mechanics.matcoefs.lame_from_youngpoisson",
"sfepy.mechanics.matcoefs.stiffness_from_lame_mixed",
"sfepy.mechanics.matcoefs.stiffness_from_lame",
"sfepy.base.base.assert_",
"sfepy.mechanics.matcoefs.ElasticConstants",
"sfepy.mechanics.matcoefs.bulk_from_youngpoisson"
] |
[((398, 431), 'sfepy.mechanics.matcoefs.ElasticConstants', 'ElasticConstants', ([], {'lam': '(1.0)', 'mu': '(1.5)'}), '(lam=1.0, mu=1.5)\n', (414, 431), False, 'from sfepy.mechanics.matcoefs import ElasticConstants\n'), ((1359, 1394), 'sfepy.mechanics.matcoefs.ElasticConstants', 'mc.ElasticConstants', ([], {'lam': 'lam', 'mu': 'mu'}), '(lam=lam, mu=mu)\n', (1378, 1394), True, 'import sfepy.mechanics.matcoefs as mc\n'), ((1478, 1497), 'numpy.array', 'nm.array', (['([lam] * 3)'], {}), '([lam] * 3)\n', (1486, 1497), True, 'import numpy as nm\n'), ((1511, 1529), 'numpy.array', 'nm.array', (['([mu] * 3)'], {}), '([mu] * 3)\n', (1519, 1529), True, 'import numpy as nm\n'), ((1546, 1567), 'numpy.array', 'nm.array', (['([young] * 3)'], {}), '([young] * 3)\n', (1554, 1567), True, 'import numpy as nm\n'), ((1586, 1609), 'numpy.array', 'nm.array', (['([poisson] * 3)'], {}), '([poisson] * 3)\n', (1594, 1609), True, 'import numpy as nm\n'), ((1631, 1672), 'sfepy.mechanics.matcoefs.lame_from_youngpoisson', 'mc.lame_from_youngpoisson', (['young', 'poisson'], {}), '(young, poisson)\n', (1656, 1672), True, 'import sfepy.mechanics.matcoefs as mc\n'), ((2003, 2044), 'sfepy.mechanics.matcoefs.bulk_from_youngpoisson', 'mc.bulk_from_youngpoisson', (['young', 'poisson'], {}), '(young, poisson)\n', (2028, 2044), True, 'import sfepy.mechanics.matcoefs as mc\n'), ((2059, 2105), 'numpy.allclose', 'nm.allclose', (['bulk', '_bulk'], {'rtol': '(0.0)', 'atol': '(1e-14)'}), '(bulk, _bulk, rtol=0.0, atol=1e-14)\n', (2070, 2105), True, 'import numpy as nm\n'), ((2306, 2332), 'sfepy.mechanics.matcoefs.bulk_from_lame', 'mc.bulk_from_lame', (['lam', 'mu'], {}), '(lam, mu)\n', (2323, 2332), True, 'import sfepy.mechanics.matcoefs as mc\n'), ((2347, 2393), 'numpy.allclose', 'nm.allclose', (['bulk', '_bulk'], {'rtol': '(0.0)', 'atol': '(1e-14)'}), '(bulk, _bulk, rtol=0.0, atol=1e-14)\n', (2358, 2393), True, 'import numpy as nm\n'), ((2815, 2834), 'numpy.array', 'nm.array', (['([lam] * 3)'], {}), '([lam] * 3)\n', (2823, 2834), True, 'import numpy as nm\n'), ((2848, 2866), 'numpy.array', 'nm.array', (['([mu] * 3)'], {}), '([mu] * 3)\n', (2856, 2866), True, 'import numpy as nm\n'), ((2880, 3092), 'numpy.array', 'nm.array', (['[[9.0, 1.0, 1.0, 0.0, 0.0, 0.0], [1.0, 9.0, 1.0, 0.0, 0.0, 0.0], [1.0, 1.0,\n 9.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 4.0, 0.0, 0.0], [0.0, 0.0, 0.0, \n 0.0, 4.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 4.0]]'], {}), '([[9.0, 1.0, 1.0, 0.0, 0.0, 0.0], [1.0, 9.0, 1.0, 0.0, 0.0, 0.0], [\n 1.0, 1.0, 9.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 4.0, 0.0, 0.0], [0.0, \n 0.0, 0.0, 0.0, 4.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 4.0]])\n', (2888, 3092), True, 'import numpy as nm\n'), ((3208, 3242), 'sfepy.mechanics.matcoefs.stiffness_from_lame', 'mc.stiffness_from_lame', (['(3)', 'lam', 'mu'], {}), '(3, lam, mu)\n', (3230, 3242), True, 'import sfepy.mechanics.matcoefs as mc\n'), ((3251, 3282), 'sfepy.base.base.assert_', 'assert_', (['(_ds.shape == (3, 6, 6))'], {}), '(_ds.shape == (3, 6, 6))\n', (3258, 3282), False, 'from sfepy.base.base import assert_\n'), ((3905, 3945), 'sfepy.mechanics.matcoefs.stiffness_from_lame_mixed', 'mc.stiffness_from_lame_mixed', (['(3)', 'lam', 'mu'], {}), '(3, lam, mu)\n', (3933, 3945), True, 'import sfepy.mechanics.matcoefs as mc\n'), ((3954, 3985), 'sfepy.base.base.assert_', 'assert_', (['(_ds.shape == (3, 6, 6))'], {}), '(_ds.shape == (3, 6, 6))\n', (3961, 3985), False, 'from sfepy.base.base import assert_\n'), ((4248, 4283), 'sfepy.mechanics.matcoefs.stiffness_from_lame', 'mc.stiffness_from_lame', (['(3)', 
'blam', 'mu'], {}), '(3, blam, mu)\n', (4270, 4283), True, 'import sfepy.mechanics.matcoefs as mc\n'), ((4292, 4323), 'sfepy.base.base.assert_', 'assert_', (['(_ds.shape == (3, 6, 6))'], {}), '(_ds.shape == (3, 6, 6))\n', (4299, 4323), False, 'from sfepy.base.base import assert_\n'), ((1688, 1732), 'numpy.allclose', 'nm.allclose', (['lam', '_lam'], {'rtol': '(0.0)', 'atol': '(1e-14)'}), '(lam, _lam, rtol=0.0, atol=1e-14)\n', (1699, 1732), True, 'import numpy as nm\n'), ((1752, 1794), 'numpy.allclose', 'nm.allclose', (['mu', '_mu'], {'rtol': '(0.0)', 'atol': '(1e-14)'}), '(mu, _mu, rtol=0.0, atol=1e-14)\n', (1763, 1794), True, 'import numpy as nm\n'), ((3345, 3385), 'numpy.allclose', 'nm.allclose', (['_d', 'd'], {'rtol': '(0.0)', 'atol': '(1e-14)'}), '(_d, d, rtol=0.0, atol=1e-14)\n', (3356, 3385), True, 'import numpy as nm\n'), ((3517, 3735), 'numpy.array', 'nm.array', (['[[4.0, -2.0, -2.0, 0.0, 0.0, 0.0], [-2.0, 4.0, -2.0, 0.0, 0.0, 0.0], [-2.0,\n -2.0, 4.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 3.0, 0.0, 0.0], [0.0, 0.0, \n 0.0, 0.0, 3.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 3.0]]'], {}), '([[4.0, -2.0, -2.0, 0.0, 0.0, 0.0], [-2.0, 4.0, -2.0, 0.0, 0.0, 0.0\n ], [-2.0, -2.0, 4.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 3.0, 0.0, 0.0], [\n 0.0, 0.0, 0.0, 0.0, 3.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 3.0]])\n', (3525, 3735), True, 'import numpy as nm\n'), ((4048, 4088), 'numpy.allclose', 'nm.allclose', (['_d', 'd'], {'rtol': '(0.0)', 'atol': '(1e-14)'}), '(_d, d, rtol=0.0, atol=1e-14)\n', (4059, 4088), True, 'import numpy as nm\n'), ((4386, 4426), 'numpy.allclose', 'nm.allclose', (['_d', 'd'], {'rtol': '(0.0)', 'atol': '(1e-14)'}), '(_d, d, rtol=0.0, atol=1e-14)\n', (4397, 4426), True, 'import numpy as nm\n'), ((902, 928), 'numpy.allclose', 'nm.allclose', (['ec_vals', 'vals'], {}), '(ec_vals, vals)\n', (913, 928), True, 'import numpy as nm\n')]
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import multiprocessing as mp
import os
import cv2
import megengine as mge
import megengine.data as data
import megengine.data.dataset as dataset
import megengine.data.transform as T
import megengine.jit as jit
import numpy as np
from tqdm import tqdm
from official.vision.segmentation.deeplabv3plus import DeepLabV3Plus
from official.vision.segmentation.utils import import_config_from_file
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"-c", "--config", type=str, required=True, help="configuration file"
)
parser.add_argument(
"-d", "--dataset_dir", type=str, default="/data/datasets/VOC2012",
)
parser.add_argument(
"-m", "--model_path", type=str, default=None, help="eval model file"
)
args = parser.parse_args()
cfg = import_config_from_file(args.config)
test_loader, test_size = build_dataloader(args.dataset_dir, cfg)
print("number of test images: %d" % (test_size))
net = DeepLabV3Plus(class_num=cfg.NUM_CLASSES)
model_dict = mge.load(args.model_path)
net.load_state_dict(model_dict["state_dict"])
print("load model %s" % (args.model_path))
net.eval()
result_list = []
for sample_batched in tqdm(test_loader):
img = sample_batched[0].squeeze()
label = sample_batched[1].squeeze()
im_info = sample_batched[2]
pred = evaluate(net, img, cfg)
        result_list.append({"pred": pred, "gt": label, "name": im_info[2]})
if cfg.VAL_SAVE:
save_results(result_list, cfg.VAL_SAVE, cfg)
compute_metric(result_list, cfg)
## inference one image
def pad_image_to_shape(img, shape, border_mode, value):
margin = np.zeros(4, np.uint32)
pad_height = shape[0] - img.shape[0] if shape[0] - img.shape[0] > 0 else 0
pad_width = shape[1] - img.shape[1] if shape[1] - img.shape[1] > 0 else 0
margin[0] = pad_height // 2
margin[1] = pad_height // 2 + pad_height % 2
margin[2] = pad_width // 2
margin[3] = pad_width // 2 + pad_width % 2
img = cv2.copyMakeBorder(
img, margin[0], margin[1], margin[2], margin[3], border_mode, value=value
)
return img, margin
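# Example (sketch): padding a (3, 5) image to shape (5, 8) gives margins
# (top, bottom, left, right) = (1, 1, 1, 2), keeping the image centered.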
def eval_single(net, img, is_flip):
@jit.trace(symbolic=True, opt_level=2)
def pred_fun(data, net=None):
net.eval()
pred = net(data)
return pred
data = mge.tensor()
data.set_value(img.transpose(2, 0, 1)[np.newaxis])
pred = pred_fun(data, net=net)
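    # Optional horizontal-flip test-time augmentation: average the prediction
    # with the back-flipped prediction of the mirrored input.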
if is_flip:
img_flip = img[:, ::-1, :]
data.set_value(img_flip.transpose(2, 0, 1)[np.newaxis])
pred_flip = pred_fun(data, net=net)
pred = (pred + pred_flip[:, :, :, ::-1]) / 2.0
del pred_flip
pred = pred.numpy().squeeze().transpose(1, 2, 0)
del data
return pred
def evaluate(net, img, cfg):
ori_h, ori_w, _ = img.shape
pred_all = np.zeros((ori_h, ori_w, cfg.NUM_CLASSES))
for rate in cfg.VAL_MULTISCALE:
if cfg.VAL_SLIP:
new_h, new_w = int(ori_h*rate), int(ori_w*rate)
val_size = (cfg.VAL_HEIGHT, cfg.VAL_WIDTH)
else:
new_h, new_w = int(cfg.VAL_HEIGHT*rate), int(cfg.VAL_WIDTH*rate)
val_size = (new_h, new_w)
img_scale = cv2.resize(
img, (new_w, new_h), interpolation=cv2.INTER_LINEAR
)
        if (new_h <= val_size[0]) and (new_w <= val_size[1]):
img_pad, margin = pad_image_to_shape(
img_scale, val_size, cv2.BORDER_CONSTANT, value=0
)
pred = eval_single(net, img_pad, cfg.VAL_FLIP)
pred = pred[
margin[0] : (pred.shape[0] - margin[1]),
margin[2] : (pred.shape[1] - margin[3]),
:,
]
else:
stride_rate = 2 / 3
stride = [int(np.ceil(i * stride_rate)) for i in val_size]
img_pad, margin = pad_image_to_shape(
img_scale, val_size, cv2.BORDER_CONSTANT, value=0
)
pad_h, pad_w = img_pad.shape[:2]
r_grid, c_grid = [
int(np.ceil((ps - cs) / stride)) + 1
for ps, cs, stride in zip(img_pad.shape, val_size, stride)
]
pred_scale = np.zeros((pad_h, pad_w, cfg.NUM_CLASSES))
count_scale = np.zeros((pad_h, pad_w, cfg.NUM_CLASSES))
for grid_yidx in range(r_grid):
for grid_xidx in range(c_grid):
s_x = grid_xidx * stride[1]
s_y = grid_yidx * stride[0]
e_x = min(s_x + val_size[1], pad_w)
e_y = min(s_y + val_size[0], pad_h)
s_x = e_x - val_size[1]
s_y = e_y - val_size[0]
img_sub = img_pad[s_y:e_y, s_x:e_x, :]
tpred = eval_single(net, img_sub, cfg.VAL_FLIP)
count_scale[s_y:e_y, s_x:e_x, :] += 1
pred_scale[s_y:e_y, s_x:e_x, :] += tpred
#pred_scale = pred_scale / count_scale
pred = pred_scale[
margin[0] : (pred_scale.shape[0] - margin[1]),
margin[2] : (pred_scale.shape[1] - margin[3]),
:,
]
pred = cv2.resize(pred, (ori_w, ori_h), interpolation=cv2.INTER_LINEAR)
pred_all = pred_all + pred
#pred_all = pred_all / len(cfg.VAL_MULTISCALE)
result = np.argmax(pred_all, axis=2).astype(np.uint8)
return result
def save_results(result_list, save_dir, cfg):
if not os.path.exists(save_dir):
os.makedirs(save_dir)
for idx, sample in enumerate(result_list):
if cfg.DATASET == "Cityscapes":
name = sample["name"].split('/')[-1][:-4]
else:
name = sample["name"]
file_path = os.path.join(save_dir, "%s.png"%name)
cv2.imwrite(file_path, sample["pred"])
file_path = os.path.join(save_dir, "%s.gt.png"%name)
cv2.imwrite(file_path, sample["gt"])
# voc cityscapes metric
def compute_metric(result_list, cfg):
class_num = cfg.NUM_CLASSES
hist = np.zeros((class_num, class_num))
correct = 0
labeled = 0
count = 0
for idx in range(len(result_list)):
pred = result_list[idx]['pred']
gt = result_list[idx]['gt']
        assert pred.shape == gt.shape
        k = (gt >= 0) & (gt < class_num)
        labeled += np.sum(k)
        correct += np.sum(pred[k] == gt[k])
hist += np.bincount(class_num * gt[k].astype(int) + pred[k].astype(int), minlength=class_num**2).reshape(class_num, class_num)
count += 1
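    # Per-class IoU from the confusion matrix: diag / (row_sum + col_sum - diag).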
iu = np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist))
mean_IU = np.nanmean(iu)
mean_IU_no_back = np.nanmean(iu[1:])
freq = hist.sum(1) / hist.sum()
    freq_IU = (iu[freq > 0] * freq[freq > 0]).sum()
mean_pixel_acc = correct / labeled
if cfg.DATASET == "VOC2012":
class_names = ("background", ) + dataset.PascalVOC.class_names
elif cfg.DATASET == "Cityscapes":
class_names = dataset.Cityscapes.class_names
else:
raise ValueError("Unsupported dataset {}".format(cfg.DATASET))
n = iu.size
lines = []
for i in range(n):
if class_names is None:
cls = 'Class %d:' % (i+1)
else:
cls = '%d %s' % (i+1, class_names[i])
lines.append('%-8s\t%.3f%%' % (cls, iu[i] * 100))
lines.append('---------------------------- %-8s\t%.3f%%\t%-8s\t%.3f%%' % ('mean_IU', mean_IU * 100,'mean_pixel_ACC',mean_pixel_acc*100))
line = "\n".join(lines)
print(line)
return mean_IU
class EvalPascalVOC(dataset.PascalVOC):
def _trans_mask(self, mask):
label = np.ones(mask.shape[:2]) * 255
class_colors = self.class_colors.copy()
class_colors.insert(0, [0,0,0])
for i in range(len(class_colors)):
b, g, r = class_colors[i]
label[
(mask[:, :, 0] == b) & (mask[:, :, 1] == g) & (mask[:, :, 2] == r)
] = i
return label.astype(np.uint8)
def build_dataloader(dataset_dir, cfg):
if cfg.DATASET == "VOC2012":
val_dataset = EvalPascalVOC(
dataset_dir,
"val",
order=["image", "mask", "info"]
)
elif cfg.DATASET == "Cityscapes":
val_dataset = dataset.Cityscapes(
dataset_dir,
"val",
mode='gtFine',
order=["image", "mask", "info"]
)
else:
raise ValueError("Unsupported dataset {}".format(cfg.DATASET))
val_sampler = data.SequentialSampler(val_dataset, cfg.VAL_BATCHES)
val_dataloader = data.DataLoader(
val_dataset,
sampler=val_sampler,
transform=T.Normalize(
mean=cfg.IMG_MEAN, std=cfg.IMG_STD, order=["image", "mask"]
),
num_workers=cfg.DATA_WORKERS,
)
return val_dataloader, val_dataset.__len__()
if __name__ == "__main__":
main()
|
[
"megengine.data.transform.Normalize",
"megengine.data.SequentialSampler",
"megengine.jit.trace",
"megengine.tensor",
"megengine.data.dataset.Cityscapes",
"megengine.load"
] |
[((813, 838), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (836, 838), False, 'import argparse\n'), ((1203, 1239), 'official.vision.segmentation.utils.import_config_from_file', 'import_config_from_file', (['args.config'], {}), '(args.config)\n', (1226, 1239), False, 'from official.vision.segmentation.utils import import_config_from_file\n'), ((1373, 1413), 'official.vision.segmentation.deeplabv3plus.DeepLabV3Plus', 'DeepLabV3Plus', ([], {'class_num': 'cfg.NUM_CLASSES'}), '(class_num=cfg.NUM_CLASSES)\n', (1386, 1413), False, 'from official.vision.segmentation.deeplabv3plus import DeepLabV3Plus\n'), ((1431, 1456), 'megengine.load', 'mge.load', (['args.model_path'], {}), '(args.model_path)\n', (1439, 1456), True, 'import megengine as mge\n'), ((1618, 1635), 'tqdm.tqdm', 'tqdm', (['test_loader'], {}), '(test_loader)\n', (1622, 1635), False, 'from tqdm import tqdm\n'), ((2078, 2100), 'numpy.zeros', 'np.zeros', (['(4)', 'np.uint32'], {}), '(4, np.uint32)\n', (2086, 2100), True, 'import numpy as np\n'), ((2427, 2524), 'cv2.copyMakeBorder', 'cv2.copyMakeBorder', (['img', 'margin[0]', 'margin[1]', 'margin[2]', 'margin[3]', 'border_mode'], {'value': 'value'}), '(img, margin[0], margin[1], margin[2], margin[3],\n border_mode, value=value)\n', (2445, 2524), False, 'import cv2\n'), ((2601, 2638), 'megengine.jit.trace', 'jit.trace', ([], {'symbolic': '(True)', 'opt_level': '(2)'}), '(symbolic=True, opt_level=2)\n', (2610, 2638), True, 'import megengine.jit as jit\n'), ((2749, 2761), 'megengine.tensor', 'mge.tensor', ([], {}), '()\n', (2759, 2761), True, 'import megengine as mge\n'), ((3248, 3289), 'numpy.zeros', 'np.zeros', (['(ori_h, ori_w, cfg.NUM_CLASSES)'], {}), '((ori_h, ori_w, cfg.NUM_CLASSES))\n', (3256, 3289), True, 'import numpy as np\n'), ((6466, 6498), 'numpy.zeros', 'np.zeros', (['(class_num, class_num)'], {}), '((class_num, class_num))\n', (6474, 6498), True, 'import numpy as np\n'), ((7061, 7075), 'numpy.nanmean', 'np.nanmean', (['iu'], {}), '(iu)\n', (7071, 7075), True, 'import numpy as np\n'), ((7098, 7116), 'numpy.nanmean', 'np.nanmean', (['iu[1:]'], {}), '(iu[1:])\n', (7108, 7116), True, 'import numpy as np\n'), ((8939, 8991), 'megengine.data.SequentialSampler', 'data.SequentialSampler', (['val_dataset', 'cfg.VAL_BATCHES'], {}), '(val_dataset, cfg.VAL_BATCHES)\n', (8961, 8991), True, 'import megengine.data as data\n'), ((3615, 3678), 'cv2.resize', 'cv2.resize', (['img', '(new_w, new_h)'], {'interpolation': 'cv2.INTER_LINEAR'}), '(img, (new_w, new_h), interpolation=cv2.INTER_LINEAR)\n', (3625, 3678), False, 'import cv2\n'), ((5617, 5681), 'cv2.resize', 'cv2.resize', (['pred', '(ori_w, ori_h)'], {'interpolation': 'cv2.INTER_LINEAR'}), '(pred, (ori_w, ori_h), interpolation=cv2.INTER_LINEAR)\n', (5627, 5681), False, 'import cv2\n'), ((5904, 5928), 'os.path.exists', 'os.path.exists', (['save_dir'], {}), '(save_dir)\n', (5918, 5928), False, 'import os\n'), ((5938, 5959), 'os.makedirs', 'os.makedirs', (['save_dir'], {}), '(save_dir)\n', (5949, 5959), False, 'import os\n'), ((6169, 6208), 'os.path.join', 'os.path.join', (['save_dir', "('%s.png' % name)"], {}), "(save_dir, '%s.png' % name)\n", (6181, 6208), False, 'import os\n'), ((6215, 6253), 'cv2.imwrite', 'cv2.imwrite', (['file_path', "sample['pred']"], {}), "(file_path, sample['pred'])\n", (6226, 6253), False, 'import cv2\n'), ((6274, 6316), 'os.path.join', 'os.path.join', (['save_dir', "('%s.gt.png' % name)"], {}), "(save_dir, '%s.gt.png' % name)\n", (6286, 6316), False, 'import os\n'), ((6323, 6359), 
'cv2.imwrite', 'cv2.imwrite', (['file_path', "sample['gt']"], {}), "(file_path, sample['gt'])\n", (6334, 6359), False, 'import cv2\n'), ((6756, 6765), 'numpy.sum', 'np.sum', (['k'], {}), '(k)\n', (6762, 6765), True, 'import numpy as np\n'), ((6785, 6809), 'numpy.sum', 'np.sum', (['(pred[k] == gt[k])'], {}), '(pred[k] == gt[k])\n', (6791, 6809), True, 'import numpy as np\n'), ((6985, 6998), 'numpy.diag', 'np.diag', (['hist'], {}), '(hist)\n', (6992, 6998), True, 'import numpy as np\n'), ((4616, 4657), 'numpy.zeros', 'np.zeros', (['(pad_h, pad_w, cfg.NUM_CLASSES)'], {}), '((pad_h, pad_w, cfg.NUM_CLASSES))\n', (4624, 4657), True, 'import numpy as np\n'), ((4684, 4725), 'numpy.zeros', 'np.zeros', (['(pad_h, pad_w, cfg.NUM_CLASSES)'], {}), '((pad_h, pad_w, cfg.NUM_CLASSES))\n', (4692, 4725), True, 'import numpy as np\n'), ((5782, 5809), 'numpy.argmax', 'np.argmax', (['pred_all'], {'axis': '(2)'}), '(pred_all, axis=2)\n', (5791, 5809), True, 'import numpy as np\n'), ((7030, 7043), 'numpy.diag', 'np.diag', (['hist'], {}), '(hist)\n', (7037, 7043), True, 'import numpy as np\n'), ((8068, 8091), 'numpy.ones', 'np.ones', (['mask.shape[:2]'], {}), '(mask.shape[:2])\n', (8075, 8091), True, 'import numpy as np\n'), ((8694, 8784), 'megengine.data.dataset.Cityscapes', 'dataset.Cityscapes', (['dataset_dir', '"""val"""'], {'mode': '"""gtFine"""', 'order': "['image', 'mask', 'info']"}), "(dataset_dir, 'val', mode='gtFine', order=['image',\n 'mask', 'info'])\n", (8712, 8784), True, 'import megengine.data.dataset as dataset\n'), ((9098, 9170), 'megengine.data.transform.Normalize', 'T.Normalize', ([], {'mean': 'cfg.IMG_MEAN', 'std': 'cfg.IMG_STD', 'order': "['image', 'mask']"}), "(mean=cfg.IMG_MEAN, std=cfg.IMG_STD, order=['image', 'mask'])\n", (9109, 9170), True, 'import megengine.data.transform as T\n'), ((4197, 4221), 'numpy.ceil', 'np.ceil', (['(i * stride_rate)'], {}), '(i * stride_rate)\n', (4204, 4221), True, 'import numpy as np\n'), ((4468, 4495), 'numpy.ceil', 'np.ceil', (['((ps - cs) / stride)'], {}), '((ps - cs) / stride)\n', (4475, 4495), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
"""
Plot logs of variables saved in a text file by sfepy.base.log.Log class.
The plot should be almost the same as the plot that would be generated by the
Log directly.
"""
from __future__ import absolute_import
import sys
sys.path.append('.')
from argparse import ArgumentParser, Action, RawDescriptionHelpFormatter
import matplotlib.pyplot as plt
from sfepy.base.log import read_log, plot_log
class ParseRc(Action):
def __call__(self, parser, namespace, values, option_string=None):
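        # The "key:val,..." string is wrapped in braces and evaluated with
        # eval(), so keys and values must be valid Python expressions.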
pars = eval('{' + values + '}')
setattr(namespace, self.dest, pars)
helps = {
'groups' :
'list of log data groups subplots (from 0) to plot - all groups are'
' plotted if not given',
'output_filename' :
'save the figure using the given file name',
'rc' : 'matplotlib resources',
'no_legends' :
'do not show legends in the log plots',
'nbins' :
'the numbers of bins in x, y axes for all groups [default: %(default)s]',
'swap_axes' :
'swap the axes of the plots',
'no_show' :
'do not show the figure',
}
def main():
parser = ArgumentParser(description=__doc__,
formatter_class=RawDescriptionHelpFormatter)
parser.add_argument('-g', '--groups', metavar='int[,int,...]',
action='store', dest='groups',
default=None, help=helps['groups'])
parser.add_argument('-o', '--output', metavar='filename',
action='store', dest='output_filename',
default=None, help=helps['output_filename'])
parser.add_argument('--rc', type=str, metavar='key:val,...',
action=ParseRc, dest='rc',
default={}, help=helps['rc'])
parser.add_argument('--no-legends',
action='store_false', dest='show_legends',
default=True, help=helps['no_legends'])
parser.add_argument('--nbins', metavar='nx1,ny1,...',
action='store', dest='nbins',
default=None, help=helps['nbins'])
parser.add_argument('--swap-axes',
action='store_true', dest='swap_axes',
default=False, help=helps['swap_axes'])
parser.add_argument('-n', '--no-show',
action='store_true', dest='no_show',
default=False, help=helps['no_show'])
parser.add_argument('filename')
options = parser.parse_args()
filename = options.filename
if options.groups is not None:
options.groups = [int(ii) for ii in options.groups.split(',')]
if options.nbins is not None:
aux = [int(ii) if ii != 'None' else None
for ii in options.nbins.split(',')]
xnbins, ynbins = aux[::2], aux[1::2]
else:
xnbins = ynbins = None
log, info = read_log(filename)
plt.rcParams.update(options.rc)
plot_log(None, log, info, groups=options.groups,
xnbins=xnbins, ynbins=ynbins,
show_legends=options.show_legends, swap_axes=options.swap_axes)
if options.output_filename:
plt.savefig(options.output_filename)
if not options.no_show:
plt.show()
if __name__ == '__main__':
main()
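# Example invocation (sketch; the file names are hypothetical):
#   python plot_logs.py --groups 0,1 --rc "'font.size':10" -o fig.png log.txt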
|
[
"sfepy.base.log.plot_log",
"sfepy.base.log.read_log"
] |
[((246, 266), 'sys.path.append', 'sys.path.append', (['"""."""'], {}), "('.')\n", (261, 266), False, 'import sys\n'), ((1116, 1201), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '__doc__', 'formatter_class': 'RawDescriptionHelpFormatter'}), '(description=__doc__, formatter_class=RawDescriptionHelpFormatter\n )\n', (1130, 1201), False, 'from argparse import ArgumentParser, Action, RawDescriptionHelpFormatter\n'), ((2895, 2913), 'sfepy.base.log.read_log', 'read_log', (['filename'], {}), '(filename)\n', (2903, 2913), False, 'from sfepy.base.log import read_log, plot_log\n'), ((2919, 2950), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (['options.rc'], {}), '(options.rc)\n', (2938, 2950), True, 'import matplotlib.pyplot as plt\n'), ((2956, 3103), 'sfepy.base.log.plot_log', 'plot_log', (['None', 'log', 'info'], {'groups': 'options.groups', 'xnbins': 'xnbins', 'ynbins': 'ynbins', 'show_legends': 'options.show_legends', 'swap_axes': 'options.swap_axes'}), '(None, log, info, groups=options.groups, xnbins=xnbins, ynbins=\n ynbins, show_legends=options.show_legends, swap_axes=options.swap_axes)\n', (2964, 3103), False, 'from sfepy.base.log import read_log, plot_log\n'), ((3166, 3202), 'matplotlib.pyplot.savefig', 'plt.savefig', (['options.output_filename'], {}), '(options.output_filename)\n', (3177, 3202), True, 'import matplotlib.pyplot as plt\n'), ((3240, 3250), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3248, 3250), True, 'import matplotlib.pyplot as plt\n')]
|
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import copy
import numpy as np
import megengine.functional as F
from megengine.core import Function, tensor
from megengine.jit import trace
from megengine.test import assertTensorClose
def test_a_plus_b():
data_shape = (1, 9, 2, 6)
av = np.random.random(data_shape).astype(np.float32)
bv = np.random.random(data_shape).astype(np.float32)
a = tensor(av)
b = tensor(bv)
class MulFunc(Function):
def forward(self, a, b):
return a * b
def backward(self, grad_o):
return (grad_o * b * 2, grad_o * a * 3)
c = MulFunc()(a, b).sum()
assertTensorClose(c.numpy(), (av * bv).sum())
assertTensorClose(F.grad(c, a, use_virtual_grad=False).numpy(), bv * 2)
assertTensorClose(F.grad(c, b, use_virtual_grad=False).numpy(), av * 3)
def test_skip_invalid_grad():
data_shape = (1, 9, 2, 6)
av = np.random.random(data_shape).astype(np.float32)
bv = np.random.random(data_shape).astype(np.float32)
a = tensor(av)
b = tensor(bv)
cookie = tensor(np.random.random(data_shape).astype(np.float32))
class EqWithFakeGrad(Function):
def forward(self, a, b):
return a == b
def backward(self, grad_o):
_ = grad_o
return cookie, cookie
c = EqWithFakeGrad()(a, b).sum()
assertTensorClose(c.numpy(), (av == bv).sum().astype(np.float32))
assertTensorClose(F.grad(c, a, use_virtual_grad=False).numpy(), cookie)
assertTensorClose(F.grad(c, b, use_virtual_grad=False).numpy(), cookie)
def test_ste():
class STE(Function):
def forward(self, x):
maxv, minv = x.max(), x.min()
scale = F.maximum(maxv, -minv) / 127
return F.round(x / scale) * scale
def backward(self, grad_y):
return grad_y
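    # Straight-through estimator: the forward pass fake-quantizes x onto an
    # int8-like grid, while the backward pass returns grad_y unchanged, so the
    # gradient of (q * 2.0).sum() w.r.t. a is 2 everywhere (checked below).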
data_shape = (1, 9, 2, 6)
av = np.random.random(data_shape).astype(np.float32)
a = tensor(av)
q = STE()(a)
q_2 = (q * 2.0).sum()
assertTensorClose(
F.grad(q_2, a, use_virtual_grad=False).numpy(),
np.broadcast_to(np.array([2.0], dtype=np.float32), data_shape),
)
def test_deepcopy():
class Sigmoid(Function):
def __init__(self, param):
super().__init__()
self.param = param
def forward(self, x):
y = 1 / (1 + F.exp(-x))
self.save_for_backward(y)
return y
def backward(self, grad_y):
(y,) = self.saved_tensors
return grad_y * y * (1 - y)
origin = Sigmoid(0)
new = copy.deepcopy(Sigmoid(0))
assert new.param == origin.param
def test_save_context():
class Sigmoid(Function):
def forward(self, x):
y = 1 / (1 + F.exp(-x))
self.save_for_backward(y)
return y
def backward(self, grad_y):
(y,) = self.saved_tensors
return grad_y * y * (1 - y)
def run_saved_context(a, net=None):
return net(a)
def run(use_trace, symbolic):
a = tensor(np.array([1926.0817], dtype=np.float32))
net = Sigmoid()
func_run = run_saved_context
if use_trace:
func_run = trace(run_saved_context, symbolic=symbolic)
s = func_run(a, net=net)
s2 = F.sigmoid(a)
assertTensorClose(s.numpy(), s2.numpy())
assertTensorClose(
F.grad(s, a, use_virtual_grad=False).numpy(),
F.grad(s2, a, use_virtual_grad=False).numpy(),
)
run(False, False)
run(True, False)
run(True, True)
def test_none_in_out_grad():
class Test(Function):
def forward(self, a, b):
return a, b
def backward(self, grad_a, grad_b):
assert grad_b is None
return (grad_a, 0)
a = tensor(np.array([1.0], dtype=np.float32))
b = tensor(np.array([2.0], dtype=np.float32))
aa, bb = Test()(a, b)
assertTensorClose(
F.grad(aa, a, use_virtual_grad=False).numpy(), np.array([1.0], dtype=np.float32)
)
assertTensorClose(
F.grad(aa, b, use_virtual_grad=False).numpy(), np.array([0.0], dtype=np.float32)
)
def test_zero_grad():
class StopGradient(Function):
def forward(self, a):
return a
def backward(self, *_):
return None
a = tensor(np.array([1.0], dtype=np.float32))
b = a * 3.0
c = a * 4.0
loss = StopGradient()(b) + c
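    # StopGradient blocks the a*3 branch, so only the a*4 branch contributes
    # to d(loss)/da.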
assertTensorClose(
F.grad(loss, a, use_virtual_grad=False).numpy(),
np.array([4.0], dtype=np.float32),
)
|
[
"megengine.functional.sigmoid",
"megengine.jit.trace",
"megengine.functional.exp",
"megengine.core.tensor",
"megengine.functional.maximum",
"megengine.functional.grad",
"megengine.functional.round"
] |
[((715, 725), 'megengine.core.tensor', 'tensor', (['av'], {}), '(av)\n', (721, 725), False, 'from megengine.core import Function, tensor\n'), ((734, 744), 'megengine.core.tensor', 'tensor', (['bv'], {}), '(bv)\n', (740, 744), False, 'from megengine.core import Function, tensor\n'), ((1339, 1349), 'megengine.core.tensor', 'tensor', (['av'], {}), '(av)\n', (1345, 1349), False, 'from megengine.core import Function, tensor\n'), ((1358, 1368), 'megengine.core.tensor', 'tensor', (['bv'], {}), '(bv)\n', (1364, 1368), False, 'from megengine.core import Function, tensor\n'), ((2257, 2267), 'megengine.core.tensor', 'tensor', (['av'], {}), '(av)\n', (2263, 2267), False, 'from megengine.core import Function, tensor\n'), ((3606, 3618), 'megengine.functional.sigmoid', 'F.sigmoid', (['a'], {}), '(a)\n', (3615, 3618), True, 'import megengine.functional as F\n'), ((4126, 4159), 'numpy.array', 'np.array', (['[1.0]'], {'dtype': 'np.float32'}), '([1.0], dtype=np.float32)\n', (4134, 4159), True, 'import numpy as np\n'), ((4176, 4209), 'numpy.array', 'np.array', (['[2.0]'], {'dtype': 'np.float32'}), '([2.0], dtype=np.float32)\n', (4184, 4209), True, 'import numpy as np\n'), ((4315, 4348), 'numpy.array', 'np.array', (['[1.0]'], {'dtype': 'np.float32'}), '([1.0], dtype=np.float32)\n', (4323, 4348), True, 'import numpy as np\n'), ((4433, 4466), 'numpy.array', 'np.array', (['[0.0]'], {'dtype': 'np.float32'}), '([0.0], dtype=np.float32)\n', (4441, 4466), True, 'import numpy as np\n'), ((4655, 4688), 'numpy.array', 'np.array', (['[1.0]'], {'dtype': 'np.float32'}), '([1.0], dtype=np.float32)\n', (4663, 4688), True, 'import numpy as np\n'), ((4843, 4876), 'numpy.array', 'np.array', (['[4.0]'], {'dtype': 'np.float32'}), '([4.0], dtype=np.float32)\n', (4851, 4876), True, 'import numpy as np\n'), ((602, 630), 'numpy.random.random', 'np.random.random', (['data_shape'], {}), '(data_shape)\n', (618, 630), True, 'import numpy as np\n'), ((659, 687), 'numpy.random.random', 'np.random.random', (['data_shape'], {}), '(data_shape)\n', (675, 687), True, 'import numpy as np\n'), ((1226, 1254), 'numpy.random.random', 'np.random.random', (['data_shape'], {}), '(data_shape)\n', (1242, 1254), True, 'import numpy as np\n'), ((1283, 1311), 'numpy.random.random', 'np.random.random', (['data_shape'], {}), '(data_shape)\n', (1299, 1311), True, 'import numpy as np\n'), ((2201, 2229), 'numpy.random.random', 'np.random.random', (['data_shape'], {}), '(data_shape)\n', (2217, 2229), True, 'import numpy as np\n'), ((2414, 2447), 'numpy.array', 'np.array', (['[2.0]'], {'dtype': 'np.float32'}), '([2.0], dtype=np.float32)\n', (2422, 2447), True, 'import numpy as np\n'), ((3369, 3408), 'numpy.array', 'np.array', (['[1926.0817]'], {'dtype': 'np.float32'}), '([1926.0817], dtype=np.float32)\n', (3377, 3408), True, 'import numpy as np\n'), ((3516, 3559), 'megengine.jit.trace', 'trace', (['run_saved_context'], {'symbolic': 'symbolic'}), '(run_saved_context, symbolic=symbolic)\n', (3521, 3559), False, 'from megengine.jit import trace\n'), ((1025, 1061), 'megengine.functional.grad', 'F.grad', (['c', 'a'], {'use_virtual_grad': '(False)'}), '(c, a, use_virtual_grad=False)\n', (1031, 1061), True, 'import megengine.functional as F\n'), ((1101, 1137), 'megengine.functional.grad', 'F.grad', (['c', 'b'], {'use_virtual_grad': '(False)'}), '(c, b, use_virtual_grad=False)\n', (1107, 1137), True, 'import megengine.functional as F\n'), ((1389, 1417), 'numpy.random.random', 'np.random.random', (['data_shape'], {}), '(data_shape)\n', (1405, 1417), True, 'import numpy as 
np\n'), ((1758, 1794), 'megengine.functional.grad', 'F.grad', (['c', 'a'], {'use_virtual_grad': '(False)'}), '(c, a, use_virtual_grad=False)\n', (1764, 1794), True, 'import megengine.functional as F\n'), ((1834, 1870), 'megengine.functional.grad', 'F.grad', (['c', 'b'], {'use_virtual_grad': '(False)'}), '(c, b, use_virtual_grad=False)\n', (1840, 1870), True, 'import megengine.functional as F\n'), ((2023, 2045), 'megengine.functional.maximum', 'F.maximum', (['maxv', '(-minv)'], {}), '(maxv, -minv)\n', (2032, 2045), True, 'import megengine.functional as F\n'), ((2071, 2089), 'megengine.functional.round', 'F.round', (['(x / scale)'], {}), '(x / scale)\n', (2078, 2089), True, 'import megengine.functional as F\n'), ((2342, 2380), 'megengine.functional.grad', 'F.grad', (['q_2', 'a'], {'use_virtual_grad': '(False)'}), '(q_2, a, use_virtual_grad=False)\n', (2348, 2380), True, 'import megengine.functional as F\n'), ((4268, 4305), 'megengine.functional.grad', 'F.grad', (['aa', 'a'], {'use_virtual_grad': '(False)'}), '(aa, a, use_virtual_grad=False)\n', (4274, 4305), True, 'import megengine.functional as F\n'), ((4386, 4423), 'megengine.functional.grad', 'F.grad', (['aa', 'b'], {'use_virtual_grad': '(False)'}), '(aa, b, use_virtual_grad=False)\n', (4392, 4423), True, 'import megengine.functional as F\n'), ((4786, 4825), 'megengine.functional.grad', 'F.grad', (['loss', 'a'], {'use_virtual_grad': '(False)'}), '(loss, a, use_virtual_grad=False)\n', (4792, 4825), True, 'import megengine.functional as F\n'), ((2673, 2682), 'megengine.functional.exp', 'F.exp', (['(-x)'], {}), '(-x)\n', (2678, 2682), True, 'import megengine.functional as F\n'), ((3067, 3076), 'megengine.functional.exp', 'F.exp', (['(-x)'], {}), '(-x)\n', (3072, 3076), True, 'import megengine.functional as F\n'), ((3707, 3743), 'megengine.functional.grad', 'F.grad', (['s', 'a'], {'use_virtual_grad': '(False)'}), '(s, a, use_virtual_grad=False)\n', (3713, 3743), True, 'import megengine.functional as F\n'), ((3765, 3802), 'megengine.functional.grad', 'F.grad', (['s2', 'a'], {'use_virtual_grad': '(False)'}), '(s2, a, use_virtual_grad=False)\n', (3771, 3802), True, 'import megengine.functional as F\n')]
|
from typing import Optional
from sqlmodel import SQLModel, Field, create_engine, Session
engine = create_engine(url="sqlite:///users.db", echo=True)
class User(SQLModel, table=True):
    id: Optional[int] = Field(None, primary_key=True)
    username: str
    password: str
def get_session():
with Session(engine) as session:
yield session
def init_db():
SQLModel.metadata.create_all(engine)
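# Usage sketch (assumption: init_db() runs once at startup and get_session is
# used as a FastAPI-style dependency):
#   init_db()
#   with Session(engine) as session:
#       session.add(User(username="alice", password="secret"))
#       session.commit()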
|
[
"sqlmodel.SQLModel.metadata.create_all",
"sqlmodel.Field",
"sqlmodel.create_engine",
"sqlmodel.Session"
] |
[((99, 149), 'sqlmodel.create_engine', 'create_engine', ([], {'url': '"""sqlite:///users.db"""', 'echo': '(True)'}), "(url='sqlite:///users.db', echo=True)\n", (112, 149), False, 'from sqlmodel import SQLModel, Field, create_engine, Session\n'), ((208, 237), 'sqlmodel.Field', 'Field', (['None'], {'primary_key': '(True)'}), '(None, primary_key=True)\n', (213, 237), False, 'from sqlmodel import SQLModel, Field, create_engine, Session\n'), ((372, 408), 'sqlmodel.SQLModel.metadata.create_all', 'SQLModel.metadata.create_all', (['engine'], {}), '(engine)\n', (400, 408), False, 'from sqlmodel import SQLModel, Field, create_engine, Session\n'), ((302, 317), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (309, 317), False, 'from sqlmodel import SQLModel, Field, create_engine, Session\n')]
|
import datetime
from sqlmodel import Field, Relationship, SQLModel
class User(SQLModel, table=True):
__tablename__ = "users"
id: int = Field(primary_key=True)
create_at: datetime.datetime = Field(default_factory=lambda: datetime.datetime.utcnow())
user_name: str
password: str
alias: str
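# Note (sketch): the default_factory above calls datetime.datetime.utcnow()
# per inserted row, producing a naive UTC timestamp.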
|
[
"sqlmodel.Field"
] |
[((146, 169), 'sqlmodel.Field', 'Field', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (151, 169), False, 'from sqlmodel import Field, Relationship, SQLModel\n'), ((235, 261), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (259, 261), False, 'import datetime\n')]
|
from __future__ import absolute_import
import numpy as nm
from sfepy.base.base import output, OneTypeList, Struct
from sfepy.discrete.fem.mesh import Mesh
from sfepy.discrete.fem.meshio import MeshIO
from sfepy.solvers.ts import TimeStepper
from sfepy.base.ioutils import get_trunk, write_dict_hdf5
import six
from six.moves import range
def _linearize(out, fields, linearization):
new = {}
for key, val in six.iteritems(out):
field = fields[val.field_name]
new.update(field.create_output(val.data, var_name=key,
dof_names=val.dofs, key=key,
linearization=linearization))
return new
def dump_to_vtk(filename, output_filename_trunk=None, step0=0, steps=None,
fields=None, linearization=None):
"""Dump a multi-time-step results file into a sequence of VTK files."""
def _save_step(suffix, out, mesh):
if linearization is not None:
output('linearizing...')
out = _linearize(out, fields, linearization)
output('...done')
for key, val in six.iteritems(out):
lmesh = val.get('mesh', mesh)
lmesh.write(output_filename_trunk + '_' + key + suffix,
io='auto', out={key : val})
if hasattr(val, 'levels'):
output('max. refinement per group:', val.levels)
else:
mesh.write(output_filename_trunk + suffix, io='auto', out=out)
output('dumping to VTK...')
io = MeshIO.any_from_filename(filename)
mesh = Mesh.from_file(filename, io=io)
if output_filename_trunk is None:
output_filename_trunk = get_trunk(filename)
try:
ts = TimeStepper(*io.read_time_stepper())
all_steps, times, nts, dts = extract_times(filename)
except ValueError:
output('no time stepping info found, assuming single step')
out = io.read_data(0)
if out is not None:
_save_step('.vtk', out, mesh)
ret = None
else:
ts.times = times
ts.n_step = times.shape[0]
if steps is None:
ii0 = nm.searchsorted(all_steps, step0)
iterator = ((all_steps[ii], times[ii])
for ii in range(ii0, len(times)))
else:
iterator = [(step, ts.times[step]) for step in steps]
max_step = all_steps.max()
for step, time in iterator:
output(ts.format % (step, max_step))
out = io.read_data(step)
if out is None: break
_save_step('.' + ts.suffix % step + '.vtk', out, mesh)
ret = ts.suffix
output('...done')
return ret
def extract_times(filename):
"""
Read true time step data from individual time steps.
Returns
-------
steps : array
The time steps.
times : array
The times of the time steps.
nts : array
The normalized times of the time steps, in [0, 1].
dts : array
The true time deltas.
"""
io = MeshIO.any_from_filename(filename)
steps, times, nts = io.read_times()
dts = nm.ediff1d(times, to_end=0)
return steps, times, nts, dts
def extract_time_history(filename, extract, verbose=True):
"""Extract time history of a variable from a multi-time-step results file.
Parameters
----------
filename : str
The name of file to extract from.
extract : str
The description of what to extract in a string of comma-separated
description items. A description item consists of: name of the variable
to extract, mode ('e' for elements, 'n' for nodes), ids of the nodes or
elements (given by the mode). Example: 'u n 10 15, p e 0' means
variable 'u' in nodes 10, 15 and variable 'p' in element 0.
verbose : bool
Verbosity control.
Returns
-------
ths : dict
The time histories in a dict with variable names as keys. If a nodal
variable is requested in elements, its value is a dict of histories in
the element nodes.
ts : TimeStepper instance
The time stepping information.
"""
output('extracting selected data...', verbose=verbose)
output('selection:', extract, verbose=verbose)
##
# Parse extractions.
pes = OneTypeList(Struct)
for chunk in extract.split(','):
aux = chunk.strip().split()
pes.append(Struct(var=aux[0],
mode=aux[1],
                          indx=list(map(int, aux[2:]))))
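    # Each comma-separated item becomes Struct(var, mode, indx), e.g.
    # 'u n 10 15' -> var='u', mode='n', indx=[10, 15].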
##
# Verify array limits.
mesh = Mesh.from_file(filename)
for pe in pes:
if pe.mode == 'n':
for ii in pe.indx:
if (ii < 0) or (ii >= mesh.n_nod):
raise ValueError('node index 0 <= %d < %d!'
% (ii, mesh.n_nod))
if pe.mode == 'e':
for ii, ie in enumerate(pe.indx[:]):
if (ie < 0) or (ie >= mesh.n_el):
raise ValueError('element index 0 <= %d < %d!'
% (ie, mesh.n_el))
pe.indx[ii] = ie
##
# Extract data.
io = MeshIO.any_from_filename(filename)
ths = {}
for pe in pes:
mode, nname = io.read_data_header(pe.var)
output(mode, nname, verbose=verbose)
if ((pe.mode == 'n' and mode == 'vertex') or
(pe.mode == 'e' and mode == 'cell')):
th = io.read_time_history(nname, pe.indx)
elif pe.mode == 'e' and mode == 'vertex':
conn = mesh.conns[0]
th = {}
for iel in pe.indx:
ips = conn[iel]
th[iel] = io.read_time_history(nname, ips)
else:
raise ValueError('cannot extract cell data %s in nodes!' % pe.var)
ths[pe.var] = th
output('...done', verbose=verbose)
ts = TimeStepper(*io.read_time_stepper())
return ths, ts
def average_vertex_var_in_cells(ths_in):
"""Average histories in the element nodes for each nodal variable
originally requested in elements."""
ths = dict.fromkeys(list(ths_in.keys()))
for var, th in six.iteritems(ths_in):
aux = dict.fromkeys(list(th.keys()))
for ir, data in six.iteritems(th):
if isinstance(data, dict):
for ic, ndata in six.iteritems(data):
if aux[ir] is None:
aux[ir] = ndata
else:
aux[ir] += ndata
aux[ir] /= float(len(data))
else:
aux[ir] = data
ths[var] = aux
return ths
def save_time_history(ths, ts, filename_out):
"""Save time history and time-stepping information in a HDF5 file."""
ths.update({'times' : ts.times, 'dt' : ts.dt})
write_dict_hdf5(filename_out, ths)
def guess_time_units(times):
"""
Given a vector of times in seconds, return suitable time units and
new vector of times suitable for plotting.
Parameters
----------
times : array
The vector of times in seconds.
Returns
-------
new_times : array
The vector of times in `units`.
units : str
The time units.
"""
times = nm.asarray(times)
if (times[-1] / 60.0 / 60.0) > 10.0:
units = 'hours'
new_times = times / 60.0 / 60.0
elif (times[-1] / 60.0) > 10.0:
units = 'min.'
new_times = times / 60.0
else:
units = 's'
new_times = times
return new_times, units
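# A small worked example: for times ending at 36000 s, 36000 / 3600 = 10.0
# does not pass the `> 10` test for hours, while 36000 / 60 = 600 does for
# minutes, so guess_time_units([0.0, 1800.0, 36000.0]) returns
# (array([0., 30., 600.]), 'min.').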
|
[
"sfepy.base.ioutils.write_dict_hdf5",
"sfepy.base.base.OneTypeList",
"sfepy.discrete.fem.meshio.MeshIO.any_from_filename",
"sfepy.base.base.output",
"sfepy.base.ioutils.get_trunk",
"sfepy.discrete.fem.mesh.Mesh.from_file"
] |
[((417, 435), 'six.iteritems', 'six.iteritems', (['out'], {}), '(out)\n', (430, 435), False, 'import six\n'), ((1525, 1552), 'sfepy.base.base.output', 'output', (['"""dumping to VTK..."""'], {}), "('dumping to VTK...')\n", (1531, 1552), False, 'from sfepy.base.base import output, OneTypeList, Struct\n'), ((1563, 1597), 'sfepy.discrete.fem.meshio.MeshIO.any_from_filename', 'MeshIO.any_from_filename', (['filename'], {}), '(filename)\n', (1587, 1597), False, 'from sfepy.discrete.fem.meshio import MeshIO\n'), ((1609, 1640), 'sfepy.discrete.fem.mesh.Mesh.from_file', 'Mesh.from_file', (['filename'], {'io': 'io'}), '(filename, io=io)\n', (1623, 1640), False, 'from sfepy.discrete.fem.mesh import Mesh\n'), ((2696, 2713), 'sfepy.base.base.output', 'output', (['"""...done"""'], {}), "('...done')\n", (2702, 2713), False, 'from sfepy.base.base import output, OneTypeList, Struct\n'), ((3084, 3118), 'sfepy.discrete.fem.meshio.MeshIO.any_from_filename', 'MeshIO.any_from_filename', (['filename'], {}), '(filename)\n', (3108, 3118), False, 'from sfepy.discrete.fem.meshio import MeshIO\n'), ((3170, 3197), 'numpy.ediff1d', 'nm.ediff1d', (['times'], {'to_end': '(0)'}), '(times, to_end=0)\n', (3180, 3197), True, 'import numpy as nm\n'), ((4206, 4260), 'sfepy.base.base.output', 'output', (['"""extracting selected data..."""'], {'verbose': 'verbose'}), "('extracting selected data...', verbose=verbose)\n", (4212, 4260), False, 'from sfepy.base.base import output, OneTypeList, Struct\n'), ((4266, 4312), 'sfepy.base.base.output', 'output', (['"""selection:"""', 'extract'], {'verbose': 'verbose'}), "('selection:', extract, verbose=verbose)\n", (4272, 4312), False, 'from sfepy.base.base import output, OneTypeList, Struct\n'), ((4356, 4375), 'sfepy.base.base.OneTypeList', 'OneTypeList', (['Struct'], {}), '(Struct)\n', (4367, 4375), False, 'from sfepy.base.base import output, OneTypeList, Struct\n'), ((4623, 4647), 'sfepy.discrete.fem.mesh.Mesh.from_file', 'Mesh.from_file', (['filename'], {}), '(filename)\n', (4637, 4647), False, 'from sfepy.discrete.fem.mesh import Mesh\n'), ((5217, 5251), 'sfepy.discrete.fem.meshio.MeshIO.any_from_filename', 'MeshIO.any_from_filename', (['filename'], {}), '(filename)\n', (5241, 5251), False, 'from sfepy.discrete.fem.meshio import MeshIO\n'), ((5888, 5922), 'sfepy.base.base.output', 'output', (['"""...done"""'], {'verbose': 'verbose'}), "('...done', verbose=verbose)\n", (5894, 5922), False, 'from sfepy.base.base import output, OneTypeList, Struct\n'), ((6211, 6232), 'six.iteritems', 'six.iteritems', (['ths_in'], {}), '(ths_in)\n', (6224, 6232), False, 'import six\n'), ((6870, 6904), 'sfepy.base.ioutils.write_dict_hdf5', 'write_dict_hdf5', (['filename_out', 'ths'], {}), '(filename_out, ths)\n', (6885, 6904), False, 'from sfepy.base.ioutils import get_trunk, write_dict_hdf5\n'), ((7297, 7314), 'numpy.asarray', 'nm.asarray', (['times'], {}), '(times)\n', (7307, 7314), True, 'import numpy as nm\n'), ((1712, 1731), 'sfepy.base.ioutils.get_trunk', 'get_trunk', (['filename'], {}), '(filename)\n', (1721, 1731), False, 'from sfepy.base.ioutils import get_trunk, write_dict_hdf5\n'), ((5342, 5378), 'sfepy.base.base.output', 'output', (['mode', 'nname'], {'verbose': 'verbose'}), '(mode, nname, verbose=verbose)\n', (5348, 5378), False, 'from sfepy.base.base import output, OneTypeList, Struct\n'), ((6303, 6320), 'six.iteritems', 'six.iteritems', (['th'], {}), '(th)\n', (6316, 6320), False, 'import six\n'), ((984, 1008), 'sfepy.base.base.output', 'output', (['"""linearizing..."""'], {}), "('linearizing...')\n", (990, 1008), False, 'from sfepy.base.base import output, OneTypeList, Struct\n'), ((1078, 1095), 'sfepy.base.base.output', 'output', (['"""...done"""'], {}), "('...done')\n", (1084, 1095), False, 'from sfepy.base.base import output, OneTypeList, Struct\n'), ((1124, 1142), 'six.iteritems', 'six.iteritems', (['out'], {}), '(out)\n', (1137, 1142), False, 'import six\n'), ((1885, 1944), 'sfepy.base.base.output', 'output', (['"""no time stepping info found, assuming single step"""'], {}), "('no time stepping info found, assuming single step')\n", (1891, 1944), False, 'from sfepy.base.base import output, OneTypeList, Struct\n'), ((2182, 2215), 'numpy.searchsorted', 'nm.searchsorted', (['all_steps', 'step0'], {}), '(all_steps, step0)\n', (2197, 2215), True, 'import numpy as nm\n'), ((2490, 2526), 'sfepy.base.base.output', 'output', (['(ts.format % (step, max_step))'], {}), '(ts.format % (step, max_step))\n', (2496, 2526), False, 'from sfepy.base.base import output, OneTypeList, Struct\n'), ((6394, 6413), 'six.iteritems', 'six.iteritems', (['data'], {}), '(data)\n', (6407, 6413), False, 'import six\n'), ((1381, 1429), 'sfepy.base.base.output', 'output', (['"""max. refinement per group:"""', 'val.levels'], {}), "('max. refinement per group:', val.levels)\n", (1387, 1429), False, 'from sfepy.base.base import output, OneTypeList, Struct\n')]
|
"""Contact Database Tables/Models.
Models of the Endorser tables for Contacts (Authors) and related data.
"""
import uuid
from datetime import datetime
from typing import List
from sqlmodel import Field
from sqlalchemy import Column, func, text, String
from sqlalchemy.dialects.postgresql import UUID, TIMESTAMP, ARRAY
from api.db.models.base import BaseModel
class Contact(BaseModel, table=True):
"""Contact.
This is the model for the Contact table (postgresql specific dialects in use).
Attributes:
contact_id: Endorser's Contact ID
author_status: Whether they are an approved author or not
endorse_status: Whether endorsements are auto-approved or not
tags: Set by endorser for arbitrary grouping of Contacts
connection_id: Underlying AcaPy connection id
connection_alias: Underlying AcaPy connection alias
public_did: Represents the Contact's agent's Public DID (if any)
state: The underlying AcaPy connection state
created_at: Timestamp when record was created
updated_at: Timestamp when record was last modified
"""
contact_id: uuid.UUID = Field(
sa_column=Column(
UUID(as_uuid=True),
primary_key=True,
server_default=text("gen_random_uuid()"),
)
)
author_status: str = Field(nullable=False)
endorse_status: str = Field(nullable=False)
tags: List[str] = Field(sa_column=Column(ARRAY(String)))
# acapy data ---
connection_id: uuid.UUID = Field(nullable=False)
connection_protocol: str = Field(nullable=False)
connection_alias: str = Field(nullable=True, default=False)
public_did: str = Field(nullable=True, default=False)
state: str = Field(nullable=False)
# --- acapy data
created_at: datetime = Field(
sa_column=Column(TIMESTAMP, nullable=False, server_default=func.now())
)
updated_at: datetime = Field(
sa_column=Column(
TIMESTAMP, nullable=False, server_default=func.now(), onupdate=func.now()
)
)
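# A minimal instantiation sketch (the field values below are hypothetical;
# the server-side defaults fill in contact_id, created_at and updated_at on
# insert):
#
#   contact = Contact(
#       author_status="Active",
#       endorse_status="AutoEndorse",
#       tags=["demo"],
#       connection_id=uuid.uuid4(),
#       connection_protocol="didexchange/1.0",
#       state="active",
#   )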
|
[
"sqlmodel.Field"
] |
[((1326, 1347), 'sqlmodel.Field', 'Field', ([], {'nullable': '(False)'}), '(nullable=False)\n', (1331, 1347), False, 'from sqlmodel import Field\n'), ((1374, 1395), 'sqlmodel.Field', 'Field', ([], {'nullable': '(False)'}), '(nullable=False)\n', (1379, 1395), False, 'from sqlmodel import Field\n'), ((1510, 1531), 'sqlmodel.Field', 'Field', ([], {'nullable': '(False)'}), '(nullable=False)\n', (1515, 1531), False, 'from sqlmodel import Field\n'), ((1563, 1584), 'sqlmodel.Field', 'Field', ([], {'nullable': '(False)'}), '(nullable=False)\n', (1568, 1584), False, 'from sqlmodel import Field\n'), ((1613, 1648), 'sqlmodel.Field', 'Field', ([], {'nullable': '(True)', 'default': '(False)'}), '(nullable=True, default=False)\n', (1618, 1648), False, 'from sqlmodel import Field\n'), ((1671, 1706), 'sqlmodel.Field', 'Field', ([], {'nullable': '(True)', 'default': '(False)'}), '(nullable=True, default=False)\n', (1676, 1706), False, 'from sqlmodel import Field\n'), ((1724, 1745), 'sqlmodel.Field', 'Field', ([], {'nullable': '(False)'}), '(nullable=False)\n', (1729, 1745), False, 'from sqlmodel import Field\n'), ((1180, 1198), 'sqlalchemy.dialects.postgresql.UUID', 'UUID', ([], {'as_uuid': '(True)'}), '(as_uuid=True)\n', (1184, 1198), False, 'from sqlalchemy.dialects.postgresql import UUID, TIMESTAMP, ARRAY\n'), ((1441, 1454), 'sqlalchemy.dialects.postgresql.ARRAY', 'ARRAY', (['String'], {}), '(String)\n', (1446, 1454), False, 'from sqlalchemy.dialects.postgresql import UUID, TIMESTAMP, ARRAY\n'), ((1257, 1282), 'sqlalchemy.text', 'text', (['"""gen_random_uuid()"""'], {}), "('gen_random_uuid()')\n", (1261, 1282), False, 'from sqlalchemy import Column, func, text, String\n'), ((1869, 1879), 'sqlalchemy.func.now', 'func.now', ([], {}), '()\n', (1877, 1879), False, 'from sqlalchemy import Column, func, text, String\n'), ((2001, 2011), 'sqlalchemy.func.now', 'func.now', ([], {}), '()\n', (2009, 2011), False, 'from sqlalchemy import Column, func, text, String\n'), ((2022, 2032), 'sqlalchemy.func.now', 'func.now', ([], {}), '()\n', (2030, 2032), False, 'from sqlalchemy import Column, func, text, String\n')]
|
from fastapi import APIRouter, Depends
from ..utils import engine, get_session
from sqlmodel import Session, select, or_
from ..models.team import Team
from ..models.user import User
from sqlalchemy.exc import NoResultFound
from datetime import datetime
router = APIRouter(prefix="/api/teams", tags=["team"])
session = Session(engine)
@router.post("/")
async def post_team(
*,
team: Team,
session: Session = Depends(get_session),
):
"""Post new team"""
statement = select(Team).where(or_(Team.name == team.name, Team.id == team.id))
try:
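        # .one() raises NoResultFound when no matching team exists; reaching
        # the return below therefore means a duplicate name or id was found.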
result = session.exec(statement).one()
return False
except NoResultFound:
session.add(team)
session.commit()
session.refresh(team)
return team
@router.get("/")
async def get_team_list(session: Session = Depends(get_session)):
"""Get team list"""
statement = select(Team)
results = session.exec(statement).all()
return results
@router.get("/active")
async def get_active_team_list(session: Session = Depends(get_session)):
"""Get list of active teams"""
statement = (
select(
Team.id,
Team.lead_user_id,
Team.name.label("team_name"),
Team.short_name.label("team_short_name"),
User.id,
User.short_name.label("user_name"),
)
.join(User)
.where(Team.is_active == True)
)
results = session.exec(statement).all()
return results
@router.get("/{team_name}")
async def read_teams(team_name: str = None, session: Session = Depends(get_session)):
"""Read the contents of a given team"""
statement = select(Team).where(Team.name == team_name)
try:
result = session.exec(statement).one()
return result
except NoResultFound:
msg = f"""There is no team named {team_name}"""
return msg
@router.get("/{team_id}/user-name")
async def get_user_name_by_team_id(
team_id: int, session: Session = Depends(get_session)
):
"""Get user name by team id"""
statement = (
select(Team.id, User.id, User.name)
.join(User)
.where(Team.id == team_id)
.where(User.active == True)
)
result = session.exec(statement).one()
return result
@router.put("/{team_name}/activate")
async def activate_team(
team_name: str = None,
session: Session = Depends(get_session),
):
"""Activate team"""
statement = select(Team).where(Team.name == team_name)
team_to_activate = session.exec(statement).one()
team_to_activate.is_active = True
team_to_activate.updated_at = datetime.now()
session.add(team_to_activate)
session.commit()
session.refresh(team_to_activate)
return team_to_activate
@router.put("/{team_name}/deactivate")
async def deactivate_team(
team_name: str = None,
session: Session = Depends(get_session),
):
"""Deactivate team"""
statement = select(Team).where(Team.name == team_name)
team_to_deactivate = session.exec(statement).one()
team_to_deactivate.is_active = False
team_to_deactivate.updated_at = datetime.now()
session.add(team_to_deactivate)
session.commit()
session.refresh(team_to_deactivate)
return team_to_deactivate
@router.put("/")
async def update_team(
id: str = None,
lead_user_id: str = None,
name: str = None,
is_active: bool = None,
session: Session = Depends(get_session),
):
"""Update team"""
statement = select(Team).where(or_(Team.name == name, Team.id == id))
team_to_update = session.exec(statement).one()
team_to_update.lead_user_id = lead_user_id
team_to_update.name = name
team_to_update.is_active = is_active
session.add(team_to_update)
team_to_update.updated_at = datetime.now()
session.commit()
session.refresh(team_to_update)
return team_to_update
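# A minimal client-side sketch (hypothetical host and port; assumes the app
# mounting this router is running and `requests` is installed):
#
#   import requests
#   requests.post("http://localhost:8000/api/teams/", json={"name": "core"})
#   requests.put("http://localhost:8000/api/teams/core/activate")
#   teams = requests.get("http://localhost:8000/api/teams/active").json()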
|
[
"sqlmodel.Session",
"sqlmodel.or_",
"sqlmodel.select"
] |
[((264, 309), 'fastapi.APIRouter', 'APIRouter', ([], {'prefix': '"""/api/teams"""', 'tags': "['team']"}), "(prefix='/api/teams', tags=['team'])\n", (273, 309), False, 'from fastapi import APIRouter, Depends\n'), ((320, 335), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (327, 335), False, 'from sqlmodel import Session, select, or_\n'), ((423, 443), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (430, 443), False, 'from fastapi import APIRouter, Depends\n'), ((822, 842), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (829, 842), False, 'from fastapi import APIRouter, Depends\n'), ((885, 897), 'sqlmodel.select', 'select', (['Team'], {}), '(Team)\n', (891, 897), False, 'from sqlmodel import Session, select, or_\n'), ((1036, 1056), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (1043, 1056), False, 'from fastapi import APIRouter, Depends\n'), ((1576, 1596), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (1583, 1596), False, 'from fastapi import APIRouter, Depends\n'), ((1992, 2012), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (1999, 2012), False, 'from fastapi import APIRouter, Depends\n'), ((2385, 2405), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (2392, 2405), False, 'from fastapi import APIRouter, Depends\n'), ((2618, 2632), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2630, 2632), False, 'from datetime import datetime\n'), ((2872, 2892), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (2879, 2892), False, 'from fastapi import APIRouter, Depends\n'), ((3114, 3128), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3126, 3128), False, 'from datetime import datetime\n'), ((3421, 3441), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (3428, 3441), False, 'from fastapi import APIRouter, Depends\n'), ((3776, 3790), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3788, 3790), False, 'from datetime import datetime\n'), ((507, 554), 'sqlmodel.or_', 'or_', (['(Team.name == team.name)', '(Team.id == team.id)'], {}), '(Team.name == team.name, Team.id == team.id)\n', (510, 554), False, 'from sqlmodel import Session, select, or_\n'), ((3503, 3540), 'sqlmodel.or_', 'or_', (['(Team.name == name)', '(Team.id == id)'], {}), '(Team.name == name, Team.id == id)\n', (3506, 3540), False, 'from sqlmodel import Session, select, or_\n'), ((488, 500), 'sqlmodel.select', 'select', (['Team'], {}), '(Team)\n', (494, 500), False, 'from sqlmodel import Session, select, or_\n'), ((1659, 1671), 'sqlmodel.select', 'select', (['Team'], {}), '(Team)\n', (1665, 1671), False, 'from sqlmodel import Session, select, or_\n'), ((2450, 2462), 'sqlmodel.select', 'select', (['Team'], {}), '(Team)\n', (2456, 2462), False, 'from sqlmodel import Session, select, or_\n'), ((2939, 2951), 'sqlmodel.select', 'select', (['Team'], {}), '(Team)\n', (2945, 2951), False, 'from sqlmodel import Session, select, or_\n'), ((3484, 3496), 'sqlmodel.select', 'select', (['Team'], {}), '(Team)\n', (3490, 3496), False, 'from sqlmodel import Session, select, or_\n'), ((2077, 2112), 'sqlmodel.select', 'select', (['Team.id', 'User.id', 'User.name'], {}), '(Team.id, User.id, User.name)\n', (2083, 2112), False, 'from sqlmodel import Session, select, or_\n')]
|
#!/usr/bin/env python
"""
Diametrically point loaded 2-D disk, using commands for interactive use. See
:ref:`sec-primer`.
The script combines the functionality of all the ``its2D_?.py`` examples and
allows setting various simulation parameters, namely:
- material parameters
- displacement field approximation order
- uniform mesh refinement level
The example shows also how to probe the results as in
:ref:`linear_elasticity-its2D_4`, and how to display the results using Mayavi.
Using :mod:`sfepy.discrete.probes` allows correct probing of fields with the
approximation order greater than one.
In the SfePy top-level directory the following command can be used to get usage
information::
python examples/linear_elasticity/its2D_interactive.py -h
Notes
-----
The ``--probe`` and ``--show`` options work simultaneously only if Mayavi and
Matplotlib use the same backend type (for example wx).
"""
from __future__ import absolute_import
import sys
from six.moves import range
sys.path.append('.')
from argparse import ArgumentParser, RawDescriptionHelpFormatter
import numpy as nm
import matplotlib.pyplot as plt
from sfepy.base.base import assert_, output, ordered_iteritems, IndexedStruct
from sfepy.discrete import (FieldVariable, Material, Integral, Integrals,
Equation, Equations, Problem)
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.terms import Term
from sfepy.discrete.conditions import Conditions, EssentialBC
from sfepy.mechanics.matcoefs import stiffness_from_youngpoisson
from sfepy.solvers.ls import ScipyDirect
from sfepy.solvers.nls import Newton
from sfepy.discrete.fem.geometry_element import geometry_data
from sfepy.discrete.probes import LineProbe
from sfepy.discrete.projections import project_by_component
from examples.linear_elasticity.its2D_2 import stress_strain
from examples.linear_elasticity.its2D_3 import nodal_stress
def gen_lines(problem):
"""
Define two line probes.
Additional probes can be added by appending to `ps0` (start points) and
`ps1` (end points) lists.
"""
ps0 = [[0.0, 0.0], [0.0, 0.0]]
ps1 = [[75.0, 0.0], [0.0, 75.0]]
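    # For example, a diagonal probe could be added like this (coordinates are
    # illustrative only):
    #   ps0.append([0.0, 0.0]); ps1.append([75.0, 75.0])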
# Use enough points for higher order approximations.
n_point = 1000
labels = ['%s -> %s' % (p0, p1) for p0, p1 in zip(ps0, ps1)]
probes = []
for ip in range(len(ps0)):
p0, p1 = ps0[ip], ps1[ip]
probes.append(LineProbe(p0, p1, n_point))
return probes, labels
def probe_results(u, strain, stress, probe, label):
"""
Probe the results using the given probe and plot the probed values.
"""
results = {}
pars, vals = probe(u)
results['u'] = (pars, vals)
pars, vals = probe(strain)
results['cauchy_strain'] = (pars, vals)
pars, vals = probe(stress)
results['cauchy_stress'] = (pars, vals)
fig = plt.figure()
plt.clf()
fig.subplots_adjust(hspace=0.4)
plt.subplot(311)
pars, vals = results['u']
for ic in range(vals.shape[1]):
plt.plot(pars, vals[:,ic], label=r'$u_{%d}$' % (ic + 1),
lw=1, ls='-', marker='+', ms=3)
plt.ylabel('displacements')
plt.xlabel('probe %s' % label, fontsize=8)
plt.legend(loc='best', fontsize=10)
sym_indices = ['11', '22', '12']
plt.subplot(312)
pars, vals = results['cauchy_strain']
for ic in range(vals.shape[1]):
plt.plot(pars, vals[:,ic], label=r'$e_{%s}$' % sym_indices[ic],
lw=1, ls='-', marker='+', ms=3)
plt.ylabel('Cauchy strain')
plt.xlabel('probe %s' % label, fontsize=8)
plt.legend(loc='best', fontsize=10)
plt.subplot(313)
pars, vals = results['cauchy_stress']
for ic in range(vals.shape[1]):
plt.plot(pars, vals[:,ic], label=r'$\sigma_{%s}$' % sym_indices[ic],
lw=1, ls='-', marker='+', ms=3)
plt.ylabel('Cauchy stress')
plt.xlabel('probe %s' % label, fontsize=8)
plt.legend(loc='best', fontsize=10)
return fig, results
helps = {
'young' : "the Young's modulus [default: %(default)s]",
'poisson' : "the Poisson's ratio [default: %(default)s]",
'load' : "the vertical load value (negative means compression)"
" [default: %(default)s]",
'order' : 'displacement field approximation order [default: %(default)s]',
'refine' : 'uniform mesh refinement level [default: %(default)s]',
'probe' : 'probe the results',
'show' : 'show the results figure',
}
def main():
from sfepy import data_dir
parser = ArgumentParser(description=__doc__,
formatter_class=RawDescriptionHelpFormatter)
parser.add_argument('--version', action='version', version='%(prog)s')
parser.add_argument('--young', metavar='float', type=float,
action='store', dest='young',
default=2000.0, help=helps['young'])
parser.add_argument('--poisson', metavar='float', type=float,
action='store', dest='poisson',
default=0.4, help=helps['poisson'])
parser.add_argument('--load', metavar='float', type=float,
action='store', dest='load',
default=-1000.0, help=helps['load'])
parser.add_argument('--order', metavar='int', type=int,
action='store', dest='order',
default=1, help=helps['order'])
parser.add_argument('-r', '--refine', metavar='int', type=int,
action='store', dest='refine',
default=0, help=helps['refine'])
parser.add_argument('-s', '--show',
action="store_true", dest='show',
default=False, help=helps['show'])
parser.add_argument('-p', '--probe',
action="store_true", dest='probe',
default=False, help=helps['probe'])
options = parser.parse_args()
assert_((0.0 < options.poisson < 0.5),
"Poisson's ratio must be in ]0, 0.5[!")
assert_((0 < options.order),
'displacement approximation order must be at least 1!')
output('using values:')
output(" Young's modulus:", options.young)
output(" Poisson's ratio:", options.poisson)
output(' vertical load:', options.load)
output('uniform mesh refinement level:', options.refine)
# Build the problem definition.
mesh = Mesh.from_file(data_dir + '/meshes/2d/its2D.mesh')
domain = FEDomain('domain', mesh)
if options.refine > 0:
for ii in range(options.refine):
output('refine %d...' % ii)
domain = domain.refine()
output('... %d nodes %d elements'
% (domain.shape.n_nod, domain.shape.n_el))
omega = domain.create_region('Omega', 'all')
left = domain.create_region('Left',
'vertices in x < 0.001', 'facet')
bottom = domain.create_region('Bottom',
'vertices in y < 0.001', 'facet')
top = domain.create_region('Top', 'vertex 2', 'vertex')
field = Field.from_args('fu', nm.float64, 'vector', omega,
approx_order=options.order)
u = FieldVariable('u', 'unknown', field)
v = FieldVariable('v', 'test', field, primary_var_name='u')
D = stiffness_from_youngpoisson(2, options.young, options.poisson)
asphalt = Material('Asphalt', D=D)
load = Material('Load', values={'.val' : [0.0, options.load]})
integral = Integral('i', order=2*options.order)
integral0 = Integral('i', order=0)
t1 = Term.new('dw_lin_elastic(Asphalt.D, v, u)',
integral, omega, Asphalt=asphalt, v=v, u=u)
t2 = Term.new('dw_point_load(Load.val, v)',
integral0, top, Load=load, v=v)
eq = Equation('balance', t1 - t2)
eqs = Equations([eq])
xsym = EssentialBC('XSym', bottom, {'u.1' : 0.0})
ysym = EssentialBC('YSym', left, {'u.0' : 0.0})
ls = ScipyDirect({})
nls_status = IndexedStruct()
nls = Newton({}, lin_solver=ls, status=nls_status)
pb = Problem('elasticity', equations=eqs, nls=nls, ls=ls)
pb.time_update(ebcs=Conditions([xsym, ysym]))
# Solve the problem.
state = pb.solve()
output(nls_status)
# Postprocess the solution.
out = state.create_output_dict()
out = stress_strain(out, pb, state, extend=True)
pb.save_state('its2D_interactive.vtk', out=out)
gdata = geometry_data['2_3']
nc = len(gdata.coors)
integral_vn = Integral('ivn', coors=gdata.coors,
weights=[gdata.volume / nc] * nc)
nodal_stress(out, pb, state, integrals=Integrals([integral_vn]))
if options.probe:
# Probe the solution.
probes, labels = gen_lines(pb)
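        # Strain and stress are derived from gradients of u, so they are
        # approximated with fields one order lower than the displacements.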
sfield = Field.from_args('sym_tensor', nm.float64, 3, omega,
approx_order=options.order - 1)
stress = FieldVariable('stress', 'parameter', sfield,
primary_var_name='(set-to-None)')
strain = FieldVariable('strain', 'parameter', sfield,
primary_var_name='(set-to-None)')
cfield = Field.from_args('component', nm.float64, 1, omega,
approx_order=options.order - 1)
component = FieldVariable('component', 'parameter', cfield,
primary_var_name='(set-to-None)')
ev = pb.evaluate
order = 2 * (options.order - 1)
strain_qp = ev('ev_cauchy_strain.%d.Omega(u)' % order, mode='qp')
stress_qp = ev('ev_cauchy_stress.%d.Omega(Asphalt.D, u)' % order,
mode='qp', copy_materials=False)
project_by_component(strain, strain_qp, component, order)
project_by_component(stress, stress_qp, component, order)
all_results = []
for ii, probe in enumerate(probes):
fig, results = probe_results(u, strain, stress, probe, labels[ii])
fig.savefig('its2D_interactive_probe_%d.png' % ii)
all_results.append(results)
for ii, results in enumerate(all_results):
output('probe %d:' % ii)
output.level += 2
for key, res in ordered_iteritems(results):
output(key + ':')
val = res[1]
output(' min: %+.2e, mean: %+.2e, max: %+.2e'
% (val.min(), val.mean(), val.max()))
output.level -= 2
if options.show:
# Show the solution. If the approximation order is greater than 1, the
# extra DOFs are simply thrown away.
from sfepy.postprocess.viewer import Viewer
view = Viewer('its2D_interactive.vtk')
view(vector_mode='warp_norm', rel_scaling=1,
is_scalar_bar=True, is_wireframe=True)
if __name__ == '__main__':
main()
|
[
"sfepy.discrete.conditions.EssentialBC",
"sfepy.discrete.probes.LineProbe",
"sfepy.mechanics.matcoefs.stiffness_from_youngpoisson",
"sfepy.discrete.Integral",
"sfepy.postprocess.viewer.Viewer",
"sfepy.discrete.projections.project_by_component",
"sfepy.solvers.ls.ScipyDirect",
"sfepy.discrete.Integrals",
"sfepy.discrete.Equations",
"sfepy.base.base.ordered_iteritems",
"sfepy.discrete.fem.Field.from_args",
"sfepy.discrete.Equation",
"sfepy.discrete.fem.Mesh.from_file",
"sfepy.discrete.fem.FEDomain",
"sfepy.base.base.output",
"sfepy.terms.Term.new",
"sfepy.base.base.assert_",
"sfepy.discrete.conditions.Conditions",
"sfepy.discrete.FieldVariable",
"sfepy.discrete.Material",
"sfepy.discrete.Problem",
"sfepy.base.base.IndexedStruct",
"sfepy.solvers.nls.Newton"
] |
[((984, 1004), 'sys.path.append', 'sys.path.append', (['"""."""'], {}), "('.')\n", (999, 1004), False, 'import sys\n'), ((2837, 2849), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2847, 2849), True, 'import matplotlib.pyplot as plt\n'), ((2854, 2863), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (2861, 2863), True, 'import matplotlib.pyplot as plt\n'), ((2904, 2920), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(311)'], {}), '(311)\n', (2915, 2920), True, 'import matplotlib.pyplot as plt\n'), ((2965, 2985), 'six.moves.range', 'range', (['vals.shape[1]'], {}), '(vals.shape[1])\n', (2970, 2985), False, 'from six.moves import range\n'), ((3105, 3132), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""displacements"""'], {}), "('displacements')\n", (3115, 3132), True, 'import matplotlib.pyplot as plt\n'), ((3137, 3179), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('probe %s' % label)"], {'fontsize': '(8)'}), "('probe %s' % label, fontsize=8)\n", (3147, 3179), True, 'import matplotlib.pyplot as plt\n'), ((3184, 3219), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""', 'fontsize': '(10)'}), "(loc='best', fontsize=10)\n", (3194, 3219), True, 'import matplotlib.pyplot as plt\n'), ((3263, 3279), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(312)'], {}), '(312)\n', (3274, 3279), True, 'import matplotlib.pyplot as plt\n'), ((3336, 3356), 'six.moves.range', 'range', (['vals.shape[1]'], {}), '(vals.shape[1])\n', (3341, 3356), False, 'from six.moves import range\n'), ((3483, 3510), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Cauchy strain"""'], {}), "('Cauchy strain')\n", (3493, 3510), True, 'import matplotlib.pyplot as plt\n'), ((3515, 3557), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('probe %s' % label)"], {'fontsize': '(8)'}), "('probe %s' % label, fontsize=8)\n", (3525, 3557), True, 'import matplotlib.pyplot as plt\n'), ((3562, 3597), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""', 'fontsize': '(10)'}), "(loc='best', fontsize=10)\n", (3572, 3597), True, 'import matplotlib.pyplot as plt\n'), ((3603, 3619), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(313)'], {}), '(313)\n', (3614, 3619), True, 'import matplotlib.pyplot as plt\n'), ((3676, 3696), 'six.moves.range', 'range', (['vals.shape[1]'], {}), '(vals.shape[1])\n', (3681, 3696), False, 'from six.moves import range\n'), ((3828, 3855), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Cauchy stress"""'], {}), "('Cauchy stress')\n", (3838, 3855), True, 'import matplotlib.pyplot as plt\n'), ((3860, 3902), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('probe %s' % label)"], {'fontsize': '(8)'}), "('probe %s' % label, fontsize=8)\n", (3870, 3902), True, 'import matplotlib.pyplot as plt\n'), ((3907, 3942), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""', 'fontsize': '(10)'}), "(loc='best', fontsize=10)\n", (3917, 3942), True, 'import matplotlib.pyplot as plt\n'), ((4485, 4570), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '__doc__', 'formatter_class': 'RawDescriptionHelpFormatter'}), '(description=__doc__, formatter_class=RawDescriptionHelpFormatter\n )\n', (4499, 4570), False, 'from argparse import ArgumentParser, RawDescriptionHelpFormatter\n'), ((5884, 5960), 'sfepy.base.base.assert_', 'assert_', (['(0.0 < options.poisson < 0.5)', '"""Poisson\'s ratio must be in ]0, 0.5[!"""'], {}), '(0.0 < options.poisson < 0.5, "Poisson\'s ratio must be in ]0, 0.5[!")\n', (5891, 5960), False, 'from sfepy.base.base import assert_, output, ordered_iteritems, IndexedStruct\n'), ((5979, 6065), 'sfepy.base.base.assert_', 'assert_', (['(0 < options.order)', '"""displacement approximation order must be at least 1!"""'], {}), "(0 < options.order,\n 'displacement approximation order must be at least 1!')\n", (5986, 6065), False, 'from sfepy.base.base import assert_, output, ordered_iteritems, IndexedStruct\n'), ((6081, 6104), 'sfepy.base.base.output', 'output', (['"""using values:"""'], {}), "('using values:')\n", (6087, 6104), False, 'from sfepy.base.base import assert_, output, ordered_iteritems, IndexedStruct\n'), ((6109, 6152), 'sfepy.base.base.output', 'output', (['""" Young\'s modulus:"""', 'options.young'], {}), '(" Young\'s modulus:", options.young)\n', (6115, 6152), False, 'from sfepy.base.base import assert_, output, ordered_iteritems, IndexedStruct\n'), ((6157, 6202), 'sfepy.base.base.output', 'output', (['""" Poisson\'s ratio:"""', 'options.poisson'], {}), '(" Poisson\'s ratio:", options.poisson)\n', (6163, 6202), False, 'from sfepy.base.base import assert_, output, ordered_iteritems, IndexedStruct\n'), ((6207, 6247), 'sfepy.base.base.output', 'output', (['""" vertical load:"""', 'options.load'], {}), "(' vertical load:', options.load)\n", (6213, 6247), False, 'from sfepy.base.base import assert_, output, ordered_iteritems, IndexedStruct\n'), ((6252, 6308), 'sfepy.base.base.output', 'output', (['"""uniform mesh refinement level:"""', 'options.refine'], {}), "('uniform mesh refinement level:', options.refine)\n", (6258, 6308), False, 'from sfepy.base.base import assert_, output, ordered_iteritems, IndexedStruct\n'), ((6357, 6407), 'sfepy.discrete.fem.Mesh.from_file', 'Mesh.from_file', (["(data_dir + '/meshes/2d/its2D.mesh')"], {}), "(data_dir + '/meshes/2d/its2D.mesh')\n", (6371, 6407), False, 'from sfepy.discrete.fem import Mesh, FEDomain, Field\n'), ((6421, 6445), 'sfepy.discrete.fem.FEDomain', 'FEDomain', (['"""domain"""', 'mesh'], {}), "('domain', mesh)\n", (6429, 6445), False, 'from sfepy.discrete.fem import Mesh, FEDomain, Field\n'), ((7041, 7119), 'sfepy.discrete.fem.Field.from_args', 'Field.from_args', (['"""fu"""', 'nm.float64', '"""vector"""', 'omega'], {'approx_order': 'options.order'}), "('fu', nm.float64, 'vector', omega, approx_order=options.order)\n", (7056, 7119), False, 'from sfepy.discrete.fem import Mesh, FEDomain, Field\n'), ((7157, 7193), 'sfepy.discrete.FieldVariable', 'FieldVariable', (['"""u"""', '"""unknown"""', 'field'], {}), "('u', 'unknown', field)\n", (7170, 7193), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Integrals, Equation, Equations, Problem\n'), ((7202, 7257), 'sfepy.discrete.FieldVariable', 'FieldVariable', (['"""v"""', '"""test"""', 'field'], {'primary_var_name': '"""u"""'}), "('v', 'test', field, primary_var_name='u')\n", (7215, 7257), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Integrals, Equation, Equations, Problem\n'), ((7267, 7329), 'sfepy.mechanics.matcoefs.stiffness_from_youngpoisson', 'stiffness_from_youngpoisson', (['(2)', 'options.young', 'options.poisson'], {}), '(2, options.young, options.poisson)\n', (7294, 7329), False, 'from sfepy.mechanics.matcoefs import stiffness_from_youngpoisson\n'), ((7345, 7369), 'sfepy.discrete.Material', 'Material', (['"""Asphalt"""'], {'D': 'D'}), "('Asphalt', D=D)\n", (7353, 7369), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Integrals, Equation, Equations, Problem\n'), ((7381, 7435), 'sfepy.discrete.Material', 'Material', (['"""Load"""'], {'values': "{'.val': [0.0, options.load]}"}), "('Load', values={'.val': [0.0, options.load]})\n", (7389, 7435), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Integrals, Equation, Equations, Problem\n'), ((7453, 7491), 'sfepy.discrete.Integral', 'Integral', (['"""i"""'], {'order': '(2 * options.order)'}), "('i', order=2 * options.order)\n", (7461, 7491), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Integrals, Equation, Equations, Problem\n'), ((7506, 7528), 'sfepy.discrete.Integral', 'Integral', (['"""i"""'], {'order': '(0)'}), "('i', order=0)\n", (7514, 7528), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Integrals, Equation, Equations, Problem\n'), ((7539, 7631), 'sfepy.terms.Term.new', 'Term.new', (['"""dw_lin_elastic(Asphalt.D, v, u)"""', 'integral', 'omega'], {'Asphalt': 'asphalt', 'v': 'v', 'u': 'u'}), "('dw_lin_elastic(Asphalt.D, v, u)', integral, omega, Asphalt=\n asphalt, v=v, u=u)\n", (7547, 7631), False, 'from sfepy.terms import Term\n'), ((7654, 7724), 'sfepy.terms.Term.new', 'Term.new', (['"""dw_point_load(Load.val, v)"""', 'integral0', 'top'], {'Load': 'load', 'v': 'v'}), "('dw_point_load(Load.val, v)', integral0, top, Load=load, v=v)\n", (7662, 7724), False, 'from sfepy.terms import Term\n'), ((7752, 7780), 'sfepy.discrete.Equation', 'Equation', (['"""balance"""', '(t1 - t2)'], {}), "('balance', t1 - t2)\n", (7760, 7780), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Integrals, Equation, Equations, Problem\n'), ((7791, 7806), 'sfepy.discrete.Equations', 'Equations', (['[eq]'], {}), '([eq])\n', (7800, 7806), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Integrals, Equation, Equations, Problem\n'), ((7819, 7860), 'sfepy.discrete.conditions.EssentialBC', 'EssentialBC', (['"""XSym"""', 'bottom', "{'u.1': 0.0}"], {}), "('XSym', bottom, {'u.1': 0.0})\n", (7830, 7860), False, 'from sfepy.discrete.conditions import Conditions, EssentialBC\n'), ((7873, 7912), 'sfepy.discrete.conditions.EssentialBC', 'EssentialBC', (['"""YSym"""', 'left', "{'u.0': 0.0}"], {}), "('YSym', left, {'u.0': 0.0})\n", (7884, 7912), False, 'from sfepy.discrete.conditions import Conditions, EssentialBC\n'), ((7924, 7939), 'sfepy.solvers.ls.ScipyDirect', 'ScipyDirect', (['{}'], {}), '({})\n', (7935, 7939), False, 'from sfepy.solvers.ls import ScipyDirect\n'), ((7958, 7973), 'sfepy.base.base.IndexedStruct', 'IndexedStruct', ([], {}), '()\n', (7971, 7973), False, 'from sfepy.base.base import assert_, output, ordered_iteritems, IndexedStruct\n'), ((7984, 8028), 'sfepy.solvers.nls.Newton', 'Newton', (['{}'], {'lin_solver': 'ls', 'status': 'nls_status'}), '({}, lin_solver=ls, status=nls_status)\n', (7990, 8028), False, 'from sfepy.solvers.nls import Newton\n'), ((8039, 8091), 'sfepy.discrete.Problem', 'Problem', (['"""elasticity"""'], {'equations': 'eqs', 'nls': 'nls', 'ls': 'ls'}), "('elasticity', equations=eqs, nls=nls, ls=ls)\n", (8046, 8091), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Integrals, Equation, Equations, Problem\n'), ((8196, 8214), 'sfepy.base.base.output', 'output', (['nls_status'], {}), '(nls_status)\n', (8202, 8214), False, 'from sfepy.base.base import assert_, output, ordered_iteritems, IndexedStruct\n'), ((8295, 8337), 'examples.linear_elasticity.its2D_2.stress_strain', 'stress_strain', (['out', 'pb', 'state'], {'extend': '(True)'}), '(out, pb, state, extend=True)\n', (8308, 8337), False, 'from examples.linear_elasticity.its2D_2 import stress_strain\n'), ((8469, 8537), 'sfepy.discrete.Integral', 'Integral', (['"""ivn"""'], {'coors': 'gdata.coors', 'weights': '([gdata.volume / nc] * nc)'}), "('ivn', coors=gdata.coors, weights=[gdata.volume / nc] * nc)\n", (8477, 8537), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Integrals, Equation, Equations, Problem\n'), ((2995, 3087), 'matplotlib.pyplot.plot', 'plt.plot', (['pars', 'vals[:, ic]'], {'label': "('$u_{%d}$' % (ic + 1))", 'lw': '(1)', 'ls': '"""-"""', 'marker': '"""+"""', 'ms': '(3)'}), "(pars, vals[:, ic], label='$u_{%d}$' % (ic + 1), lw=1, ls='-',\n marker='+', ms=3)\n", (3003, 3087), True, 'import matplotlib.pyplot as plt\n'), ((3366, 3466), 'matplotlib.pyplot.plot', 'plt.plot', (['pars', 'vals[:, ic]'], {'label': "('$e_{%s}$' % sym_indices[ic])", 'lw': '(1)', 'ls': '"""-"""', 'marker': '"""+"""', 'ms': '(3)'}), "(pars, vals[:, ic], label='$e_{%s}$' % sym_indices[ic], lw=1, ls=\n '-', marker='+', ms=3)\n", (3374, 3466), True, 'import matplotlib.pyplot as plt\n'), ((3706, 3811), 'matplotlib.pyplot.plot', 'plt.plot', (['pars', 'vals[:, ic]'], {'label': "('$\\\\sigma_{%s}$' % sym_indices[ic])", 'lw': '(1)', 'ls': '"""-"""', 'marker': '"""+"""', 'ms': '(3)'}), "(pars, vals[:, ic], label='$\\\\sigma_{%s}$' % sym_indices[ic], lw=1,\n ls='-', marker='+', ms=3)\n", (3714, 3811), True, 'import matplotlib.pyplot as plt\n'), ((6492, 6513), 'six.moves.range', 'range', (['options.refine'], {}), '(options.refine)\n', (6497, 6513), False, 'from six.moves import range\n'), ((8744, 8832), 'sfepy.discrete.fem.Field.from_args', 'Field.from_args', (['"""sym_tensor"""', 'nm.float64', '(3)', 'omega'], {'approx_order': '(options.order - 1)'}), "('sym_tensor', nm.float64, 3, omega, approx_order=options.\n order - 1)\n", (8759, 8832), False, 'from sfepy.discrete.fem import Mesh, FEDomain, Field\n'), ((8877, 8955), 'sfepy.discrete.FieldVariable', 'FieldVariable', (['"""stress"""', '"""parameter"""', 'sfield'], {'primary_var_name': '"""(set-to-None)"""'}), "('stress', 'parameter', sfield, primary_var_name='(set-to-None)')\n", (8890, 8955), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Integrals, Equation, Equations, Problem\n'), ((9004, 9082), 'sfepy.discrete.FieldVariable', 'FieldVariable', (['"""strain"""', '"""parameter"""', 'sfield'], {'primary_var_name': '"""(set-to-None)"""'}), "('strain', 'parameter', sfield, primary_var_name='(set-to-None)')\n", (9017, 9082), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Integrals, Equation, Equations, Problem\n'), ((9132, 9219), 'sfepy.discrete.fem.Field.from_args', 'Field.from_args', (['"""component"""', 'nm.float64', '(1)', 'omega'], {'approx_order': '(options.order - 1)'}), "('component', nm.float64, 1, omega, approx_order=options.\n order - 1)\n", (9147, 9219), False, 'from sfepy.discrete.fem import Mesh, FEDomain, Field\n'), ((9268, 9354), 'sfepy.discrete.FieldVariable', 'FieldVariable', (['"""component"""', '"""parameter"""', 'cfield'], {'primary_var_name': '"""(set-to-None)"""'}), "('component', 'parameter', cfield, primary_var_name=\n '(set-to-None)')\n", (9281, 9354), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Integrals, Equation, Equations, Problem\n'), ((9663, 9720), 'sfepy.discrete.projections.project_by_component', 'project_by_component', (['strain', 'strain_qp', 'component', 'order'], {}), '(strain, strain_qp, component, order)\n', (9683, 9720), False, 'from sfepy.discrete.projections import project_by_component\n'), ((9729, 9786), 'sfepy.discrete.projections.project_by_component', 'project_by_component', (['stress', 'stress_qp', 'component', 'order'], {}), '(stress, stress_qp, component, order)\n', (9749, 9786), False, 'from sfepy.discrete.projections import project_by_component\n'), ((10646, 10677), 'sfepy.postprocess.viewer.Viewer', 'Viewer', (['"""its2D_interactive.vtk"""'], {}), "('its2D_interactive.vtk')\n", (10652, 10677), False, 'from sfepy.postprocess.viewer import Viewer\n'), ((2404, 2430), 'sfepy.discrete.probes.LineProbe', 'LineProbe', (['p0', 'p1', 'n_point'], {}), '(p0, p1, n_point)\n', (2413, 2430), False, 'from sfepy.discrete.probes import LineProbe\n'), ((6527, 6554), 'sfepy.base.base.output', 'output', (["('refine %d...' % ii)"], {}), "('refine %d...' % ii)\n", (6533, 6554), False, 'from sfepy.base.base import assert_, output, ordered_iteritems, IndexedStruct\n'), ((6604, 6680), 'sfepy.base.base.output', 'output', (["('... %d nodes %d elements' % (domain.shape.n_nod, domain.shape.n_el))"], {}), "('... %d nodes %d elements' % (domain.shape.n_nod, domain.shape.n_el))\n", (6610, 6680), False, 'from sfepy.base.base import assert_, output, ordered_iteritems, IndexedStruct\n'), ((8117, 8141), 'sfepy.discrete.conditions.Conditions', 'Conditions', (['[xsym, ysym]'], {}), '([xsym, ysym])\n', (8127, 8141), False, 'from sfepy.discrete.conditions import Conditions, EssentialBC\n'), ((8608, 8632), 'sfepy.discrete.Integrals', 'Integrals', (['[integral_vn]'], {}), '([integral_vn])\n', (8617, 8632), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Integrals, Equation, Equations, Problem\n'), ((10104, 10128), 'sfepy.base.base.output', 'output', (["('probe %d:' % ii)"], {}), "('probe %d:' % ii)\n", (10110, 10128), False, 'from sfepy.base.base import assert_, output, ordered_iteritems, IndexedStruct\n'), ((10187, 10213), 'sfepy.base.base.ordered_iteritems', 'ordered_iteritems', (['results'], {}), '(results)\n', (10204, 10213), False, 'from sfepy.base.base import assert_, output, ordered_iteritems, IndexedStruct\n'), ((10231, 10248), 'sfepy.base.base.output', 'output', (["(key + ':')"], {}), "(key + ':')\n", (10237, 10248), False, 'from sfepy.base.base import assert_, output, ordered_iteritems, IndexedStruct\n')]
|
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
import megengine.module as M
import megengine.optimizer as optim
from megengine.autodiff import GradManager
from megengine.core._imperative_rt.imperative import sync
from megengine.distributed.helper import get_device_count_by_fork
from megengine.jit import trace
def test_basic():
x = mge.tensor([1.0, 3.0, 5.0]).reshape(1, 3)
w = mge.tensor([2.0, 4.0, 6.0]).reshape(3, 1)
b = mge.tensor(-1.0)
gm = GradManager().attach([w, b])
gm.record()
p = F.matmul(x, w)
y = p + b
gm.backward(y)
gm.release() # is not necessary
np.testing.assert_equal(w.grad.numpy(), [[1], [3], [5]])
np.testing.assert_equal(b.grad.numpy(), [1])
w.grad = None
b.grad = None
with gm:
p = F.matmul(x, w)
y = p + b
gm.backward(y)
np.testing.assert_equal(w.grad.numpy(), [[1], [3], [5]])
np.testing.assert_equal(b.grad.numpy(), [1])
def test_attach_in_with_block():
a = mge.Parameter([1.0])
gm = GradManager()
with gm:
b = a * 3
gm.attach(b)
c = b + 1
gm.backward(c)
assert int(b.grad.numpy()) == 1
def test_attach_temporary():
w = mge.Parameter(2.0)
gm = GradManager()
gm.attach(w)
def cb(x, g):
assert x is ref()
cb.called = True
for i in range(3):
with gm:
cb.called = False
x = mge.Tensor(i, dtype="float32")
gm.attach(x, callbacks=cb)
ref = weakref.ref(x)
y = x * w
gm.backward(y)
assert cb.called
del x
assert ref() is None
# NOTE: does not guarantee timely release when recording
# for i in range(3):
# with gm:
# x = mge.Tensor(i, dtype='float32')
# gm.attach(x)
# ref = weakref.ref(x)
# y = x * w
# del x
# assert ref() is None
# gm.backward(y)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM"
)
@pytest.mark.skipif(get_device_count_by_fork("gpu") < 2, reason="need more gpu device")
@pytest.mark.isolated_distributed
def test_remote_grad():
@dist.launcher
def worker():
rank = dist.get_rank()
size = dist.get_world_size()
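        # The ranks form a pipeline: each rank receives its input from
        # rank - 1, applies its own linear layer and forwards the result to
        # rank + 1; only the last rank reduces to a scalar loss and starts
        # the backward pass.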
x = mge.tensor(np.random.randn(1, rank * 2 + 2), dtype=np.float32)
m = M.Linear(rank * 2 + 2, rank * 2 + 4)
gm = GradManager().attach(m.parameters())
opt = optim.SGD(m.parameters(), 1e-3, momentum=0.9)
@trace(symbolic=True)
def train_func(x):
with gm:
if rank != 0:
x = dist.functional.remote_recv(
rank - 1, shape=(1, rank * 2 + 2), dtype=np.float32
)
y = m(x)
if rank != size - 1:
y = dist.functional.remote_send(y, dest_rank=rank + 1)
if rank == size - 1:
y = y.mean()
gm.backward(y)
else:
gm.backward()
opt.step().clear_grad()
for i in range(3):
train_func(x)
for param in m.parameters():
param.numpy()
worker()
|
[
"megengine.distributed.helper.get_device_count_by_fork",
"megengine.jit.trace",
"megengine.Tensor",
"megengine.tensor",
"megengine.module.Linear",
"megengine.distributed.get_rank",
"megengine.autodiff.GradManager",
"megengine.Parameter",
"megengine.functional.matmul",
"megengine.distributed.functional.remote_send",
"megengine.distributed.functional.remote_recv",
"megengine.distributed.get_world_size"
] |
[((905, 921), 'megengine.tensor', 'mge.tensor', (['(-1.0)'], {}), '(-1.0)\n', (915, 921), True, 'import megengine as mge\n'), ((986, 1000), 'megengine.functional.matmul', 'F.matmul', (['x', 'w'], {}), '(x, w)\n', (994, 1000), True, 'import megengine.functional as F\n'), ((1454, 1474), 'megengine.Parameter', 'mge.Parameter', (['[1.0]'], {}), '([1.0])\n', (1467, 1474), True, 'import megengine as mge\n'), ((1484, 1497), 'megengine.autodiff.GradManager', 'GradManager', ([], {}), '()\n', (1495, 1497), False, 'from megengine.autodiff import GradManager\n'), ((1666, 1684), 'megengine.Parameter', 'mge.Parameter', (['(2.0)'], {}), '(2.0)\n', (1679, 1684), True, 'import megengine as mge\n'), ((1694, 1707), 'megengine.autodiff.GradManager', 'GradManager', ([], {}), '()\n', (1705, 1707), False, 'from megengine.autodiff import GradManager\n'), ((1244, 1258), 'megengine.functional.matmul', 'F.matmul', (['x', 'w'], {}), '(x, w)\n', (1252, 1258), True, 'import megengine.functional as F\n'), ((2833, 2848), 'megengine.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (2846, 2848), True, 'import megengine.distributed as dist\n'), ((2864, 2885), 'megengine.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (2883, 2885), True, 'import megengine.distributed as dist\n'), ((2973, 3009), 'megengine.module.Linear', 'M.Linear', (['(rank * 2 + 2)', '(rank * 2 + 4)'], {}), '(rank * 2 + 2, rank * 2 + 4)\n', (2981, 3009), True, 'import megengine.module as M\n'), ((3130, 3150), 'megengine.jit.trace', 'trace', ([], {'symbolic': '(True)'}), '(symbolic=True)\n', (3135, 3150), False, 'from megengine.jit import trace\n'), ((2458, 2475), 'platform.system', 'platform.system', ([], {}), '()\n', (2473, 2475), False, 'import platform\n'), ((2558, 2575), 'platform.system', 'platform.system', ([], {}), '()\n', (2573, 2575), False, 'import platform\n'), ((2655, 2686), 'megengine.distributed.helper.get_device_count_by_fork', 'get_device_count_by_fork', (['"""gpu"""'], {}), "('gpu')\n", (2679, 2686), False, 'from megengine.distributed.helper import get_device_count_by_fork\n'), ((805, 832), 'megengine.tensor', 'mge.tensor', (['[1.0, 3.0, 5.0]'], {}), '([1.0, 3.0, 5.0])\n', (815, 832), True, 'import megengine as mge\n'), ((855, 882), 'megengine.tensor', 'mge.tensor', (['[2.0, 4.0, 6.0]'], {}), '([2.0, 4.0, 6.0])\n', (865, 882), True, 'import megengine as mge\n'), ((932, 945), 'megengine.autodiff.GradManager', 'GradManager', ([], {}), '()\n', (943, 945), False, 'from megengine.autodiff import GradManager\n'), ((1882, 1912), 'megengine.Tensor', 'mge.Tensor', (['i'], {'dtype': '"""float32"""'}), "(i, dtype='float32')\n", (1892, 1912), True, 'import megengine as mge\n'), ((1970, 1984), 'weakref.ref', 'weakref.ref', (['x'], {}), '(x)\n', (1981, 1984), False, 'import weakref\n'), ((2909, 2941), 'numpy.random.randn', 'np.random.randn', (['(1)', '(rank * 2 + 2)'], {}), '(1, rank * 2 + 2)\n', (2924, 2941), True, 'import numpy as np\n'), ((3023, 3036), 'megengine.autodiff.GradManager', 'GradManager', ([], {}), '()\n', (3034, 3036), False, 'from megengine.autodiff import GradManager\n'), ((3253, 3338), 'megengine.distributed.functional.remote_recv', 'dist.functional.remote_recv', (['(rank - 1)'], {'shape': '(1, rank * 2 + 2)', 'dtype': 'np.float32'}), '(rank - 1, shape=(1, rank * 2 + 2), dtype=np.float32\n )\n', (3280, 3338), True, 'import megengine.distributed as dist\n'), ((3466, 3516), 'megengine.distributed.functional.remote_send', 'dist.functional.remote_send', (['y'], {'dest_rank': '(rank + 1)'}), '(y, dest_rank=rank + 1)\n', (3493, 3516), True, 'import megengine.distributed as dist\n')]
|
import os
import cv2
import argparse
import warnings
import megengine as mge
import megengine.functional as F
warnings.filterwarnings("ignore")
parser = argparse.ArgumentParser(description='Interpolation for a pair of images')
parser.add_argument('--img', dest='img', nargs=2, required=True)
parser.add_argument('--exp', default=4, type=int)
parser.add_argument('--ratio', default=0, type=float, help='inference ratio between two images with 0 - 1 range')
parser.add_argument('--rthreshold', default=0.02, type=float, help='returns image when actual ratio falls in given range threshold')
parser.add_argument('--rmaxcycles', default=8, type=int, help='limit max number of bisectional cycles')
parser.add_argument('--model', dest='modelDir', type=str, default='train_log', help='directory with trained model files')
args = parser.parse_args()
from model.RIFE import Model
model = Model()
model.load_model(args.modelDir, -1)
print("Loaded model")
model.eval()
if args.img[0].endswith('.exr') and args.img[1].endswith('.exr'):
img0 = cv2.imread(args.img[0], cv2.IMREAD_COLOR | cv2.IMREAD_ANYDEPTH)
img1 = cv2.imread(args.img[1], cv2.IMREAD_COLOR | cv2.IMREAD_ANYDEPTH)
img0 = F.expand_dims(mge.Tensor(img0.transpose(2, 0, 1)), 0)
img1 = F.expand_dims(mge.Tensor(img1.transpose(2, 0, 1)), 0)
else:
img0 = cv2.imread(args.img[0], cv2.IMREAD_UNCHANGED)
img1 = cv2.imread(args.img[1], cv2.IMREAD_UNCHANGED)
    img0 = F.expand_dims(mge.Tensor(img0.transpose(2, 0, 1)) / 255., 0)
    img1 = F.expand_dims(mge.Tensor(img1.transpose(2, 0, 1)) / 255., 0)
n, c, h, w = img0.shape
ph = ((h - 1) // 32 + 1) * 32
pw = ((w - 1) // 32 + 1) * 32
padding = ((0, 0), (0, 0), (0, ph - h), (0, pw - w))
img0 = F.nn.pad(img0, padding)
img1 = F.nn.pad(img1, padding)
if args.ratio:
img_list = [img0]
img0_ratio = 0.0
img1_ratio = 1.0
if args.ratio <= img0_ratio + args.rthreshold / 2:
middle = img0
elif args.ratio >= img1_ratio - args.rthreshold / 2:
middle = img1
else:
tmp_img0 = img0
tmp_img1 = img1
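        # Bisection search: the model only predicts the temporal midpoint of
        # a pair, so the requested ratio is approached by repeatedly
        # interpolating the half-interval that still contains it.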
for inference_cycle in range(args.rmaxcycles):
middle = model.inference(tmp_img0, tmp_img1)
            middle_ratio = (img0_ratio + img1_ratio) / 2
if args.ratio - (args.rthreshold / 2) <= middle_ratio <= args.ratio + (args.rthreshold / 2):
break
if args.ratio > middle_ratio:
tmp_img0 = middle
img0_ratio = middle_ratio
else:
tmp_img1 = middle
img1_ratio = middle_ratio
img_list.append(middle)
img_list.append(img1)
else:
img_list = [img0, img1]
for i in range(args.exp):
tmp = []
for j in range(len(img_list) - 1):
mid = model.inference(img_list[j], img_list[j + 1])
tmp.append(img_list[j])
tmp.append(mid)
tmp.append(img1)
img_list = tmp
if not os.path.exists('output'):
os.mkdir('output')
for i in range(len(img_list)):
if args.img[0].endswith('.exr') and args.img[1].endswith('.exr'):
cv2.imwrite('output/img{}.exr'.format(i), (img_list[i][0]).numpy().transpose(1, 2, 0)[:h, :w], [cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_HALF])
else:
cv2.imwrite('output/img{}.png'.format(i), (img_list[i][0] * 255).numpy().transpose(1, 2, 0)[:h, :w])
|
[
"megengine.functional.nn.pad"
] |
[((110, 143), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (133, 143), False, 'import warnings\n'), ((154, 227), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Interpolation for a pair of images"""'}), "(description='Interpolation for a pair of images')\n", (177, 227), False, 'import argparse\n'), ((883, 890), 'model.RIFE.Model', 'Model', ([], {}), '()\n', (888, 890), False, 'from model.RIFE import Model\n'), ((1721, 1744), 'megengine.functional.nn.pad', 'F.nn.pad', (['img0', 'padding'], {}), '(img0, padding)\n', (1729, 1744), True, 'import megengine.functional as F\n'), ((1752, 1775), 'megengine.functional.nn.pad', 'F.nn.pad', (['img1', 'padding'], {}), '(img1, padding)\n', (1760, 1775), True, 'import megengine.functional as F\n'), ((1040, 1103), 'cv2.imread', 'cv2.imread', (['args.img[0]', '(cv2.IMREAD_COLOR | cv2.IMREAD_ANYDEPTH)'], {}), '(args.img[0], cv2.IMREAD_COLOR | cv2.IMREAD_ANYDEPTH)\n', (1050, 1103), False, 'import cv2\n'), ((1115, 1178), 'cv2.imread', 'cv2.imread', (['args.img[1]', '(cv2.IMREAD_COLOR | cv2.IMREAD_ANYDEPTH)'], {}), '(args.img[1], cv2.IMREAD_COLOR | cv2.IMREAD_ANYDEPTH)\n', (1125, 1178), False, 'import cv2\n'), ((1327, 1372), 'cv2.imread', 'cv2.imread', (['args.img[0]', 'cv2.IMREAD_UNCHANGED'], {}), '(args.img[0], cv2.IMREAD_UNCHANGED)\n', (1337, 1372), False, 'import cv2\n'), ((1384, 1429), 'cv2.imread', 'cv2.imread', (['args.img[1]', 'cv2.IMREAD_UNCHANGED'], {}), '(args.img[1], cv2.IMREAD_UNCHANGED)\n', (1394, 1429), False, 'import cv2\n'), ((2943, 2967), 'os.path.exists', 'os.path.exists', (['"""output"""'], {}), "('output')\n", (2957, 2967), False, 'import os\n'), ((2973, 2991), 'os.mkdir', 'os.mkdir', (['"""output"""'], {}), "('output')\n", (2981, 2991), False, 'import os\n')]
|
import io
import pickle
import numpy as np
import megengine.functional as F
import megengine.module as M
import megengine.utils.comp_graph_tools as cgtools
from megengine.core._trace_option import set_symbolic_shape
from megengine.jit import trace
from megengine.traced_module import trace_module
set_symbolic_shape(True)
class Main(M.Module):
def forward(self, x):
return x
class PreProcess(M.Module):
def __init__(self):
super().__init__()
self.I = F.ones((1,))
self.M = F.zeros((1,))
def forward(self, data, idx, roi):
N, H, W, C = data.shape
xmax = roi[:, 1, 0]
xmin = roi[:, 0, 0]
ymax = roi[:, 1, 1]
ymin = roi[:, 0, 1]
scale = F.maximum((xmax - xmin) / W, (ymax - ymin) / H)
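        # Build one 3x3 perspective matrix per sample: a uniform scale on the
        # diagonal and the ROI origin in the last column, so warp_perspective
        # maps each output pixel back into its region of interest.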
I = F.broadcast_to(self.I, (N,))
M = F.broadcast_to(self.M, (N, 3, 3))
M[:, 0, 0] = scale
M[:, 0, 2] = xmin
M[:, 1, 1] = scale
M[:, 1, 2] = ymin
M[:, 2, 2] = I
resized = (
F.warp_perspective(
data, M, (H, W), mat_idx=idx, border_mode="CONSTANT", format="NHWC"
)
.transpose(0, 3, 1, 2)
.astype(np.float32)
)
return resized
class Net(M.Module):
def __init__(self, traced_module):
super().__init__()
self.pre_process = PreProcess()
self.traced_module = traced_module
def forward(self, data, idx, roi):
x = self.pre_process(data, idx, roi)
x = self.traced_module(x)
return x
def test_preprocess():
module = Main()
data = F.ones((1, 14, 8, 8), dtype=np.uint8)
traced_module = trace_module(module, data)
obj = pickle.dumps(traced_module)
traced_module = pickle.loads(obj)
module = Net(traced_module)
module.eval()
idx = F.zeros((1,), dtype=np.int32)
roi = F.ones((1, 2, 2), dtype=np.float32)
y = module(data, idx, roi)
traced_module = trace_module(module, data, idx, roi)
np.testing.assert_array_equal(traced_module(data, idx, roi), y)
func = trace(traced_module, capture_as_const=True)
np.testing.assert_array_equal(func(data, idx, roi), y)
model = io.BytesIO()
func.dump(model, arg_names=("data", "idx", "roi"))
model.seek(0)
infer_cg = cgtools.GraphInference(model)
np.testing.assert_allclose(
list(
infer_cg.run(
inp_dict={"data": data.numpy(), "idx": idx.numpy(), "roi": roi.numpy()}
).values()
)[0],
y,
atol=1e-6,
)
|
[
"megengine.jit.trace",
"megengine.functional.maximum",
"megengine.functional.zeros",
"megengine.functional.broadcast_to",
"megengine.functional.ones",
"megengine.functional.warp_perspective",
"megengine.core._trace_option.set_symbolic_shape",
"megengine.traced_module.trace_module",
"megengine.utils.comp_graph_tools.GraphInference"
] |
[((300, 324), 'megengine.core._trace_option.set_symbolic_shape', 'set_symbolic_shape', (['(True)'], {}), '(True)\n', (318, 324), False, 'from megengine.core._trace_option import set_symbolic_shape\n'), ((1612, 1649), 'megengine.functional.ones', 'F.ones', (['(1, 14, 8, 8)'], {'dtype': 'np.uint8'}), '((1, 14, 8, 8), dtype=np.uint8)\n', (1618, 1649), True, 'import megengine.functional as F\n'), ((1670, 1696), 'megengine.traced_module.trace_module', 'trace_module', (['module', 'data'], {}), '(module, data)\n', (1682, 1696), False, 'from megengine.traced_module import trace_module\n'), ((1707, 1734), 'pickle.dumps', 'pickle.dumps', (['traced_module'], {}), '(traced_module)\n', (1719, 1734), False, 'import pickle\n'), ((1755, 1772), 'pickle.loads', 'pickle.loads', (['obj'], {}), '(obj)\n', (1767, 1772), False, 'import pickle\n'), ((1833, 1862), 'megengine.functional.zeros', 'F.zeros', (['(1,)'], {'dtype': 'np.int32'}), '((1,), dtype=np.int32)\n', (1840, 1862), True, 'import megengine.functional as F\n'), ((1873, 1908), 'megengine.functional.ones', 'F.ones', (['(1, 2, 2)'], {'dtype': 'np.float32'}), '((1, 2, 2), dtype=np.float32)\n', (1879, 1908), True, 'import megengine.functional as F\n'), ((1960, 1996), 'megengine.traced_module.trace_module', 'trace_module', (['module', 'data', 'idx', 'roi'], {}), '(module, data, idx, roi)\n', (1972, 1996), False, 'from megengine.traced_module import trace_module\n'), ((2076, 2119), 'megengine.jit.trace', 'trace', (['traced_module'], {'capture_as_const': '(True)'}), '(traced_module, capture_as_const=True)\n', (2081, 2119), False, 'from megengine.jit import trace\n'), ((2191, 2203), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (2201, 2203), False, 'import io\n'), ((2292, 2321), 'megengine.utils.comp_graph_tools.GraphInference', 'cgtools.GraphInference', (['model'], {}), '(model)\n', (2314, 2321), True, 'import megengine.utils.comp_graph_tools as cgtools\n'), ((490, 502), 'megengine.functional.ones', 'F.ones', (['(1,)'], {}), '((1,))\n', (496, 502), True, 'import megengine.functional as F\n'), ((520, 533), 'megengine.functional.zeros', 'F.zeros', (['(1,)'], {}), '((1,))\n', (527, 533), True, 'import megengine.functional as F\n'), ((734, 781), 'megengine.functional.maximum', 'F.maximum', (['((xmax - xmin) / W)', '((ymax - ymin) / H)'], {}), '((xmax - xmin) / W, (ymax - ymin) / H)\n', (743, 781), True, 'import megengine.functional as F\n'), ((794, 822), 'megengine.functional.broadcast_to', 'F.broadcast_to', (['self.I', '(N,)'], {}), '(self.I, (N,))\n', (808, 822), True, 'import megengine.functional as F\n'), ((835, 868), 'megengine.functional.broadcast_to', 'F.broadcast_to', (['self.M', '(N, 3, 3)'], {}), '(self.M, (N, 3, 3))\n', (849, 868), True, 'import megengine.functional as F\n'), ((1030, 1121), 'megengine.functional.warp_perspective', 'F.warp_perspective', (['data', 'M', '(H, W)'], {'mat_idx': 'idx', 'border_mode': '"""CONSTANT"""', 'format': '"""NHWC"""'}), "(data, M, (H, W), mat_idx=idx, border_mode='CONSTANT',\n format='NHWC')\n", (1048, 1121), True, 'import megengine.functional as F\n')]
|
from sqlalchemy.sql.schema import MetaData
from sqlmodel import Field, SQLModel
from datetime import datetime
from uuid import UUID, uuid4
class DbtLog(SQLModel, table=True):
"""
Table: __Dbt_Log
"""
__tablename__ = "__Dbt_Log"
Id: UUID = Field(default_factory=uuid4, primary_key=True)
TaskId: str = Field(max_length=128)
Data: str = Field(index=False)
Timestamp: datetime = Field(index=False, default_factory=datetime.utcnow)
|
[
"sqlmodel.Field"
] |
[((262, 308), 'sqlmodel.Field', 'Field', ([], {'default_factory': 'uuid4', 'primary_key': '(True)'}), '(default_factory=uuid4, primary_key=True)\n', (267, 308), False, 'from sqlmodel import Field, SQLModel\n'), ((327, 348), 'sqlmodel.Field', 'Field', ([], {'max_length': '(128)'}), '(max_length=128)\n', (332, 348), False, 'from sqlmodel import Field, SQLModel\n'), ((365, 383), 'sqlmodel.Field', 'Field', ([], {'index': '(False)'}), '(index=False)\n', (370, 383), False, 'from sqlmodel import Field, SQLModel\n'), ((410, 461), 'sqlmodel.Field', 'Field', ([], {'index': '(False)', 'default_factory': 'datetime.utcnow'}), '(index=False, default_factory=datetime.utcnow)\n', (415, 461), False, 'from sqlmodel import Field, SQLModel\n')]
|
#!/usr/bin/env python3
# Copyright (c) 2020 <NAME>
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
"""LARS optimizer
References: https://github.com/rwightman/pytorch-image-models/blob/master/timm/optim/lars.py
"""
import os
from typing import Iterable, Union
import megengine.functional as F
from megengine import Parameter, tensor
from megengine.functional.inplace import _inplace_add_
from megengine.optimizer import Optimizer
class LARS(Optimizer):
r"""Implements LARS algorithm.
LARS is proposed in `"Large Batch Optimization for Deep Learning: Training BERT in 76 minutes"
<https://arxiv.org/abs/1904.00962>`_.
Args:
params: iterable of parameters to optimize or dicts defining parameter groups.
lr: learning rate.
momentum: momentum factor. Default: ``0.0``
nesterov: enables Nesterov momentum. Default: ``False``
weight_decay: weight decay (L2 penalty). Default: ``0.0``
always_adapt: apply adaptive lr to ``0.0`` weight decay parameter. Default: ``False``
"""
def __init__(
self,
params: Union[Iterable[Parameter], dict],
lr: float,
momentum: float = 0.0,
nesterov: bool = False,
weight_decay: float = 0.0,
always_adapt: bool = False,
):
if lr < 0.0:
raise ValueError("Invalid learning rate: {}".format(lr))
if momentum < 0.0:
raise ValueError("Invalid momentum value: {}".format(momentum))
if weight_decay < 0.0:
raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
if nesterov and momentum <= 0:
raise ValueError("Nesterov momentum requires a momentum")
defaults = dict(lr=lr, momentum=momentum, weight_decay=weight_decay)
super().__init__(params, defaults)
self.nesterov = nesterov
self.always_adapt = always_adapt
self._disable_type_convert = True
def _create_state(self, param_group):
if param_group["momentum"] != 0.0:
for param in param_group["params"]:
self._add_state(param, "momentum_buffer")
def _updates(self, param_group):
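        # LARS step: add weight decay to the gradient, then scale each
        # parameter's update by the trust ratio ||param|| / ||grad|| before
        # applying the learning rate (with optional Nesterov momentum).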
lr = param_group["lr"]
weight_decay = param_group["weight_decay"]
momentum = param_group["momentum"]
        # since `convert_inputs` is disabled for param updates,
        # scalars should be explicitly converted to tensors
_lr = tensor(lr)
_weight_decay = tensor(weight_decay)
_momentum = tensor(momentum)
c1, c05, c0 = map(tensor, (1.0, 0.5, 0.0))
def norm(vec):
return F.sum(vec * vec) ** c05
inplace_mode = int(os.getenv("MEGENGINE_INPLACE_UPDATE", "0"))
if inplace_mode:
_neg_lr = tensor(-lr)
for param in param_group["params"]:
if param.grad is None:
continue
grad = param.grad
if weight_decay != 0.0:
grad = grad + param * _weight_decay
p_norm = norm(param.flatten())
if inplace_mode:
if momentum != 0.0:
v = self._state[param]["momentum_buffer"]
_inplace_add_(v, grad, alpha=_momentum, beta=c1)
if self.nesterov:
grad = grad + v * _momentum
else:
grad = v
d_norm = norm(grad.flatten())
trust_ratio = (
p_norm / d_norm
if (self.always_adapt or weight_decay > 0) and p_norm > c0 and d_norm > c0
else c1
)
_inplace_add_(param, grad, alpha=c1, beta=_neg_lr * trust_ratio)
continue
if momentum != 0.0:
v = self._state[param]["momentum_buffer"]
v *= _momentum
v += grad
if self.nesterov:
grad = grad + v * _momentum
else:
grad = v
d_norm = norm(grad.flatten())
trust_ratio = (
p_norm / d_norm
if (self.always_adapt or weight_decay > 0) and p_norm > c0 and d_norm > c0
else c1
)
param -= _lr * trust_ratio * grad
|
[
"megengine.tensor",
"megengine.functional.inplace._inplace_add_",
"megengine.functional.sum"
] |
[((2538, 2548), 'megengine.tensor', 'tensor', (['lr'], {}), '(lr)\n', (2544, 2548), False, 'from megengine import Parameter, tensor\n'), ((2573, 2593), 'megengine.tensor', 'tensor', (['weight_decay'], {}), '(weight_decay)\n', (2579, 2593), False, 'from megengine import Parameter, tensor\n'), ((2614, 2630), 'megengine.tensor', 'tensor', (['momentum'], {}), '(momentum)\n', (2620, 2630), False, 'from megengine import Parameter, tensor\n'), ((2778, 2820), 'os.getenv', 'os.getenv', (['"""MEGENGINE_INPLACE_UPDATE"""', '"""0"""'], {}), "('MEGENGINE_INPLACE_UPDATE', '0')\n", (2787, 2820), False, 'import os\n'), ((2869, 2880), 'megengine.tensor', 'tensor', (['(-lr)'], {}), '(-lr)\n', (2875, 2880), False, 'from megengine import Parameter, tensor\n'), ((2726, 2742), 'megengine.functional.sum', 'F.sum', (['(vec * vec)'], {}), '(vec * vec)\n', (2731, 2742), True, 'import megengine.functional as F\n'), ((3766, 3830), 'megengine.functional.inplace._inplace_add_', '_inplace_add_', (['param', 'grad'], {'alpha': 'c1', 'beta': '(_neg_lr * trust_ratio)'}), '(param, grad, alpha=c1, beta=_neg_lr * trust_ratio)\n', (3779, 3830), False, 'from megengine.functional.inplace import _inplace_add_\n'), ((3297, 3345), 'megengine.functional.inplace._inplace_add_', '_inplace_add_', (['v', 'grad'], {'alpha': '_momentum', 'beta': 'c1'}), '(v, grad, alpha=_momentum, beta=c1)\n', (3310, 3345), False, 'from megengine.functional.inplace import _inplace_add_\n')]
|
"""init
Revision ID: 3b7e032d2384
Revises:
Create Date: 2021-10-01 02:25:02.820531
"""
import sqlalchemy as sa
import sqlmodel
from alembic import op
# revision identifiers, used by Alembic.
revision = "3b7e032d2384"
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"prep",
sa.Column("address", sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column("name", sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column("country", sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column("city", sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column("email", sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column("website", sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column("details", sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column("p2p_endpoint", sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column("node_address", sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column("status", sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column("penalty", sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column("grade", sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column("last_updated_block", sa.Integer(), nullable=True),
sa.Column("last_updated_timestamp", sa.Integer(), nullable=True),
sa.Column("created_block", sa.Integer(), nullable=True),
sa.Column("created_timestamp", sa.Integer(), nullable=True),
sa.Column("logo_256", sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column("logo_1024", sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column("logo_svg", sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column("steemit", sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column("twitter", sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column("youtube", sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column("facebook", sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column("github", sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column("reddit", sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column("keybase", sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column("telegram", sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column("wechat", sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column("api_endpoint", sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column("server_country", sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column("server_city", sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column("server_type", sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.PrimaryKeyConstraint("address"),
)
op.create_index(op.f("ix_prep_address"), "prep", ["address"], unique=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f("ix_prep_address"), table_name="prep")
op.drop_table("prep")
# ### end Alembic commands ###
|
[
"sqlmodel.sql.sqltypes.AutoString"
] |
[((3285, 3306), 'alembic.op.drop_table', 'op.drop_table', (['"""prep"""'], {}), "('prep')\n", (3298, 3306), False, 'from alembic import op\n'), ((2977, 3011), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""address"""'], {}), "('address')\n", (3000, 3011), True, 'import sqlalchemy as sa\n'), ((3039, 3062), 'alembic.op.f', 'op.f', (['"""ix_prep_address"""'], {}), "('ix_prep_address')\n", (3043, 3062), False, 'from alembic import op\n'), ((3237, 3260), 'alembic.op.f', 'op.f', (['"""ix_prep_address"""'], {}), "('ix_prep_address')\n", (3241, 3260), False, 'from alembic import op\n'), ((429, 463), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (461, 463), False, 'import sqlmodel\n'), ((508, 542), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (540, 542), False, 'import sqlmodel\n'), ((589, 623), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (621, 623), False, 'import sqlmodel\n'), ((667, 701), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (699, 701), False, 'import sqlmodel\n'), ((746, 780), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (778, 780), False, 'import sqlmodel\n'), ((827, 861), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (859, 861), False, 'import sqlmodel\n'), ((908, 942), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (940, 942), False, 'import sqlmodel\n'), ((994, 1028), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (1026, 1028), False, 'import sqlmodel\n'), ((1080, 1114), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (1112, 1114), False, 'import sqlmodel\n'), ((1160, 1194), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (1192, 1194), False, 'import sqlmodel\n'), ((1241, 1275), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (1273, 1275), False, 'import sqlmodel\n'), ((1320, 1354), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (1352, 1354), False, 'import sqlmodel\n'), ((1412, 1424), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (1422, 1424), True, 'import sqlalchemy as sa\n'), ((1486, 1498), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (1496, 1498), True, 'import sqlalchemy as sa\n'), ((1551, 1563), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (1561, 1563), True, 'import sqlalchemy as sa\n'), ((1620, 1632), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (1630, 1632), True, 'import sqlalchemy as sa\n'), ((1680, 1714), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (1712, 1714), False, 'import sqlmodel\n'), ((1763, 1797), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (1795, 1797), False, 'import sqlmodel\n'), ((1845, 1879), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (1877, 1879), False, 'import sqlmodel\n'), ((1926, 1960), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (1958, 1960), False, 'import sqlmodel\n'), ((2007, 2041), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (2039, 2041), False, 'import sqlmodel\n'), ((2088, 2122), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (2120, 2122), False, 'import sqlmodel\n'), ((2170, 2204), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (2202, 2204), False, 'import sqlmodel\n'), ((2250, 2284), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (2282, 2284), False, 'import sqlmodel\n'), ((2330, 2364), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (2362, 2364), False, 'import sqlmodel\n'), ((2411, 2445), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (2443, 2445), False, 'import sqlmodel\n'), ((2493, 2527), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (2525, 2527), False, 'import sqlmodel\n'), ((2573, 2607), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (2605, 2607), False, 'import sqlmodel\n'), ((2659, 2693), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (2691, 2693), False, 'import sqlmodel\n'), ((2747, 2781), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (2779, 2781), False, 'import sqlmodel\n'), ((2832, 2866), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (2864, 2866), False, 'import sqlmodel\n'), ((2917, 2951), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (2949, 2951), False, 'import sqlmodel\n')]
|
from typing import TYPE_CHECKING, List, Optional
from sqlalchemy import Column
from sqlalchemy.dialects.postgresql import ARRAY
from sqlmodel import AutoString, Field, Relationship, SQLModel
if TYPE_CHECKING:
from .application import Application, ApplicationList
class SchoolBase(SQLModel):
name: str
abbreviations: List[str] = Field(
default=[],
sa_column=Column(ARRAY(AutoString()), nullable=False),
)
alternatives: List[str] = Field(
default=[],
sa_column=Column(ARRAY(AutoString()), nullable=False),
)
class School(SchoolBase, table=True):
__tablename__ = "schools"
id: Optional[str] = Field(default=None, primary_key=True, nullable=False)
needs_review: bool = False
applications: List["Application"] = Relationship(back_populates="school")
class SchoolCreate(SchoolBase):
pass
class SchoolList(SQLModel):
id: str
name: str
needs_review: bool
class SchoolRead(SchoolBase):
id: str
needs_review: bool
applications: List["ApplicationList"]
class SchoolUpdate(SQLModel):
name: Optional[str]
needs_review: Optional[bool]
abbreviations: Optional[List[str]]
alternatives: Optional[List[str]]
|
[
"sqlmodel.Relationship",
"sqlmodel.AutoString",
"sqlmodel.Field"
] |
[((662, 715), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)', 'nullable': '(False)'}), '(default=None, primary_key=True, nullable=False)\n', (667, 715), False, 'from sqlmodel import AutoString, Field, Relationship, SQLModel\n'), ((788, 825), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""school"""'}), "(back_populates='school')\n", (800, 825), False, 'from sqlmodel import AutoString, Field, Relationship, SQLModel\n'), ((403, 415), 'sqlmodel.AutoString', 'AutoString', ([], {}), '()\n', (413, 415), False, 'from sqlmodel import AutoString, Field, Relationship, SQLModel\n'), ((529, 541), 'sqlmodel.AutoString', 'AutoString', ([], {}), '()\n', (539, 541), False, 'from sqlmodel import AutoString, Field, Relationship, SQLModel\n')]
|
import os, sys
import numpy as np
from config import config
from det_opr.bbox_opr import box_overlap_opr, bbox_transform_opr
import megengine as mge
from megengine import functional as F
import pdb
def _compute_center(boxes):
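    # Centers (x, y) of boxes given as (x1, y1, x2, y2).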
ptrx = 0.5 * (boxes[:, 0] + boxes[:, 2])
ptry = 0.5 * (boxes[:, 1] + boxes[:, 3])
centre = F.stack([ptrx, ptry], axis=1)
return centre
def _compute_pos_area(gtboxes, ratio = 0.3):
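    # Central region of each gt box: a (2 * ratio * W) x (2 * ratio * H)
    # rectangle around the box center, used below to test anchor centers.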
H, W = gtboxes[:, 3] - gtboxes[:, 1], gtboxes[:, 2] - gtboxes[:, 0]
centres = _compute_center(gtboxes)
l = centres[:, 0] - ratio * W
r = centres[:, 0] + ratio * W
t = centres[:, 1] - ratio * H
b = centres[:, 1] + ratio * H
boundary = F.stack([l, t, r, b], axis = 1)
return boundary
def _anchor_double_target(gt_boxes, im_info, all_anchors):
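    # ATSS-style assignment: per FPN level, pick the anchors closest to each gt
    # center, set the IoU threshold to mean + std of their IoUs, and keep only
    # candidates whose centers fall inside the central region of the gt box.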
gt_boxes, im_info = gt_boxes.detach(), im_info.detach()
all_anchors = all_anchors.detach()
gt_boxes = gt_boxes[:im_info[5].astype(np.int32), :]
dummy = -F.ones([1, gt_boxes.shape[1]]).to(gt_boxes.device)
gt_boxes = F.concat([gt_boxes, dummy], axis=0)
valid_mask = 1 - (gt_boxes[:, 4] < 0).astype(np.float32)
anchor_centers = _compute_center(all_anchors)
gtboxes_centers = _compute_center(gt_boxes)
# gtboxes_centers = gtboxes_centers * valid_mask.unsqueeze(1)
gtboxes_centers = gtboxes_centers * F.expand_dims(valid_mask, axis=1)
N, K = all_anchors.shape[0], gt_boxes.shape[0]
an_centers = F.expand_dims(anchor_centers, axis=1)
gt_centers = F.expand_dims(gtboxes_centers, axis=0)
# an_centers = anchor_centers.unsqueeze(1).repeat(1, K, 1)
# gt_centers = gtboxes_centers.unsqueeze(0).repeat(N, 1, 1)
distance = F.abs(an_centers - gt_centers)
distance = F.sqrt(F.pow(distance, 2).sum(axis=2))
start = 0
end = 5
overlaps = box_overlap_opr(all_anchors[:, :4], gt_boxes[:, :4])
overlaps *= F.expand_dims(valid_mask, axis=0)
default_num = 16
ious_list = []
for l in range(start, end):
_, index = F.cond_take(all_anchors[:, 4] == l, all_anchors[:, 4])
level_dist = distance[index, :].transpose(1, 0)
ious = overlaps[index, :].transpose(1, 0)
sorted_index = F.argsort(level_dist, descending=False)
n = min(sorted_index.shape[1], default_num)
ious = F.gather(ious, 1, sorted_index[:, :n]).transpose(1, 0)
ious_list.append(ious)
ious = F.concat(ious_list, axis=0)
mean_var = F.mean(ious, axis = 0)
std_var = F.std(ious, 0)
iou_thresh_per_gt = mean_var + std_var
iou_thresh_per_gt = F.maximum(iou_thresh_per_gt, 0.2)
# limits the anchor centers in the gtboxes
N, K = all_anchors.shape[0], gt_boxes.shape[0]
anchor_points = an_centers
pos_area = _compute_pos_area(gt_boxes, 0.3)
# pos_area = pos_area.unsqueeze(0).repeat(N, 1, 1)
pos_area = F.broadcast_to(F.expand_dims(pos_area, axis=0), (N, K, pos_area.shape[-1]))
l = anchor_points[:, :, 0] - pos_area[:, :, 0]
r = pos_area[:, :, 2] - anchor_points[:, :, 0]
t = anchor_points[:, :, 1] - pos_area[:, :, 1]
b = pos_area[:, :, 3] - anchor_points[:, :, 1]
is_in_gt = F.stack([l, r, t, b], axis=2)
is_in_gt = is_in_gt.min(axis = 2) > 0.1
valid_mask = (overlaps >= F.expand_dims(iou_thresh_per_gt, axis=0)) * is_in_gt.astype(np.float32)
ious = overlaps * valid_mask
sorted_index = F.argsort(ious, 1)
sorted_overlaps = F.gather(ious, 1, sorted_index)
max_overlaps = sorted_overlaps[:, :2].flatten()
argmax_overlaps = sorted_index[:, :2].flatten()
n, c = all_anchors.shape
device = all_anchors.device
labels = -F.ones(2 * n).to(device)
positive_mask = (max_overlaps >= 0.2).to(device).astype(np.float32)
negative_mask = (max_overlaps < 0.2).to(device).astype(np.float32)
labels = positive_mask + labels * (1 - positive_mask) * (1 - negative_mask)
bbox_targets = gt_boxes[argmax_overlaps, :4]
all_anchors = F.broadcast_to(F.expand_dims(all_anchors, axis=1), (n,2, c)).reshape(-1, c)
bbox_targets = bbox_transform_opr(all_anchors[:, :4], bbox_targets)
labels_cat = gt_boxes[argmax_overlaps, 4]
labels_cat = labels_cat * (1 - F.equal(labels, -1).astype(np.float32)) - F.equal(labels, -1).astype(np.float32)
return labels, bbox_targets, labels_cat
def _anchor_target(gt_boxes, im_info, all_anchors):
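    # Variant of _anchor_double_target that keeps a single best anchor per
    # position and clamps the adaptive IoU threshold at 0.35.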
gt_boxes, im_info = gt_boxes.detach(), im_info.detach()
all_anchors = all_anchors.detach()
    gt_boxes = gt_boxes[:im_info[5].astype(np.int32), :]
valid_mask = 1 - (gt_boxes[:, 4] < 0).astype(np.float32)
anchor_centers = _compute_center(all_anchors)
    gtboxes_centers = _compute_center(gt_boxes) * F.expand_dims(valid_mask, axis=1)
N, K = all_anchors.shape[0], gt_boxes.shape[0]
# an_centers = anchor_centers.unsqueeze(1).repeat(1, K, 1)
an_centers = F.expand_dims(anchor_centers, axis=1)
gt_centers = F.expand_dims(gtboxes_centers, axis=0)
# gt_centers = gtboxes_centers.unsqueeze(0).repeat(N, 1, 1)
distance = F.abs(an_centers - gt_centers)
distance = F.sqrt(F.pow(distance, 2).sum(axis=2))
start = 0
end = 5
overlaps = box_overlap_opr(all_anchors[:, :4], gt_boxes[:, :4])
    overlaps = overlaps * F.expand_dims(valid_mask, axis=0)
default_num = 9
ious_list = []
for l in range(start, end):
        _, index = F.cond_take(all_anchors[:, 4] == l, all_anchors[:, 4])
        level_dist = distance[index, :].transpose(1, 0)
        ious = overlaps[index, :].transpose(1, 0)
        sorted_index = F.argsort(level_dist, descending=False)
        n = min(default_num, sorted_index.shape[1])
        ious = F.gather(ious, 1, sorted_index[:, :n]).transpose(1, 0)
ious_list.append(ious)
ious = F.concat(ious_list, axis=0)
    mean_var = F.mean(ious, axis=0)
    std_var = F.std(ious, 0)
    iou_thresh_per_gt = mean_var + std_var
    iou_thresh_per_gt = F.maximum(iou_thresh_per_gt, 0.35)
n = iou_thresh_per_gt.shape[0]
# limits the anchor centers in the gtboxes
N, K = all_anchors.shape[0], gt_boxes.shape[0]
anchor_points = an_centers
    proxies = F.broadcast_to(F.expand_dims(gt_boxes, axis=0), (N, K, gt_boxes.shape[-1]))
l = anchor_points[:, :, 0] - proxies[:, :, 0]
r = proxies[:, :, 2] - anchor_points[:, :, 0]
t = anchor_points[:, :, 1] - proxies[:, :, 1]
b = proxies[:, :, 3] - anchor_points[:, :, 1]
is_in_gt = F.stack([l, r, t, b], axis=2)
is_in_gt = is_in_gt.min(axis = 2) > 0.1
    valid_mask = (overlaps >= F.expand_dims(iou_thresh_per_gt, axis=0)) * is_in_gt.astype(np.float32)
ious = overlaps * valid_mask
    argmax_overlaps = F.argmax(ious, axis=1)
    max_overlaps = F.gather(ious, 1, F.expand_dims(argmax_overlaps, 1)).flatten()
n = all_anchors.shape[0]
labels = -F.ones(n)
positive_mask = max_overlaps > 0
negative_mask = max_overlaps < config.rpn_negative_overlap
labels = positive_mask + labels * (1 - positive_mask) * (1 - negative_mask)
bbox_targets = gt_boxes[argmax_overlaps, :4]
bbox_targets = bbox_transform_opr(all_anchors[:, :4], bbox_targets)
labels_cat = gt_boxes[argmax_overlaps, 4]
    labels_cat = labels_cat * (1 - F.equal(labels, 0).astype(np.float32))
    labels_cat = labels_cat * (1 - F.equal(labels, -1).astype(np.float32)) - F.equal(labels, -1).astype(np.float32)
return labels, bbox_targets, labels_cat
def rpn_anchor_target_opr(gt_boxes, im_info, anchors):
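    # Assign labels and box targets image by image, then suppress anchors that
    # overlap ignore regions before stacking the whole batch.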
rpn_label_list, rpn_target_boxes_list, iou_thresh_list = [], [], []
for i in range(config.train_batch_per_gpu):
rpn_labels, rpn_target_boxes, _ = _anchor_double_target(gt_boxes[i], im_info[i], anchors)
rpn_labels = rpn_labels.reshape(-1, 2)
c = rpn_target_boxes.shape[1]
rpn_target_boxes = rpn_target_boxes.reshape(-1, 2, c)
# mask the anchors overlapping with ignore regions
ignore_label = mask_anchor_opr(gt_boxes[i], im_info[i], anchors, rpn_labels[:, 0])
rpn_labels = rpn_labels - F.equal(rpn_labels, 0).astype(np.float32) * F.expand_dims(ignore_label < 0, 1).astype(np.float32)
# rpn_labels = rpn_labels - rpn_labels.eq(0).astype(np.float32) * (ignore_label < 0).unsqueeze(1).astype(np.float32)
rpn_label_list.append(F.expand_dims(rpn_labels, 0))
rpn_target_boxes_list.append(F.expand_dims(rpn_target_boxes, 0))
rpn_labels = F.concat(rpn_label_list, axis = 0)
rpn_target_boxes = F.concat(rpn_target_boxes_list, axis = 0)
return rpn_labels, rpn_target_boxes
def mask_anchor_opr(gtboxes, im_info, anchors, labels):
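    # Flag anchors whose intersection-over-anchor-area (IoA) with any ignored
    # gt box exceeds 0.5, so their background labels can be cleared.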
eps = 1e-6
gtboxes = gtboxes[:im_info[5].astype(np.int32), :]
ignore_mask = (gtboxes[:, 4] < 0).astype(np.float32)
mask_flag = F.zeros(labels.shape[0])
N, K = anchors.shape[0], gtboxes.shape[0]
p_pred = F.broadcast_to(F.expand_dims(anchors, 1), (N, K, anchors.shape[1]))
p_gt = F.broadcast_to(F.expand_dims(gtboxes, 0), (N, K, gtboxes.shape[1]))
max_off = F.concat([F.maximum(p_pred[:,:, :2], p_gt[:,:,:2]),
F.minimum(p_pred[:, :, 2:4], p_gt[:, :, 2:4])],
axis = 2)
I = F.maximum(max_off[:, :, 2] - max_off[:, :, 0] + 1, 0) * F.maximum(
max_off[:, :, 3] - max_off[:, :, 1] + 1, 0)
A = F.maximum(p_pred[:, :, 2] - p_pred[:, :, 0] + 1, 0) * F.maximum(
p_pred[:, :, 3] - p_pred[:, :, 1] + 1, 0)
# I = F.maximum(I, 0)
# A = F.maximum(A, 0)
IoA = I / (A + eps)
IoA = IoA * F.expand_dims(ignore_mask, 0)
mask_flag = (IoA > 0.5).sum(axis=1) > 0
labels = labels - F.equal(labels, 0).astype(np.float32) * mask_flag.astype(np.float32)
return labels
def rpn_anchor_target_opr_impl(
gt_boxes, im_info, anchors, clobber_positives = True, ignore_label=-1,
background_label=0):
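    # Classic RPN assignment: the best anchor of each gt plus every anchor with
    # IoU >= rpn_positive_overlap becomes foreground.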
gt_boxes, im_info = gt_boxes.detach(), im_info.detach()
anchors = anchors.detach()
# NOTE: For multi-gpu version, this function should be re-written
a_shp0 = anchors.shape[0]
    valid_gt_boxes = gt_boxes[:im_info[5].astype(np.int32), :]
    valid_mask = (valid_gt_boxes[:, 4] > 0).astype(np.float32)
    overlaps = box_overlap_opr(anchors[:, :4], valid_gt_boxes[:, :4])
    overlaps = overlaps * F.expand_dims(valid_mask, 0)
    argmax_overlaps = F.argmax(overlaps, axis=1)
    max_overlaps = F.gather(overlaps, 1, F.expand_dims(argmax_overlaps, 1)).flatten()
    gt_argmax_overlaps = F.argmax(overlaps, axis=0)
    gt_max_overlaps = F.gather(overlaps, 0, F.expand_dims(gt_argmax_overlaps, 0))
    cond_max_overlaps = F.equal(overlaps, gt_max_overlaps).astype(np.float32)
    cmo_shape1 = cond_max_overlaps.shape[1]
    _, gt_argmax_overlaps = F.cond_take(
        cond_max_overlaps.flatten() > 0, cond_max_overlaps.flatten())
    gt_argmax_overlaps = gt_argmax_overlaps // cmo_shape1
    labels = ignore_label * F.ones(a_shp0)
    fg_mask = (max_overlaps >= config.rpn_positive_overlap).astype(np.float32)
    fg_mask[gt_argmax_overlaps] = 1
    _, index = F.cond_take(fg_mask > 0, fg_mask)
    labels[index] = 1
    bbox_targets = bbox_transform_opr(anchors, valid_gt_boxes[argmax_overlaps, :4])
# fg_mask[gt_argmax_overlaps]
# --- megbrain fashion code ---
# argmax_overlaps = O.Argmax(overlaps, axis=1)
# max_overlaps = O.IndexingOneHot(overlaps, 1, argmax_overlaps)
# gt_argmax_overlaps = O.Argmax(overlaps, axis=0)
# gt_max_overlaps = O.IndexingOneHot(overlaps, 0, gt_argmax_overlaps)
# cond_max_overlaps = overlaps.eq(gt_max_overlaps.add_axis(0))
# cmo_shape1 = cond_max_overlaps.shape[1]
# gt_argmax_overlaps = \
# O.CondTake(cond_max_overlaps.flatten(), cond_max_overlaps.flatten(),
# 'EQ',1).outputs[1]
# # why should be divided by the cmo_shape1
# gt_argmax_overlaps = gt_argmax_overlaps // cmo_shape1
# labels = O.ones(a_shp0) * ignore_label
# const_one = O.ConstProvider(1.0)
# if not clobber_positives:
# labels = labels * (max_overlaps >= config.rpn_negative_overlap)
# fg_mask = (max_overlaps >= config.rpn_positive_overlap)
# fg_mask = fg_mask.set_ai[gt_argmax_overlaps](
# const_one.broadcast(gt_argmax_overlaps.shape))
# fg_mask_ind = O.CondTake(fg_mask, fg_mask, 'EQ', 1).outputs[1]
# labels = labels.set_ai[fg_mask_ind](const_one.broadcast(fg_mask_ind.shape))
# if clobber_positives:
# labels = labels * (max_overlaps >= config.rpn_negative_overlap)
# Here, we compute the targets for each anchors
# bbox_targets = bbox_transform_opr(
# anchors, valid_gt_boxes.ai[argmax_overlaps, :4])
return labels, bbox_targets
|
[
"megengine.functional.zeros",
"megengine.functional.gather",
"megengine.functional.minimum",
"megengine.functional.mean",
"megengine.functional.abs",
"megengine.functional.stack",
"megengine.functional.std",
"megengine.functional.maximum",
"megengine.functional.argsort",
"megengine.functional.cond_take",
"megengine.functional.expand_dims",
"megengine.functional.equal",
"megengine.functional.concat",
"megengine.functional.ones",
"megengine.functional.pow"
] |
[((330, 359), 'megengine.functional.stack', 'F.stack', (['[ptrx, ptry]'], {'axis': '(1)'}), '([ptrx, ptry], axis=1)\n', (337, 359), True, 'from megengine import functional as F\n'), ((687, 716), 'megengine.functional.stack', 'F.stack', (['[l, t, r, b]'], {'axis': '(1)'}), '([l, t, r, b], axis=1)\n', (694, 716), True, 'from megengine import functional as F\n'), ((1040, 1075), 'megengine.functional.concat', 'F.concat', (['[gt_boxes, dummy]'], {'axis': '(0)'}), '([gt_boxes, dummy], axis=0)\n', (1048, 1075), True, 'from megengine import functional as F\n'), ((1445, 1482), 'megengine.functional.expand_dims', 'F.expand_dims', (['anchor_centers'], {'axis': '(1)'}), '(anchor_centers, axis=1)\n', (1458, 1482), True, 'from megengine import functional as F\n'), ((1500, 1538), 'megengine.functional.expand_dims', 'F.expand_dims', (['gtboxes_centers'], {'axis': '(0)'}), '(gtboxes_centers, axis=0)\n', (1513, 1538), True, 'from megengine import functional as F\n'), ((1682, 1712), 'megengine.functional.abs', 'F.abs', (['(an_centers - gt_centers)'], {}), '(an_centers - gt_centers)\n', (1687, 1712), True, 'from megengine import functional as F\n'), ((1813, 1865), 'det_opr.bbox_opr.box_overlap_opr', 'box_overlap_opr', (['all_anchors[:, :4]', 'gt_boxes[:, :4]'], {}), '(all_anchors[:, :4], gt_boxes[:, :4])\n', (1828, 1865), False, 'from det_opr.bbox_opr import box_overlap_opr, bbox_transform_opr\n'), ((1882, 1915), 'megengine.functional.expand_dims', 'F.expand_dims', (['valid_mask'], {'axis': '(0)'}), '(valid_mask, axis=0)\n', (1895, 1915), True, 'from megengine import functional as F\n'), ((2401, 2428), 'megengine.functional.concat', 'F.concat', (['ious_list'], {'axis': '(0)'}), '(ious_list, axis=0)\n', (2409, 2428), True, 'from megengine import functional as F\n'), ((2444, 2464), 'megengine.functional.mean', 'F.mean', (['ious'], {'axis': '(0)'}), '(ious, axis=0)\n', (2450, 2464), True, 'from megengine import functional as F\n'), ((2481, 2495), 'megengine.functional.std', 'F.std', (['ious', '(0)'], {}), '(ious, 0)\n', (2486, 2495), True, 'from megengine import functional as F\n'), ((2564, 2597), 'megengine.functional.maximum', 'F.maximum', (['iou_thresh_per_gt', '(0.2)'], {}), '(iou_thresh_per_gt, 0.2)\n', (2573, 2597), True, 'from megengine import functional as F\n'), ((3143, 3172), 'megengine.functional.stack', 'F.stack', (['[l, r, t, b]'], {'axis': '(2)'}), '([l, r, t, b], axis=2)\n', (3150, 3172), True, 'from megengine import functional as F\n'), ((3372, 3390), 'megengine.functional.argsort', 'F.argsort', (['ious', '(1)'], {}), '(ious, 1)\n', (3381, 3390), True, 'from megengine import functional as F\n'), ((3413, 3444), 'megengine.functional.gather', 'F.gather', (['ious', '(1)', 'sorted_index'], {}), '(ious, 1, sorted_index)\n', (3421, 3444), True, 'from megengine import functional as F\n'), ((4037, 4089), 'det_opr.bbox_opr.bbox_transform_opr', 'bbox_transform_opr', (['all_anchors[:, :4]', 'bbox_targets'], {}), '(all_anchors[:, :4], bbox_targets)\n', (4055, 4089), False, 'from det_opr.bbox_opr import box_overlap_opr, bbox_transform_opr\n'), ((4825, 4862), 'megengine.functional.expand_dims', 'F.expand_dims', (['anchor_centers'], {'axis': '(1)'}), '(anchor_centers, axis=1)\n', (4838, 4862), True, 'from megengine import functional as F\n'), ((4880, 4918), 'megengine.functional.expand_dims', 'F.expand_dims', (['gtboxes_centers'], {'axis': '(0)'}), '(gtboxes_centers, axis=0)\n', (4893, 4918), True, 'from megengine import functional as F\n'), ((4999, 5029), 'megengine.functional.abs', 'F.abs', (['(an_centers - gt_centers)'], {}), '(an_centers - gt_centers)\n', (5004, 5029), True, 'from megengine import functional as F\n'), ((5130, 5182), 'det_opr.bbox_opr.box_overlap_opr', 'box_overlap_opr', (['all_anchors[:, :4]', 'gt_boxes[:, :4]'], {}), '(all_anchors[:, :4], gt_boxes[:, :4])\n', (5145, 5182), False, 'from det_opr.bbox_opr import box_overlap_opr, bbox_transform_opr\n'), ((5723, 5750), 'megengine.functional.concat', 'F.concat', (['ious_list'], {'axis': '(0)'}), '(ious_list, axis=0)\n', (5731, 5750), True, 'from megengine import functional as F\n'), ((6343, 6372), 'megengine.functional.stack', 'F.stack', (['[l, r, t, b]'], {'axis': '(2)'}), '([l, r, t, b], axis=2)\n', (6350, 6372), True, 'from megengine import functional as F\n'), ((6947, 6999), 'det_opr.bbox_opr.bbox_transform_opr', 'bbox_transform_opr', (['all_anchors[:, :4]', 'bbox_targets'], {}), '(all_anchors[:, :4], bbox_targets)\n', (6965, 6999), False, 'from det_opr.bbox_opr import box_overlap_opr, bbox_transform_opr\n'), ((8267, 8299), 'megengine.functional.concat', 'F.concat', (['rpn_label_list'], {'axis': '(0)'}), '(rpn_label_list, axis=0)\n', (8275, 8299), True, 'from megengine import functional as F\n'), ((8325, 8364), 'megengine.functional.concat', 'F.concat', (['rpn_target_boxes_list'], {'axis': '(0)'}), '(rpn_target_boxes_list, axis=0)\n', (8333, 8364), True, 'from megengine import functional as F\n'), ((8613, 8637), 'megengine.functional.zeros', 'F.zeros', (['labels.shape[0]'], {}), '(labels.shape[0])\n', (8620, 8637), True, 'from megengine import functional as F\n'), ((10025, 10079), 'det_opr.bbox_opr.box_overlap_opr', 'box_overlap_opr', (['anchors[:, :4]', 'valid_gt_boxes[:, :4]'], {}), '(anchors[:, :4], valid_gt_boxes[:, :4])\n', (10040, 10079), False, 'from det_opr.bbox_opr import box_overlap_opr, bbox_transform_opr\n'), ((10932, 10986), 'det_opr.bbox_opr.bbox_transform_opr', 'bbox_transform_opr', (['anchors', 'valid_gt_boxes[index, :4]'], {}), '(anchors, valid_gt_boxes[index, :4])\n', (10950, 10986), False, 'from det_opr.bbox_opr import box_overlap_opr, bbox_transform_opr\n'), ((1342, 1375), 'megengine.functional.expand_dims', 'F.expand_dims', (['valid_mask'], {'axis': '(1)'}), '(valid_mask, axis=1)\n', (1355, 1375), True, 'from megengine import functional as F\n'), ((2010, 2064), 'megengine.functional.cond_take', 'F.cond_take', (['(all_anchors[:, 4] == l)', 'all_anchors[:, 4]'], {}), '(all_anchors[:, 4] == l, all_anchors[:, 4])\n', (2021, 2064), True, 'from megengine import functional as F\n'), ((2195, 2234), 'megengine.functional.argsort', 'F.argsort', (['level_dist'], {'descending': '(False)'}), '(level_dist, descending=False)\n', (2204, 2234), True, 'from megengine import functional as F\n'), ((2861, 2892), 'megengine.functional.expand_dims', 'F.expand_dims', (['pos_area'], {'axis': '(0)'}), '(pos_area, axis=0)\n', (2874, 2892), True, 'from megengine import functional as F\n'), ((4659, 4692), 'megengine.functional.expand_dims', 'F.expand_dims', (['valid_mask'], {'axis': '(0)'}), '(valid_mask, axis=0)\n', (4672, 4692), True, 'from megengine import functional as F\n'), ((6688, 6697), 'megengine.functional.ones', 'F.ones', (['n'], {}), '(n)\n', (6694, 6697), True, 'from megengine import functional as F\n'), ((8712, 8737), 'megengine.functional.expand_dims', 'F.expand_dims', (['anchors', '(1)'], {}), '(anchors, 1)\n', (8725, 8737), True, 'from megengine import functional as F\n'), ((8791, 8816), 'megengine.functional.expand_dims', 'F.expand_dims', (['gtboxes', '(0)'], {}), '(gtboxes, 0)\n', (8804, 8816), True, 'from megengine import functional as F\n'), ((9035, 9088), 'megengine.functional.maximum', 'F.maximum', (['(max_off[:, :, 2] - max_off[:, :, 0] + 1)', '(0)'], {}), '(max_off[:, :, 2] - max_off[:, :, 0] + 1, 0)\n', (9044, 9088), True, 'from megengine import functional as F\n'), ((9091, 9144), 'megengine.functional.maximum', 'F.maximum', (['(max_off[:, :, 3] - max_off[:, :, 1] + 1)', '(0)'], {}), '(max_off[:, :, 3] - max_off[:, :, 1] + 1, 0)\n', (9100, 9144), True, 'from megengine import functional as F\n'), ((9162, 9213), 'megengine.functional.maximum', 'F.maximum', (['(p_pred[:, :, 2] - p_pred[:, :, 0] + 1)', '(0)'], {}), '(p_pred[:, :, 2] - p_pred[:, :, 0] + 1, 0)\n', (9171, 9213), True, 'from megengine import functional as F\n'), ((9216, 9267), 'megengine.functional.maximum', 'F.maximum', (['(p_pred[:, :, 3] - p_pred[:, :, 1] + 1)', '(0)'], {}), '(p_pred[:, :, 3] - p_pred[:, :, 1] + 1, 0)\n', (9225, 9267), True, 'from megengine import functional as F\n'), ((9374, 9403), 'megengine.functional.expand_dims', 'F.expand_dims', (['ignore_mask', '(0)'], {}), '(ignore_mask, 0)\n', (9387, 9403), True, 'from megengine import functional as F\n'), ((10690, 10704), 'megengine.functional.ones', 'F.ones', (['a_shp0'], {}), '(a_shp0)\n', (10696, 10704), True, 'from megengine import functional as F\n'), ((3247, 3287), 'megengine.functional.expand_dims', 'F.expand_dims', (['iou_thresh_per_gt'], {'axis': '(0)'}), '(iou_thresh_per_gt, axis=0)\n', (3260, 3287), True, 'from megengine import functional as F\n'), ((8146, 8174), 'megengine.functional.expand_dims', 'F.expand_dims', (['rpn_labels', '(0)'], {}), '(rpn_labels, 0)\n', (8159, 8174), True, 'from megengine import functional as F\n'), ((8213, 8247), 'megengine.functional.expand_dims', 'F.expand_dims', (['rpn_target_boxes', '(0)'], {}), '(rpn_target_boxes, 0)\n', (8226, 8247), True, 'from megengine import functional as F\n'), ((8873, 8916), 'megengine.functional.maximum', 'F.maximum', (['p_pred[:, :, :2]', 'p_gt[:, :, :2]'], {}), '(p_pred[:, :, :2], p_gt[:, :, :2])\n', (8882, 8916), True, 'from megengine import functional as F\n'), ((8940, 8985), 'megengine.functional.minimum', 'F.minimum', (['p_pred[:, :, 2:4]', 'p_gt[:, :, 2:4]'], {}), '(p_pred[:, :, 2:4], p_gt[:, :, 2:4])\n', (8949, 8985), True, 'from megengine import functional as F\n'), ((974, 1004), 'megengine.functional.ones', 'F.ones', (['[1, gt_boxes.shape[1]]'], {}), '([1, gt_boxes.shape[1]])\n', (980, 1004), True, 'from megengine import functional as F\n'), ((1735, 1753), 'megengine.functional.pow', 'F.pow', (['distance', '(2)'], {}), '(distance, 2)\n', (1740, 1753), True, 'from megengine import functional as F\n'), ((2302, 2340), 'megengine.functional.gather', 'F.gather', (['ious', '(1)', 'sorted_index[:, :n]'], {}), '(ious, 1, sorted_index[:, :n])\n', (2310, 2340), True, 'from megengine import functional as F\n'), ((3625, 3638), 'megengine.functional.ones', 'F.ones', (['(2 * n)'], {}), '(2 * n)\n', (3631, 3638), True, 'from megengine import functional as F\n'), ((3956, 3990), 'megengine.functional.expand_dims', 'F.expand_dims', (['all_anchors'], {'axis': '(1)'}), '(all_anchors, axis=1)\n', (3969, 3990), True, 'from megengine import functional as F\n'), ((4214, 4233), 'megengine.functional.equal', 'F.equal', (['labels', '(-1)'], {}), '(labels, -1)\n', (4221, 4233), True, 'from megengine import functional as F\n'), ((5052, 5070), 'megengine.functional.pow', 'F.pow', (['distance', '(2)'], {}), '(distance, 2)\n', (5057, 5070), True, 'from megengine import functional as F\n'), ((9471, 9489), 'megengine.functional.equal', 'F.equal', (['labels', '(0)'], {}), '(labels, 0)\n', (9478, 9489), True, 'from megengine import functional as F\n'), ((4172, 4191), 'megengine.functional.equal', 'F.equal', (['labels', '(-1)'], {}), '(labels, -1)\n', (4179, 4191), True, 'from megengine import functional as F\n'), ((7884, 7906), 'megengine.functional.equal', 'F.equal', (['rpn_labels', '(0)'], {}), '(rpn_labels, 0)\n', (7891, 7906), True, 'from megengine import functional as F\n'), ((7928, 7962), 'megengine.functional.expand_dims', 'F.expand_dims', (['(ignore_label < 0)', '(1)'], {}), '(ignore_label < 0, 1)\n', (7941, 7962), True, 'from megengine import functional as F\n')]
|
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import platform
import sys
import pytest
from megengine.core import _config as config
from megengine.core import _trace_option as trace_option
from megengine.core import get_option
from megengine.core._imperative_rt.core2 import (
_get_amp_dtype_autocast,
_get_amp_high_prec_dtype,
_get_amp_low_prec_dtype,
_get_convert_inputs,
)
from megengine.core.tensor import amp
from megengine.device import get_device_count
sys.path.append(os.path.join(os.path.dirname(__file__), "helpers"))
_ngpu = get_device_count("gpu")
@pytest.fixture(autouse=True)
def skip_by_ngpu(request):
if request.node.get_closest_marker("require_ngpu"):
require_ngpu = int(request.node.get_closest_marker("require_ngpu").args[0])
if require_ngpu > _ngpu:
pytest.skip("skipped for ngpu unsatisfied: {}".format(require_ngpu))
@pytest.fixture(autouse=True)
def skip_distributed(request):
if request.node.get_closest_marker("distributed_isolated"):
if platform.system() in ("Windows", "Darwin"):
pytest.skip(
"skipped for distributed unsupported at platform: {}".format(
platform.system()
)
)
@pytest.fixture(autouse=True)
def run_around_tests():
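    # Snapshot every global megengine option before the test runs and assert
    # afterwards that the test did not leave any of them modified.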
env_vars1 = {
"symbolic_shape": trace_option.use_symbolic_shape(),
"async_level": get_option("async_level"),
"enable_drop": get_option("enable_drop"),
"max_recompute_time": get_option("max_recompute_time"),
"catch_worker_execption": get_option("catch_worker_execption"),
"enable_host_compute": get_option("enable_host_compute"),
# "record_computing_path": get_option("record_computing_path"),
"disable_memory_forwarding": get_option("disable_memory_forwarding"),
"enable_dtr_auto_drop": get_option("enable_dtr_auto_drop"),
"enable_dtr_sqrt_sampling": get_option("enable_dtr_sqrt_sampling"),
"dtr_eviction_threshold": get_option("dtr_eviction_threshold"),
"dtr_evictee_minimum_size": get_option("dtr_evictee_minimum_size"),
"benchmark_kernel": config.benchmark_kernel,
"deterministic_kernel": config.deterministic_kernel,
"compute_mode": config._compute_mode,
"conv_format": config._conv_format,
"amp_enabled": amp.enabled,
"convert_inputs": _get_convert_inputs(),
"amp_dtype_autocast": _get_amp_dtype_autocast(),
"amp_high_prec_dtype": _get_amp_high_prec_dtype(),
"amp_low_prec_dtype": _get_amp_low_prec_dtype(),
}
yield
env_vars2 = {
"symbolic_shape": trace_option.use_symbolic_shape(),
"async_level": get_option("async_level"),
"enable_drop": get_option("enable_drop"),
"max_recompute_time": get_option("max_recompute_time"),
"catch_worker_execption": get_option("catch_worker_execption"),
"enable_host_compute": get_option("enable_host_compute"),
# "record_computing_path": get_option("record_computing_path"),
"disable_memory_forwarding": get_option("disable_memory_forwarding"),
"enable_dtr_auto_drop": get_option("enable_dtr_auto_drop"),
"enable_dtr_sqrt_sampling": get_option("enable_dtr_sqrt_sampling"),
"dtr_eviction_threshold": get_option("dtr_eviction_threshold"),
"dtr_evictee_minimum_size": get_option("dtr_evictee_minimum_size"),
"benchmark_kernel": config.benchmark_kernel,
"deterministic_kernel": config.deterministic_kernel,
"compute_mode": config._compute_mode,
"conv_format": config._conv_format,
"amp_enabled": amp.enabled,
"convert_inputs": _get_convert_inputs(),
"amp_dtype_autocast": _get_amp_dtype_autocast(),
"amp_high_prec_dtype": _get_amp_high_prec_dtype(),
"amp_low_prec_dtype": _get_amp_low_prec_dtype(),
}
for key in env_vars1:
assert (
env_vars1[key] == env_vars2[key]
), "{} have been changed after test".format(key)
|
[
"megengine.core._imperative_rt.core2._get_amp_low_prec_dtype",
"megengine.core._imperative_rt.core2._get_amp_high_prec_dtype",
"megengine.device.get_device_count",
"megengine.core._trace_option.use_symbolic_shape",
"megengine.core._imperative_rt.core2._get_amp_dtype_autocast",
"megengine.core.get_option",
"megengine.core._imperative_rt.core2._get_convert_inputs"
] |
[((873, 896), 'megengine.device.get_device_count', 'get_device_count', (['"""gpu"""'], {}), "('gpu')\n", (889, 896), False, 'from megengine.device import get_device_count\n'), ((900, 928), 'pytest.fixture', 'pytest.fixture', ([], {'autouse': '(True)'}), '(autouse=True)\n', (914, 928), False, 'import pytest\n'), ((1213, 1241), 'pytest.fixture', 'pytest.fixture', ([], {'autouse': '(True)'}), '(autouse=True)\n', (1227, 1241), False, 'import pytest\n'), ((1568, 1596), 'pytest.fixture', 'pytest.fixture', ([], {'autouse': '(True)'}), '(autouse=True)\n', (1582, 1596), False, 'import pytest\n'), ((825, 850), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (840, 850), False, 'import os\n'), ((1665, 1698), 'megengine.core._trace_option.use_symbolic_shape', 'trace_option.use_symbolic_shape', ([], {}), '()\n', (1696, 1698), True, 'from megengine.core import _trace_option as trace_option\n'), ((1723, 1748), 'megengine.core.get_option', 'get_option', (['"""async_level"""'], {}), "('async_level')\n", (1733, 1748), False, 'from megengine.core import get_option\n'), ((1773, 1798), 'megengine.core.get_option', 'get_option', (['"""enable_drop"""'], {}), "('enable_drop')\n", (1783, 1798), False, 'from megengine.core import get_option\n'), ((1830, 1862), 'megengine.core.get_option', 'get_option', (['"""max_recompute_time"""'], {}), "('max_recompute_time')\n", (1840, 1862), False, 'from megengine.core import get_option\n'), ((1898, 1934), 'megengine.core.get_option', 'get_option', (['"""catch_worker_execption"""'], {}), "('catch_worker_execption')\n", (1908, 1934), False, 'from megengine.core import get_option\n'), ((1967, 2000), 'megengine.core.get_option', 'get_option', (['"""enable_host_compute"""'], {}), "('enable_host_compute')\n", (1977, 2000), False, 'from megengine.core import get_option\n'), ((2111, 2150), 'megengine.core.get_option', 'get_option', (['"""disable_memory_forwarding"""'], {}), "('disable_memory_forwarding')\n", (2121, 2150), False, 'from megengine.core import get_option\n'), ((2184, 2218), 'megengine.core.get_option', 'get_option', (['"""enable_dtr_auto_drop"""'], {}), "('enable_dtr_auto_drop')\n", (2194, 2218), False, 'from megengine.core import get_option\n'), ((2256, 2294), 'megengine.core.get_option', 'get_option', (['"""enable_dtr_sqrt_sampling"""'], {}), "('enable_dtr_sqrt_sampling')\n", (2266, 2294), False, 'from megengine.core import get_option\n'), ((2330, 2366), 'megengine.core.get_option', 'get_option', (['"""dtr_eviction_threshold"""'], {}), "('dtr_eviction_threshold')\n", (2340, 2366), False, 'from megengine.core import get_option\n'), ((2404, 2442), 'megengine.core.get_option', 'get_option', (['"""dtr_evictee_minimum_size"""'], {}), "('dtr_evictee_minimum_size')\n", (2414, 2442), False, 'from megengine.core import get_option\n'), ((2710, 2731), 'megengine.core._imperative_rt.core2._get_convert_inputs', '_get_convert_inputs', ([], {}), '()\n', (2729, 2731), False, 'from megengine.core._imperative_rt.core2 import _get_amp_dtype_autocast, _get_amp_high_prec_dtype, _get_amp_low_prec_dtype, _get_convert_inputs\n'), ((2763, 2788), 'megengine.core._imperative_rt.core2._get_amp_dtype_autocast', '_get_amp_dtype_autocast', ([], {}), '()\n', (2786, 2788), False, 'from megengine.core._imperative_rt.core2 import _get_amp_dtype_autocast, _get_amp_high_prec_dtype, _get_amp_low_prec_dtype, _get_convert_inputs\n'), ((2821, 2847), 'megengine.core._imperative_rt.core2._get_amp_high_prec_dtype', '_get_amp_high_prec_dtype', ([], {}), '()\n', (2845, 2847), False, 'from megengine.core._imperative_rt.core2 import _get_amp_dtype_autocast, _get_amp_high_prec_dtype, _get_amp_low_prec_dtype, _get_convert_inputs\n'), ((2879, 2904), 'megengine.core._imperative_rt.core2._get_amp_low_prec_dtype', '_get_amp_low_prec_dtype', ([], {}), '()\n', (2902, 2904), False, 'from megengine.core._imperative_rt.core2 import _get_amp_dtype_autocast, _get_amp_high_prec_dtype, _get_amp_low_prec_dtype, _get_convert_inputs\n'), ((2966, 2999), 'megengine.core._trace_option.use_symbolic_shape', 'trace_option.use_symbolic_shape', ([], {}), '()\n', (2997, 2999), True, 'from megengine.core import _trace_option as trace_option\n'), ((3024, 3049), 'megengine.core.get_option', 'get_option', (['"""async_level"""'], {}), "('async_level')\n", (3034, 3049), False, 'from megengine.core import get_option\n'), ((3074, 3099), 'megengine.core.get_option', 'get_option', (['"""enable_drop"""'], {}), "('enable_drop')\n", (3084, 3099), False, 'from megengine.core import get_option\n'), ((3131, 3163), 'megengine.core.get_option', 'get_option', (['"""max_recompute_time"""'], {}), "('max_recompute_time')\n", (3141, 3163), False, 'from megengine.core import get_option\n'), ((3199, 3235), 'megengine.core.get_option', 'get_option', (['"""catch_worker_execption"""'], {}), "('catch_worker_execption')\n", (3209, 3235), False, 'from megengine.core import get_option\n'), ((3268, 3301), 'megengine.core.get_option', 'get_option', (['"""enable_host_compute"""'], {}), "('enable_host_compute')\n", (3278, 3301), False, 'from megengine.core import get_option\n'), ((3412, 3451), 'megengine.core.get_option', 'get_option', (['"""disable_memory_forwarding"""'], {}), "('disable_memory_forwarding')\n", (3422, 3451), False, 'from megengine.core import get_option\n'), ((3485, 3519), 'megengine.core.get_option', 'get_option', (['"""enable_dtr_auto_drop"""'], {}), "('enable_dtr_auto_drop')\n", (3495, 3519), False, 'from megengine.core import get_option\n'), ((3557, 3595), 'megengine.core.get_option', 'get_option', (['"""enable_dtr_sqrt_sampling"""'], {}), "('enable_dtr_sqrt_sampling')\n", (3567, 3595), False, 'from megengine.core import get_option\n'), ((3631, 3667), 'megengine.core.get_option', 'get_option', (['"""dtr_eviction_threshold"""'], {}), "('dtr_eviction_threshold')\n", (3641, 3667), False, 'from megengine.core import get_option\n'), ((3705, 3743), 'megengine.core.get_option', 'get_option', (['"""dtr_evictee_minimum_size"""'], {}), "('dtr_evictee_minimum_size')\n", (3715, 3743), False, 'from megengine.core import get_option\n'), ((4011, 4032), 'megengine.core._imperative_rt.core2._get_convert_inputs', '_get_convert_inputs', ([], {}), '()\n', (4030, 4032), False, 'from megengine.core._imperative_rt.core2 import _get_amp_dtype_autocast, _get_amp_high_prec_dtype, _get_amp_low_prec_dtype, _get_convert_inputs\n'), ((4064, 4089), 'megengine.core._imperative_rt.core2._get_amp_dtype_autocast', '_get_amp_dtype_autocast', ([], {}), '()\n', (4087, 4089), False, 'from megengine.core._imperative_rt.core2 import _get_amp_dtype_autocast, _get_amp_high_prec_dtype, _get_amp_low_prec_dtype, _get_convert_inputs\n'), ((4122, 4148), 'megengine.core._imperative_rt.core2._get_amp_high_prec_dtype', '_get_amp_high_prec_dtype', ([], {}), '()\n', (4146, 4148), False, 'from megengine.core._imperative_rt.core2 import _get_amp_dtype_autocast, _get_amp_high_prec_dtype, _get_amp_low_prec_dtype, _get_convert_inputs\n'), ((4180, 4205), 'megengine.core._imperative_rt.core2._get_amp_low_prec_dtype', '_get_amp_low_prec_dtype', ([], {}), '()\n', (4203, 4205), False, 'from megengine.core._imperative_rt.core2 import _get_amp_dtype_autocast, _get_amp_high_prec_dtype, _get_amp_low_prec_dtype, _get_convert_inputs\n'), ((1348, 1365), 'platform.system', 'platform.system', ([], {}), '()\n', (1363, 1365), False, 'import platform\n'), ((1515, 1532), 'platform.system', 'platform.system', ([], {}), '()\n', (1530, 1532), False, 'import platform\n')]
|
from typing import List
from app.schemas.role import IRoleCreate, IRoleUpdate
from app.models.role import Role
from app.models.user import User
from app.crud.base_sqlmodel import CRUDBase
from sqlmodel.ext.asyncio.session import AsyncSession
from datetime import datetime
from sqlmodel import select
from uuid import UUID
class CRUDRole(CRUDBase[Role, IRoleCreate, IRoleUpdate]):
async def get_role_by_name(self, db_session: AsyncSession, *, name: str) -> Role:
role = await db_session.exec(select(Role).where(Role.name == name))
return role.first()
async def add_role_to_user(self, db_session: AsyncSession, *, user: User, role_id: UUID) -> Role:
role = await super().get(db_session, role_id)
role.users.append(user)
db_session.add(role)
await db_session.commit()
await db_session.refresh(role)
return role
role = CRUDRole(Role)
|
[
"sqlmodel.select"
] |
[((504, 516), 'sqlmodel.select', 'select', (['Role'], {}), '(Role)\n', (510, 516), False, 'from sqlmodel import select\n')]
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import caffe # pylint: disable=import-error
import megengine
import megengine.hub
import numpy as np
import pytest
from mgeconvert.caffe_converter import convert_to_caffe
from .utils import (
ActiveOpr,
BnOpr,
BroadcastOpr,
ConcatOpr,
ConvOpr,
ElemwiseOpr,
LinearOpr,
PoolOpr,
ReduceOpr,
ReshapeOpr,
SoftmaxOpr,
SqueezeOpr,
SubtensorOpr,
TransposeOpr,
XORNet,
dump_mge_model,
)
max_error = 1e-6
tmp_file = "test_model"
def _test_convert_result(inputs, fpath, mge_results, max_err):
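    # Convert the dumped .mge graph to Caffe (prototxt + caffemodel), run the
    # Caffe net on the same input, and require outputs to match within max_err.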
convert_to_caffe(
fpath + ".mge", prototxt=tmp_file + ".txt", caffemodel=tmp_file + ".caffemodel"
)
    caffe_net = caffe.Net(tmp_file + ".txt", tmp_file + ".caffemodel", caffe.TEST)
for i in caffe_net.blobs.keys():
if "data" in i:
caffe_net.blobs[i].data[...] = inputs
break
caffe_net.forward()
caffe_dict = caffe_net.blobs
caffe_results = list(caffe_dict.items())[-1][1].data
assert caffe_results.shape == mge_results.shape
assert np.allclose(caffe_results, mge_results, atol=max_err)
@pytest.mark.parametrize("mode", ["normal", "group", "transpose"])
def test_conv2d(mode):
net = ConvOpr(mode)
mge_result = dump_mge_model(net, net.data, tmp_file)
_test_convert_result(net.data, tmp_file, mge_result, max_error)
def test_linear():
net = LinearOpr()
mge_result = dump_mge_model(net, net.data, tmp_file)
_test_convert_result(net.data, tmp_file, mge_result, max_error)
def test_softmax():
net = SoftmaxOpr()
mge_result = dump_mge_model(net, net.data, tmp_file)
_test_convert_result(net.data, tmp_file, mge_result, max_error)
def test_squeeze():
net = SqueezeOpr()
mge_result = dump_mge_model(net, net.data, tmp_file)
_test_convert_result(net.data, tmp_file, mge_result, max_error)
@pytest.mark.parametrize("mode", ["max", "avg"])
def test_pooling(mode):
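    # average-pooling conversion is only exercised on MegEngine <= 0.6.0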
if megengine.__version__ > "0.6.0" and mode == "avg":
return
net = PoolOpr(mode)
mge_result = dump_mge_model(net, net.data, tmp_file)
_test_convert_result(net.data, tmp_file, mge_result, max_error)
@pytest.mark.parametrize("mode", ["bn1d", "bn2d"])
def test_batchnorm(mode):
net = BnOpr(mode)
data = net.data1 if mode == "bn1d" else net.data2
mge_result = dump_mge_model(net, data, tmp_file)
_test_convert_result(data, tmp_file, mge_result, max_error)
def test_subtensor():
net = SubtensorOpr()
mge_result = dump_mge_model(net, net.data, tmp_file)
_test_convert_result(net.data, tmp_file, mge_result, max_error)
def test_transpose():
net = TransposeOpr()
mge_result = dump_mge_model(net, net.data, tmp_file)
_test_convert_result(net.data, tmp_file, mge_result, max_error)
def test_concat():
net = ConcatOpr()
mge_result = dump_mge_model(net, net.data, tmp_file)
_test_convert_result(net.data, tmp_file, mge_result, max_error)
def test_reshape():
net = ReshapeOpr()
mge_result = dump_mge_model(net, net.data, tmp_file)
_test_convert_result(net.data, tmp_file, mge_result, max_error)
@pytest.mark.parametrize(
"mode", ["add", "sub", "mul", "div", "abs", "exp", "log", "max", "pow"]
)
def test_elemwise(mode):
net = ElemwiseOpr(mode)
mge_result = dump_mge_model(net, net.data, tmp_file)
_test_convert_result(net.data, tmp_file, mge_result, max_error)
@pytest.mark.parametrize(
"mode", ["add", "sub", "mul", "div", "abs", "exp", "log", "pow"]
)
def test_elemwise_broadcast(mode):
net = ElemwiseOpr(mode)
mge_result = dump_mge_model(net, np.array([2.0]).astype("float32"), tmp_file)
_test_convert_result(np.array([2.0]), tmp_file, mge_result, max_error)
@pytest.mark.parametrize("mode", ["relu", "sigmoid", "tanh", "leaky_relu"])
def test_active(mode):
net = ActiveOpr(mode)
mge_result = dump_mge_model(net, net.data, tmp_file)
_test_convert_result(net.data, tmp_file, mge_result, max_error)
@pytest.mark.parametrize("mode", ["max", "sum", "mean"])
def test_reduce(mode):
net = ReduceOpr(mode)
mge_result = dump_mge_model(net, net.data, tmp_file)
_test_convert_result(net.data, tmp_file, mge_result, max_error)
def test_broadcast():
net = BroadcastOpr()
mge_result = dump_mge_model(net, net.data, tmp_file)
_test_convert_result(net.data, tmp_file, mge_result, max_error)
@pytest.mark.parametrize(
"model",
[
"shufflenet_v2_x0_5",
"shufflenet_v2_x1_0",
"resnet18",
"resnet50",
"resnet101",
"resnext50_32x4d",
],
)
def test_model(model):
data = (
np.random.randint(0, 255, 3 * 224 * 224)
.reshape((1, 3, 224, 224))
.astype(np.float32)
)
if megengine.__version__ < "1.1.0":
commit_id = "dc2f2cfb228a135747d083517b98aea56e7aab92"
else:
commit_id = None
net = megengine.hub.load(
"megengine/models", model, use_cache=False, commit=commit_id, pretrained=True
)
mge_result = dump_mge_model(net, data, tmp_file)
_test_convert_result(data, tmp_file, mge_result, 1e-2)
def test_xornet():
if megengine.__version__ < "1.1.0":
return
net = XORNet()
mge_result = dump_mge_model(net, net.data, tmp_file, True)
_test_convert_result(net.data, tmp_file, mge_result, max_error)
def test_leakyrelu_model():
if megengine.__version__ < "1.1.0":
return
net = XORNet()
mge_result = dump_mge_model(net, net.data, tmp_file, False)
_test_convert_result(net.data, tmp_file, mge_result, max_error)
|
[
"megengine.hub.load"
] |
[((1489, 1554), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""mode"""', "['normal', 'group', 'transpose']"], {}), "('mode', ['normal', 'group', 'transpose'])\n", (1512, 1554), False, 'import pytest\n'), ((2238, 2285), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""mode"""', "['max', 'avg']"], {}), "('mode', ['max', 'avg'])\n", (2261, 2285), False, 'import pytest\n'), ((2535, 2584), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""mode"""', "['bn1d', 'bn2d']"], {}), "('mode', ['bn1d', 'bn2d'])\n", (2558, 2584), False, 'import pytest\n'), ((3493, 3593), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""mode"""', "['add', 'sub', 'mul', 'div', 'abs', 'exp', 'log', 'max', 'pow']"], {}), "('mode', ['add', 'sub', 'mul', 'div', 'abs', 'exp',\n 'log', 'max', 'pow'])\n", (3516, 3593), False, 'import pytest\n'), ((3777, 3870), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""mode"""', "['add', 'sub', 'mul', 'div', 'abs', 'exp', 'log', 'pow']"], {}), "('mode', ['add', 'sub', 'mul', 'div', 'abs', 'exp',\n 'log', 'pow'])\n", (3800, 3870), False, 'import pytest\n'), ((4096, 4170), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""mode"""', "['relu', 'sigmoid', 'tanh', 'leaky_relu']"], {}), "('mode', ['relu', 'sigmoid', 'tanh', 'leaky_relu'])\n", (4119, 4170), False, 'import pytest\n'), ((4348, 4403), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""mode"""', "['max', 'sum', 'mean']"], {}), "('mode', ['max', 'sum', 'mean'])\n", (4371, 4403), False, 'import pytest\n'), ((4755, 4897), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""model"""', "['shufflenet_v2_x0_5', 'shufflenet_v2_x1_0', 'resnet18', 'resnet50',\n 'resnet101', 'resnext50_32x4d']"], {}), "('model', ['shufflenet_v2_x0_5',\n 'shufflenet_v2_x1_0', 'resnet18', 'resnet50', 'resnet101',\n 'resnext50_32x4d'])\n", (4778, 4897), False, 'import pytest\n'), ((932, 1034), 'mgeconvert.caffe_converter.convert_to_caffe', 'convert_to_caffe', (["(fpath + '.mge')"], {'prototxt': "(tmp_file + '.txt')", 'caffemodel': "(tmp_file + '.caffemodel')"}), "(fpath + '.mge', prototxt=tmp_file + '.txt', caffemodel=\n tmp_file + '.caffemodel')\n", (948, 1034), False, 'from mgeconvert.caffe_converter import convert_to_caffe\n'), ((1060, 1125), 'caffe.Net', 'caffe.Net', (["(tmp_file + '.txt')", '"""test_model.caffemodel"""', 'caffe.TEST'], {}), "(tmp_file + '.txt', 'test_model.caffemodel', caffe.TEST)\n", (1069, 1125), False, 'import caffe\n'), ((1432, 1485), 'numpy.allclose', 'np.allclose', (['caffe_results', 'mge_results'], {'atol': 'max_err'}), '(caffe_results, mge_results, atol=max_err)\n', (1443, 1485), True, 'import numpy as np\n'), ((5258, 5360), 'megengine.hub.load', 'megengine.hub.load', (['"""megengine/models"""', 'model'], {'use_cache': '(False)', 'commit': 'commit_id', 'pretrained': '(True)'}), "('megengine/models', model, use_cache=False, commit=\n commit_id, pretrained=True)\n", (5276, 5360), False, 'import megengine\n'), ((4043, 4058), 'numpy.array', 'np.array', (['[2.0]'], {}), '([2.0])\n', (4051, 4058), True, 'import numpy as np\n'), ((3973, 3988), 'numpy.array', 'np.array', (['[2.0]'], {}), '([2.0])\n', (3981, 3988), True, 'import numpy as np\n'), ((5000, 5040), 'numpy.random.randint', 'np.random.randint', (['(0)', '(255)', '(3 * 224 * 224)'], {}), '(0, 255, 3 * 224 * 224)\n', (5017, 5040), True, 'import numpy as np\n')]
|
# coding=utf8
from __future__ import absolute_import
import os
from sfepy import data_dir
import six
filename_meshes = ['/meshes/3d/cylinder.mesh',
'/meshes/3d/cylinder.vtk',
'/meshes/various_formats/small2d.mesh',
'/meshes/various_formats/small2d.vtk',
'/meshes/various_formats/octahedron.node',
'/meshes/various_formats/comsol_tri.txt',
'/meshes/various_formats/abaqus_hex.inp',
'/meshes/various_formats/abaqus_tet.inp',
'/meshes/various_formats/abaqus_quad.inp',
'/meshes/various_formats/abaqus_tri.inp',
'/meshes/various_formats/abaqus_quad_tri.inp',
'/meshes/various_formats/hex4.mesh3d',
'/meshes/various_formats/tetra8.mesh3d',
'/meshes/various_formats/cube.bdf',
'/meshes/various_formats/med_2d_tri_quad.med',
'/meshes/various_formats/med_3d_tet_hex.med',
'/meshes/various_formats/msh_tri.msh',
'/meshes/various_formats/msh_tetra.msh',
'/meshes/various_formats/xyz_quad.xyz',
'/meshes/various_formats/xyz_tet.xyz']
filename_meshes = [data_dir + name for name in filename_meshes]
def mesh_hook(mesh, mode):
"""
Define a mesh programmatically.
"""
if mode == 'read':
nodes = [[0, 0], [1, 0], [1, 1], [0, 1]]
nod_ids = [0, 0, 1, 1]
conns = [[[0, 1, 2], [0, 2, 3]]]
mat_ids = [[0, 1]]
descs = ['2_3']
mesh._set_io_data(nodes, nod_ids, conns, mat_ids, descs)
elif mode == 'write':
pass
from sfepy.discrete.fem.meshio import UserMeshIO
filename_meshes.extend([mesh_hook, UserMeshIO(mesh_hook)])
same = [(0, 1), (2, 3)]
import os.path as op
from sfepy.base.base import assert_
from sfepy.base.testing import TestCommon
class Test(TestCommon):
"""Write test names explicitely to impose a given order of evaluation."""
tests = ['test_read_meshes', 'test_compare_same_meshes',
'test_read_dimension', 'test_write_read_meshes',
'test_hdf5_meshio']
@staticmethod
def from_conf(conf, options):
return Test(conf=conf, options=options)
def test_read_meshes(self):
"""Try to read all listed meshes."""
from sfepy.discrete.fem import Mesh
conf_dir = op.dirname(__file__)
meshes = {}
for ii, filename in enumerate(filename_meshes):
self.report('%d. mesh: %s' % (ii + 1, filename))
mesh = Mesh.from_file(filename, prefix_dir=conf_dir)
assert_(mesh.dim == (mesh.coors.shape[1]))
assert_(mesh.n_nod == (mesh.coors.shape[0]))
assert_(mesh.n_nod == (mesh.cmesh.vertex_groups.shape[0]))
assert_(mesh.n_el == mesh.cmesh.num[mesh.cmesh.tdim])
self.report('read ok')
meshes[filename] = mesh
self.meshes = meshes
return True
def _compare_meshes(self, mesh0, mesh1):
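        # Compare the meshes attribute by attribute: dimension, node/element
        # counts, element types, coordinates, groups and connectivity.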
import numpy as nm
oks = []
ok0 = (mesh0.dim == mesh1.dim)
if not ok0:
self.report('dimension failed!')
oks.append(ok0)
ok0 = mesh0.n_nod == mesh1.n_nod
if not ok0:
self.report('number of nodes failed!')
oks.append(ok0)
ok0 = mesh0.n_el == mesh1.n_el
if not ok0:
self.report('number of elements failed!')
oks.append(ok0)
ok0 = mesh0.descs == mesh1.descs
if not ok0:
self.report('element types failed!')
oks.append(ok0)
ok0 = nm.allclose(mesh0.coors, mesh1.coors)
if not ok0:
self.report('nodes failed!')
oks.append(ok0)
ok0 = nm.all(mesh0.cmesh.vertex_groups == mesh1.cmesh.vertex_groups)
if not ok0:
self.report('node groups failed!')
oks.append(ok0)
ok0 = nm.all(mesh0.cmesh.cell_groups == mesh1.cmesh.cell_groups)
if not ok0:
self.report('material ids failed!')
oks.append(ok0)
ok0 = (nm.all(mesh0.cmesh.get_cell_conn().indices ==
mesh1.cmesh.get_cell_conn().indices) and
nm.all(mesh0.cmesh.get_cell_conn().offsets ==
mesh1.cmesh.get_cell_conn().offsets))
if not ok0:
self.report('connectivities failed!')
oks.append(ok0)
return oks
def test_compare_same_meshes(self):
"""
Compare same meshes in various formats.
"""
oks = []
for i0, i1 in same:
name0 = filename_meshes[i0]
name1 = filename_meshes[i1]
self.report('comparing meshes from "%s" and "%s"' % (name0, name1))
mesh0 = self.meshes[name0]
mesh1 = self.meshes[name1]
oks = self._compare_meshes(mesh0, mesh1)
return sum(oks) == len(oks)
def test_read_dimension(self):
from sfepy.discrete.fem import MeshIO
meshes = {data_dir + '/meshes/various_formats/small2d.mesh' : 2,
data_dir + '/meshes/various_formats/small2d.vtk' : 2,
data_dir + '/meshes/various_formats/small3d.mesh' : 3}
ok = True
conf_dir = op.dirname(__file__)
for filename, adim in six.iteritems(meshes):
self.report('mesh: %s, dimension %d' % (filename, adim))
io = MeshIO.any_from_filename(filename, prefix_dir=conf_dir)
dim = io.read_dimension()
if dim != adim:
self.report('read dimension %d -> failed' % dim)
ok = False
else:
self.report('read dimension %d -> ok' % dim)
return ok
def test_write_read_meshes(self):
"""
Try to write and then read all supported formats.
"""
from sfepy.discrete.fem import Mesh
from sfepy.discrete.fem.meshio import (supported_formats,
supported_capabilities)
conf_dir = op.dirname(__file__)
mesh0 = Mesh.from_file(data_dir
+ '/meshes/various_formats/small3d.mesh',
prefix_dir=conf_dir)
oks = []
for suffix, format_ in six.iteritems(supported_formats):
if isinstance(format_, tuple) or (format_ == 'xyz'):
continue
if 'w' not in supported_capabilities[format_]: continue
filename = op.join(self.options.out_dir, 'test_mesh_wr' + suffix)
self.report('%s format: %s' % (suffix, filename))
mesh0.write(filename, io='auto')
mesh1 = Mesh.from_file(filename)
oks.extend(self._compare_meshes(mesh0, mesh1))
return sum(oks) == len(oks)
def test_hdf5_meshio(self):
try:
from igakit import igalib
except ImportError:
self.report('hdf5_meshio not-tested (missing igalib module)!')
return True
import tempfile
import numpy as nm
import scipy.sparse as sps
from sfepy.discrete.fem.meshio import HDF5MeshIO
from sfepy.base.base import Struct
from sfepy.base.ioutils import Cached, Uncached, SoftLink, \
DataSoftLink
from sfepy.discrete.iga.domain import IGDomain
from sfepy.discrete.iga.domain_generators import gen_patch_block_domain
from sfepy.solvers.ts import TimeStepper
from sfepy.discrete.fem import Mesh
conf_dir = op.dirname(__file__)
mesh0 = Mesh.from_file(data_dir +
'/meshes/various_formats/small3d.mesh',
prefix_dir=conf_dir)
shape = [4, 4, 4]
dims = [5, 5, 5]
centre = [0, 0, 0]
degrees = [2, 2, 2]
nurbs, bmesh, regions = gen_patch_block_domain(dims, shape, centre,
degrees,
cp_mode='greville',
name='iga')
ig_domain = IGDomain('iga', nurbs, bmesh, regions=regions)
int_ar = nm.arange(4)
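        # Heterogeneous payload exercising caching, soft links, raw meshes,
        # tuples and sparse data.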
data = {
'list': range(4),
'mesh1': mesh0,
'mesh2': mesh0,
'mesh3': Uncached(mesh0),
'mesh4': SoftLink('/step0/__cdata/data/data/mesh2'),
'mesh5': DataSoftLink('Mesh','/step0/__cdata/data/data/mesh1/data'),
'mesh6': DataSoftLink('Mesh','/step0/__cdata/data/data/mesh2/data',
mesh0),
'mesh7': DataSoftLink('Mesh','/step0/__cdata/data/data/mesh1/data',
True),
'iga' : ig_domain,
'cached1': Cached(1),
'cached2': Cached(int_ar),
'cached3': Cached(int_ar),
'types': ( True, False, None ),
'tuple': ('first string', 'druhý UTF8 řetězec'),
'struct': Struct(
double=nm.arange(4, dtype=float),
int=nm.array([2,3,4,7]),
sparse=sps.csr_matrix(nm.array([1,0,0,5]).
reshape((2,2)))
)
}
with tempfile.NamedTemporaryFile(suffix='.h5', delete=False) as fil:
io = HDF5MeshIO(fil.name)
ts = TimeStepper(0,1.,0.1, 10)
io.write(fil.name, mesh0, {
'cdata' : Struct(
mode='custom',
data=data,
unpack_markers=False
)
}, ts=ts)
ts.advance()
mesh = io.read()
data['problem_mesh'] = DataSoftLink('Mesh', '/mesh', mesh)
io.write(fil.name, mesh0, {
'cdata' : Struct(
mode='custom',
data=data,
unpack_markers=True
)
}, ts=ts)
cache = {'/mesh': mesh }
fout = io.read_data(0, cache=cache)
fout2 = io.read_data(1, cache=cache )
out = fout['cdata']
out2 = fout2['cdata']
assert_(out['mesh7'] is out2['mesh7'],
'These two meshes should be in fact the same object')
assert_(out['mesh6'] is out2['mesh6'],
'These two meshes should be in fact the same object')
assert_(out['mesh5'] is not out2['mesh5'],
'These two meshes shouldn''t be in fact the same object')
assert_(out['mesh1'] is out['mesh2'],
'These two meshes should be in fact the same object')
assert_(out['mesh1'] is out['mesh2'],
'These two meshes should be in fact the same object')
assert_(out['mesh4'] is out['mesh2'],
'These two meshes should be in fact the same object')
assert_(out['mesh5'] is not out['mesh2'],
'These two meshes shouldn''t be in fact the same object')
assert_(out['mesh6'] is out['mesh2'],
'These two meshes should be in fact the same object')
assert_(out['mesh7'] is not out['mesh2'],
'These two meshes shouldn''t be in fact the same object')
assert_(out['mesh3'] is not out['mesh2'],
'These two meshes should be different objects')
assert_(out['cached2'] is out['cached3'],
'These two array should be the same object')
assert_(out2['problem_mesh'] is mesh,
'These two meshes should be the same objects')
assert_(self._compare_meshes(out['mesh1'], mesh0),
'Failed to restore mesh')
assert_(self._compare_meshes(out['mesh3'], mesh0),
'Failed to restore mesh')
assert_((out['struct'].sparse == data['struct'].sparse).todense()
.all(), 'Sparse matrix restore failed')
ts.advance()
io.write(fil.name, mesh0, {
'cdata' : Struct(
mode='custom',
data=[
DataSoftLink('Mesh',
'/step0/__cdata/data/data/mesh1/data',
mesh0),
mesh0
]
)
}, ts=ts)
out3 = io.read_data(2)['cdata']
assert_(out3[0] is out3[1])
os.remove(fil.name)
        # this property is not restored
del data['iga'].nurbs.nurbs
        # comparison of this attribute is not supported
del data['iga']._bnf
del out2['iga']._bnf
        # restoration of this property fails
del data['iga'].vertex_set_bcs
del out2['iga'].vertex_set_bcs
        # these soft links carry no information on how to unpack them, so it
        # must be done manually
data['mesh4'] = mesh0
data['mesh5'] = mesh0
data['mesh7'] = mesh0
for key, val in six.iteritems(out2):
self.report('comparing:', key)
self.assert_equal(val, data[key])
return True
|
[
"sfepy.discrete.fem.meshio.HDF5MeshIO",
"sfepy.base.ioutils.Uncached",
"sfepy.solvers.ts.TimeStepper",
"sfepy.base.ioutils.Cached",
"sfepy.discrete.fem.Mesh.from_file",
"sfepy.discrete.iga.domain_generators.gen_patch_block_domain",
"sfepy.base.base.Struct",
"sfepy.base.base.assert_",
"sfepy.base.ioutils.SoftLink",
"sfepy.discrete.fem.MeshIO.any_from_filename",
"sfepy.discrete.iga.domain.IGDomain",
"sfepy.discrete.fem.meshio.UserMeshIO",
"sfepy.base.ioutils.DataSoftLink"
] |
[((1816, 1837), 'sfepy.discrete.fem.meshio.UserMeshIO', 'UserMeshIO', (['mesh_hook'], {}), '(mesh_hook)\n', (1826, 1837), False, 'from sfepy.discrete.fem.meshio import UserMeshIO\n'), ((2467, 2487), 'os.path.dirname', 'op.dirname', (['__file__'], {}), '(__file__)\n', (2477, 2487), True, 'import os.path as op\n'), ((3708, 3745), 'numpy.allclose', 'nm.allclose', (['mesh0.coors', 'mesh1.coors'], {}), '(mesh0.coors, mesh1.coors)\n', (3719, 3745), True, 'import numpy as nm\n'), ((3846, 3908), 'numpy.all', 'nm.all', (['(mesh0.cmesh.vertex_groups == mesh1.cmesh.vertex_groups)'], {}), '(mesh0.cmesh.vertex_groups == mesh1.cmesh.vertex_groups)\n', (3852, 3908), True, 'import numpy as nm\n'), ((4015, 4073), 'numpy.all', 'nm.all', (['(mesh0.cmesh.cell_groups == mesh1.cmesh.cell_groups)'], {}), '(mesh0.cmesh.cell_groups == mesh1.cmesh.cell_groups)\n', (4021, 4073), True, 'import numpy as nm\n'), ((5352, 5372), 'os.path.dirname', 'op.dirname', (['__file__'], {}), '(__file__)\n', (5362, 5372), True, 'import os.path as op\n'), ((5403, 5424), 'six.iteritems', 'six.iteritems', (['meshes'], {}), '(meshes)\n', (5416, 5424), False, 'import six\n'), ((6146, 6166), 'os.path.dirname', 'op.dirname', (['__file__'], {}), '(__file__)\n', (6156, 6166), True, 'import os.path as op\n'), ((6183, 6273), 'sfepy.discrete.fem.Mesh.from_file', 'Mesh.from_file', (["(data_dir + '/meshes/various_formats/small3d.mesh')"], {'prefix_dir': 'conf_dir'}), "(data_dir + '/meshes/various_formats/small3d.mesh',\n prefix_dir=conf_dir)\n", (6197, 6273), False, 'from sfepy.discrete.fem import Mesh\n'), ((6381, 6413), 'six.iteritems', 'six.iteritems', (['supported_formats'], {}), '(supported_formats)\n', (6394, 6413), False, 'import six\n'), ((7669, 7689), 'os.path.dirname', 'op.dirname', (['__file__'], {}), '(__file__)\n', (7679, 7689), True, 'import os.path as op\n'), ((7706, 7796), 'sfepy.discrete.fem.Mesh.from_file', 'Mesh.from_file', (["(data_dir + '/meshes/various_formats/small3d.mesh')"], {'prefix_dir': 'conf_dir'}), "(data_dir + '/meshes/various_formats/small3d.mesh',\n prefix_dir=conf_dir)\n", (7720, 7796), False, 'from sfepy.discrete.fem import Mesh\n'), ((7995, 8083), 'sfepy.discrete.iga.domain_generators.gen_patch_block_domain', 'gen_patch_block_domain', (['dims', 'shape', 'centre', 'degrees'], {'cp_mode': '"""greville"""', 'name': '"""iga"""'}), "(dims, shape, centre, degrees, cp_mode='greville',\n name='iga')\n", (8017, 8083), False, 'from sfepy.discrete.iga.domain_generators import gen_patch_block_domain\n'), ((8265, 8311), 'sfepy.discrete.iga.domain.IGDomain', 'IGDomain', (['"""iga"""', 'nurbs', 'bmesh'], {'regions': 'regions'}), "('iga', nurbs, bmesh, regions=regions)\n", (8273, 8311), False, 'from sfepy.discrete.iga.domain import IGDomain\n'), ((8330, 8342), 'numpy.arange', 'nm.arange', (['(4)'], {}), '(4)\n', (8339, 8342), True, 'import numpy as nm\n'), ((12640, 12659), 'os.remove', 'os.remove', (['fil.name'], {}), '(fil.name)\n', (12649, 12659), False, 'import os\n'), ((13163, 13182), 'six.iteritems', 'six.iteritems', (['out2'], {}), '(out2)\n', (13176, 13182), False, 'import six\n'), ((2644, 2689), 'sfepy.discrete.fem.Mesh.from_file', 'Mesh.from_file', (['filename'], {'prefix_dir': 'conf_dir'}), '(filename, prefix_dir=conf_dir)\n', (2658, 2689), False, 'from sfepy.discrete.fem import Mesh\n'), ((2703, 2743), 'sfepy.base.base.assert_', 'assert_', (['(mesh.dim == mesh.coors.shape[1])'], {}), '(mesh.dim == mesh.coors.shape[1])\n', (2710, 2743), False, 'from sfepy.base.base import assert_\n'), ((2758, 2800), 
'sfepy.base.base.assert_', 'assert_', (['(mesh.n_nod == mesh.coors.shape[0])'], {}), '(mesh.n_nod == mesh.coors.shape[0])\n', (2765, 2800), False, 'from sfepy.base.base import assert_\n'), ((2815, 2871), 'sfepy.base.base.assert_', 'assert_', (['(mesh.n_nod == mesh.cmesh.vertex_groups.shape[0])'], {}), '(mesh.n_nod == mesh.cmesh.vertex_groups.shape[0])\n', (2822, 2871), False, 'from sfepy.base.base import assert_\n'), ((2886, 2939), 'sfepy.base.base.assert_', 'assert_', (['(mesh.n_el == mesh.cmesh.num[mesh.cmesh.tdim])'], {}), '(mesh.n_el == mesh.cmesh.num[mesh.cmesh.tdim])\n', (2893, 2939), False, 'from sfepy.base.base import assert_\n'), ((5512, 5567), 'sfepy.discrete.fem.MeshIO.any_from_filename', 'MeshIO.any_from_filename', (['filename'], {'prefix_dir': 'conf_dir'}), '(filename, prefix_dir=conf_dir)\n', (5536, 5567), False, 'from sfepy.discrete.fem import MeshIO\n'), ((6597, 6651), 'os.path.join', 'op.join', (['self.options.out_dir', "('test_mesh_wr' + suffix)"], {}), "(self.options.out_dir, 'test_mesh_wr' + suffix)\n", (6604, 6651), True, 'import os.path as op\n'), ((6780, 6804), 'sfepy.discrete.fem.Mesh.from_file', 'Mesh.from_file', (['filename'], {}), '(filename)\n', (6794, 6804), False, 'from sfepy.discrete.fem import Mesh\n'), ((8468, 8483), 'sfepy.base.ioutils.Uncached', 'Uncached', (['mesh0'], {}), '(mesh0)\n', (8476, 8483), False, 'from sfepy.base.ioutils import Cached, Uncached, SoftLink, DataSoftLink\n'), ((8506, 8548), 'sfepy.base.ioutils.SoftLink', 'SoftLink', (['"""/step0/__cdata/data/data/mesh2"""'], {}), "('/step0/__cdata/data/data/mesh2')\n", (8514, 8548), False, 'from sfepy.base.ioutils import Cached, Uncached, SoftLink, DataSoftLink\n'), ((8571, 8630), 'sfepy.base.ioutils.DataSoftLink', 'DataSoftLink', (['"""Mesh"""', '"""/step0/__cdata/data/data/mesh1/data"""'], {}), "('Mesh', '/step0/__cdata/data/data/mesh1/data')\n", (8583, 8630), False, 'from sfepy.base.ioutils import Cached, Uncached, SoftLink, DataSoftLink\n'), ((8652, 8718), 'sfepy.base.ioutils.DataSoftLink', 'DataSoftLink', (['"""Mesh"""', '"""/step0/__cdata/data/data/mesh2/data"""', 'mesh0'], {}), "('Mesh', '/step0/__cdata/data/data/mesh2/data', mesh0)\n", (8664, 8718), False, 'from sfepy.base.ioutils import Cached, Uncached, SoftLink, DataSoftLink\n'), ((8756, 8821), 'sfepy.base.ioutils.DataSoftLink', 'DataSoftLink', (['"""Mesh"""', '"""/step0/__cdata/data/data/mesh1/data"""', '(True)'], {}), "('Mesh', '/step0/__cdata/data/data/mesh1/data', True)\n", (8768, 8821), False, 'from sfepy.base.ioutils import Cached, Uncached, SoftLink, DataSoftLink\n'), ((8892, 8901), 'sfepy.base.ioutils.Cached', 'Cached', (['(1)'], {}), '(1)\n', (8898, 8901), False, 'from sfepy.base.ioutils import Cached, Uncached, SoftLink, DataSoftLink\n'), ((8926, 8940), 'sfepy.base.ioutils.Cached', 'Cached', (['int_ar'], {}), '(int_ar)\n', (8932, 8940), False, 'from sfepy.base.ioutils import Cached, Uncached, SoftLink, DataSoftLink\n'), ((8965, 8979), 'sfepy.base.ioutils.Cached', 'Cached', (['int_ar'], {}), '(int_ar)\n', (8971, 8979), False, 'from sfepy.base.ioutils import Cached, Uncached, SoftLink, DataSoftLink\n'), ((9359, 9414), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'suffix': '""".h5"""', 'delete': '(False)'}), "(suffix='.h5', delete=False)\n", (9386, 9414), False, 'import tempfile\n'), ((9440, 9460), 'sfepy.discrete.fem.meshio.HDF5MeshIO', 'HDF5MeshIO', (['fil.name'], {}), '(fil.name)\n', (9450, 9460), False, 'from sfepy.discrete.fem.meshio import HDF5MeshIO\n'), ((9478, 9506), 'sfepy.solvers.ts.TimeStepper', 
'TimeStepper', (['(0)', '(1.0)', '(0.1)', '(10)'], {}), '(0, 1.0, 0.1, 10)\n', (9489, 9506), False, 'from sfepy.solvers.ts import TimeStepper\n'), ((9816, 9851), 'sfepy.base.ioutils.DataSoftLink', 'DataSoftLink', (['"""Mesh"""', '"""/mesh"""', 'mesh'], {}), "('Mesh', '/mesh', mesh)\n", (9828, 9851), False, 'from sfepy.base.ioutils import Cached, Uncached, SoftLink, DataSoftLink\n'), ((10288, 10384), 'sfepy.base.base.assert_', 'assert_', (["(out['mesh7'] is out2['mesh7'])", '"""These two meshes should be in fact the same object"""'], {}), "(out['mesh7'] is out2['mesh7'],\n 'These two meshes should be in fact the same object')\n", (10295, 10384), False, 'from sfepy.base.base import assert_\n'), ((10410, 10506), 'sfepy.base.base.assert_', 'assert_', (["(out['mesh6'] is out2['mesh6'])", '"""These two meshes should be in fact the same object"""'], {}), "(out['mesh6'] is out2['mesh6'],\n 'These two meshes should be in fact the same object')\n", (10417, 10506), False, 'from sfepy.base.base import assert_\n'), ((10532, 10634), 'sfepy.base.base.assert_', 'assert_', (["(out['mesh5'] is not out2['mesh5'])", '"""These two meshes shouldnt be in fact the same object"""'], {}), "(out['mesh5'] is not out2['mesh5'],\n 'These two meshes shouldnt be in fact the same object')\n", (10539, 10634), False, 'from sfepy.base.base import assert_\n'), ((10662, 10757), 'sfepy.base.base.assert_', 'assert_', (["(out['mesh1'] is out['mesh2'])", '"""These two meshes should be in fact the same object"""'], {}), "(out['mesh1'] is out['mesh2'],\n 'These two meshes should be in fact the same object')\n", (10669, 10757), False, 'from sfepy.base.base import assert_\n'), ((10783, 10878), 'sfepy.base.base.assert_', 'assert_', (["(out['mesh1'] is out['mesh2'])", '"""These two meshes should be in fact the same object"""'], {}), "(out['mesh1'] is out['mesh2'],\n 'These two meshes should be in fact the same object')\n", (10790, 10878), False, 'from sfepy.base.base import assert_\n'), ((10904, 10999), 'sfepy.base.base.assert_', 'assert_', (["(out['mesh4'] is out['mesh2'])", '"""These two meshes should be in fact the same object"""'], {}), "(out['mesh4'] is out['mesh2'],\n 'These two meshes should be in fact the same object')\n", (10911, 10999), False, 'from sfepy.base.base import assert_\n'), ((11025, 11126), 'sfepy.base.base.assert_', 'assert_', (["(out['mesh5'] is not out['mesh2'])", '"""These two meshes shouldnt be in fact the same object"""'], {}), "(out['mesh5'] is not out['mesh2'],\n 'These two meshes shouldnt be in fact the same object')\n", (11032, 11126), False, 'from sfepy.base.base import assert_\n'), ((11154, 11249), 'sfepy.base.base.assert_', 'assert_', (["(out['mesh6'] is out['mesh2'])", '"""These two meshes should be in fact the same object"""'], {}), "(out['mesh6'] is out['mesh2'],\n 'These two meshes should be in fact the same object')\n", (11161, 11249), False, 'from sfepy.base.base import assert_\n'), ((11275, 11376), 'sfepy.base.base.assert_', 'assert_', (["(out['mesh7'] is not out['mesh2'])", '"""These two meshes shouldnt be in fact the same object"""'], {}), "(out['mesh7'] is not out['mesh2'],\n 'These two meshes shouldnt be in fact the same object')\n", (11282, 11376), False, 'from sfepy.base.base import assert_\n'), ((11404, 11497), 'sfepy.base.base.assert_', 'assert_', (["(out['mesh3'] is not out['mesh2'])", '"""These two meshes should be different objects"""'], {}), "(out['mesh3'] is not out['mesh2'],\n 'These two meshes should be different objects')\n", (11411, 11497), False, 'from sfepy.base.base import 
assert_\n'), ((11523, 11613), 'sfepy.base.base.assert_', 'assert_', (["(out['cached2'] is out['cached3'])", '"""These two array should be the same object"""'], {}), "(out['cached2'] is out['cached3'],\n 'These two array should be the same object')\n", (11530, 11613), False, 'from sfepy.base.base import assert_\n'), ((11639, 11727), 'sfepy.base.base.assert_', 'assert_', (["(out2['problem_mesh'] is mesh)", '"""These two meshes should be the same objects"""'], {}), "(out2['problem_mesh'] is mesh,\n 'These two meshes should be the same objects')\n", (11646, 11727), False, 'from sfepy.base.base import assert_\n'), ((12603, 12630), 'sfepy.base.base.assert_', 'assert_', (['(out3[0] is out3[1])'], {}), '(out3[0] is out3[1])\n', (12610, 12630), False, 'from sfepy.base.base import assert_\n'), ((9139, 9164), 'numpy.arange', 'nm.arange', (['(4)'], {'dtype': 'float'}), '(4, dtype=float)\n', (9148, 9164), True, 'import numpy as nm\n'), ((9186, 9208), 'numpy.array', 'nm.array', (['[2, 3, 4, 7]'], {}), '([2, 3, 4, 7])\n', (9194, 9208), True, 'import numpy as nm\n'), ((9571, 9625), 'sfepy.base.base.Struct', 'Struct', ([], {'mode': '"""custom"""', 'data': 'data', 'unpack_markers': '(False)'}), "(mode='custom', data=data, unpack_markers=False)\n", (9577, 9625), False, 'from sfepy.base.base import Struct\n'), ((9919, 9972), 'sfepy.base.base.Struct', 'Struct', ([], {'mode': '"""custom"""', 'data': 'data', 'unpack_markers': '(True)'}), "(mode='custom', data=data, unpack_markers=True)\n", (9925, 9972), False, 'from sfepy.base.base import Struct\n'), ((9245, 9267), 'numpy.array', 'nm.array', (['[1, 0, 0, 5]'], {}), '([1, 0, 0, 5])\n', (9253, 9267), True, 'import numpy as nm\n'), ((12293, 12359), 'sfepy.base.ioutils.DataSoftLink', 'DataSoftLink', (['"""Mesh"""', '"""/step0/__cdata/data/data/mesh1/data"""', 'mesh0'], {}), "('Mesh', '/step0/__cdata/data/data/mesh1/data', mesh0)\n", (12305, 12359), False, 'from sfepy.base.ioutils import Cached, Uncached, SoftLink, DataSoftLink\n')]
|
"""Instancia da tabela User e seus metodos"""
from typing import Optional, List, TYPE_CHECKING
from datetime import datetime
from sqlalchemy import UniqueConstraint
from sqlmodel import SQLModel, Field, Relationship
if TYPE_CHECKING:
from .tokens import Token
class User(SQLModel, table=True):
"""Tabela de usuarios"""
__table_args__ = (UniqueConstraint("email", "username"),)
id: Optional[int] = Field(primary_key=True, default=None, nullable=False)
name: str
email: str
username: str
password_hash: str
secundary_id: int = 0
is_staff: bool
is_active_user: bool
last_login: datetime
date_joined: datetime
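    # one-to-many: tokens issued for this user (Token.user is the back-reference)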
token: List["Token"] = Relationship(back_populates="user")
def __repr__(self):
return f"<User {self.name}>"
|
[
"sqlmodel.Relationship",
"sqlmodel.Field"
] |
[((418, 471), 'sqlmodel.Field', 'Field', ([], {'primary_key': '(True)', 'default': 'None', 'nullable': '(False)'}), '(primary_key=True, default=None, nullable=False)\n', (423, 471), False, 'from sqlmodel import SQLModel, Field, Relationship\n'), ((692, 727), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""user"""'}), "(back_populates='user')\n", (704, 727), False, 'from sqlmodel import SQLModel, Field, Relationship\n'), ((353, 390), 'sqlalchemy.UniqueConstraint', 'UniqueConstraint', (['"""email"""', '"""username"""'], {}), "('email', 'username')\n", (369, 390), False, 'from sqlalchemy import UniqueConstraint\n')]
|
#!/usr/bin/env python3
import asyncio
import datetime
import json
import os
import time
from argparse import ArgumentParser
from collections.abc import Sequence
import httpx
import uvloop
from dotenv import load_dotenv
from loguru import logger
from sqlmodel import Session, create_engine
from steam2sqlite import APPIDS_URL, BATCH_SIZE, navigator, utils
from steam2sqlite.handler import (
get_appids_from_db,
get_apps_achievements,
get_apps_data,
get_error_appids,
store_apps_achievements,
store_apps_data,
)
load_dotenv()
APPIDS_FILE = os.getenv("APPIDS_FILE")
sqlite_file_name = "database.db"
SQLITE_URL = f"sqlite:///{sqlite_file_name}"
async def get_appids_from_steam(local_file: str | None = None) -> dict[int, str]:
if local_file:
logger.info(f"Loading appids from local file: {local_file}")
with open(local_file) as steam_appids_fp:
appid_data = json.load(steam_appids_fp)
else:
logger.info("Loading appids from Steam API")
try:
async with httpx.AsyncClient() as client:
resp = await navigator.get(client, APPIDS_URL)
appid_data = resp.json()
await asyncio.sleep(1)
except navigator.NavigatorError:
logger.error("Error getting the appids from Steam")
raise
return {item["appid"]: item["name"] for item in appid_data["applist"]["apps"]}
def main(argv: Sequence[str] | None = None) -> int:
parser = ArgumentParser()
parser.add_argument(
"-l",
"--limit",
type=float,
default=None,
nargs="?",
const=1,
help="limit runtime (minutes)",
)
args = parser.parse_args(argv)
logger.info("Starting...")
start_time = time.monotonic()
uvloop.install()
engine = create_engine(SQLITE_URL, echo=False)
# From steam api, dict of: {appids: names}
steam_appids_names = asyncio.run(get_appids_from_steam(APPIDS_FILE))
with Session(engine) as session:
# query db for all appids we already have, sort by last_modified
db_appids_updated = get_appids_from_db(session)
# identify any missing appids -- these go on the top of our stack to process
missing_appids = set(steam_appids_names.keys()) - {
appid for appid, _ in db_appids_updated
}
# remove any appids that have been modified recently
db_appids = [
appid
for appid, updated in db_appids_updated
if ((datetime.datetime.utcnow().date() - updated.date()).days > 3)
]
appids_missing_and_older = list(missing_appids) + db_appids
# remove any appids that have been flagged as errors from previous runs
error_appids = get_error_appids(session)
appids_to_process = [
appid
for appid in appids_missing_and_older
if appid not in set(error_appids)
]
logger.info("Loading app data from Steam API and saving to db")
for appids in utils.grouper(appids_to_process, BATCH_SIZE, fillvalue=None):
apps_data = get_apps_data(session, steam_appids_names, appids)
apps = store_apps_data(session, steam_appids_names, apps_data)
apps_with_achievements = [app for app in apps if app.achievements_total > 0]
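            # fetch achievement stats only for apps that report any; the call
            # is wrapped in utils.delay_by to pace the Steam API requests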
if apps_with_achievements:
apps_achievements_data = utils.delay_by(len(apps_with_achievements))(
get_apps_achievements
)(apps_with_achievements)
store_apps_achievements(session, apps_achievements_data)
if args.limit and (time.monotonic() - start_time) / 60 > args.limit:
logger.info(f"Limit ({args.limit} min) reached shutting down...")
break
return 0
if __name__ == "__main__":
exit(main())
|
[
"sqlmodel.create_engine",
"sqlmodel.Session"
] |
[((538, 551), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (549, 551), False, 'from dotenv import load_dotenv\n'), ((567, 591), 'os.getenv', 'os.getenv', (['"""APPIDS_FILE"""'], {}), "('APPIDS_FILE')\n", (576, 591), False, 'import os\n'), ((1478, 1494), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (1492, 1494), False, 'from argparse import ArgumentParser\n'), ((1717, 1743), 'loguru.logger.info', 'logger.info', (['"""Starting..."""'], {}), "('Starting...')\n", (1728, 1743), False, 'from loguru import logger\n'), ((1762, 1778), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (1776, 1778), False, 'import time\n'), ((1784, 1800), 'uvloop.install', 'uvloop.install', ([], {}), '()\n', (1798, 1800), False, 'import uvloop\n'), ((1815, 1852), 'sqlmodel.create_engine', 'create_engine', (['SQLITE_URL'], {'echo': '(False)'}), '(SQLITE_URL, echo=False)\n', (1828, 1852), False, 'from sqlmodel import Session, create_engine\n'), ((775, 835), 'loguru.logger.info', 'logger.info', (['f"""Loading appids from local file: {local_file}"""'], {}), "(f'Loading appids from local file: {local_file}')\n", (786, 835), False, 'from loguru import logger\n'), ((956, 1000), 'loguru.logger.info', 'logger.info', (['"""Loading appids from Steam API"""'], {}), "('Loading appids from Steam API')\n", (967, 1000), False, 'from loguru import logger\n'), ((1984, 1999), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (1991, 1999), False, 'from sqlmodel import Session, create_engine\n'), ((2114, 2141), 'steam2sqlite.handler.get_appids_from_db', 'get_appids_from_db', (['session'], {}), '(session)\n', (2132, 2141), False, 'from steam2sqlite.handler import get_appids_from_db, get_apps_achievements, get_apps_data, get_error_appids, store_apps_achievements, store_apps_data\n'), ((2766, 2791), 'steam2sqlite.handler.get_error_appids', 'get_error_appids', (['session'], {}), '(session)\n', (2782, 2791), False, 'from steam2sqlite.handler import get_appids_from_db, get_apps_achievements, get_apps_data, get_error_appids, store_apps_achievements, store_apps_data\n'), ((2955, 3018), 'loguru.logger.info', 'logger.info', (['"""Loading app data from Steam API and saving to db"""'], {}), "('Loading app data from Steam API and saving to db')\n", (2966, 3018), False, 'from loguru import logger\n'), ((3042, 3102), 'steam2sqlite.utils.grouper', 'utils.grouper', (['appids_to_process', 'BATCH_SIZE'], {'fillvalue': 'None'}), '(appids_to_process, BATCH_SIZE, fillvalue=None)\n', (3055, 3102), False, 'from steam2sqlite import APPIDS_URL, BATCH_SIZE, navigator, utils\n'), ((911, 937), 'json.load', 'json.load', (['steam_appids_fp'], {}), '(steam_appids_fp)\n', (920, 937), False, 'import json\n'), ((3129, 3179), 'steam2sqlite.handler.get_apps_data', 'get_apps_data', (['session', 'steam_appids_names', 'appids'], {}), '(session, steam_appids_names, appids)\n', (3142, 3179), False, 'from steam2sqlite.handler import get_appids_from_db, get_apps_achievements, get_apps_data, get_error_appids, store_apps_achievements, store_apps_data\n'), ((3199, 3254), 'steam2sqlite.handler.store_apps_data', 'store_apps_data', (['session', 'steam_appids_names', 'apps_data'], {}), '(session, steam_appids_names, apps_data)\n', (3214, 3254), False, 'from steam2sqlite.handler import get_appids_from_db, get_apps_achievements, get_apps_data, get_error_appids, store_apps_achievements, store_apps_data\n'), ((1037, 1056), 'httpx.AsyncClient', 'httpx.AsyncClient', ([], {}), '()\n', (1054, 1056), False, 'import httpx\n'), ((1186, 1202), 
'asyncio.sleep', 'asyncio.sleep', (['(1)'], {}), '(1)\n', (1199, 1202), False, 'import asyncio\n'), ((1256, 1307), 'loguru.logger.error', 'logger.error', (['"""Error getting the appids from Steam"""'], {}), "('Error getting the appids from Steam')\n", (1268, 1307), False, 'from loguru import logger\n'), ((3570, 3626), 'steam2sqlite.handler.store_apps_achievements', 'store_apps_achievements', (['session', 'apps_achievements_data'], {}), '(session, apps_achievements_data)\n', (3593, 3626), False, 'from steam2sqlite.handler import get_appids_from_db, get_apps_achievements, get_apps_data, get_error_appids, store_apps_achievements, store_apps_data\n'), ((3725, 3790), 'loguru.logger.info', 'logger.info', (['f"""Limit ({args.limit} min) reached shutting down..."""'], {}), "(f'Limit ({args.limit} min) reached shutting down...')\n", (3736, 3790), False, 'from loguru import logger\n'), ((1097, 1130), 'steam2sqlite.navigator.get', 'navigator.get', (['client', 'APPIDS_URL'], {}), '(client, APPIDS_URL)\n', (1110, 1130), False, 'from steam2sqlite import APPIDS_URL, BATCH_SIZE, navigator, utils\n'), ((3659, 3675), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (3673, 3675), False, 'import time\n'), ((2521, 2547), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (2545, 2547), False, 'import datetime\n')]
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.jit as jit
import megengine.module as M
import megengine.random as R
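# All graphs below draw from the same global RNG stream, so two runs only agree
# when R.manual_seed() is called with the same value before each of them.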
def test_random_static_diff_result():
@jit.trace(symbolic=True)
def graph_a():
return R.uniform(5) + R.gaussian(5)
@jit.trace(symbolic=True)
def graph_b():
return R.uniform(5) + R.gaussian(5)
a = graph_a()
b = graph_b()
assert np.any(a.numpy() != b.numpy())
def test_random_static_same_result():
@jit.trace(symbolic=True)
def graph_a():
R.manual_seed(731)
return R.uniform(5) + R.gaussian(5)
@jit.trace(symbolic=True)
def graph_b():
R.manual_seed(731)
return R.uniform(5) + R.gaussian(5)
a = graph_a()
b = graph_b()
assert np.all(a.numpy() == b.numpy())
def test_random_dynamic_diff_result():
a = R.uniform(5) + R.gaussian(5)
b = R.uniform(5) + R.gaussian(5)
assert np.any(a.numpy() != b.numpy())
def test_random_dynamic_same_result():
R.manual_seed(0)
a = R.uniform(5) + R.gaussian(5)
R.manual_seed(0)
b = R.uniform(5) + R.gaussian(5)
assert np.all(a.numpy() == b.numpy())
def test_dropout_dynamic_diff_result():
x = mge.ones(10)
a = F.dropout(x, 0.5)
b = F.dropout(x, 0.5)
assert np.any(a.numpy() != b.numpy())
def test_dropout_dynamic_same_result():
x = mge.ones(10)
R.manual_seed(0)
a = F.dropout(x, 0.5)
R.manual_seed(0)
b = F.dropout(x, 0.5)
assert np.all(a.numpy() == b.numpy())
def test_M_dropout_static_diff_result():
m = M.Dropout(0.5)
@jit.trace(symbolic=True)
def graph_a(x):
return m(x)
@jit.trace(symbolic=True)
def graph_b(x):
return m(x)
x = np.ones(10, dtype="float32")
a = graph_a(x)
a = a.numpy().copy()
b = graph_b(x)
c = graph_a(x)
assert np.any(a != b.numpy())
assert np.any(a != c.numpy())
def test_M_dropout_static_same_result():
m = M.Dropout(0.5)
@jit.trace(symbolic=True)
def graph_a(x):
return m(x)
@jit.trace(symbolic=True)
def graph_b(x):
return m(x)
x = np.ones(10, dtype="float32")
R.manual_seed(0)
a = graph_a(x)
a = a.numpy().copy()
R.manual_seed(0)
b = graph_b(x)
R.manual_seed(0) # useless
c = graph_a(x)
assert np.all(a == b.numpy())
assert np.any(a != c.numpy())
|
[
"megengine.module.Dropout",
"megengine.jit.trace",
"megengine.functional.dropout",
"megengine.random.uniform",
"megengine.ones",
"megengine.random.gaussian",
"megengine.random.manual_seed"
] |
[((585, 609), 'megengine.jit.trace', 'jit.trace', ([], {'symbolic': '(True)'}), '(symbolic=True)\n', (594, 609), True, 'import megengine.jit as jit\n'), ((679, 703), 'megengine.jit.trace', 'jit.trace', ([], {'symbolic': '(True)'}), '(symbolic=True)\n', (688, 703), True, 'import megengine.jit as jit\n'), ((891, 915), 'megengine.jit.trace', 'jit.trace', ([], {'symbolic': '(True)'}), '(symbolic=True)\n', (900, 915), True, 'import megengine.jit as jit\n'), ((1012, 1036), 'megengine.jit.trace', 'jit.trace', ([], {'symbolic': '(True)'}), '(symbolic=True)\n', (1021, 1036), True, 'import megengine.jit as jit\n'), ((1408, 1424), 'megengine.random.manual_seed', 'R.manual_seed', (['(0)'], {}), '(0)\n', (1421, 1424), True, 'import megengine.random as R\n'), ((1466, 1482), 'megengine.random.manual_seed', 'R.manual_seed', (['(0)'], {}), '(0)\n', (1479, 1482), True, 'import megengine.random as R\n'), ((1612, 1624), 'megengine.ones', 'mge.ones', (['(10)'], {}), '(10)\n', (1620, 1624), True, 'import megengine as mge\n'), ((1633, 1650), 'megengine.functional.dropout', 'F.dropout', (['x', '(0.5)'], {}), '(x, 0.5)\n', (1642, 1650), True, 'import megengine.functional as F\n'), ((1659, 1676), 'megengine.functional.dropout', 'F.dropout', (['x', '(0.5)'], {}), '(x, 0.5)\n', (1668, 1676), True, 'import megengine.functional as F\n'), ((1769, 1781), 'megengine.ones', 'mge.ones', (['(10)'], {}), '(10)\n', (1777, 1781), True, 'import megengine as mge\n'), ((1786, 1802), 'megengine.random.manual_seed', 'R.manual_seed', (['(0)'], {}), '(0)\n', (1799, 1802), True, 'import megengine.random as R\n'), ((1811, 1828), 'megengine.functional.dropout', 'F.dropout', (['x', '(0.5)'], {}), '(x, 0.5)\n', (1820, 1828), True, 'import megengine.functional as F\n'), ((1833, 1849), 'megengine.random.manual_seed', 'R.manual_seed', (['(0)'], {}), '(0)\n', (1846, 1849), True, 'import megengine.random as R\n'), ((1858, 1875), 'megengine.functional.dropout', 'F.dropout', (['x', '(0.5)'], {}), '(x, 0.5)\n', (1867, 1875), True, 'import megengine.functional as F\n'), ((1969, 1983), 'megengine.module.Dropout', 'M.Dropout', (['(0.5)'], {}), '(0.5)\n', (1978, 1983), True, 'import megengine.module as M\n'), ((1990, 2014), 'megengine.jit.trace', 'jit.trace', ([], {'symbolic': '(True)'}), '(symbolic=True)\n', (1999, 2014), True, 'import megengine.jit as jit\n'), ((2061, 2085), 'megengine.jit.trace', 'jit.trace', ([], {'symbolic': '(True)'}), '(symbolic=True)\n', (2070, 2085), True, 'import megengine.jit as jit\n'), ((2135, 2163), 'numpy.ones', 'np.ones', (['(10)'], {'dtype': '"""float32"""'}), "(10, dtype='float32')\n", (2142, 2163), True, 'import numpy as np\n'), ((2365, 2379), 'megengine.module.Dropout', 'M.Dropout', (['(0.5)'], {}), '(0.5)\n', (2374, 2379), True, 'import megengine.module as M\n'), ((2386, 2410), 'megengine.jit.trace', 'jit.trace', ([], {'symbolic': '(True)'}), '(symbolic=True)\n', (2395, 2410), True, 'import megengine.jit as jit\n'), ((2457, 2481), 'megengine.jit.trace', 'jit.trace', ([], {'symbolic': '(True)'}), '(symbolic=True)\n', (2466, 2481), True, 'import megengine.jit as jit\n'), ((2531, 2559), 'numpy.ones', 'np.ones', (['(10)'], {'dtype': '"""float32"""'}), "(10, dtype='float32')\n", (2538, 2559), True, 'import numpy as np\n'), ((2564, 2580), 'megengine.random.manual_seed', 'R.manual_seed', (['(0)'], {}), '(0)\n', (2577, 2580), True, 'import megengine.random as R\n'), ((2629, 2645), 'megengine.random.manual_seed', 'R.manual_seed', (['(0)'], {}), '(0)\n', (2642, 2645), True, 'import megengine.random as R\n'), ((2669, 2685), 
'megengine.random.manual_seed', 'R.manual_seed', (['(0)'], {}), '(0)\n', (2682, 2685), True, 'import megengine.random as R\n'), ((943, 961), 'megengine.random.manual_seed', 'R.manual_seed', (['(731)'], {}), '(731)\n', (956, 961), True, 'import megengine.random as R\n'), ((1064, 1082), 'megengine.random.manual_seed', 'R.manual_seed', (['(731)'], {}), '(731)\n', (1077, 1082), True, 'import megengine.random as R\n'), ((1255, 1267), 'megengine.random.uniform', 'R.uniform', (['(5)'], {}), '(5)\n', (1264, 1267), True, 'import megengine.random as R\n'), ((1270, 1283), 'megengine.random.gaussian', 'R.gaussian', (['(5)'], {}), '(5)\n', (1280, 1283), True, 'import megengine.random as R\n'), ((1292, 1304), 'megengine.random.uniform', 'R.uniform', (['(5)'], {}), '(5)\n', (1301, 1304), True, 'import megengine.random as R\n'), ((1307, 1320), 'megengine.random.gaussian', 'R.gaussian', (['(5)'], {}), '(5)\n', (1317, 1320), True, 'import megengine.random as R\n'), ((1433, 1445), 'megengine.random.uniform', 'R.uniform', (['(5)'], {}), '(5)\n', (1442, 1445), True, 'import megengine.random as R\n'), ((1448, 1461), 'megengine.random.gaussian', 'R.gaussian', (['(5)'], {}), '(5)\n', (1458, 1461), True, 'import megengine.random as R\n'), ((1491, 1503), 'megengine.random.uniform', 'R.uniform', (['(5)'], {}), '(5)\n', (1500, 1503), True, 'import megengine.random as R\n'), ((1506, 1519), 'megengine.random.gaussian', 'R.gaussian', (['(5)'], {}), '(5)\n', (1516, 1519), True, 'import megengine.random as R\n'), ((644, 656), 'megengine.random.uniform', 'R.uniform', (['(5)'], {}), '(5)\n', (653, 656), True, 'import megengine.random as R\n'), ((659, 672), 'megengine.random.gaussian', 'R.gaussian', (['(5)'], {}), '(5)\n', (669, 672), True, 'import megengine.random as R\n'), ((738, 750), 'megengine.random.uniform', 'R.uniform', (['(5)'], {}), '(5)\n', (747, 750), True, 'import megengine.random as R\n'), ((753, 766), 'megengine.random.gaussian', 'R.gaussian', (['(5)'], {}), '(5)\n', (763, 766), True, 'import megengine.random as R\n'), ((977, 989), 'megengine.random.uniform', 'R.uniform', (['(5)'], {}), '(5)\n', (986, 989), True, 'import megengine.random as R\n'), ((992, 1005), 'megengine.random.gaussian', 'R.gaussian', (['(5)'], {}), '(5)\n', (1002, 1005), True, 'import megengine.random as R\n'), ((1098, 1110), 'megengine.random.uniform', 'R.uniform', (['(5)'], {}), '(5)\n', (1107, 1110), True, 'import megengine.random as R\n'), ((1113, 1126), 'megengine.random.gaussian', 'R.gaussian', (['(5)'], {}), '(5)\n', (1123, 1126), True, 'import megengine.random as R\n')]
|
from typing import Optional
from sqlmodel import SQLModel, Field, func, DateTime, Column, Boolean
from datetime import datetime
class BaseModel(SQLModel):
"""
    Base SQLModel class; Config serializes enum members by their values.
"""
class Config:
use_enum_values = True
class BaseTableFields(SQLModel):
"""
    Common table fields: surrogate id, audit timestamps and an active flag.
"""
id: Optional[int] = Field(default=None, primary_key=True, nullable=False)
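    # created_at/updated_at are filled in via server_default/onupdate (func.now())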
created_at: Optional[datetime] = Field(
default=None,
sa_column=Column(
DateTime(timezone=True),
server_default=func.now(),
nullable=False,
)
)
updated_at: Optional[datetime] = Field(
default=None,
sa_column=Column(
DateTime(timezone=True),
server_default=func.now(),
onupdate=func.now(),
nullable=False,
)
)
is_active: Optional[bool] = Field(
default=None,
sa_column=Column(Boolean, server_default='true', default=True)
)
|
[
"sqlmodel.func.now",
"sqlmodel.Field",
"sqlmodel.Column",
"sqlmodel.DateTime"
] |
[((344, 397), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)', 'nullable': '(False)'}), '(default=None, primary_key=True, nullable=False)\n', (349, 397), False, 'from sqlmodel import SQLModel, Field, func, DateTime, Column, Boolean\n'), ((937, 989), 'sqlmodel.Column', 'Column', (['Boolean'], {'server_default': '"""true"""', 'default': '(True)'}), "(Boolean, server_default='true', default=True)\n", (943, 989), False, 'from sqlmodel import SQLModel, Field, func, DateTime, Column, Boolean\n'), ((503, 526), 'sqlmodel.DateTime', 'DateTime', ([], {'timezone': '(True)'}), '(timezone=True)\n', (511, 526), False, 'from sqlmodel import SQLModel, Field, func, DateTime, Column, Boolean\n'), ((716, 739), 'sqlmodel.DateTime', 'DateTime', ([], {'timezone': '(True)'}), '(timezone=True)\n', (724, 739), False, 'from sqlmodel import SQLModel, Field, func, DateTime, Column, Boolean\n'), ((555, 565), 'sqlmodel.func.now', 'func.now', ([], {}), '()\n', (563, 565), False, 'from sqlmodel import SQLModel, Field, func, DateTime, Column, Boolean\n'), ((768, 778), 'sqlmodel.func.now', 'func.now', ([], {}), '()\n', (776, 778), False, 'from sqlmodel import SQLModel, Field, func, DateTime, Column, Boolean\n'), ((801, 811), 'sqlmodel.func.now', 'func.now', ([], {}), '()\n', (809, 811), False, 'from sqlmodel import SQLModel, Field, func, DateTime, Column, Boolean\n')]
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from functools import partial
import numpy as np
import pytest
from utils import opr_test
import megengine.functional as F
from megengine import jit, tensor
def common_test_reduce(opr, ref_opr):
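    # Run opr on several shapes plus a NaN-containing input, comparing against
    # the NumPy reference over the default axis and every valid (also negative) axis.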
data1_shape = (5, 6, 7)
data2_shape = (2, 9, 12)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
cases = [
{"input": data1},
{"input": data2},
{"input": np.array([[[1, 2, np.nan, 4], [8, 6, 5, 2], [2, 3, 4, 5]]])},
]
if opr not in (F.argmin, F.argmax):
# test default axis
opr_test(cases, opr, ref_fn=ref_opr)
# test all axises in range of input shape
for axis in range(-3, 3):
# test keepdims False
opr_test(cases, opr, ref_fn=lambda x: ref_opr(x, axis=axis), axis=axis)
# test keepdims True
opr_test(
cases,
opr,
ref_fn=lambda x: ref_opr(x, axis=axis, keepdims=True),
axis=axis,
keepdims=True,
)
else:
        # test default axis
opr_test(cases, opr, ref_fn=lambda x: ref_opr(x).astype(np.int32))
# test all axises in range of input shape
for axis in range(0, 3):
opr_test(
cases,
opr,
ref_fn=lambda x: ref_opr(x, axis=axis).astype(np.int32),
axis=axis,
)
# test negative axis
axis = axis - len(data1_shape)
opr_test(
cases,
opr,
ref_fn=lambda x: ref_opr(x, axis=axis).astype(np.int32),
axis=axis,
)
def test_sum():
common_test_reduce(opr=F.sum, ref_opr=np.sum)
def test_prod():
common_test_reduce(opr=F.prod, ref_opr=np.prod)
def test_mean():
common_test_reduce(opr=F.mean, ref_opr=np.mean)
def test_var():
common_test_reduce(opr=F.var, ref_opr=np.var)
def test_std():
common_test_reduce(opr=F.std, ref_opr=np.std)
def test_min():
common_test_reduce(opr=F.min, ref_opr=np.min)
def test_max():
common_test_reduce(opr=F.max, ref_opr=np.max)
def test_argmin():
common_test_reduce(opr=F.argmin, ref_opr=np.argmin)
def test_argmax():
common_test_reduce(opr=F.argmax, ref_opr=np.argmax)
def test_sqrt():
d1_shape = (15,)
d2_shape = (25,)
d1 = np.random.random(d1_shape).astype(np.float32)
d2 = np.random.random(d2_shape).astype(np.float32)
cases = [{"input": d1}, {"input": d2}]
opr_test(cases, F.sqrt, ref_fn=np.sqrt)
def test_sort():
data1_shape = (10, 3)
data2_shape = (12, 2)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
output1 = [np.sort(data1), np.argsort(data1).astype(np.int32)]
output2 = [np.sort(data2), np.argsort(data2).astype(np.int32)]
cases = [
{"input": data1, "output": output1},
{"input": data2, "output": output2},
]
opr_test(cases, F.sort)
@pytest.mark.parametrize("is_symbolic", [None, False, True])
def test_sort_empty(is_symbolic):
data_shapes = [
(0,),
(10, 0),
]
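    # F.sort on a zero-sized tensor must return empty (values, indices) pairs
    # that match NumPy, both eagerly and under tracing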
def fn(x):
return F.sort(x)
for shape in data_shapes:
if is_symbolic is not None:
fn_ = jit.trace(symbolic=is_symbolic)(fn)
else:
fn_ = fn
data = np.random.random(shape).astype(np.float32)
for _ in range(3):
outs = fn_(tensor(data))
ref_outs = (np.sort(data), np.argsort(data))
assert len(ref_outs) == len(outs)
for i in range(len(outs)):
np.testing.assert_equal(outs[i].numpy(), ref_outs[i])
if is_symbolic is None:
break
def test_normalize():
cases = [
{"input": np.random.random((2, 3, 12, 12)).astype(np.float32)} for i in range(2)
]
def np_normalize(x, p=2, axis=None, eps=1e-12):
if axis is None:
norm = np.sum(x ** p) ** (1.0 / p)
else:
norm = np.sum(x ** p, axis=axis, keepdims=True) ** (1.0 / p)
return x / np.clip(norm, a_min=eps, a_max=np.inf)
# # Test L-2 norm along all dimensions
# opr_test(cases, F.normalize, ref_fn=np_normalize)
# # Test L-1 norm along all dimensions
# opr_test(cases, partial(F.normalize, p=1), ref_fn=partial(np_normalize, p=1))
# Test L-2 norm along the second dimension
opr_test(cases, partial(F.normalize, axis=1), ref_fn=partial(np_normalize, axis=1))
# Test some norm == 0
cases[0]["input"][0, 0, 0, :] = 0
cases[1]["input"][0, 0, 0, :] = 0
opr_test(cases, partial(F.normalize, axis=3), ref_fn=partial(np_normalize, axis=3))
def test_sum_neg_axis():
shape = (2, 3)
data = np.random.random(shape).astype(np.float32)
for axis in (-1, -2, (-2, 1), (-1, 0)):
get = F.sum(tensor(data), axis=axis)
ref = np.sum(data, axis=axis)
np.testing.assert_allclose(get.numpy(), ref, rtol=1e-6)
with pytest.raises(AssertionError):
F.sum(tensor(data), axis=(-1, 1))
def test_non_finite():
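    # _check_non_finite returns 1 when any input tensor contains inf or nan, else 0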
shape = (32, 3, 32, 32)
data1 = np.random.random(shape).astype(np.float32)
data2 = np.random.random(shape).astype(np.float32)
rst = F.math._check_non_finite([tensor(data1), tensor(data2)])
np.testing.assert_equal(rst.numpy(), [0])
data2[0][0][0][0] = float("inf")
rst = F.math._check_non_finite([tensor(data1), tensor(data2)])
np.testing.assert_equal(rst.numpy(), [1])
data2[0][0][0][0] = float("nan")
rst = F.math._check_non_finite([tensor(data1), tensor(data2)])
np.testing.assert_equal(rst.numpy(), [1])
@pytest.mark.parametrize("descending", [True, False])
@pytest.mark.parametrize("sorted", [True, False])
@pytest.mark.parametrize("inp1d", [True, False])
@pytest.mark.parametrize("kth_only", [True, False])
def test_topk(descending, sorted, inp1d, kth_only):
k = 3
if inp1d:
data = np.random.permutation(7)
else:
data = np.random.permutation(5 * 7).reshape(5, 7)
data = data.astype(np.int32)
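    # reference: a full sort, reversed when descending; the asserts below slice
    # off the top-k portion (or the k-th element alone)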
def np_sort(x):
if descending:
return np.sort(x)[..., ::-1]
return np.sort(x)
res = F.topk(
tensor(data), k, descending=descending, no_sort=(not sorted), kth_only=kth_only
)
values, indices = res
values = values.numpy()
indices = indices.numpy()
if kth_only:
np.testing.assert_equal(
values, np.take_along_axis(data, indices[..., None], -1).squeeze(-1)
)
np.testing.assert_equal(values, np_sort(data)[..., k - 1])
else:
np.testing.assert_equal(values, np.take_along_axis(data, indices, -1))
if not sorted:
values = np_sort(values)
np.testing.assert_equal(values, np_sort(data)[..., :k])
@pytest.mark.parametrize("is_trace", [True, False])
def test_reduce_on_empty_tensor(is_trace):
    dtypes = [np.float32, np.int32, np.bool_]  # np.bool_: the np.bool alias was removed in NumPy 1.24
inputs = [
(np.random.random((0,)), None),
(np.random.random((3, 0, 2)), 1),
(np.random.random((10, 10, 0, 10)), 0),
]
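    # Reductions over empty tensors should follow numpy's identities (sum of
    # nothing -> 0, product of nothing -> 1) in both traced and eager mode.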
def run_test(fn, ref_fn, input, dtype, axis=None, symbolic=False):
if is_trace:
fn = jit.trace(symbolic=symbolic)(fn)
for i in range(3):
out = fn(tensor(input, dtype=dtype), axis=axis).numpy()
out_ref = ref_fn(input.astype(dtype), axis=axis)
np.testing.assert_equal(out, out_ref)
for dtype in dtypes:
for inp, axis in inputs:
run_test(F.sum, np.sum, inp, dtype, axis, True)
run_test(F.sum, np.sum, inp, dtype, axis, False)
run_test(F.prod, np.prod, inp, dtype, axis, True)
run_test(F.prod, np.prod, inp, dtype, axis, False)
|
[
"megengine.jit.trace",
"megengine.tensor",
"megengine.functional.sort"
] |
[((3460, 3519), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""is_symbolic"""', '[None, False, True]'], {}), "('is_symbolic', [None, False, True])\n", (3483, 3519), False, 'import pytest\n'), ((6105, 6157), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""descending"""', '[True, False]'], {}), "('descending', [True, False])\n", (6128, 6157), False, 'import pytest\n'), ((6159, 6207), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""sorted"""', '[True, False]'], {}), "('sorted', [True, False])\n", (6182, 6207), False, 'import pytest\n'), ((6209, 6256), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""inp1d"""', '[True, False]'], {}), "('inp1d', [True, False])\n", (6232, 6256), False, 'import pytest\n'), ((6258, 6308), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""kth_only"""', '[True, False]'], {}), "('kth_only', [True, False])\n", (6281, 6308), False, 'import pytest\n'), ((7259, 7309), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""is_trace"""', '[True, False]'], {}), "('is_trace', [True, False])\n", (7282, 7309), False, 'import pytest\n'), ((2951, 2990), 'utils.opr_test', 'opr_test', (['cases', 'F.sqrt'], {'ref_fn': 'np.sqrt'}), '(cases, F.sqrt, ref_fn=np.sqrt)\n', (2959, 2990), False, 'from utils import opr_test\n'), ((3433, 3456), 'utils.opr_test', 'opr_test', (['cases', 'F.sort'], {}), '(cases, F.sort)\n', (3441, 3456), False, 'from utils import opr_test\n'), ((984, 1020), 'utils.opr_test', 'opr_test', (['cases', 'opr'], {'ref_fn': 'ref_opr'}), '(cases, opr, ref_fn=ref_opr)\n', (992, 1020), False, 'from utils import opr_test\n'), ((3199, 3213), 'numpy.sort', 'np.sort', (['data1'], {}), '(data1)\n', (3206, 3213), True, 'import numpy as np\n'), ((3266, 3280), 'numpy.sort', 'np.sort', (['data2'], {}), '(data2)\n', (3273, 3280), True, 'import numpy as np\n'), ((3642, 3651), 'megengine.functional.sort', 'F.sort', (['x'], {}), '(x)\n', (3648, 3651), True, 'import megengine.functional as F\n'), ((4892, 4920), 'functools.partial', 'partial', (['F.normalize'], {'axis': '(1)'}), '(F.normalize, axis=1)\n', (4899, 4920), False, 'from functools import partial\n'), ((5083, 5111), 'functools.partial', 'partial', (['F.normalize'], {'axis': '(3)'}), '(F.normalize, axis=3)\n', (5090, 5111), False, 'from functools import partial\n'), ((5354, 5377), 'numpy.sum', 'np.sum', (['data'], {'axis': 'axis'}), '(data, axis=axis)\n', (5360, 5377), True, 'import numpy as np\n'), ((5451, 5480), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (5464, 5480), False, 'import pytest\n'), ((6400, 6424), 'numpy.random.permutation', 'np.random.permutation', (['(7)'], {}), '(7)\n', (6421, 6424), True, 'import numpy as np\n'), ((6626, 6636), 'numpy.sort', 'np.sort', (['x'], {}), '(x)\n', (6633, 6636), True, 'import numpy as np\n'), ((6664, 6676), 'megengine.tensor', 'tensor', (['data'], {}), '(data)\n', (6670, 6676), False, 'from megengine import jit, tensor\n'), ((645, 674), 'numpy.random.random', 'np.random.random', (['data1_shape'], {}), '(data1_shape)\n', (661, 674), True, 'import numpy as np\n'), ((706, 735), 'numpy.random.random', 'np.random.random', (['data2_shape'], {}), '(data2_shape)\n', (722, 735), True, 'import numpy as np\n'), ((839, 898), 'numpy.array', 'np.array', (['[[[1, 2, np.nan, 4], [8, 6, 5, 2], [2, 3, 4, 5]]]'], {}), '([[[1, 2, np.nan, 4], [8, 6, 5, 2], [2, 3, 4, 5]]])\n', (847, 898), True, 'import numpy as np\n'), ((2802, 2828), 'numpy.random.random', 'np.random.random', (['d1_shape'], {}), '(d1_shape)\n', (2818, 2828), True, 'import numpy as np\n'), ((2857, 2883), 'numpy.random.random', 'np.random.random', (['d2_shape'], {}), '(d2_shape)\n', (2873, 2883), True, 'import numpy as np\n'), ((3074, 3103), 'numpy.random.random', 'np.random.random', (['data1_shape'], {}), '(data1_shape)\n', (3090, 3103), True, 'import numpy as np\n'), ((3135, 3164), 'numpy.random.random', 'np.random.random', (['data2_shape'], {}), '(data2_shape)\n', (3151, 3164), True, 'import numpy as np\n'), ((4557, 4595), 'numpy.clip', 'np.clip', (['norm'], {'a_min': 'eps', 'a_max': 'np.inf'}), '(norm, a_min=eps, a_max=np.inf)\n', (4564, 4595), True, 'import numpy as np\n'), ((4929, 4958), 'functools.partial', 'partial', (['np_normalize'], {'axis': '(1)'}), '(np_normalize, axis=1)\n', (4936, 4958), False, 'from functools import partial\n'), ((5120, 5149), 'functools.partial', 'partial', (['np_normalize'], {'axis': '(3)'}), '(np_normalize, axis=3)\n', (5127, 5149), False, 'from functools import partial\n'), ((5208, 5231), 'numpy.random.random', 'np.random.random', (['shape'], {}), '(shape)\n', (5224, 5231), True, 'import numpy as np\n'), ((5315, 5327), 'megengine.tensor', 'tensor', (['data'], {}), '(data)\n', (5321, 5327), False, 'from megengine import jit, tensor\n'), ((5496, 5508), 'megengine.tensor', 'tensor', (['data'], {}), '(data)\n', (5502, 5508), False, 'from megengine import jit, tensor\n'), ((5589, 5612), 'numpy.random.random', 'np.random.random', (['shape'], {}), '(shape)\n', (5605, 5612), True, 'import numpy as np\n'), ((5644, 5667), 'numpy.random.random', 'np.random.random', (['shape'], {}), '(shape)\n', (5660, 5667), True, 'import numpy as np\n'), ((5723, 5736), 'megengine.tensor', 'tensor', (['data1'], {}), '(data1)\n', (5729, 5736), False, 'from megengine import jit, tensor\n'), ((5738, 5751), 'megengine.tensor', 'tensor', (['data2'], {}), '(data2)\n', (5744, 5751), False, 'from megengine import jit, tensor\n'), ((5874, 5887), 'megengine.tensor', 'tensor', (['data1'], {}), '(data1)\n', (5880, 5887), False, 'from megengine import jit, tensor\n'), ((5889, 5902), 'megengine.tensor', 'tensor', (['data2'], {}), '(data2)\n', (5895, 5902), False, 'from megengine import jit, tensor\n'), ((6025, 6038), 'megengine.tensor', 'tensor', (['data1'], {}), '(data1)\n', (6031, 6038), False, 'from megengine import jit, tensor\n'), ((6040, 6053), 'megengine.tensor', 'tensor', (['data2'], {}), '(data2)\n', (6046, 6053), False, 'from megengine import jit, tensor\n'), ((7093, 7130), 'numpy.take_along_axis', 'np.take_along_axis', (['data', 'indices', '(-1)'], {}), '(data, indices, -1)\n', (7111, 7130), True, 'import numpy as np\n'), ((7422, 7444), 'numpy.random.random', 'np.random.random', (['(0,)'], {}), '((0,))\n', (7438, 7444), True, 'import numpy as np\n'), ((7462, 7489), 'numpy.random.random', 'np.random.random', (['(3, 0, 2)'], {}), '((3, 0, 2))\n', (7478, 7489), True, 'import numpy as np\n'), ((7504, 7537), 'numpy.random.random', 'np.random.random', (['(10, 10, 0, 10)'], {}), '((10, 10, 0, 10))\n', (7520, 7537), True, 'import numpy as np\n'), ((7860, 7897), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['out', 'out_ref'], {}), '(out, out_ref)\n', (7883, 7897), True, 'import numpy as np\n'), ((3215, 3232), 'numpy.argsort', 'np.argsort', (['data1'], {}), '(data1)\n', (3225, 3232), True, 'import numpy as np\n'), ((3282, 3299), 'numpy.argsort', 'np.argsort', (['data2'], {}), '(data2)\n', (3292, 3299), True, 'import numpy as np\n'), ((3737, 3768), 'megengine.jit.trace', 'jit.trace', ([], {'symbolic': 'is_symbolic'}), '(symbolic=is_symbolic)\n', (3746, 3768), False, 'from megengine import jit, tensor\n'), ((3823, 3846), 'numpy.random.random', 'np.random.random', (['shape'], {}), '(shape)\n', (3839, 3846), True, 'import numpy as np\n'), ((3916, 3928), 'megengine.tensor', 'tensor', (['data'], {}), '(data)\n', (3922, 3928), False, 'from megengine import jit, tensor\n'), ((3954, 3967), 'numpy.sort', 'np.sort', (['data'], {}), '(data)\n', (3961, 3967), True, 'import numpy as np\n'), ((3969, 3985), 'numpy.argsort', 'np.argsort', (['data'], {}), '(data)\n', (3979, 3985), True, 'import numpy as np\n'), ((4423, 4437), 'numpy.sum', 'np.sum', (['(x ** p)'], {}), '(x ** p)\n', (4429, 4437), True, 'import numpy as np\n'), ((4484, 4524), 'numpy.sum', 'np.sum', (['(x ** p)'], {'axis': 'axis', 'keepdims': '(True)'}), '(x ** p, axis=axis, keepdims=True)\n', (4490, 4524), True, 'import numpy as np\n'), ((6450, 6478), 'numpy.random.permutation', 'np.random.permutation', (['(5 * 7)'], {}), '(5 * 7)\n', (6471, 6478), True, 'import numpy as np\n'), ((6589, 6599), 'numpy.sort', 'np.sort', (['x'], {}), '(x)\n', (6596, 6599), True, 'import numpy as np\n'), ((7659, 7687), 'megengine.jit.trace', 'jit.trace', ([], {'symbolic': 'symbolic'}), '(symbolic=symbolic)\n', (7668, 7687), False, 'from megengine import jit, tensor\n'), ((4249, 4281), 'numpy.random.random', 'np.random.random', (['(2, 3, 12, 12)'], {}), '((2, 3, 12, 12))\n', (4265, 4281), True, 'import numpy as np\n'), ((6905, 6953), 'numpy.take_along_axis', 'np.take_along_axis', (['data', 'indices[..., None]', '(-1)'], {}), '(data, indices[..., None], -1)\n', (6923, 6953), True, 'import numpy as np\n'), ((7740, 7766), 'megengine.tensor', 'tensor', (['input'], {'dtype': 'dtype'}), '(input, dtype=dtype)\n', (7746, 7766), False, 'from megengine import jit, tensor\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 9 19:16:05 2021
@author: shrohanmohapatra
"""
r"""
Navier-Stokes equations for incompressible fluid flow in 2D.
Find :math:`\ul{u}`, :math:`p` such that:
.. math::
\int_{\Omega} \nu\ \nabla \ul{v} : \nabla \ul{u}
+ \int_{\Omega} ((\ul{u} \cdot \nabla) \ul{u}) \cdot \ul{v}
- \int_{\Omega} p\ \nabla \cdot \ul{v}
= 0
\;, \quad \forall \ul{v} \;,
\int_{\Omega} q\ \nabla \cdot \ul{u}
= 0
\;, \quad \forall q \;.
The mesh is created by the ``gen_block_mesh()`` function.
View the results using::
$ ./postproc.py user_block.vtk -b
"""
#from __future__ import absolute_import
from sfepy.discrete.fem.meshio import UserMeshIO
from sfepy.mesh.mesh_generators import gen_block_mesh
import numpy as nm
# Mesh dimensions.
dims = [0.1, 0.1]
# Mesh resolution: increase to improve accuracy.
shape = [75, 75]
def mesh_hook(mesh, mode):
"""
Generate the block mesh.
"""
if mode == 'read':
mesh = gen_block_mesh(dims, shape, [0, 0],
name='user_block', verbose=False)
return mesh
elif mode == 'write':
pass
filename_mesh = UserMeshIO(mesh_hook)
print('Just a check if the mesh is generated ...')
def get_circle(coors, domain=None):
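    # Select the vertices lying within radius 0.09 of the block centre.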
r = nm.sqrt(coors[:,0]**2.0 + coors[:,1]**2.0)
return nm.where(r < 0.09)[0]
functions = {
'get_circle' : (get_circle,),
}
print('Checked in a function ...')
regions = {
'Omega' : 'all',
'Walls' : ('vertices by get_circle', 'facet'),
}
print('Checked in the definition of regions ...')
materials = {
'fluid' : ({'viscosity' : 8.917e-7},),
}
print('Checked in the definition of the material params ...')
fields = {
'velocity': ('real', 'vector', 'Omega', 2),
'pressure': ('real', 'scalar', 'Omega', 1),
}
print('Checked in the definition of u, v, p ...')
variables = {
'u' : ('unknown field', 'velocity', 0),
'v' : ('test field', 'velocity', 'u'),
'p' : ('unknown field', 'pressure', 1),
'q' : ('test field', 'pressure', 'p'),
}
ebcs = {
'1_Wall': {'Walls': {'u.all':10}}
}
print('Checked in the boundary conditions ...')
integrals = {
'i' : 4,
}
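# The terms below mirror the weak form in the module docstring: dw_div_grad is
# the viscous term, dw_convect the nonlinear convection term and dw_stokes the
# pressure coupling; the second equation enforces incompressibility.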
equations = {
'balance' :
"""+ dw_div_grad.i.Omega(fluid.viscosity, v, u)
+ dw_convect.i.Omega(v, u)
- dw_stokes.i.Omega(v, p) = 0""",
'incompressibility' :
"""dw_stokes.i.Omega(u, q) = 0""",
}
solvers = {
'ls' : ('ls.scipy_direct', {}),
'newton' : ('nls.newton', {
'i_max' : 15,
'eps_a' : 1e-10,
'eps_r' : 1.0,
}),
}
|
[
"sfepy.mesh.mesh_generators.gen_block_mesh",
"sfepy.discrete.fem.meshio.UserMeshIO"
] |
[((1195, 1216), 'sfepy.discrete.fem.meshio.UserMeshIO', 'UserMeshIO', (['mesh_hook'], {}), '(mesh_hook)\n', (1205, 1216), False, 'from sfepy.discrete.fem.meshio import UserMeshIO\n'), ((1312, 1360), 'numpy.sqrt', 'nm.sqrt', (['(coors[:, 0] ** 2.0 + coors[:, 1] ** 2.0)'], {}), '(coors[:, 0] ** 2.0 + coors[:, 1] ** 2.0)\n', (1319, 1360), True, 'import numpy as nm\n'), ((1017, 1086), 'sfepy.mesh.mesh_generators.gen_block_mesh', 'gen_block_mesh', (['dims', 'shape', '[0, 0]'], {'name': '"""user_block"""', 'verbose': '(False)'}), "(dims, shape, [0, 0], name='user_block', verbose=False)\n", (1031, 1086), False, 'from sfepy.mesh.mesh_generators import gen_block_mesh\n'), ((1366, 1384), 'numpy.where', 'nm.where', (['(r < 0.09)'], {}), '(r < 0.09)\n', (1374, 1384), True, 'import numpy as nm\n')]
|
import re
from datetime import datetime
from enum import Enum
from functools import lru_cache
from inspect import Parameter, signature
from typing import (
TYPE_CHECKING,
Any,
Callable,
Generator,
Generic,
List,
Optional,
Tuple,
Type,
TypeVar,
Union,
)
from uuid import UUID
from fastapi import Depends, File, Form, Request, UploadFile, params
from fastapi_utils.api_model import APIModel
from fastapi_utils.camelcase import snake2camel
from makefun import wraps
from pydantic import (
BaseModel as PydanticBaseModel,
ConstrainedInt,
ConstrainedStr,
create_model,
)
from pydantic.datetime_parse import parse_datetime
from pydantic.fields import Undefined
from pydantic.main import ModelMetaclass
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.sql.functions import FunctionElement
from sqlalchemy.sql.schema import Column
from sqlalchemy.types import DateTime
from sqlmodel import Field, SQLModel
from starlette.datastructures import MultiDict
from joj.horse.utils.base import is_uuid
from joj.horse.utils.errors import ErrorCode
if TYPE_CHECKING:
Model = TypeVar("Model", bound="BaseModel")
class BaseModel(APIModel):
""""""
class Config:
validate_all = True
class Operation(Enum):
Create = "Create"
Read = "Read"
Update = "Update"
Delete = "Delete"
class NoneNegativeInt(ConstrainedInt):
ge = 0
# class PositiveInt(ConstrainedInt):
# gt = 0
class PaginationLimit(NoneNegativeInt):
le = 500
class LongStr(ConstrainedStr):
max_length = 256
class NoneEmptyStr(ConstrainedStr):
min_length = 1
class SearchQueryStr(ConstrainedStr):
min_length = 2
class NoneEmptyLongStr(LongStr, NoneEmptyStr):
pass
class UserInputURL(str):
URL_RE = re.compile(r"[\w-]+", flags=re.ASCII)
@classmethod
def __get_validators__(
cls,
) -> Generator[Callable[[Union[str, Any]], str], None, None]:
yield cls.validate
@classmethod
def validate(cls, v: Optional[str]) -> LongStr:
if not v:
return LongStr("")
if is_uuid(v):
raise ValueError("url can not be uuid")
if not UserInputURL.URL_RE.fullmatch(v):
raise ValueError("url can only contains [a-zA-Z0-9_-]")
return LongStr(v)
class LongText(ConstrainedStr):
max_length = 65536
class utcnow(FunctionElement):
type = DateTime()
@compiles(utcnow, "postgresql")
def pg_utcnow(element: Any, compiler: Any, **kwargs: Any) -> str:
return "TIMEZONE('utc', CURRENT_TIMESTAMP)"
@compiles(utcnow, "mssql")
def ms_utcnow(element: Any, compiler: Any, **kwargs: Any) -> str:
return "GETUTCDATE()"
def get_datetime_column(**kwargs: Any) -> Column:
if "index" not in kwargs:
kwargs["index"] = False
if "nullable" not in kwargs:
kwargs["nullable"] = False
return Column(DateTime(timezone=True), **kwargs)
class UTCDatetime(datetime):
"""parse a datetime and convert in into UTC format"""
@classmethod
def __get_validators__(cls) -> Any:
yield cls.validate
@classmethod
def validate(cls, v: Any) -> datetime:
return datetime.fromtimestamp(parse_datetime(v).timestamp())
class BaseORMSchema(SQLModel, BaseModel):
pass
class URLORMSchema(BaseORMSchema):
url: str = Field("", description="(unique) url of the domain")
class URLCreateMixin(BaseModel):
if not TYPE_CHECKING:
url: UserInputURL = Field("", description="(unique) url of the domain")
class DomainMixin(BaseModel):
domain_id: UUID
class IDMixin(BaseModel):
id: UUID
class TimestampMixin(BaseModel):
created_at: Optional[datetime]
updated_at: Optional[datetime]
class EditMetaclass(ModelMetaclass):
async def edit_dependency(cls: Any) -> Any: # pragma: no cover
pass
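    # __new__ attaches an ``edit_dependency`` coroutine that resets every field
    # missing from the request body to ``Undefined``, so a partial update can
    # distinguish "field not sent" from "field explicitly set to null".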
def __new__(mcs, name: str, bases: Any, class_dict: Any, **kwargs: Any) -> Any:
cls = super().__new__(mcs, name, bases, class_dict, **kwargs)
async def edit_dependency(request: Request, edit: cls) -> cls: # type: ignore
data = await request.json()
for field in cls.__fields__.values():
if field.name not in data and field.alias not in data:
setattr(edit, field.name, Undefined)
return edit
cls.edit_dependency = edit_dependency
return cls
class FormMetaclass(ModelMetaclass):
"""
    Adds a form_dependency class method to the original model.
The form_dependency class method can be used with FastAPI endpoints.
"""
async def form_dependency(cls: Any) -> Any: # pragma: no cover
pass
def __new__(mcs, name: str, bases: Any, class_dict: Any, **kwargs: Any) -> Any:
cls = super().__new__(mcs, name, bases, class_dict, **kwargs)
parameters = []
for field in cls.__fields__.values():
if field.type_ == UploadFile:
fastapi_type = File
else:
fastapi_type = Form
parameters.append(
Parameter(
field.name,
Parameter.POSITIONAL_ONLY,
default=(
fastapi_type(field.default)
if not field.required
else fastapi_type(...)
),
annotation=field.outer_type_,
)
)
async def form_dependency(**data: Any) -> cls: # type: ignore
return cls(**data)
sig = signature(form_dependency)
sig = sig.replace(parameters=parameters)
form_dependency.__signature__ = sig # type: ignore
cls.form_dependency = form_dependency
return cls
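# A hypothetical usage sketch (the names below are illustrative, not part of
# this module): a model built with FormMetaclass can be consumed by FastAPI
# as a form-data dependency, e.g.
#     @app.post("/files")
#     async def upload(data: MyForm = Depends(MyForm.form_dependency)) -> Any:
#         ...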
BT = TypeVar("BT", bound=PydanticBaseModel)
@lru_cache(maxsize=128)
def get_standard_list_response_sub_model(
cls: Type[PydanticBaseModel],
) -> Type[PydanticBaseModel]:
name = cls.__name__
return create_model(
f"{name}List",
count=(int, 0),
results=(List[cls], []), # type: ignore
__base__=BaseModel,
)
@lru_cache(maxsize=None)
def get_standard_response_model(
cls: Type[PydanticBaseModel], is_list: bool = False
) -> Tuple[Type[PydanticBaseModel], Optional[Type[PydanticBaseModel]]]:
name = cls.__name__
sub_model: Optional[Type[PydanticBaseModel]]
if is_list:
model_name = f"{name}ListResp"
sub_model = get_standard_list_response_sub_model(cls)
data_type = (Optional[sub_model], None)
else:
model_name = f"{name}Resp"
sub_model = None
data_type = (Optional[cls], None)
return (
create_model(
model_name,
error_code=(ErrorCode, ...),
error_msg=(Optional[str], None),
data=data_type,
__base__=BaseModel,
),
sub_model,
)
class Empty(BaseModel):
pass
class StandardErrorResponse(BaseModel):
error_code: ErrorCode
error_msg: Optional[str] = None
data: Optional[Any] = None
class StandardResponse(Generic[BT]):
def __class_getitem__(cls, item: Any) -> Type[Any]:
return get_standard_response_model(item)[0]
def __new__(
cls, data: Union[BT, Type[BT], Empty] = Empty()
) -> "StandardResponse[BT]":
response_type, _ = get_standard_response_model(type(data)) # type: ignore
response_data = data
return response_type( # type: ignore
error_code=ErrorCode.Success, error_msg=None, data=response_data
)
class StandardListResponse(Generic[BT]):
def __class_getitem__(cls, item: Any) -> Type[Any]:
return get_standard_response_model(item, True)[0]
def __new__(
cls,
results: Optional[List[BT]] = None,
count: Optional[int] = None,
) -> "StandardListResponse[BT]":
if results is None:
results = []
data_type = len(results) and type(results[0]) or Empty
response_type, sub_model_type = get_standard_response_model(data_type, True)
if count is None:
count = len(results)
response_data: PydanticBaseModel
if sub_model_type is None:
response_data = Empty()
else:
response_data = sub_model_type(count=count, results=results)
return response_type( # type: ignore
error_code=ErrorCode.Success, error_msg=None, data=response_data
)
class LimitOffsetPagination(BaseModel):
count: int
def camelcase_parameters(func: Any) -> Any:
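    # Give every un-aliased Query/Path parameter a camelCase alias and inject a
    # dependency that mirrors incoming snake_case query keys to camelCase, so
    # clients may use either spelling.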
func_sig = signature(func)
parameters = list(func_sig.parameters.values())
start_index = -1
for i, parameter in enumerate(parameters):
if (
parameter.default
and isinstance(parameter.default, (params.Query, params.Path))
and parameter.default.alias is None
):
if start_index < 0:
start_index = i
parameter.default.alias = snake2camel(parameter.name, start_lower=True)
if start_index >= 0:
parameters.insert(
start_index,
Parameter(
"camelcase_parameters_dependency",
kind=Parameter.POSITIONAL_OR_KEYWORD,
default=Depends(camelcase_parameters_dependency),
),
)
new_sig = func_sig.replace(parameters=parameters)
@wraps(func, new_sig=new_sig)
def wrapper(*args: Any, **kwargs: Any) -> Any:
if "camelcase_parameters_dependency" in kwargs:
del kwargs["camelcase_parameters_dependency"]
return func(*args, **kwargs)
return wrapper
def camelcase_parameters_dependency(request: Request) -> None:
query_params = request.query_params
new_params = MultiDict()
for k, v in query_params.multi_items():
if "_" in k:
camel = snake2camel(k, start_lower=True)
new_params.append(camel, v)
else:
new_params.append(k, v)
request._query_params = new_params
|
[
"sqlmodel.Field"
] |
[((2437, 2467), 'sqlalchemy.ext.compiler.compiles', 'compiles', (['utcnow', '"""postgresql"""'], {}), "(utcnow, 'postgresql')\n", (2445, 2467), False, 'from sqlalchemy.ext.compiler import compiles\n'), ((2585, 2610), 'sqlalchemy.ext.compiler.compiles', 'compiles', (['utcnow', '"""mssql"""'], {}), "(utcnow, 'mssql')\n", (2593, 2610), False, 'from sqlalchemy.ext.compiler import compiles\n'), ((5783, 5821), 'typing.TypeVar', 'TypeVar', (['"""BT"""'], {'bound': 'PydanticBaseModel'}), "('BT', bound=PydanticBaseModel)\n", (5790, 5821), False, 'from typing import TYPE_CHECKING, Any, Callable, Generator, Generic, List, Optional, Tuple, Type, TypeVar, Union\n'), ((5825, 5847), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(128)'}), '(maxsize=128)\n', (5834, 5847), False, 'from functools import lru_cache\n'), ((6136, 6159), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': 'None'}), '(maxsize=None)\n', (6145, 6159), False, 'from functools import lru_cache\n'), ((1136, 1171), 'typing.TypeVar', 'TypeVar', (['"""Model"""'], {'bound': '"""BaseModel"""'}), "('Model', bound='BaseModel')\n", (1143, 1171), False, 'from typing import TYPE_CHECKING, Any, Callable, Generator, Generic, List, Optional, Tuple, Type, TypeVar, Union\n'), ((1795, 1832), 're.compile', 're.compile', (['"""[\\\\w-]+"""'], {'flags': 're.ASCII'}), "('[\\\\w-]+', flags=re.ASCII)\n", (1805, 1832), False, 'import re\n'), ((2423, 2433), 'sqlalchemy.types.DateTime', 'DateTime', ([], {}), '()\n', (2431, 2433), False, 'from sqlalchemy.types import DateTime\n'), ((3347, 3398), 'sqlmodel.Field', 'Field', (['""""""'], {'description': '"""(unique) url of the domain"""'}), "('', description='(unique) url of the domain')\n", (3352, 3398), False, 'from sqlmodel import Field, SQLModel\n'), ((5989, 6081), 'pydantic.create_model', 'create_model', (['f"""{name}List"""'], {'count': '(int, 0)', 'results': '(List[cls], [])', '__base__': 'BaseModel'}), "(f'{name}List', count=(int, 0), results=(List[cls], []),\n __base__=BaseModel)\n", (6001, 6081), False, 'from pydantic import BaseModel as PydanticBaseModel, ConstrainedInt, ConstrainedStr, create_model\n'), ((8599, 8614), 'inspect.signature', 'signature', (['func'], {}), '(func)\n', (8608, 8614), False, 'from inspect import Parameter, signature\n'), ((9417, 9445), 'makefun.wraps', 'wraps', (['func'], {'new_sig': 'new_sig'}), '(func, new_sig=new_sig)\n', (9422, 9445), False, 'from makefun import wraps\n'), ((9790, 9801), 'starlette.datastructures.MultiDict', 'MultiDict', ([], {}), '()\n', (9799, 9801), False, 'from starlette.datastructures import MultiDict\n'), ((2115, 2125), 'joj.horse.utils.base.is_uuid', 'is_uuid', (['v'], {}), '(v)\n', (2122, 2125), False, 'from joj.horse.utils.base import is_uuid\n'), ((2903, 2926), 'sqlalchemy.types.DateTime', 'DateTime', ([], {'timezone': '(True)'}), '(timezone=True)\n', (2911, 2926), False, 'from sqlalchemy.types import DateTime\n'), ((3488, 3539), 'sqlmodel.Field', 'Field', (['""""""'], {'description': '"""(unique) url of the domain"""'}), "('', description='(unique) url of the domain')\n", (3493, 3539), False, 'from sqlmodel import Field, SQLModel\n'), ((5575, 5601), 'inspect.signature', 'signature', (['form_dependency'], {}), '(form_dependency)\n', (5584, 5601), False, 'from inspect import Parameter, signature\n'), ((6692, 6819), 'pydantic.create_model', 'create_model', (['model_name'], {'error_code': '(ErrorCode, ...)', 'error_msg': '(Optional[str], None)', 'data': 'data_type', '__base__': 'BaseModel'}), '(model_name, error_code=(ErrorCode, ...), error_msg=(Optional[\n str], None), data=data_type, __base__=BaseModel)\n', (6704, 6819), False, 'from pydantic import BaseModel as PydanticBaseModel, ConstrainedInt, ConstrainedStr, create_model\n'), ((9014, 9059), 'fastapi_utils.camelcase.snake2camel', 'snake2camel', (['parameter.name'], {'start_lower': '(True)'}), '(parameter.name, start_lower=True)\n', (9025, 9059), False, 'from fastapi_utils.camelcase import snake2camel\n'), ((9887, 9919), 'fastapi_utils.camelcase.snake2camel', 'snake2camel', (['k'], {'start_lower': '(True)'}), '(k, start_lower=True)\n', (9898, 9919), False, 'from fastapi_utils.camelcase import snake2camel\n'), ((3211, 3228), 'pydantic.datetime_parse.parse_datetime', 'parse_datetime', (['v'], {}), '(v)\n', (3225, 3228), False, 'from pydantic.datetime_parse import parse_datetime\n'), ((9290, 9330), 'fastapi.Depends', 'Depends', (['camelcase_parameters_dependency'], {}), '(camelcase_parameters_dependency)\n', (9297, 9330), False, 'from fastapi import Depends, File, Form, Request, UploadFile, params\n')]
|
import traceback
from contextlib import contextmanager
from sqlmodel import create_engine, Session
from fastapi_dream_core.application_dependencies.application_dependencies_abc import ApplicationDependenciesABC
from fastapi_dream_core.utils import logger
class DatabaseSQLModel(ApplicationDependenciesABC):
def __init__(self, db_url: str, echo_queries: bool = False) -> None:
self._engine = create_engine(db_url, echo=echo_queries)
def readiness(self) -> bool:
with Session(self._engine) as session:
try:
database_status = session.connection().connection.is_valid
logger.debug(f"DatabaseSQLModel.readiness = {database_status}")
                return bool(database_status)
except Exception:
traceback.print_exc()
return False
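    # Context-managed session helper: roll back and re-raise on any exception,
    # and always close the session afterwards.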
@contextmanager
def session(self) -> Session:
with Session(self._engine) as session:
try:
yield session
except Exception:
logger.exception("Session rollback because of exception")
session.rollback()
raise
finally:
session.close()
def __str__(self):
return "DatabaseSQLModel"
|
[
"sqlmodel.create_engine",
"sqlmodel.Session"
] |
[((408, 448), 'sqlmodel.create_engine', 'create_engine', (['db_url'], {'echo': 'echo_queries'}), '(db_url, echo=echo_queries)\n', (421, 448), False, 'from sqlmodel import create_engine, Session\n'), ((496, 517), 'sqlmodel.Session', 'Session', (['self._engine'], {}), '(self._engine)\n', (503, 517), False, 'from sqlmodel import create_engine, Session\n'), ((896, 917), 'sqlmodel.Session', 'Session', (['self._engine'], {}), '(self._engine)\n', (903, 917), False, 'from sqlmodel import create_engine, Session\n'), ((638, 701), 'fastapi_dream_core.utils.logger.debug', 'logger.debug', (['f"""DatabaseSQLModel.readiness = {database_status}"""'], {}), "(f'DatabaseSQLModel.readiness = {database_status}')\n", (650, 701), False, 'from fastapi_dream_core.utils import logger\n'), ((777, 798), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (796, 798), False, 'import traceback\n'), ((1023, 1080), 'fastapi_dream_core.utils.logger.exception', 'logger.exception', (['"""Session rollback because of exception"""'], {}), "('Session rollback because of exception')\n", (1039, 1080), False, 'from fastapi_dream_core.utils import logger\n')]
|
import os
import sys
import time
import logging
from collections import namedtuple
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
import megengine.autodiff as autodiff
import megengine.optimizer as optim
import yaml
from tensorboardX import SummaryWriter
from nets import Model
from dataset import CREStereoDataset
from megengine.data import DataLoader, RandomSampler, Infinite
def parse_yaml(file_path: str) -> namedtuple:
"""Parse yaml configuration file and return the object in `namedtuple`."""
with open(file_path, "rb") as f:
cfg: dict = yaml.safe_load(f)
args = namedtuple("train_args", cfg.keys())(*cfg.values())
return args
def format_time(elapse):
elapse = int(elapse)
hour = elapse // 3600
minute = elapse % 3600 // 60
seconds = elapse % 60
return "{:02d}:{:02d}:{:02d}".format(hour, minute, seconds)
def ensure_dir(path):
if not os.path.exists(path):
os.makedirs(path, exist_ok=True)
def adjust_learning_rate(optimizer, epoch):
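    # Piecewise schedule: linear warm-up over the first ``warm_up`` fraction of
    # epochs, constant base_lr until ``const_range``, then linear decay down to
    # min_lr_rate * base_lr at the final epoch.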
warm_up = 0.02
const_range = 0.6
min_lr_rate = 0.05
if epoch <= args.n_total_epoch * warm_up:
lr = (1 - min_lr_rate) * args.base_lr / (
args.n_total_epoch * warm_up
) * epoch + min_lr_rate * args.base_lr
elif args.n_total_epoch * warm_up < epoch <= args.n_total_epoch * const_range:
lr = args.base_lr
else:
lr = (min_lr_rate - 1) * args.base_lr / (
(1 - const_range) * args.n_total_epoch
) * epoch + (1 - min_lr_rate * const_range) / (1 - const_range) * args.base_lr
optimizer.param_groups[0]["lr"] = lr
def sequence_loss(flow_preds, flow_gt, valid, gamma=0.8):
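    # RAFT-style exponentially weighted loss over the iterative predictions:
    # prediction i gets weight gamma**(n_predictions - i - 1), so the final
    # refinement contributes the most.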
n_predictions = len(flow_preds)
flow_loss = 0.0
for i in range(n_predictions):
i_weight = gamma ** (n_predictions - i - 1)
i_loss = F.abs(flow_preds[i] - flow_gt)
flow_loss += i_weight * (F.expand_dims(valid, axis=1) * i_loss).mean()
return flow_loss
def main(args):
# initial info
mge.random.seed(args.seed)
rank, world_size = dist.get_rank(), dist.get_world_size()
mge.dtr.enable() # Dynamic tensor rematerialization for memory optimization
# directory check
log_model_dir = os.path.join(args.log_dir, "models")
ensure_dir(log_model_dir)
# model / optimizer
model = Model(
max_disp=args.max_disp, mixed_precision=args.mixed_precision, test_mode=False
)
optimizer = optim.Adam(model.parameters(), lr=0.1, betas=(0.9, 0.999))
dist_callbacks = None if world_size == 1 else [dist.make_allreduce_cb("mean")]
gm = autodiff.GradManager().attach(model.parameters(), callbacks=dist_callbacks)
scaler = mge.amp.GradScaler() if args.mixed_precision else None
if rank == 0:
# tensorboard
tb_log = SummaryWriter(os.path.join(args.log_dir, "train.events"))
# worklog
logging.basicConfig(level=eval(args.log_level))
worklog = logging.getLogger("train_logger")
worklog.propagate = False
fileHandler = logging.FileHandler(
os.path.join(args.log_dir, "worklog.txt"), mode="a", encoding="utf8"
)
formatter = logging.Formatter(
fmt="%(asctime)s %(message)s", datefmt="%Y/%m/%d %H:%M:%S"
)
fileHandler.setFormatter(formatter)
consoleHandler = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter(
fmt="\x1b[32m%(asctime)s\x1b[0m %(message)s", datefmt="%Y/%m/%d %H:%M:%S"
)
consoleHandler.setFormatter(formatter)
worklog.handlers = [fileHandler, consoleHandler]
# params stat
worklog.info(f"Use {world_size} GPU(s)")
worklog.info("Params: %s" % sum([p.size for p in model.parameters()]))
# load pretrained model if exist
chk_path = os.path.join(log_model_dir, "latest.mge")
if args.loadmodel is not None:
chk_path = args.loadmodel
elif not os.path.exists(chk_path):
chk_path = None
if chk_path is not None:
if rank == 0:
worklog.info(f"loading model: {chk_path}")
pretrained_dict = mge.load(chk_path, map_location="cpu")
resume_epoch_idx = pretrained_dict["epoch"]
resume_iters = pretrained_dict["iters"]
model.load_state_dict(pretrained_dict["state_dict"], strict=True)
optimizer.load_state_dict(pretrained_dict["optim_state_dict"])
start_epoch_idx = resume_epoch_idx + 1
start_iters = resume_iters
else:
start_epoch_idx = 1
start_iters = 0
# auxiliary
if world_size > 1:
dist.bcast_list_(model.tensors())
# datasets
dataset = CREStereoDataset(args.training_data_path)
if rank == 0:
worklog.info(f"Dataset size: {len(dataset)}")
inf_sampler = Infinite(
RandomSampler(
dataset,
batch_size=args.batch_size_single,
drop_last=False,
world_size=world_size,
rank=rank,
seed=args.seed,
)
)
dataloader = DataLoader(
dataset, sampler=inf_sampler, num_workers=0, divide=False, preload=True
)
# counter
cur_iters = start_iters
total_iters = args.minibatch_per_epoch * args.n_total_epoch
t0 = time.perf_counter()
for epoch_idx in range(start_epoch_idx, args.n_total_epoch + 1):
# adjust learning rate
epoch_total_train_loss = 0
adjust_learning_rate(optimizer, epoch_idx)
model.train()
t1 = time.perf_counter()
batch_idx = 0
for mini_batch_data in dataloader:
if batch_idx % args.minibatch_per_epoch == 0 and batch_idx != 0:
break
batch_idx += 1
cur_iters += 1
# parse data
left, right, gt_disp, valid_mask = (
mini_batch_data["left"],
mini_batch_data["right"],
mini_batch_data["disparity"],
mini_batch_data["mask"],
)
t2 = time.perf_counter()
with gm: # GradManager
with mge.amp.autocast(enabled=args.mixed_precision):
# pre-process
left = mge.tensor(left)
right = mge.tensor(right)
gt_disp = mge.tensor(gt_disp)
valid_mask = mge.tensor(valid_mask)
gt_disp = F.expand_dims(gt_disp, axis=1)
gt_flow = F.concat([gt_disp, gt_disp * 0], axis=1)
# forward
flow_predictions = model(left, right)
                # loss & backward
loss = sequence_loss(
flow_predictions, gt_flow, valid_mask, gamma=0.8
)
if args.mixed_precision:
scaler.backward(gm, loss)
else:
gm.backward(loss)
optimizer.step().clear_grad()
# loss stats
loss_item = loss.item()
epoch_total_train_loss += loss_item
t3 = time.perf_counter()
# terminal print log
if rank == 0:
if cur_iters % 5 == 0:
tdata = t2 - t1
time_train_passed = t3 - t0
time_iter_passed = t3 - t1
step_passed = cur_iters - start_iters
eta = (
(total_iters - cur_iters)
/ max(step_passed, 1e-7)
* time_train_passed
)
meta_info = list()
meta_info.append("{:.2g} b/s".format(1.0 / time_iter_passed))
meta_info.append("passed:{}".format(format_time(time_train_passed)))
meta_info.append("eta:{}".format(format_time(eta)))
meta_info.append(
"data_time:{:.2g}".format(tdata / time_iter_passed)
)
meta_info.append(
"lr:{:.5g}".format(optimizer.param_groups[0]["lr"])
)
meta_info.append(
"[{}/{}:{}/{}]".format(
epoch_idx,
args.n_total_epoch,
batch_idx,
args.minibatch_per_epoch,
)
)
loss_info = [" ==> {}:{:.4g}".format("loss", loss_item)]
# exp_name = ['\n' + os.path.basename(os.getcwd())]
info = [",".join(meta_info)] + loss_info
worklog.info("".join(info))
# minibatch loss
tb_log.add_scalar("train/loss_batch", loss_item, cur_iters)
tb_log.add_scalar(
"train/lr", optimizer.param_groups[0]["lr"], cur_iters
)
tb_log.flush()
t1 = time.perf_counter()
if rank == 0:
# epoch loss
tb_log.add_scalar(
"train/loss",
epoch_total_train_loss / args.minibatch_per_epoch,
epoch_idx,
)
tb_log.flush()
# save model params
ckp_data = {
"epoch": epoch_idx,
"iters": cur_iters,
"batch_size": args.batch_size_single * args.nr_gpus,
"epoch_size": args.minibatch_per_epoch,
"train_loss": epoch_total_train_loss / args.minibatch_per_epoch,
"state_dict": model.state_dict(),
"optim_state_dict": optimizer.state_dict(),
}
mge.save(ckp_data, os.path.join(log_model_dir, "latest.mge"))
if epoch_idx % args.model_save_freq_epoch == 0:
save_path = os.path.join(log_model_dir, "epoch-%d.mge" % epoch_idx)
worklog.info(f"Model params saved: {save_path}")
mge.save(ckp_data, save_path)
if rank == 0:
worklog.info("Training is done, exit.")
if __name__ == "__main__":
# train configuration
args = parse_yaml("cfgs/train.yaml")
# distributed training
run = main if mge.get_device_count("gpu") == 1 else dist.launcher(main)
run(args)
|
[
"megengine.distributed.get_rank",
"megengine.distributed.get_world_size",
"megengine.data.DataLoader",
"megengine.get_device_count",
"megengine.load",
"megengine.functional.abs",
"megengine.tensor",
"megengine.functional.concat",
"megengine.amp.GradScaler",
"megengine.distributed.make_allreduce_cb",
"megengine.data.RandomSampler",
"megengine.random.seed",
"megengine.dtr.enable",
"megengine.distributed.launcher",
"megengine.save",
"megengine.amp.autocast",
"megengine.functional.expand_dims",
"megengine.autodiff.GradManager"
] |
[((2048, 2074), 'megengine.random.seed', 'mge.random.seed', (['args.seed'], {}), '(args.seed)\n', (2063, 2074), True, 'import megengine as mge\n'), ((2141, 2157), 'megengine.dtr.enable', 'mge.dtr.enable', ([], {}), '()\n', (2155, 2157), True, 'import megengine as mge\n'), ((2261, 2297), 'os.path.join', 'os.path.join', (['args.log_dir', '"""models"""'], {}), "(args.log_dir, 'models')\n", (2273, 2297), False, 'import os\n'), ((2365, 2453), 'nets.Model', 'Model', ([], {'max_disp': 'args.max_disp', 'mixed_precision': 'args.mixed_precision', 'test_mode': '(False)'}), '(max_disp=args.max_disp, mixed_precision=args.mixed_precision,\n test_mode=False)\n', (2370, 2453), False, 'from nets import Model\n'), ((3852, 3893), 'os.path.join', 'os.path.join', (['log_model_dir', '"""latest.mge"""'], {}), "(log_model_dir, 'latest.mge')\n", (3864, 3893), False, 'import os\n'), ((4700, 4741), 'dataset.CREStereoDataset', 'CREStereoDataset', (['args.training_data_path'], {}), '(args.training_data_path)\n', (4716, 4741), False, 'from dataset import CREStereoDataset\n'), ((5081, 5168), 'megengine.data.DataLoader', 'DataLoader', (['dataset'], {'sampler': 'inf_sampler', 'num_workers': '(0)', 'divide': '(False)', 'preload': '(True)'}), '(dataset, sampler=inf_sampler, num_workers=0, divide=False,\n preload=True)\n', (5091, 5168), False, 'from megengine.data import DataLoader, RandomSampler, Infinite\n'), ((5295, 5314), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (5312, 5314), False, 'import time\n'), ((612, 629), 'yaml.safe_load', 'yaml.safe_load', (['f'], {}), '(f)\n', (626, 629), False, 'import yaml\n'), ((945, 965), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (959, 965), False, 'import os\n'), ((975, 1007), 'os.makedirs', 'os.makedirs', (['path'], {'exist_ok': '(True)'}), '(path, exist_ok=True)\n', (986, 1007), False, 'import os\n'), ((1875, 1905), 'megengine.functional.abs', 'F.abs', (['(flow_preds[i] - flow_gt)'], {}), '(flow_preds[i] - flow_gt)\n', (1880, 1905), True, 'import megengine.functional as F\n'), ((2098, 2113), 'megengine.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (2111, 2113), True, 'import megengine.distributed as dist\n'), ((2115, 2136), 'megengine.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (2134, 2136), True, 'import megengine.distributed as dist\n'), ((2720, 2740), 'megengine.amp.GradScaler', 'mge.amp.GradScaler', ([], {}), '()\n', (2738, 2740), True, 'import megengine as mge\n'), ((2984, 3017), 'logging.getLogger', 'logging.getLogger', (['"""train_logger"""'], {}), "('train_logger')\n", (3001, 3017), False, 'import logging\n'), ((3206, 3283), 'logging.Formatter', 'logging.Formatter', ([], {'fmt': '"""%(asctime)s %(message)s"""', 'datefmt': '"""%Y/%m/%d %H:%M:%S"""'}), "(fmt='%(asctime)s %(message)s', datefmt='%Y/%m/%d %H:%M:%S')\n", (3223, 3283), False, 'import logging\n'), ((3375, 3408), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (3396, 3408), False, 'import logging\n'), ((3429, 3526), 'logging.Formatter', 'logging.Formatter', ([], {'fmt': '"""\x1b[32m%(asctime)s\x1b[0m %(message)s"""', 'datefmt': '"""%Y/%m/%d %H:%M:%S"""'}), "(fmt='\\x1b[32m%(asctime)s\\x1b[0m %(message)s', datefmt=\n '%Y/%m/%d %H:%M:%S')\n", (3446, 3526), False, 'import logging\n'), ((4160, 4198), 'megengine.load', 'mge.load', (['chk_path'], {'map_location': '"""cpu"""'}), "(chk_path, map_location='cpu')\n", (4168, 4198), True, 'import megengine as mge\n'), ((4850, 4978), 'megengine.data.RandomSampler', 'RandomSampler', (['dataset'], {'batch_size': 'args.batch_size_single', 'drop_last': '(False)', 'world_size': 'world_size', 'rank': 'rank', 'seed': 'args.seed'}), '(dataset, batch_size=args.batch_size_single, drop_last=False,\n world_size=world_size, rank=rank, seed=args.seed)\n', (4863, 4978), False, 'from megengine.data import DataLoader, RandomSampler, Infinite\n'), ((5538, 5557), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (5555, 5557), False, 'import time\n'), ((10389, 10408), 'megengine.distributed.launcher', 'dist.launcher', (['main'], {}), '(main)\n', (10402, 10408), True, 'import megengine.distributed as dist\n'), ((2590, 2620), 'megengine.distributed.make_allreduce_cb', 'dist.make_allreduce_cb', (['"""mean"""'], {}), "('mean')\n", (2612, 2620), True, 'import megengine.distributed as dist\n'), ((2631, 2653), 'megengine.autodiff.GradManager', 'autodiff.GradManager', ([], {}), '()\n', (2651, 2653), True, 'import megengine.autodiff as autodiff\n'), ((2847, 2889), 'os.path.join', 'os.path.join', (['args.log_dir', '"""train.events"""'], {}), "(args.log_dir, 'train.events')\n", (2859, 2889), False, 'import os\n'), ((3107, 3148), 'os.path.join', 'os.path.join', (['args.log_dir', '"""worklog.txt"""'], {}), "(args.log_dir, 'worklog.txt')\n", (3119, 3148), False, 'import os\n'), ((3976, 4000), 'os.path.exists', 'os.path.exists', (['chk_path'], {}), '(chk_path)\n', (3990, 4000), False, 'import os\n'), ((6055, 6074), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (6072, 6074), False, 'import time\n'), ((7149, 7168), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (7166, 7168), False, 'import time\n'), ((9089, 9108), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (9106, 9108), False, 'import time\n'), ((10351, 10378), 'megengine.get_device_count', 'mge.get_device_count', (['"""gpu"""'], {}), "('gpu')\n", (10371, 10378), True, 'import megengine as mge\n'), ((9844, 9885), 'os.path.join', 'os.path.join', (['log_model_dir', '"""latest.mge"""'], {}), "(log_model_dir, 'latest.mge')\n", (9856, 9885), False, 'import os\n'), ((9975, 10030), 'os.path.join', 'os.path.join', (['log_model_dir', "('epoch-%d.mge' % epoch_idx)"], {}), "(log_model_dir, 'epoch-%d.mge' % epoch_idx)\n", (9987, 10030), False, 'import os\n'), ((10112, 10141), 'megengine.save', 'mge.save', (['ckp_data', 'save_path'], {}), '(ckp_data, save_path)\n', (10120, 10141), True, 'import megengine as mge\n'), ((6133, 6179), 'megengine.amp.autocast', 'mge.amp.autocast', ([], {'enabled': 'args.mixed_precision'}), '(enabled=args.mixed_precision)\n', (6149, 6179), True, 'import megengine as mge\n'), ((6243, 6259), 'megengine.tensor', 'mge.tensor', (['left'], {}), '(left)\n', (6253, 6259), True, 'import megengine as mge\n'), ((6288, 6305), 'megengine.tensor', 'mge.tensor', (['right'], {}), '(right)\n', (6298, 6305), True, 'import megengine as mge\n'), ((6336, 6355), 'megengine.tensor', 'mge.tensor', (['gt_disp'], {}), '(gt_disp)\n', (6346, 6355), True, 'import megengine as mge\n'), ((6389, 6411), 'megengine.tensor', 'mge.tensor', (['valid_mask'], {}), '(valid_mask)\n', (6399, 6411), True, 'import megengine as mge\n'), ((6442, 6472), 'megengine.functional.expand_dims', 'F.expand_dims', (['gt_disp'], {'axis': '(1)'}), '(gt_disp, axis=1)\n', (6455, 6472), True, 'import megengine.functional as F\n'), ((6503, 6543), 'megengine.functional.concat', 'F.concat', (['[gt_disp, gt_disp * 0]'], {'axis': '(1)'}), '([gt_disp, gt_disp * 0], axis=1)\n', (6511, 6543), True, 'import megengine.functional as F\n'), ((1939, 1967), 'megengine.functional.expand_dims', 'F.expand_dims', (['valid'], {'axis': '(1)'}), '(valid, axis=1)\n', (1952, 1967), True, 'import megengine.functional as F\n')]
|
#!/usr/bin/env python
"""
Transient Laplace equation (heat equation) with non-constant initial conditions
given by a function, using commands for interactive use.
The script allows setting various simulation parameters, namely:
- the diffusivity coefficient
- the max. initial condition value
- temperature field approximation order
- uniform mesh refinement
The example shows also how to probe the results.
In the SfePy top-level directory the following command can be used to get usage
information::
python examples/diffusion/time_poisson_interactive.py -h
"""
from __future__ import absolute_import
import sys
from six.moves import range
sys.path.append('.')
from argparse import ArgumentParser, RawDescriptionHelpFormatter
import numpy as nm
import matplotlib.pyplot as plt
from sfepy.base.base import assert_, output, ordered_iteritems, IndexedStruct
from sfepy.discrete import (FieldVariable, Material, Integral, Function,
Equation, Equations, Problem)
from sfepy.discrete.problem import prepare_matrix
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.terms import Term
from sfepy.discrete.conditions import Conditions, EssentialBC, InitialCondition
from sfepy.solvers.ls import ScipyDirect
from sfepy.solvers.nls import Newton
from sfepy.solvers.ts_solvers import SimpleTimeSteppingSolver
from sfepy.discrete.probes import LineProbe, CircleProbe
from sfepy.discrete.projections import project_by_component
def gen_probes(problem):
"""
Define a line probe and a circle probe.
"""
# Use enough points for higher order approximations.
n_point = 1000
p0, p1 = nm.array([0.0, 0.0, 0.0]), nm.array([0.1, 0.0, 0.0])
line = LineProbe(p0, p1, n_point, share_geometry=True)
# Workaround current probe code shortcoming.
line.set_options(close_limit=0.5)
centre = 0.5 * (p0 + p1)
normal = [0.0, 1.0, 0.0]
r = 0.019
circle = CircleProbe(centre, normal, r, n_point, share_geometry=True)
circle.set_options(close_limit=0.0)
probes = [line, circle]
labels = ['%s -> %s' % (p0, p1),
              'circle(%s, %s, %s)' % (centre, normal, r)]
return probes, labels
def probe_results(ax_num, T, dvel, probe, label):
"""
Probe the results using the given probe and plot the probed values.
"""
results = {}
pars, vals = probe(T)
results['T'] = (pars, vals)
pars, vals = probe(dvel)
results['dvel'] = (pars, vals)
fig = plt.figure(1)
ax = plt.subplot(2, 2, 2 * ax_num + 1)
ax.cla()
pars, vals = results['T']
ax.plot(pars, vals, label=r'$T$', lw=1, ls='-', marker='+', ms=3)
dx = 0.05 * (pars[-1] - pars[0])
ax.set_xlim(pars[0] - dx, pars[-1] + dx)
ax.set_ylabel('temperature')
ax.set_xlabel('probe %s' % label, fontsize=8)
ax.legend(loc='best', fontsize=10)
ax = plt.subplot(2, 2, 2 * ax_num + 2)
ax.cla()
pars, vals = results['dvel']
for ic in range(vals.shape[1]):
ax.plot(pars, vals[:, ic], label=r'$w_{%d}$' % (ic + 1),
lw=1, ls='-', marker='+', ms=3)
dx = 0.05 * (pars[-1] - pars[0])
ax.set_xlim(pars[0] - dx, pars[-1] + dx)
ax.set_ylabel('diffusion velocity')
ax.set_xlabel('probe %s' % label, fontsize=8)
ax.legend(loc='best', fontsize=10)
return fig, results
helps = {
'diffusivity' : 'the diffusivity coefficient [default: %(default)s]',
'ic_max' : 'the max. initial condition value [default: %(default)s]',
'order' : 'temperature field approximation order [default: %(default)s]',
'refine' : 'uniform mesh refinement level [default: %(default)s]',
'probe' : 'probe the results',
'show' : 'show the probing results figure, if --probe is used',
}
def main():
from sfepy import data_dir
parser = ArgumentParser(description=__doc__,
formatter_class=RawDescriptionHelpFormatter)
parser.add_argument('--version', action='version', version='%(prog)s')
parser.add_argument('--diffusivity', metavar='float', type=float,
action='store', dest='diffusivity',
default=1e-5, help=helps['diffusivity'])
parser.add_argument('--ic-max', metavar='float', type=float,
action='store', dest='ic_max',
default=2.0, help=helps['ic_max'])
parser.add_argument('--order', metavar='int', type=int,
action='store', dest='order',
default=2, help=helps['order'])
parser.add_argument('-r', '--refine', metavar='int', type=int,
action='store', dest='refine',
default=0, help=helps['refine'])
parser.add_argument('-p', '--probe',
action="store_true", dest='probe',
default=False, help=helps['probe'])
parser.add_argument('-s', '--show',
action="store_true", dest='show',
default=False, help=helps['show'])
options = parser.parse_args()
assert_((0 < options.order),
'temperature approximation order must be at least 1!')
output('using values:')
output(' diffusivity:', options.diffusivity)
output(' max. IC value:', options.ic_max)
output('uniform mesh refinement level:', options.refine)
mesh = Mesh.from_file(data_dir + '/meshes/3d/cylinder.mesh')
domain = FEDomain('domain', mesh)
if options.refine > 0:
for ii in range(options.refine):
output('refine %d...' % ii)
domain = domain.refine()
output('... %d nodes %d elements'
% (domain.shape.n_nod, domain.shape.n_el))
omega = domain.create_region('Omega', 'all')
left = domain.create_region('Left',
'vertices in x < 0.00001', 'facet')
right = domain.create_region('Right',
'vertices in x > 0.099999', 'facet')
field = Field.from_args('fu', nm.float64, 'scalar', omega,
approx_order=options.order)
T = FieldVariable('T', 'unknown', field, history=1)
s = FieldVariable('s', 'test', field, primary_var_name='T')
m = Material('m', diffusivity=options.diffusivity * nm.eye(3))
integral = Integral('i', order=2*options.order)
t1 = Term.new('dw_diffusion(m.diffusivity, s, T)',
integral, omega, m=m, s=s, T=T)
t2 = Term.new('dw_volume_dot(s, dT/dt)',
integral, omega, s=s, T=T)
eq = Equation('balance', t1 + t2)
eqs = Equations([eq])
# Boundary conditions.
ebc1 = EssentialBC('T1', left, {'T.0' : 2.0})
ebc2 = EssentialBC('T2', right, {'T.0' : -2.0})
# Initial conditions.
def get_ic(coors, ic):
x, y, z = coors.T
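        # Linear profile matching the boundary conditions (T=2 at x=0, T=-2 at
        # x=0.1) plus a sinusoidal perturbation of amplitude ic_max that
        # vanishes at both ends.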
return 2 - 40.0 * x + options.ic_max * nm.sin(4 * nm.pi * x / 0.1)
ic_fun = Function('ic_fun', get_ic)
ic = InitialCondition('ic', omega, {'T.0' : ic_fun})
pb = Problem('heat', equations=eqs)
pb.set_bcs(ebcs=Conditions([ebc1, ebc2]))
pb.set_ics(Conditions([ic]))
state0 = pb.get_initial_state()
init_fun, prestep_fun, _poststep_fun = pb.get_tss_functions(state0)
ls = ScipyDirect({})
nls_status = IndexedStruct()
nls = Newton({'is_linear' : True}, lin_solver=ls, status=nls_status)
tss = SimpleTimeSteppingSolver({'t0' : 0.0, 't1' : 100.0, 'n_step' : 11},
nls=nls, context=pb, verbose=True)
pb.set_solver(tss)
if options.probe:
# Prepare probe data.
probes, labels = gen_probes(pb)
ev = pb.evaluate
order = 2 * (options.order - 1)
gfield = Field.from_args('gu', nm.float64, 'vector', omega,
approx_order=options.order - 1)
dvel = FieldVariable('dvel', 'parameter', gfield,
primary_var_name='(set-to-None)')
cfield = Field.from_args('gu', nm.float64, 'scalar', omega,
approx_order=options.order - 1)
component = FieldVariable('component', 'parameter', cfield,
primary_var_name='(set-to-None)')
nls_options = {'eps_a' : 1e-16, 'i_max' : 1}
suffix = tss.ts.suffix
def poststep_fun(ts, vec):
_poststep_fun(ts, vec)
# Probe the solution.
dvel_qp = ev('ev_diffusion_velocity.%d.Omega(m.diffusivity, T)'
% order, copy_materials=False, mode='qp')
project_by_component(dvel, dvel_qp, component, order,
nls_options=nls_options)
all_results = []
for ii, probe in enumerate(probes):
fig, results = probe_results(ii, T, dvel, probe, labels[ii])
all_results.append(results)
plt.tight_layout()
fig.savefig('time_poisson_interactive_probe_%s.png'
% (suffix % ts.step), bbox_inches='tight')
for ii, results in enumerate(all_results):
output('probe %d (%s):' % (ii, probes[ii].name))
output.level += 2
for key, res in ordered_iteritems(results):
output(key + ':')
val = res[1]
output(' min: %+.2e, mean: %+.2e, max: %+.2e'
% (val.min(), val.mean(), val.max()))
output.level -= 2
else:
poststep_fun = _poststep_fun
pb.time_update(tss.ts)
state0.apply_ebc()
# This is required if {'is_linear' : True} is passed to Newton.
mtx = prepare_matrix(pb, state0)
pb.try_presolve(mtx)
tss_status = IndexedStruct()
tss(state0.get_vec(pb.active_only),
init_fun=init_fun, prestep_fun=prestep_fun, poststep_fun=poststep_fun,
status=tss_status)
output(tss_status)
if options.show:
plt.show()
if __name__ == '__main__':
main()
|
[
"sfepy.discrete.conditions.InitialCondition",
"sfepy.base.base.IndexedStruct",
"sfepy.solvers.ts_solvers.SimpleTimeSteppingSolver",
"sfepy.base.base.assert_",
"sfepy.discrete.Equation",
"sfepy.base.base.ordered_iteritems",
"sfepy.discrete.projections.project_by_component",
"sfepy.discrete.conditions.EssentialBC",
"sfepy.discrete.fem.Mesh.from_file",
"sfepy.solvers.ls.ScipyDirect",
"sfepy.discrete.conditions.Conditions",
"sfepy.discrete.probes.CircleProbe",
"sfepy.discrete.fem.Field.from_args",
"sfepy.solvers.nls.Newton",
"sfepy.discrete.Function",
"sfepy.terms.Term.new",
"sfepy.discrete.problem.prepare_matrix",
"sfepy.discrete.Equations",
"sfepy.discrete.Integral",
"sfepy.discrete.fem.FEDomain",
"sfepy.discrete.Problem",
"sfepy.discrete.FieldVariable",
"sfepy.base.base.output",
"sfepy.discrete.probes.LineProbe"
] |
[((648, 668), 'sys.path.append', 'sys.path.append', (['"""."""'], {}), "('.')\n", (663, 668), False, 'import sys\n'), ((1705, 1752), 'sfepy.discrete.probes.LineProbe', 'LineProbe', (['p0', 'p1', 'n_point'], {'share_geometry': '(True)'}), '(p0, p1, n_point, share_geometry=True)\n', (1714, 1752), False, 'from sfepy.discrete.probes import LineProbe, CircleProbe\n'), ((1926, 1986), 'sfepy.discrete.probes.CircleProbe', 'CircleProbe', (['centre', 'normal', 'r', 'n_point'], {'share_geometry': '(True)'}), '(centre, normal, r, n_point, share_geometry=True)\n', (1937, 1986), False, 'from sfepy.discrete.probes import LineProbe, CircleProbe\n'), ((2468, 2481), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (2478, 2481), True, 'import matplotlib.pyplot as plt\n'), ((2492, 2525), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(2 * ax_num + 1)'], {}), '(2, 2, 2 * ax_num + 1)\n', (2503, 2525), True, 'import matplotlib.pyplot as plt\n'), ((2853, 2886), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(2 * ax_num + 2)'], {}), '(2, 2, 2 * ax_num + 2)\n', (2864, 2886), True, 'import matplotlib.pyplot as plt\n'), ((2947, 2967), 'six.moves.range', 'range', (['vals.shape[1]'], {}), '(vals.shape[1])\n', (2952, 2967), False, 'from six.moves import range\n'), ((3789, 3874), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '__doc__', 'formatter_class': 'RawDescriptionHelpFormatter'}), '(description=__doc__, formatter_class=RawDescriptionHelpFormatter\n )\n', (3803, 3874), False, 'from argparse import ArgumentParser, RawDescriptionHelpFormatter\n'), ((5052, 5137), 'sfepy.base.base.assert_', 'assert_', (['(0 < options.order)', '"""temperature approximation order must be at least 1!"""'], {}), "(0 < options.order,\n 'temperature approximation order must be at least 1!')\n", (5059, 5137), False, 'from sfepy.base.base import assert_, output, ordered_iteritems, IndexedStruct\n'), ((5153, 5176), 'sfepy.base.base.output', 'output', (['"""using values:"""'], {}), "('using values:')\n", (5159, 5176), False, 'from sfepy.base.base import assert_, output, ordered_iteritems, IndexedStruct\n'), ((5181, 5226), 'sfepy.base.base.output', 'output', (['""" diffusivity:"""', 'options.diffusivity'], {}), "(' diffusivity:', options.diffusivity)\n", (5187, 5226), False, 'from sfepy.base.base import assert_, output, ordered_iteritems, IndexedStruct\n'), ((5231, 5273), 'sfepy.base.base.output', 'output', (['""" max. IC value:"""', 'options.ic_max'], {}), "(' max. IC value:', options.ic_max)\n", (5237, 5273), False, 'from sfepy.base.base import assert_, output, ordered_iteritems, IndexedStruct\n'), ((5278, 5334), 'sfepy.base.base.output', 'output', (['"""uniform mesh refinement level:"""', 'options.refine'], {}), "('uniform mesh refinement level:', options.refine)\n", (5284, 5334), False, 'from sfepy.base.base import assert_, output, ordered_iteritems, IndexedStruct\n'), ((5347, 5400), 'sfepy.discrete.fem.Mesh.from_file', 'Mesh.from_file', (["(data_dir + '/meshes/3d/cylinder.mesh')"], {}), "(data_dir + '/meshes/3d/cylinder.mesh')\n", (5361, 5400), False, 'from sfepy.discrete.fem import Mesh, FEDomain, Field\n'), ((5414, 5438), 'sfepy.discrete.fem.FEDomain', 'FEDomain', (['"""domain"""', 'mesh'], {}), "('domain', mesh)\n", (5422, 5438), False, 'from sfepy.discrete.fem import Mesh, FEDomain, Field\n'), ((5976, 6054), 'sfepy.discrete.fem.Field.from_args', 'Field.from_args', (['"""fu"""', 'nm.float64', '"""scalar"""', 'omega'], {'approx_order': 'options.order'}), "('fu', nm.float64, 'scalar', omega, approx_order=options.order)\n", (5991, 6054), False, 'from sfepy.discrete.fem import Mesh, FEDomain, Field\n'), ((6092, 6139), 'sfepy.discrete.FieldVariable', 'FieldVariable', (['"""T"""', '"""unknown"""', 'field'], {'history': '(1)'}), "('T', 'unknown', field, history=1)\n", (6105, 6139), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem\n'), ((6148, 6203), 'sfepy.discrete.FieldVariable', 'FieldVariable', (['"""s"""', '"""test"""', 'field'], {'primary_var_name': '"""T"""'}), "('s', 'test', field, primary_var_name='T')\n", (6161, 6203), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem\n'), ((6288, 6326), 'sfepy.discrete.Integral', 'Integral', (['"""i"""'], {'order': '(2 * options.order)'}), "('i', order=2 * options.order)\n", (6296, 6326), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem\n'), ((6335, 6412), 'sfepy.terms.Term.new', 'Term.new', (['"""dw_diffusion(m.diffusivity, s, T)"""', 'integral', 'omega'], {'m': 'm', 's': 's', 'T': 'T'}), "('dw_diffusion(m.diffusivity, s, T)', integral, omega, m=m, s=s, T=T)\n", (6343, 6412), False, 'from sfepy.terms import Term\n'), ((6440, 6502), 'sfepy.terms.Term.new', 'Term.new', (['"""dw_volume_dot(s, dT/dt)"""', 'integral', 'omega'], {'s': 's', 'T': 'T'}), "('dw_volume_dot(s, dT/dt)', integral, omega, s=s, T=T)\n", (6448, 6502), False, 'from sfepy.terms import Term\n'), ((6530, 6558), 'sfepy.discrete.Equation', 'Equation', (['"""balance"""', '(t1 + t2)'], {}), "('balance', t1 + t2)\n", (6538, 6558), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem\n'), ((6569, 6584), 'sfepy.discrete.Equations', 'Equations', (['[eq]'], {}), '([eq])\n', (6578, 6584), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem\n'), ((6624, 6661), 'sfepy.discrete.conditions.EssentialBC', 'EssentialBC', (['"""T1"""', 'left', "{'T.0': 2.0}"], {}), "('T1', left, {'T.0': 2.0})\n", (6635, 6661), False, 'from sfepy.discrete.conditions import Conditions, EssentialBC, InitialCondition\n'), ((6674, 6713), 'sfepy.discrete.conditions.EssentialBC', 'EssentialBC', (['"""T2"""', 'right', "{'T.0': -2.0}"], {}), "('T2', right, {'T.0': -2.0})\n", (6685, 6713), False, 'from sfepy.discrete.conditions import Conditions, EssentialBC, InitialCondition\n'), ((6883, 6909), 'sfepy.discrete.Function', 'Function', (['"""ic_fun"""', 'get_ic'], {}), "('ic_fun', get_ic)\n", (6891, 6909), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem\n'), ((6919, 6965), 'sfepy.discrete.conditions.InitialCondition', 'InitialCondition', (['"""ic"""', 'omega', "{'T.0': ic_fun}"], {}), "('ic', omega, {'T.0': ic_fun})\n", (6935, 6965), False, 'from sfepy.discrete.conditions import Conditions, EssentialBC, InitialCondition\n'), ((6977, 7007), 'sfepy.discrete.Problem', 'Problem', (['"""heat"""'], {'equations': 'eqs'}), "('heat', equations=eqs)\n", (6984, 7007), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem\n'), ((7206, 7221), 'sfepy.solvers.ls.ScipyDirect', 'ScipyDirect', (['{}'], {}), '({})\n', (7217, 7221), False, 'from sfepy.solvers.ls import ScipyDirect\n'), ((7239, 7254), 'sfepy.base.base.IndexedStruct', 'IndexedStruct', ([], {}), '()\n', (7252, 7254), False, 'from sfepy.base.base import assert_, output, ordered_iteritems, IndexedStruct\n'), ((7265, 7326), 'sfepy.solvers.nls.Newton', 'Newton', (["{'is_linear': True}"], {'lin_solver': 'ls', 'status': 'nls_status'}), "({'is_linear': True}, lin_solver=ls, status=nls_status)\n", (7271, 7326), False, 'from sfepy.solvers.nls import Newton\n'), ((7338, 7441), 'sfepy.solvers.ts_solvers.SimpleTimeSteppingSolver', 'SimpleTimeSteppingSolver', (["{'t0': 0.0, 't1': 100.0, 'n_step': 11}"], {'nls': 'nls', 'context': 'pb', 'verbose': '(True)'}), "({'t0': 0.0, 't1': 100.0, 'n_step': 11}, nls=nls,\n context=pb, verbose=True)\n", (7362, 7441), False, 'from sfepy.solvers.ts_solvers import SimpleTimeSteppingSolver\n'), ((9631, 9657), 'sfepy.discrete.problem.prepare_matrix', 'prepare_matrix', (['pb', 'state0'], {}), '(pb, state0)\n', (9645, 9657), False, 'from sfepy.discrete.problem import prepare_matrix\n'), ((9701, 9716), 'sfepy.base.base.IndexedStruct', 'IndexedStruct', ([], {}), '()\n', (9714, 9716), False, 'from sfepy.base.base import assert_, output, ordered_iteritems, IndexedStruct\n'), ((9868, 9886), 'sfepy.base.base.output', 'output', (['tss_status'], {}), '(tss_status)\n', (9874, 9886), False, 'from sfepy.base.base import assert_, output, ordered_iteritems, IndexedStruct\n'), ((1641, 1666), 'numpy.array', 'nm.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (1649, 1666), True, 'import numpy as nm\n'), ((1668, 1693), 'numpy.array', 'nm.array', (['[0.1, 0.0, 0.0]'], {}), '([0.1, 0.0, 0.0])\n', (1676, 1693), True, 'import numpy as nm\n'), ((5485, 5506), 'six.moves.range', 'range', (['options.refine'], {}), '(options.refine)\n', (5490, 5506), False, 'from six.moves import range\n'), ((7069, 7085), 'sfepy.discrete.conditions.Conditions', 'Conditions', (['[ic]'], {}), '([ic])\n', (7079, 7085), False, 'from sfepy.discrete.conditions import Conditions, EssentialBC, InitialCondition\n'), ((7676, 7763), 'sfepy.discrete.fem.Field.from_args', 'Field.from_args', (['"""gu"""', 'nm.float64', '"""vector"""', 'omega'], {'approx_order': '(options.order - 1)'}), "('gu', nm.float64, 'vector', omega, approx_order=options.\n order - 1)\n", (7691, 7763), False, 'from sfepy.discrete.fem import Mesh, FEDomain, Field\n'), ((7806, 7882), 'sfepy.discrete.FieldVariable', 'FieldVariable', (['"""dvel"""', '"""parameter"""', 'gfield'], {'primary_var_name': '"""(set-to-None)"""'}), "('dvel', 'parameter', gfield, primary_var_name='(set-to-None)')\n", (7819, 7882), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem\n'), ((7929, 8016), 'sfepy.discrete.fem.Field.from_args', 'Field.from_args', (['"""gu"""', 'nm.float64', '"""scalar"""', 'omega'], {'approx_order': '(options.order - 1)'}), "('gu', nm.float64, 'scalar', omega, approx_order=options.\n order - 1)\n", (7944, 8016), False, 'from sfepy.discrete.fem import Mesh, FEDomain, Field\n'), ((8064, 8150), 'sfepy.discrete.FieldVariable', 'FieldVariable', (['"""component"""', '"""parameter"""', 'cfield'], {'primary_var_name': '"""(set-to-None)"""'}), "('component', 'parameter', cfield, primary_var_name=\n '(set-to-None)')\n", (8077, 8150), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem\n'), ((9917, 9927), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9925, 9927), True, 'import matplotlib.pyplot as plt\n'), ((5520, 5547), 'sfepy.base.base.output', 'output', (["('refine %d...' % ii)"], {}), "('refine %d...' % ii)\n", (5526, 5547), False, 'from sfepy.base.base import assert_, output, ordered_iteritems, IndexedStruct\n'), ((5597, 5673), 'sfepy.base.base.output', 'output', (["('... %d nodes %d elements' % (domain.shape.n_nod, domain.shape.n_el))"], {}), "('... %d nodes %d elements' % (domain.shape.n_nod, domain.shape.n_el))\n", (5603, 5673), False, 'from sfepy.base.base import assert_, output, ordered_iteritems, IndexedStruct\n'), ((7028, 7052), 'sfepy.discrete.conditions.Conditions', 'Conditions', (['[ebc1, ebc2]'], {}), '([ebc1, ebc2])\n', (7038, 7052), False, 'from sfepy.discrete.conditions import Conditions, EssentialBC, InitialCondition\n'), ((8526, 8604), 'sfepy.discrete.projections.project_by_component', 'project_by_component', (['dvel', 'dvel_qp', 'component', 'order'], {'nls_options': 'nls_options'}), '(dvel, dvel_qp, component, order, nls_options=nls_options)\n', (8546, 8604), False, 'from sfepy.discrete.projections import project_by_component\n'), ((8851, 8869), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (8867, 8869), True, 'import matplotlib.pyplot as plt\n'), ((6261, 6270), 'numpy.eye', 'nm.eye', (['(3)'], {}), '(3)\n', (6267, 6270), True, 'import numpy as nm\n'), ((6842, 6869), 'numpy.sin', 'nm.sin', (['(4 * nm.pi * x / 0.1)'], {}), '(4 * nm.pi * x / 0.1)\n', (6848, 6869), True, 'import numpy as nm\n'), ((9073, 9121), 'sfepy.base.base.output', 'output', (["('probe %d (%s):' % (ii, probes[ii].name))"], {}), "('probe %d (%s):' % (ii, probes[ii].name))\n", (9079, 9121), False, 'from sfepy.base.base import assert_, output, ordered_iteritems, IndexedStruct\n'), ((9188, 9214), 'sfepy.base.base.ordered_iteritems', 'ordered_iteritems', (['results'], {}), '(results)\n', (9205, 9214), False, 'from sfepy.base.base import assert_, output, ordered_iteritems, IndexedStruct\n'), ((9236, 9253), 'sfepy.base.base.output', 'output', (["(key + ':')"], {}), "(key + ':')\n", (9242, 9253), False, 'from sfepy.base.base import assert_, output, ordered_iteritems, IndexedStruct\n')]
|
from fastapi import FastAPI, Request
from fastapi.responses import HTMLResponse
from fastapi.templating import Jinja2Templates
from geoalchemy2.types import Geometry
from sqlmodel import Session, cast, select
from .database import engine
from .models import Country
app = FastAPI()
templates = Jinja2Templates(directory="geo_info_svg/templates")
@app.get("/", response_class=HTMLResponse)
async def test(request: Request):
with Session(engine) as session:
countries = session.exec(
select( # type: ignore
Country.name,
Country.population,
cast(Country.geometry, Geometry).ST_XMin(),
cast(Country.geometry, Geometry).ST_YMin(),
cast(Country.geometry, Geometry).ST_XMax(),
cast(Country.geometry, Geometry).ST_YMax(),
Country.geometry.ST_AsSVG(),
)
.order_by(Country.population.desc()) # type: ignore
.limit(10)
).all()
return templates.TemplateResponse(
"countries.html", {"request": request, "countries": countries}
)
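# Note (an assumption for context, not from the original app): casting the
# column via cast(Country.geometry, Geometry) lets SQLAlchemy dispatch the
# PostGIS functions (ST_XMin, ST_YMin, ..., ST_AsSVG) on it, so this query
# requires a PostGIS-enabled PostgreSQL database behind `engine`.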
|
[
"sqlmodel.cast",
"sqlmodel.Session"
] |
[((274, 283), 'fastapi.FastAPI', 'FastAPI', ([], {}), '()\n', (281, 283), False, 'from fastapi import FastAPI, Request\n'), ((296, 347), 'fastapi.templating.Jinja2Templates', 'Jinja2Templates', ([], {'directory': '"""geo_info_svg/templates"""'}), "(directory='geo_info_svg/templates')\n", (311, 347), False, 'from fastapi.templating import Jinja2Templates\n'), ((436, 451), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (443, 451), False, 'from sqlmodel import Session, cast, select\n'), ((616, 648), 'sqlmodel.cast', 'cast', (['Country.geometry', 'Geometry'], {}), '(Country.geometry, Geometry)\n', (620, 648), False, 'from sqlmodel import Session, cast, select\n'), ((676, 708), 'sqlmodel.cast', 'cast', (['Country.geometry', 'Geometry'], {}), '(Country.geometry, Geometry)\n', (680, 708), False, 'from sqlmodel import Session, cast, select\n'), ((736, 768), 'sqlmodel.cast', 'cast', (['Country.geometry', 'Geometry'], {}), '(Country.geometry, Geometry)\n', (740, 768), False, 'from sqlmodel import Session, cast, select\n'), ((796, 828), 'sqlmodel.cast', 'cast', (['Country.geometry', 'Geometry'], {}), '(Country.geometry, Geometry)\n', (800, 828), False, 'from sqlmodel import Session, cast, select\n')]
|
r"""
Incompressible Stokes flow with Navier (slip) boundary conditions, flow driven
by a moving wall and a small diffusion for stabilization.
This example demonstrates the use of `no-penetration` and `edge direction`
boundary conditions together with Navier or slip boundary conditions.
Alternatively the `no-penetration` boundary conditions can be applied in a weak
sense using the penalty term ``dw_non_penetration_p``.
Find :math:`\ul{u}`, :math:`p` such that:
.. math::
\int_{\Omega} \nu\ \nabla \ul{v} : \nabla \ul{u}
- \int_{\Omega} p\ \nabla \cdot \ul{v}
+ \int_{\Gamma_1} \beta \ul{v} \cdot (\ul{u} - \ul{u}_d)
+ \int_{\Gamma_2} \beta \ul{v} \cdot \ul{u}
= 0
\;, \quad \forall \ul{v} \;,
\int_{\Omega} \mu \nabla q \cdot \nabla p
+ \int_{\Omega} q\ \nabla \cdot \ul{u}
= 0
\;, \quad \forall q \;,
where :math:`\nu` is the fluid viscosity, :math:`\beta` is the slip
coefficient, :math:`\mu` is the (small) numerical diffusion coefficient,
:math:`\Gamma_1` is the top wall that moves with the given driving velocity
:math:`\ul{u}_d` and :math:`\Gamma_2` are the remaining walls. The Navier
conditions are in effect on both :math:`\Gamma_1`, :math:`\Gamma_2` and are
expressed by the corresponding integrals in the equations above.
The `no-penetration` boundary conditions are applied on :math:`\Gamma_1`,
:math:`\Gamma_2`, except the vertices of the block edges, where the `edge
direction` boundary conditions are applied.
The penalty term formulation is given by the following equations.
Find :math:`\ul{u}`, :math:`p` such that:
.. math::
\int_{\Omega} \nu\ \nabla \ul{v} : \nabla \ul{u}
- \int_{\Omega} p\ \nabla \cdot \ul{v}
+ \int_{\Gamma_1} \beta \ul{v} \cdot (\ul{u} - \ul{u}_d)
+ \int_{\Gamma_2} \beta \ul{v} \cdot \ul{u}
+ \int_{\Gamma_1 \cup \Gamma_2} \epsilon (\ul{n} \cdot \ul{v})
(\ul{n} \cdot \ul{u})
= 0
\;, \quad \forall \ul{v} \;,
\int_{\Omega} \mu \nabla q \cdot \nabla p
+ \int_{\Omega} q\ \nabla \cdot \ul{u}
= 0
\;, \quad \forall q \;,
where :math:`\epsilon` is the penalty coefficient (sufficiently large). The
`no-penetration` boundary conditions are applied on :math:`\Gamma_1`,
:math:`\Gamma_2`.
Optionally, Dirichlet boundary conditions can be applied on
the inlet in the both cases, see below.
For large meshes use the ``'ls_i'`` linear solver - PETSc + petsc4py are needed
in that case.
Several parameters can be set using the ``--define`` option of ``simple.py``,
see :func:`define()` and the examples below.
Examples
--------
Specify the inlet velocity and a finer mesh::
python3 simple.py examples/navier_stokes/stokes_slip_bc -d shape="(11,31,31),u_inlet=0.5"
python3 resview.py -f p:p0 u:o.4:p1 u:g:f0.2:p1 -- user_block.vtk
Use the penalty term formulation and einsum-based terms with the default
(numpy) backend::
python3 simple.py examples/navier_stokes/stokes_slip_bc -d "mode=penalty,term_mode=einsum"
python3 resview.py -f p:p0 u:o.4:p1 u:g:f0.2:p1 -- user_block.vtk
Change backend to opt_einsum (needs to be installed) and use the quadratic velocity approximation order::
python3 simple.py examples/navier_stokes/stokes_slip_bc -d "u_order=2,mode=penalty,term_mode=einsum,backend=opt_einsum,optimize=auto"
python3 resview.py -f p:p0 u:o.4:p1 u:g:f0.2:p1 -- user_block.vtk
Note the pressure field distribution improvement w.r.t. the previous examples. If PETSc + petsc4py are installed, try using the iterative solver to speed up the solution::
python3 simple.py examples/navier_stokes/stokes_slip_bc -d "u_order=2,ls=ls_i,mode=penalty,term_mode=einsum,backend=opt_einsum,optimize=auto"
python3 resview.py -f p:p0 u:o.4:p1 u:g:f0.2:p1 -- user_block.vtk
"""
from __future__ import absolute_import
import numpy as nm
from sfepy.base.base import assert_, output
from sfepy.discrete.fem.meshio import UserMeshIO
from sfepy.mesh.mesh_generators import gen_block_mesh
from sfepy.homogenization.utils import define_box_regions
def define(dims=(3, 1, 0.5), shape=(11, 15, 15), u_order=1, refine=0,
ls='ls_d', u_inlet=None, mode='lcbc', term_mode='original',
backend='numpy', optimize='optimal', verbosity=0):
"""
Parameters
----------
dims : tuple
The block domain dimensions.
shape : tuple
The mesh resolution: increase to improve accuracy.
u_order : int
The velocity field approximation order.
refine : int
The refinement level.
ls : 'ls_d' or 'ls_i'
The pre-configured linear solver name.
u_inlet : float, optional
The x-component of the inlet velocity.
mode : 'lcbc' or 'penalty'
The alternative formulations.
term_mode : 'original' or 'einsum'
The switch to use either the original or new experimental einsum-based
terms.
backend : str
The einsum mode backend.
optimize : str
The einsum mode optimization (backend dependent).
verbosity : 0, 1, 2, 3
The verbosity level of einsum-based terms.
"""
output('dims: {}, shape: {}, u_order: {}, refine: {}, u_inlet: {}'
.format(dims, shape, u_order, refine, u_inlet))
output('linear solver: {}'.format(ls))
output('mode: {}, term_mode: {}'.format(mode, term_mode))
if term_mode == 'einsum':
output('backend: {}, optimize: {}, verbosity: {}'
.format(backend, optimize, verbosity))
assert_(mode in {'lcbc', 'penalty'})
assert_(term_mode in {'original', 'einsum'})
if u_order > 1:
assert_(mode == 'penalty', msg='set mode=penalty to use u_order > 1!')
dims = nm.array(dims, dtype=nm.float64)
shape = nm.array(shape, dtype=nm.int32)
def mesh_hook(mesh, mode):
"""
Generate the block mesh.
"""
if mode == 'read':
mesh = gen_block_mesh(dims, shape, [0, 0, 0], name='user_block',
verbose=False)
return mesh
elif mode == 'write':
pass
filename_mesh = UserMeshIO(mesh_hook)
regions = define_box_regions(3, 0.5 * dims)
regions.update({
'Omega' : 'all',
'Edges_v' : ("""(r.Near *v r.Bottom) +v
(r.Bottom *v r.Far) +v
(r.Far *v r.Top) +v
(r.Top *v r.Near)""", 'edge'),
'Gamma1_f' : ('copy r.Top', 'face'),
'Gamma2_f' : ('r.Near +v r.Bottom +v r.Far', 'face'),
'Gamma_f' : ('r.Gamma1_f +v r.Gamma2_f', 'face'),
'Gamma_v' : ('r.Gamma_f -v r.Edges_v', 'face'),
'Inlet_f' : ('r.Left -v r.Gamma_f', 'face'),
})
fields = {
'velocity' : ('real', 3, 'Omega', u_order),
'pressure' : ('real', 1, 'Omega', 1),
}
def get_u_d(ts, coors, region=None):
"""
        Given driving velocity of the moving wall.
"""
out = nm.zeros_like(coors)
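        # constant driving velocity u_d = (1, 1, 0): unit tangential motion of
        # the top wall, used by the Navier condition terms on Gamma1_f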
out[:] = [1.0, 1.0, 0.0]
return out
functions = {
'get_u_d' : (get_u_d,),
}
variables = {
'u' : ('unknown field', 'velocity', 0),
'v' : ('test field', 'velocity', 'u'),
'u_d' : ('parameter field', 'velocity',
{'setter' : 'get_u_d'}),
'p' : ('unknown field', 'pressure', 1),
'q' : ('test field', 'pressure', 'p'),
}
materials = {
'm' : ({
'nu' : 1e-3,
'beta' : 1e-2,
'mu' : 1e-10,
},),
}
ebcs = {
}
if u_inlet is not None:
ebcs['inlet'] = ('Inlet_f', {'u.0' : u_inlet, 'u.[1, 2]' : 0.0})
if mode == 'lcbc':
lcbcs = {
'walls' : ('Gamma_v', {'u.all' : None}, None, 'no_penetration',
'normals_Gamma.vtk'),
'edges' : ('Edges_v', [(-0.5, 1.5)], {'u.all' : None}, None,
'edge_direction', 'edges_Edges.vtk'),
}
if term_mode == 'original':
equations = {
'balance' :
"""dw_div_grad.5.Omega(m.nu, v, u)
- dw_stokes.5.Omega(v, p)
+ dw_dot.5.Gamma1_f(m.beta, v, u)
+ dw_dot.5.Gamma2_f(m.beta, v, u)
=
+ dw_dot.5.Gamma1_f(m.beta, v, u_d)""",
'incompressibility' :
"""dw_laplace.5.Omega(m.mu, q, p)
+ dw_stokes.5.Omega(u, q) = 0""",
}
else:
equations = {
'balance' :
"""de_div_grad.5.Omega(m.nu, v, u)
- de_stokes.5.Omega(v, p)
+ de_dot.5.Gamma1_f(m.beta, v, u)
+ de_dot.5.Gamma2_f(m.beta, v, u)
=
+ de_dot.5.Gamma1_f(m.beta, v, u_d)""",
'incompressibility' :
"""de_laplace.5.Omega(m.mu, q, p)
+ de_stokes.5.Omega(u, q) = 0""",
}
else:
materials['m'][0]['np_eps'] = 1e3
if term_mode == 'original':
equations = {
'balance' :
"""dw_div_grad.5.Omega(m.nu, v, u)
- dw_stokes.5.Omega(v, p)
+ dw_dot.5.Gamma1_f(m.beta, v, u)
+ dw_dot.5.Gamma2_f(m.beta, v, u)
+ dw_non_penetration_p.5.Gamma1_f(m.np_eps, v, u)
+ dw_non_penetration_p.5.Gamma2_f(m.np_eps, v, u)
=
+ dw_dot.5.Gamma1_f(m.beta, v, u_d)""",
'incompressibility' :
"""dw_laplace.5.Omega(m.mu, q, p)
+ dw_stokes.5.Omega(u, q) = 0""",
}
else:
equations = {
'balance' :
"""de_div_grad.5.Omega(m.nu, v, u)
- de_stokes.5.Omega(v, p)
+ de_dot.5.Gamma1_f(m.beta, v, u)
+ de_dot.5.Gamma2_f(m.beta, v, u)
+ de_non_penetration_p.5.Gamma1_f(m.np_eps, v, u)
+ de_non_penetration_p.5.Gamma2_f(m.np_eps, v, u)
=
+ de_dot.5.Gamma1_f(m.beta, v, u_d)""",
'incompressibility' :
"""de_laplace.5.Omega(m.mu, q, p)
+ de_stokes.5.Omega(u, q) = 0""",
}
solvers = {
'ls_d' : ('ls.auto_direct', {}),
'ls_i' : ('ls.petsc', {
'method' : 'bcgsl', # ksp_type
'precond' : 'bjacobi', # pc_type
'sub_precond' : 'ilu', # sub_pc_type
'eps_a' : 0.0, # abstol
'eps_r' : 1e-12, # rtol
'eps_d' : 1e10, # Divergence tolerance.
'i_max' : 200, # maxits
}),
'newton' : ('nls.newton', {
'i_max' : 1,
'eps_a' : 1e-10,
}),
}
options = {
'nls' : 'newton',
'ls' : ls,
'eterm': {
'verbosity' : verbosity,
'backend_args' : {
'backend' : backend,
'optimize' : optimize,
'layout' : None,
},
},
'refinement_level' : refine,
}
return locals()
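# Hypothetical usage sketch (the names below are illustrative): define()
# returns a plain dict of problem-description items that sfepy's declarative
# solvers consume, e.g.
#     conf = define(shape=(11, 31, 31), u_inlet=0.5, mode='penalty')
# ``simple.py`` builds and solves the Problem from such a dict; calling
# define() directly is mainly useful for inspecting the generated equations.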
|
[
"sfepy.base.base.assert_",
"sfepy.discrete.fem.meshio.UserMeshIO",
"sfepy.homogenization.utils.define_box_regions",
"sfepy.mesh.mesh_generators.gen_block_mesh"
] |
[((5428, 5464), 'sfepy.base.base.assert_', 'assert_', (["(mode in {'lcbc', 'penalty'})"], {}), "(mode in {'lcbc', 'penalty'})\n", (5435, 5464), False, 'from sfepy.base.base import assert_, output\n'), ((5469, 5513), 'sfepy.base.base.assert_', 'assert_', (["(term_mode in {'original', 'einsum'})"], {}), "(term_mode in {'original', 'einsum'})\n", (5476, 5513), False, 'from sfepy.base.base import assert_, output\n'), ((5624, 5656), 'numpy.array', 'nm.array', (['dims'], {'dtype': 'nm.float64'}), '(dims, dtype=nm.float64)\n', (5632, 5656), True, 'import numpy as nm\n'), ((5669, 5700), 'numpy.array', 'nm.array', (['shape'], {'dtype': 'nm.int32'}), '(shape, dtype=nm.int32)\n', (5677, 5700), True, 'import numpy as nm\n'), ((6036, 6057), 'sfepy.discrete.fem.meshio.UserMeshIO', 'UserMeshIO', (['mesh_hook'], {}), '(mesh_hook)\n', (6046, 6057), False, 'from sfepy.discrete.fem.meshio import UserMeshIO\n'), ((6073, 6106), 'sfepy.homogenization.utils.define_box_regions', 'define_box_regions', (['(3)', '(0.5 * dims)'], {}), '(3, 0.5 * dims)\n', (6091, 6106), False, 'from sfepy.homogenization.utils import define_box_regions\n'), ((5542, 5612), 'sfepy.base.base.assert_', 'assert_', (["(mode == 'penalty')"], {'msg': '"""set mode=penalty to use u_order > 1!"""'}), "(mode == 'penalty', msg='set mode=penalty to use u_order > 1!')\n", (5549, 5612), False, 'from sfepy.base.base import assert_, output\n'), ((6859, 6879), 'numpy.zeros_like', 'nm.zeros_like', (['coors'], {}), '(coors)\n', (6872, 6879), True, 'import numpy as nm\n'), ((5836, 5908), 'sfepy.mesh.mesh_generators.gen_block_mesh', 'gen_block_mesh', (['dims', 'shape', '[0, 0, 0]'], {'name': '"""user_block"""', 'verbose': '(False)'}), "(dims, shape, [0, 0, 0], name='user_block', verbose=False)\n", (5850, 5908), False, 'from sfepy.mesh.mesh_generators import gen_block_mesh\n')]
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
import megengine as mge
import megengine.functional as F
import megengine.module as M
__all__ = ["fuse_conv_and_bn", "fuse_model", "replace_module"]
def fuse_conv_and_bn(conv, bn):
# Fuse convolution and batchnorm layers https://tehnokv.com/posts/fusing-batchnorm-and-conv/
fusedconv = M.Conv2d(
conv.in_channels,
conv.out_channels,
kernel_size=conv.kernel_size,
stride=conv.stride,
padding=conv.padding,
groups=conv.groups,
bias=True,
)
    # fused_conv.weight = (bn.weight / sqrt(running_var + eps)) * conv.weight
w_conv = conv.weight.reshape(conv.out_channels, -1)
factor = (bn.weight / F.sqrt(bn.eps + bn.running_var)).reshape(-1)
    # diag_factor = diag(factor): left-multiplying by it scales each output channel
fusedconv.weight = mge.Parameter(
(factor.reshape(-1, 1) * w_conv).reshape(fusedconv.weight.shape)
)
    # fused_conv.bias = bn.bias + (conv.bias - running_mean) * bn.weight / sqrt(running_var + eps)
conv_bias = F.zeros(bn.running_mean.shape) if conv.bias is None else conv.bias
fuse_bias = bn.bias + (conv_bias - bn.running_mean) * factor.reshape(1, -1, 1, 1)
fusedconv.bias = mge.Parameter(fuse_bias)
return fusedconv
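# A minimal numerical sanity check sketch (not part of the original file;
# shapes and tolerance are assumptions chosen only for illustration): after
# fusing, the layer should reproduce the conv+bn inference output.
def _check_fusion():
    import numpy as np
    conv = M.Conv2d(3, 8, kernel_size=3, padding=1, bias=True)
    bn = M.BatchNorm2d(8)
    bn.eval()  # inference mode: use the running statistics
    fused = fuse_conv_and_bn(conv, bn)
    x = mge.tensor(np.random.randn(2, 3, 16, 16).astype("float32"))
    np.testing.assert_allclose(bn(conv(x)).numpy(), fused(x).numpy(),
                               atol=1e-5)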
def fuse_model(model):
from yolox.models.network_blocks import BaseConv
for m in model.modules():
if type(m) is BaseConv and hasattr(m, "bn"):
m.conv = fuse_conv_and_bn(m.conv, m.bn) # update conv
delattr(m, "bn") # remove batchnorm
m.forward = m.fuseforward # update forward
return model
def replace_module(module, replaced_module_type, new_module_type, replace_func=None):
"""
    Replace modules of a given type in `module` with a new type. Mostly used in deployment.
Args:
module (nn.Module): model to apply replace operation.
replaced_module_type (Type): module type to be replaced.
        new_module_type (Type): module type to replace with.
        replace_func (function): python function to describe replace logic. Default value: None.
Returns:
model (nn.Module): module that already been replaced.
"""
def default_replace_func(replaced_module_type, new_module_type):
return new_module_type()
if replace_func is None:
replace_func = default_replace_func
model = module
if isinstance(module, replaced_module_type):
model = replace_func(replaced_module_type, new_module_type)
    else:  # recursively replace
        for name, child in module.named_children():
            new_child = replace_module(child, replaced_module_type, new_module_type, replace_func)
if new_child is not child: # child is already replaced
model.add_module(name, new_child)
return model
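# Hypothetical usage sketch (module types here are illustrative only):
# swapping every Sigmoid in a model for ReLU before deployment:
#     model = replace_module(model, M.Sigmoid, M.ReLU)
# The input `module` itself is returned unchanged unless its own type
# matches; only matching children are re-registered via add_module().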
|
[
"megengine.functional.zeros",
"megengine.Parameter",
"megengine.functional.sqrt",
"megengine.module.Conv2d"
] |
[((403, 555), 'megengine.module.Conv2d', 'M.Conv2d', (['conv.in_channels', 'conv.out_channels'], {'kernel_size': 'conv.kernel_size', 'stride': 'conv.stride', 'padding': 'conv.padding', 'groups': 'conv.groups', 'bias': '(True)'}), '(conv.in_channels, conv.out_channels, kernel_size=conv.kernel_size,\n stride=conv.stride, padding=conv.padding, groups=conv.groups, bias=True)\n', (411, 555), True, 'import megengine.module as M\n'), ((1234, 1258), 'megengine.Parameter', 'mge.Parameter', (['fuse_bias'], {}), '(fuse_bias)\n', (1247, 1258), True, 'import megengine as mge\n'), ((1060, 1090), 'megengine.functional.zeros', 'F.zeros', (['bn.running_mean.shape'], {}), '(bn.running_mean.shape)\n', (1067, 1090), True, 'import megengine.functional as F\n'), ((762, 793), 'megengine.functional.sqrt', 'F.sqrt', (['(bn.eps + bn.running_var)'], {}), '(bn.eps + bn.running_var)\n', (768, 793), True, 'import megengine.functional as F\n')]
|
from sqlmodel import Session, select
from config.Database import Database
class UserDatabase(Database):
def __init__(self) -> None:
super(UserDatabase, self).__init__()
async def get_by_params(self, object: object, email: str):
with Session(self._engine) as session:
statement = select(object).where(object.email == email)
return session.exec(statement).first()
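# Hypothetical usage sketch; `User` stands for any SQLModel table with an
# `email` column. Note the method is declared async but the Session work
# inside it is synchronous, so nothing is actually awaited:
#     db = UserDatabase()
#     user = await db.get_by_params(User, "alice@example.com")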
|
[
"sqlmodel.Session",
"sqlmodel.select"
] |
[((259, 280), 'sqlmodel.Session', 'Session', (['self._engine'], {}), '(self._engine)\n', (266, 280), False, 'from sqlmodel import Session, select\n'), ((317, 331), 'sqlmodel.select', 'select', (['object'], {}), '(object)\n', (323, 331), False, 'from sqlmodel import Session, select\n')]
|
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from typing import Tuple, Union
import numpy as np
import megengine._internal as mgb
from ... import module as Float
from ...core import Parameter
from ...functional import conv_bias_activation
from ..qat import conv as QAT
from .module import QuantizedModule
class Conv2d(Float.Conv2d, QuantizedModule):
r"""quantized version of :class:`~.qat.conv.Conv2d`."""
r"""Applies a 2D convolution over an quantized input tensor, inference only.
The parameter is same with :class: `~.Conv2d`
"""
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: Union[int, Tuple[int, int]],
stride: Union[int, Tuple[int, int]] = 1,
padding: Union[int, Tuple[int, int]] = 0,
dilation: Union[int, Tuple[int, int]] = 1,
groups: int = 1,
conv_mode: str = "CROSS_CORRELATION",
compute_mode: str = "DEFAULT",
dtype=None,
):
super().__init__(
in_channels,
out_channels,
kernel_size,
stride,
padding,
dilation,
groups,
True,
conv_mode,
compute_mode,
)
self.output_dtype = dtype
def calc_conv_quantized(self, inp, nonlinear_mode="IDENTITY"):
inp_scale = mgb.dtype.get_scale(inp.dtype)
w_scale = mgb.dtype.get_scale(self.weight.dtype)
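        # The int32 bias must live in the accumulator's scale: the product of
        # two symmetrically quantized tensors (inp_scale * q_x) * (w_scale * q_w)
        # accumulates with the combined scale inp_scale * w_scale.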
bias_scale = inp_scale * w_scale
return conv_bias_activation(
inp,
self.weight,
self.bias.astype(mgb.dtype.qint32(bias_scale)),
self.output_dtype,
self.stride,
self.padding,
self.dilation,
self.groups,
conv_mode=self.conv_mode,
compute_mode=self.compute_mode,
nonlinear_mode=nonlinear_mode,
)
@classmethod
def from_qat_module(cls, qat_module: QAT.Conv2d):
r"""
        Return a :class:`~.QuantizedModule` instance converted from a
:class:`~.QATModule` instance.
"""
output_dtype = qat_module.get_activation_dtype()
qconv = cls(
qat_module.in_channels,
qat_module.out_channels,
qat_module.kernel_size,
qat_module.stride,
qat_module.padding,
qat_module.dilation,
qat_module.groups,
dtype=output_dtype,
)
weight = qat_module.weight.astype(qat_module.get_weight_dtype())
qconv.weight = Parameter(weight.numpy())
if qat_module.bias is not None:
qconv.bias = Parameter(qat_module.bias.numpy())
else:
qconv.bias = Parameter(
np.zeros(qat_module._infer_bias_shape(), dtype=np.float32)
)
return qconv
def forward(self, inp):
return self.calc_conv_quantized(inp, nonlinear_mode="IDENTITY")
class ConvRelu2d(Conv2d):
r"""quantized version of :class:`~.qat.conv.ConvRelu2d`."""
def forward(self, inp):
return self.calc_conv_quantized(inp, nonlinear_mode="RELU")
|
[
"megengine._internal.dtype.get_scale",
"megengine._internal.dtype.qint32"
] |
[((1670, 1700), 'megengine._internal.dtype.get_scale', 'mgb.dtype.get_scale', (['inp.dtype'], {}), '(inp.dtype)\n', (1689, 1700), True, 'import megengine._internal as mgb\n'), ((1719, 1757), 'megengine._internal.dtype.get_scale', 'mgb.dtype.get_scale', (['self.weight.dtype'], {}), '(self.weight.dtype)\n', (1738, 1757), True, 'import megengine._internal as mgb\n'), ((1907, 1935), 'megengine._internal.dtype.qint32', 'mgb.dtype.qint32', (['bias_scale'], {}), '(bias_scale)\n', (1923, 1935), True, 'import megengine._internal as mgb\n')]
|
# Copyright 2021 Modelyst LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from string import ascii_lowercase
from typing import Any, Dict, Tuple, Type, TypeVar
from uuid import UUID
from hypothesis import strategies as st
from hypothesis.strategies._internal.strategies import SearchStrategy
from sqlalchemy.orm import registry
from sqlmodel import Field
from dbgen.core.entity import BaseEntity, EntityMetaclass
protected_words = {"mro"}
uni_text = lambda x: st.text(ascii_lowercase, min_size=x)
non_private_attr = uni_text(1).filter(lambda x: x[0] != "_").filter(lambda x: x not in protected_words)
pydantic_type_strat = st.sampled_from((str, float, bool, int))
id_field = Field(
default=None,
primary_key=True,
sa_column_kwargs={"autoincrement": False, "unique": True},
)
fk_field = lambda x: Field(default=None, foreign_key=x)
ID_TYPE = UUID
reserved_words = {'hex', 'uuid', 'hash'}
@st.composite
def example_entity(
draw,
class_name: str = None,
fks: Dict[str, str] = None,
attrs: Dict[str, Tuple[type, Any]] = None,
draw_attrs: bool = True,
registry_: registry = None,
) -> SearchStrategy[Type[BaseEntity]]:
class_name = class_name or draw(uni_text(1))
if fks is None:
fks = draw(
st.dictionaries(
non_private_attr.filter(
lambda x: attrs and x not in attrs and x != 'id' and x not in reserved_words
),
non_private_attr,
)
)
annotations: Dict[str, type] = {"id": UUID}
if draw_attrs:
annotations.update(
draw(
st.dictionaries(
non_private_attr.filter(lambda x: x not in fks and x not in reserved_words),
pydantic_type_strat,
min_size=1,
)
)
)
added_attrs = {"id": id_field}
for fk_name, fk_col_reference in fks.items():
annotations[fk_name] = UUID
added_attrs[fk_name] = fk_field(fk_col_reference)
attrs = attrs or {}
for attr_name, attr_dets in attrs.items():
if len(attr_dets) == 1:
type_ = attr_dets[0]
else:
type_, default = attr_dets
added_attrs[attr_name] = default
annotations[attr_name] = type_
identifying = draw(st.sets(st.sampled_from(list(annotations.keys()))))
data = {
"__annotations__": annotations,
"__identifying__": identifying,
"__module__": "tests.strategies.entity",
"__qualname__": class_name,
"__tablename__": f"table_{class_name}",
**added_attrs,
}
new_class = EntityMetaclass(
class_name,
(BaseEntity,),
data,
table=True,
registry=registry_ or registry(),
force_validation=True,
)
return new_class
T = TypeVar("T")
def fill_required_fields(
entity_class: Type[BaseEntity],
default_values={},
):
required_fields = [
(name, default_values.get(val.type_, val.type_))
for name, val in entity_class.__fields__.items()
if val.required
]
return entity_class.validate({x: y() for x, y in required_fields})
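# Hypothetical usage sketch (the test name and assertion are illustrative):
# drawing a random Entity subclass inside a hypothesis test:
#     from hypothesis import given
#     @given(example_entity(registry_=registry()))
#     def test_generated_entity_instantiates(entity_class):
#         instance = fill_required_fields(entity_class)
#         assert isinstance(instance, entity_class)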
|
[
"sqlmodel.Field"
] |
[((1149, 1189), 'hypothesis.strategies.sampled_from', 'st.sampled_from', (['(str, float, bool, int)'], {}), '((str, float, bool, int))\n', (1164, 1189), True, 'from hypothesis import strategies as st\n'), ((1202, 1303), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)', 'sa_column_kwargs': "{'autoincrement': False, 'unique': True}"}), "(default=None, primary_key=True, sa_column_kwargs={'autoincrement': \n False, 'unique': True})\n", (1207, 1303), False, 'from sqlmodel import Field\n'), ((3375, 3387), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (3382, 3387), False, 'from typing import Any, Dict, Tuple, Type, TypeVar\n'), ((986, 1022), 'hypothesis.strategies.text', 'st.text', (['ascii_lowercase'], {'min_size': 'x'}), '(ascii_lowercase, min_size=x)\n', (993, 1022), True, 'from hypothesis import strategies as st\n'), ((1335, 1369), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'foreign_key': 'x'}), '(default=None, foreign_key=x)\n', (1340, 1369), False, 'from sqlmodel import Field\n'), ((3299, 3309), 'sqlalchemy.orm.registry', 'registry', ([], {}), '()\n', (3307, 3309), False, 'from sqlalchemy.orm import registry\n')]
|
from enum import Enum
from typing import Optional
from sqlmodel import Field, SQLModel
class ExtensionType(str, Enum):
DOCKER = 'DOCKER'
WEBHOOK = 'WEBHOOK'
BINARY = 'BINARY'
class Extension(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
    # `condition` is a small piece of code that can be evaluated by the
    # interpreter, i.e., ext-server will run: if eval(condition): ...
    # In the condition, the extension has access to the event object, e.g.,
    # it can test e.event_type == 'build_image'
condition: str
extension_type: ExtensionType
    # ext-server will create a new process to execute the entrypoint. Each
    # extension type needs a different entrypoint format:
    # 1. for a Docker image, it should be like: docker exec xxx
    # 2. for a webhook, it should be a URL address.
    # 3. for a local binary, it should be a shell command.
entrypoint: str
remote_url: Optional[str]
vendor: str
name: str
|
[
"sqlmodel.Field"
] |
[((254, 291), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (259, 291), False, 'from sqlmodel import Field, SQLModel\n')]
|
from __future__ import absolute_import
import numpy as nm
from sfepy.base.base import output, assert_, get_default, Struct
from sfepy.homogenization.coefs_base import CorrSolution, \
TCorrectorsViaPressureEVP, CorrMiniApp
from sfepy.solvers.ts import TimeStepper
from six.moves import range
class PressureRHSVector( CorrMiniApp ):
def __call__( self, problem = None, data = None ):
problem = get_default( problem, self.problem )
problem.select_variables( self.variables )
problem.set_equations( self.equations )
problem.select_bcs(ebc_names = self.ebcs, epbc_names = self.epbcs,
lcbc_names=self.get('lcbcs', []))
state = problem.create_state()
state.apply_ebc()
eqs = problem.equations
eqs.set_variables_from_state(state.vec)
vec = eqs.create_stripped_state_vector()
eqs.time_update_materials(problem.get_timestepper())
eqs.evaluate(mode='weak', dw_mode='vector', asm_obj=vec)
return vec
class TCorrectorsRSViaPressureEVP( TCorrectorsViaPressureEVP ):
def __call__( self, problem = None, data = None ):
"""data: corrs_rs, evp"""
problem = get_default( problem, self.problem )
self.init_solvers(problem)
ts = problem.get_timestepper()
corrs, evp = [data[ii] for ii in self.requires]
assert_( evp.ebcs == self.ebcs )
assert_( evp.epbcs == self.epbcs )
dim = problem.get_dim()
self.setup_equations(self.equations)
solve = self.compute_correctors
states = nm.zeros((dim, dim), dtype=nm.object)
clist = []
for ir in range( dim ):
for ic in range( dim ):
states[ir,ic] = solve(evp, -1.0, corrs.states[ir,ic], ts)
clist.append((ir, ic))
corr_sol = CorrSolution(name=self.name,
states=states,
n_step=ts.n_step,
components=clist)
self.save(corr_sol, problem, ts)
return corr_sol
class TCorrectorsPressureViaPressureEVP( TCorrectorsViaPressureEVP ):
def __call__( self, problem = None, data = None, save_hook = None ):
"""data: corrs_pressure, evp, optionally vec_g"""
problem = get_default( problem, self.problem )
self.init_solvers(problem)
ts = problem.get_timestepper()
corrs, evp = [data[ii] for ii in self.requires[:2]]
if len(self.requires) == 3:
vec_g = data[self.requires[2]]
else:
vec_g = None
assert_( evp.ebcs == self.ebcs )
assert_( evp.epbcs == self.epbcs )
self.setup_equations(self.equations)
solve = self.compute_correctors
state = solve(evp, 1.0, corrs.state, ts, vec_g=vec_g)
corr_sol = CorrSolution(name=self.name,
state=state,
n_step=ts.n_step)
self.save(corr_sol, problem, ts)
return corr_sol
|
[
"sfepy.homogenization.coefs_base.CorrSolution",
"sfepy.base.base.get_default",
"sfepy.base.base.assert_"
] |
[((416, 450), 'sfepy.base.base.get_default', 'get_default', (['problem', 'self.problem'], {}), '(problem, self.problem)\n', (427, 450), False, 'from sfepy.base.base import output, assert_, get_default, Struct\n'), ((1204, 1238), 'sfepy.base.base.get_default', 'get_default', (['problem', 'self.problem'], {}), '(problem, self.problem)\n', (1215, 1238), False, 'from sfepy.base.base import output, assert_, get_default, Struct\n'), ((1381, 1411), 'sfepy.base.base.assert_', 'assert_', (['(evp.ebcs == self.ebcs)'], {}), '(evp.ebcs == self.ebcs)\n', (1388, 1411), False, 'from sfepy.base.base import output, assert_, get_default, Struct\n'), ((1422, 1454), 'sfepy.base.base.assert_', 'assert_', (['(evp.epbcs == self.epbcs)'], {}), '(evp.epbcs == self.epbcs)\n', (1429, 1454), False, 'from sfepy.base.base import output, assert_, get_default, Struct\n'), ((1594, 1631), 'numpy.zeros', 'nm.zeros', (['(dim, dim)'], {'dtype': 'nm.object'}), '((dim, dim), dtype=nm.object)\n', (1602, 1631), True, 'import numpy as nm\n'), ((1669, 1679), 'six.moves.range', 'range', (['dim'], {}), '(dim)\n', (1674, 1679), False, 'from six.moves import range\n'), ((1852, 1931), 'sfepy.homogenization.coefs_base.CorrSolution', 'CorrSolution', ([], {'name': 'self.name', 'states': 'states', 'n_step': 'ts.n_step', 'components': 'clist'}), '(name=self.name, states=states, n_step=ts.n_step, components=clist)\n', (1864, 1931), False, 'from sfepy.homogenization.coefs_base import CorrSolution, TCorrectorsViaPressureEVP, CorrMiniApp\n'), ((2317, 2351), 'sfepy.base.base.get_default', 'get_default', (['problem', 'self.problem'], {}), '(problem, self.problem)\n', (2328, 2351), False, 'from sfepy.base.base import output, assert_, get_default, Struct\n'), ((2616, 2646), 'sfepy.base.base.assert_', 'assert_', (['(evp.ebcs == self.ebcs)'], {}), '(evp.ebcs == self.ebcs)\n', (2623, 2646), False, 'from sfepy.base.base import output, assert_, get_default, Struct\n'), ((2657, 2689), 'sfepy.base.base.assert_', 'assert_', (['(evp.epbcs == self.epbcs)'], {}), '(evp.epbcs == self.epbcs)\n', (2664, 2689), False, 'from sfepy.base.base import output, assert_, get_default, Struct\n'), ((2861, 2920), 'sfepy.homogenization.coefs_base.CorrSolution', 'CorrSolution', ([], {'name': 'self.name', 'state': 'state', 'n_step': 'ts.n_step'}), '(name=self.name, state=state, n_step=ts.n_step)\n', (2873, 2920), False, 'from sfepy.homogenization.coefs_base import CorrSolution, TCorrectorsViaPressureEVP, CorrMiniApp\n'), ((1705, 1715), 'six.moves.range', 'range', (['dim'], {}), '(dim)\n', (1710, 1715), False, 'from six.moves import range\n')]
|
import numpy as nm
from sfepy.base.base import assert_
from sfepy.linalg import dot_sequences
from sfepy.terms.terms import Term, terms
class ZeroTerm(Term):
r"""
A do-nothing term useful for introducing additional variables into the
equations.
:Definition:
.. math::
0
:Arguments:
- virtual : :math:`q` or :math:`\ul{v}`
- state : :math:`p` or :math:`\ul{u}`
"""
name = 'dw_zero'
arg_types = ('virtual', 'state')
arg_shapes = {'virtual' : ('N', None), 'state' : 'N'}
@staticmethod
def function(out):
out.fill(0.0)
return 0
def get_fargs(self, vvar, svar,
mode=None, term_mode=None, diff_var=None, **kwargs):
return ()
class IntegrateVolumeTerm(Term):
r"""
Evaluate (weighted) variable in a volume region.
Depending on evaluation mode, integrate a variable over a volume region
('eval'), average it in elements ('el_avg') or interpolate it into volume
quadrature points ('qp').
Supports 'eval', 'el_avg' and 'qp' evaluation modes.
:Definition:
.. math::
\int_\Omega y \mbox{ , } \int_\Omega \ul{y} \\
\int_\Omega c y \mbox{ , } \int_\Omega c \ul{y}
.. math::
\mbox{vector for } K \from \Ical_h:
\int_{T_K} y / \int_{T_K} 1 \mbox{ , }
\int_{T_K} \ul{y} / \int_{T_K} 1 \\
\mbox{vector for } K \from \Ical_h:
\int_{T_K} c y / \int_{T_K} 1 \mbox{ , }
\int_{T_K} c \ul{y} / \int_{T_K} 1
.. math::
y|_{qp} \mbox{ , } \ul{y}|_{qp} \\
c y|_{qp} \mbox{ , } c \ul{y}|_{qp}
:Arguments:
- material : :math:`c` (optional)
- parameter : :math:`y` or :math:`\ul{y}`
"""
name = 'ev_volume_integrate'
arg_types = ('opt_material', 'parameter')
arg_shapes = [{'opt_material' : '1, 1', 'parameter' : 'N'},
{'opt_material' : None}]
@staticmethod
def function(out, val_qp, vg, fmode):
if fmode == 2:
out[:] = val_qp
status = 0
else:
status = vg.integrate(out, val_qp, fmode)
return status
def get_fargs(self, material, parameter,
mode=None, term_mode=None, diff_var=None, **kwargs):
vg, _ = self.get_mapping(parameter)
val_qp = self.get(parameter, 'val')
if material is not None:
val_qp *= material
fmode = {'eval' : 0, 'el_avg' : 1, 'qp' : 2}.get(mode, 1)
return val_qp, vg, fmode
def get_eval_shape(self, material, parameter,
mode=None, term_mode=None, diff_var=None, **kwargs):
n_el, n_qp, dim, n_en, n_c = self.get_data_shape(parameter)
if mode != 'qp':
n_qp = 1
return (n_el, n_qp, n_c, 1), parameter.dtype
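# Usage sketch (an assumption about typical driving code, names illustrative):
# such terms are normally evaluated through Problem.evaluate() with the term
# in a string expression, e.g.
#     val = pb.evaluate("ev_volume_integrate.2.Omega(u)", mode='eval')
# 'eval' integrates over the whole region, 'el_avg' yields one value per
# element and 'qp' returns quadrature-point values, matching fmode 0/1/2.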
class IntegrateSurfaceTerm(Term):
r"""
Evaluate (weighted) variable in a surface region.
Depending on evaluation mode, integrate a variable over a surface region
('eval'), average it in element faces ('el_avg') or interpolate it into
surface quadrature points ('qp'). For vector variables, setting `term_mode`
to `'flux'` leads to computing corresponding fluxes for the three modes
instead.
Supports 'eval', 'el_avg' and 'qp' evaluation modes.
:Definition:
.. math::
\int_\Gamma y \mbox{ , } \int_\Gamma \ul{y}
\mbox{ , } \int_\Gamma \ul{y} \cdot \ul{n} \\
\int_\Gamma c y \mbox{ , } \int_\Gamma c \ul{y}
\mbox{ , } \int_\Gamma c \ul{y} \cdot \ul{n} \mbox{ flux }
.. math::
\mbox{vector for } K \from \Ical_h:
\int_{T_K} y / \int_{T_K} 1 \mbox{ , }
\int_{T_K} \ul{y} / \int_{T_K} 1 \mbox{ , }
\int_{T_K} (\ul{y} \cdot \ul{n}) / \int_{T_K} 1 \\
\mbox{vector for } K \from \Ical_h:
\int_{T_K} c y / \int_{T_K} 1 \mbox{ , }
\int_{T_K} c \ul{y} / \int_{T_K} 1 \mbox{ , }
\int_{T_K} (c \ul{y} \cdot \ul{n}) / \int_{T_K} 1
.. math::
y|_{qp} \mbox{ , } \ul{y}|_{qp}
\mbox{ , } (\ul{y} \cdot \ul{n})|_{qp} \mbox{ flux } \\
c y|_{qp} \mbox{ , } c \ul{y}|_{qp}
\mbox{ , } (c \ul{y} \cdot \ul{n})|_{qp} \mbox{ flux }
:Arguments:
- material : :math:`c` (optional)
- parameter : :math:`y` or :math:`\ul{y}`
"""
name = 'ev_surface_integrate'
arg_types = ('opt_material', 'parameter')
arg_shapes = [{'opt_material' : '1, 1', 'parameter' : 'N'},
{'opt_material' : None}]
integration = 'surface'
@staticmethod
def function(out, val_qp, sg, fmode):
if fmode == 2:
out[:] = val_qp
status = 0
elif fmode == 5:
normal = sg.normal
out[:] = dot_sequences(val_qp, normal)
status = 0
else:
status = sg.integrate(out, val_qp, fmode)
return status
def get_fargs(self, material, parameter,
mode=None, term_mode=None, diff_var=None, **kwargs):
sg, _ = self.get_mapping(parameter)
val_qp = self.get(parameter, 'val')
if material is not None:
val_qp *= material
fmode = {'eval' : 0, 'el_avg' : 1, 'qp' : 2}.get(mode, 1)
if term_mode == 'flux':
n_fa, n_qp, dim, n_fn, n_c = self.get_data_shape(parameter)
if n_c == dim:
fmode += 3
return val_qp, sg, fmode
def get_eval_shape(self, material, parameter,
mode=None, term_mode=None, diff_var=None, **kwargs):
n_fa, n_qp, dim, n_fn, n_c = self.get_data_shape(parameter)
if mode != 'qp':
n_qp = 1
if term_mode == 'flux':
n_c = 1
return (n_fa, n_qp, n_c, 1), parameter.dtype
class IntegrateVolumeOperatorTerm(Term):
r"""
Volume integral of a test function weighted by a scalar function
:math:`c`.
:Definition:
.. math::
\int_\Omega q \mbox{ or } \int_\Omega c q
:Arguments:
- material : :math:`c` (optional)
- virtual : :math:`q`
"""
name = 'dw_volume_integrate'
arg_types = ('opt_material', 'virtual')
arg_shapes = [{'opt_material' : '1, 1', 'virtual' : (1, None)},
{'opt_material' : None}]
@staticmethod
def function(out, material, bf, geo):
bf_t = nm.tile(bf.transpose((0, 1, 3, 2)), (out.shape[0], 1, 1, 1))
bf_t = nm.ascontiguousarray(bf_t)
if material is not None:
status = geo.integrate(out, material * bf_t)
else:
status = geo.integrate(out, bf_t)
return status
def get_fargs(self, material, virtual,
mode=None, term_mode=None, diff_var=None, **kwargs):
assert_(virtual.n_components == 1)
geo, _ = self.get_mapping(virtual)
return material, geo.bf, geo
class IntegrateSurfaceOperatorTerm(IntegrateVolumeOperatorTerm):
r"""
Surface integral of a test function weighted by a scalar function
:math:`c`.
:Definition:
.. math::
\int_{\Gamma} q \mbox{ or } \int_\Gamma c q
:Arguments:
- material : :math:`c` (optional)
- virtual : :math:`q`
"""
name = 'dw_surface_integrate'
arg_types = ('opt_material', 'virtual')
arg_shapes = [{'opt_material' : '1, 1', 'virtual' : (1, None)},
{'opt_material' : None}]
integration = 'surface'
class VolumeTerm(Term):
r"""
Volume of a domain. Uses approximation of the parameter variable.
:Definition:
.. math::
\int_\Omega 1
:Arguments:
- parameter : any variable
"""
name = 'd_volume'
arg_types = ('parameter',)
arg_shapes = [{'parameter' : 'N'}]
@staticmethod
def function(out, geo):
out[:] = geo.volume
return 0
def get_fargs(self, parameter,
mode=None, term_mode=None, diff_var=None, **kwargs):
geo, _ = self.get_mapping(parameter)
return geo,
def get_eval_shape(self, parameter,
mode=None, term_mode=None, diff_var=None, **kwargs):
n_cell, n_qp, dim, n_n, n_c = self.get_data_shape(parameter)
return (n_cell, 1, 1, 1), parameter.dtype
class SurfaceTerm(VolumeTerm):
r"""
Surface of a domain. Uses approximation of the parameter variable.
:Definition:
.. math::
\int_\Gamma 1
:Arguments:
- parameter : any variable
"""
name = 'd_surface'
arg_types = ('parameter',)
arg_shapes = {'parameter' : 'N'}
integration = 'surface'
class VolumeSurfaceTerm(Term):
r"""
Volume of a :math:`D`-dimensional domain, using a surface integral. Uses
approximation of the parameter variable.
:Definition:
.. math::
1 / D \int_\Gamma \ul{x} \cdot \ul{n}
:Arguments:
- parameter : any variable
"""
name = 'd_volume_surface'
arg_types = ('parameter',)
arg_shapes = {'parameter' : 'N'}
integration = 'surface'
function = staticmethod(terms.d_volume_surface)
def get_fargs(self, parameter,
mode=None, term_mode=None, diff_var=None, **kwargs):
sg, _ = self.get_mapping(parameter)
sd = parameter.field.surface_data[self.region.name]
coor = parameter.field.get_coor()
return coor, sg, sd.econn.copy()
def get_eval_shape(self, parameter,
mode=None, term_mode=None, diff_var=None, **kwargs):
n_fa, n_qp, dim, n_fn, n_c = self.get_data_shape(parameter)
return (n_fa, 1, 1, 1), parameter.dtype
class SurfaceMomentTerm(Term):
r"""
Surface integral of the outer product of the unit outward normal
    :math:`\ul{n}` and the coordinate :math:`\ul{x}` shifted by :math:`\ul{x}_0`.
:Definition:
.. math::
\int_{\Gamma} \ul{n} (\ul{x} - \ul{x}_0)
:Arguments:
- material : :math:`\ul{x}_0` (special)
- parameter : any variable
"""
name = 'd_surface_moment'
arg_types = ('material', 'parameter')
arg_shapes = {'material' : '.: D', 'parameter' : 'N'}
integration = 'surface'
function = staticmethod(terms.di_surface_moment)
def get_fargs(self, material, parameter,
mode=None, term_mode=None, diff_var=None, **kwargs):
sg, _ = self.get_mapping(parameter)
sd = parameter.field.surface_data[self.region.name]
coor = parameter.field.get_coor() \
- nm.asarray(material, dtype=nm.float64)[None,:]
return coor, sg, sd.econn.copy()
def get_eval_shape(self, material, parameter,
mode=None, term_mode=None, diff_var=None, **kwargs):
n_fa, n_qp, dim, n_fn, n_c = self.get_data_shape(parameter)
return (n_fa, 1, dim, dim), parameter.dtype
class IntegrateVolumeMatTerm(Term):
r"""
Evaluate material parameter :math:`m` in a volume region.
Depending on evaluation mode, integrate a material parameter over a
volume region ('eval'), average it in elements ('el_avg') or
interpolate it into volume quadrature points ('qp').
Uses reference mapping of :math:`y` variable.
Supports 'eval', 'el_avg' and 'qp' evaluation modes.
:Definition:
.. math::
\int_\Omega m
.. math::
\mbox{vector for } K \from \Ical_h: \int_{T_K} m / \int_{T_K} 1
.. math::
m|_{qp}
:Arguments:
- material : :math:`m` (can have up to two dimensions)
- parameter : :math:`y`
"""
name = 'ev_volume_integrate_mat'
arg_types = ('material', 'parameter')
arg_shapes = [{'material' : 'N, N', 'parameter' : 'N'}]
@staticmethod
def function(out, mat, geo, fmode):
if fmode == 2:
out[:] = mat
status = 0
else:
status = geo.integrate(out, mat, fmode)
return status
def get_fargs(self, mat, parameter,
mode=None, term_mode=None, diff_var=None, **kwargs):
geo, _ = self.get_mapping(parameter)
fmode = {'eval' : 0, 'el_avg' : 1, 'qp' : 2}.get(mode, 1)
return mat, geo, fmode
def get_eval_shape(self, mat, parameter,
mode=None, term_mode=None, diff_var=None, **kwargs):
n_el, n_qp, dim, n_en, n_c = self.get_data_shape(parameter)
n_row, n_col = mat.shape[-2:]
if mode != 'qp':
n_qp = 1
return (n_el, n_qp, n_row, n_col), mat.dtype
class IntegrateSurfaceMatTerm(IntegrateVolumeMatTerm):
r"""
Evaluate material parameter :math:`m` in a surface region.
Depending on evaluation mode, integrate a material parameter over a
surface region ('eval'), average it in faces ('el_avg') or
interpolate it into surface quadrature points ('qp').
Uses reference mapping of :math:`y` variable.
Supports 'eval', 'el_avg' and 'qp' evaluation modes.
:Definition:
.. math::
\int_\Gamma m
.. math::
\mbox{vector for } K \from \Ical_h: \int_{T_K} m / \int_{T_K} 1
.. math::
m|_{qp}
:Arguments:
- material : :math:`m` (can have up to two dimensions)
- parameter : :math:`y`
"""
name = 'ev_surface_integrate_mat'
arg_types = ('material', 'parameter')
arg_shapes = [{'material' : 'N, N', 'parameter' : 'N'}]
integration = 'surface'
class SumNodalValuesTerm(Term):
r"""
Sum nodal values.
:Arguments:
- parameter : :math:`p` or :math:`\ul{u}`
"""
name = 'd_sum_vals'
arg_types = ('parameter',)
arg_shapes = {'parameter' : 'N'}
@staticmethod
def function(out, vec):
out[:] = nm.sum(vec, 0)
return 0
def get_fargs(self, parameter,
mode=None, term_mode=None, diff_var=None, **kwargs):
vec = parameter.get_state_in_region(self.region)
return vec,
def get_eval_shape(self, parameter,
mode=None, term_mode=None, diff_var=None, **kwargs):
n_el, n_qp, dim, n_en, n_c = self.get_data_shape(parameter)
return (n_el, n_c), parameter.dtype
|
[
"sfepy.linalg.dot_sequences",
"sfepy.base.base.assert_"
] |
[((6427, 6453), 'numpy.ascontiguousarray', 'nm.ascontiguousarray', (['bf_t'], {}), '(bf_t)\n', (6447, 6453), True, 'import numpy as nm\n'), ((6749, 6783), 'sfepy.base.base.assert_', 'assert_', (['(virtual.n_components == 1)'], {}), '(virtual.n_components == 1)\n', (6756, 6783), False, 'from sfepy.base.base import assert_\n'), ((13648, 13662), 'numpy.sum', 'nm.sum', (['vec', '(0)'], {}), '(vec, 0)\n', (13654, 13662), True, 'import numpy as nm\n'), ((4748, 4777), 'sfepy.linalg.dot_sequences', 'dot_sequences', (['val_qp', 'normal'], {}), '(val_qp, normal)\n', (4761, 4777), False, 'from sfepy.linalg import dot_sequences\n'), ((10473, 10511), 'numpy.asarray', 'nm.asarray', (['material'], {'dtype': 'nm.float64'}), '(material, dtype=nm.float64)\n', (10483, 10511), True, 'import numpy as nm\n')]
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from io import BytesIO
import numpy as np
from helpers import MLP, graph_mode
import megengine.functional as F
from megengine import load, save
from megengine.core import TensorDict, tensor
from megengine.jit import trace
from megengine.optimizer import SGD, Adam
from megengine.test import assertTensorClose
def get_input():
batch_size = 2
input_dim = 28
data_shape = (batch_size, input_dim)
label_shape = (batch_size,)
data = tensor()
label = tensor(dtype=np.int32)
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
return data, data_shape, label, label_shape
def test_sgd_simple():
data, data_shape, label, label_shape = get_input()
mlp = MLP()
opt = SGD(mlp.parameters(), lr=0.01, weight_decay=0.1)
for idx in range(3):
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
if idx % 2:
opt.zero_grad()
else:
mlp.zero_grad()
opt.backward(loss)
grads = TensorDict()
orig_params = TensorDict()
for param in mlp.parameters():
grad = F.grad(loss, param, use_virtual_grad=False)
assertTensorClose(grad.numpy(), param.grad.numpy())
grads[param] = np.copy(grad.numpy())
orig_params[param] = np.copy(param.numpy())
opt.step()
for param in mlp.parameters():
assertTensorClose(
param.numpy(), orig_params[param] * 0.999 - grads[param] * 0.01
)
def test_sgd_momentum():
data, data_shape, label, label_shape = get_input()
mlp = MLP()
opt = SGD(mlp.parameters(), lr=0.01, momentum=0.9)
slots = TensorDict()
for param in mlp.parameters():
slots[param] = np.zeros(param.shape).astype(np.float32)
for _ in range(3):
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.zero_grad()
opt.backward(loss)
orig_params = TensorDict()
grads = TensorDict()
for param in mlp.parameters():
orig_params[param] = np.copy(param.numpy())
grads[param] = np.copy(param.grad.numpy())
opt.step()
for param in mlp.parameters():
slot = slots[param]
orig_param = orig_params[param]
slot *= 0.9
slot -= param.grad.numpy() * 0.01
assertTensorClose(param.numpy(), orig_param + slot)
# TODO: put opt.step() inside trace
def test_sgd_momentum_static():
_, data_shape, _, label_shape = get_input()
mlp = MLP()
opt = SGD(mlp.parameters(), lr=0.01, momentum=0.9)
@trace
def f(data, label):
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.zero_grad()
opt.backward(loss)
slots = TensorDict()
for param in mlp.parameters():
slots[param] = np.zeros(param.shape).astype(np.float32)
for _ in range(3):
f(
np.random.random(data_shape).astype(np.float32),
np.random.randint(0, 10, label_shape).astype(np.int32),
)
orig_params = TensorDict()
grads = TensorDict()
for param in mlp.parameters():
orig_params[param] = np.copy(param.numpy())
grads[param] = np.copy(param.grad.numpy())
opt.step()
for param in mlp.parameters():
slot = slots[param]
orig_param = orig_params[param]
slot *= 0.9
slot -= param.grad.numpy() * 0.01
assertTensorClose(param.numpy(), orig_param + slot)
def test_update_lr():
data, data_shape, label, label_shape = get_input()
mlp = MLP()
opt = SGD(mlp.parameters(), lr=0.01)
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.zero_grad()
opt.backward(loss)
opt.step()
for group in opt.param_groups:
group["lr"] += 0.02
for _ in range(3):
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.zero_grad()
opt.backward(loss)
for param in mlp.parameters():
grad = F.grad(loss, param, use_virtual_grad=False)
assertTensorClose(grad.numpy(), param.grad.numpy())
orig_params = []
for param in mlp.parameters():
orig_params.append(np.copy(param.numpy()))
opt.step()
for param, orig_param in zip(mlp.parameters(), orig_params):
assertTensorClose(param.numpy(), orig_param - param.grad.numpy() * 0.03)
def test_adam():
data, data_shape, label, label_shape = get_input()
mlp = MLP()
beta0 = 0.8
beta1 = 0.9
eps = 1e-4
opt = Adam(mlp.parameters(), lr=0.01, betas=(beta0, beta1), eps=eps)
m_slots = TensorDict()
v_slots = TensorDict()
for param in mlp.parameters():
m_slots[param] = np.zeros(param.shape).astype(np.float32)
v_slots[param] = np.zeros(param.shape).astype(np.float32)
step_size = 0
def check_value():
for param in mlp.parameters():
grad = param.grad.numpy()
orig_param = orig_params[param]
m = m_slots[param]
v = v_slots[param]
m *= beta0
m += (1 - beta0) * grad
v *= beta1
v += (1 - beta1) * grad * grad
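            # Adam bias correction below: the running moments are rescaled by
            # 1 / (1 - beta ** t) so early steps are not biased toward zero,
            # giving update = m_hat / (sqrt(v_hat) + eps).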
update = (m / (1 - beta0 ** step_size)) / (
np.sqrt(v / (1 - beta1 ** step_size)) + eps
)
assertTensorClose(param.numpy(), orig_param - 0.01 * update)
# eager
for _ in range(3):
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.zero_grad()
grads = opt.backward(loss)
orig_params = TensorDict()
for param in mlp.parameters():
orig_params[param] = np.copy(param.numpy())
opt.step()
step_size += 1
check_value()
# static
@trace
def f(data, label):
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.backward(loss)
for _ in range(3):
opt.zero_grad()
orig_params = TensorDict()
for param in mlp.parameters():
orig_params[param] = np.copy(param.numpy())
f(
np.random.random(data_shape).astype(np.float32),
np.random.randint(0, 10, label_shape).astype(np.int32),
)
opt.step()
step_size += 1
check_value()
@graph_mode("eager", "static")
def test_optimizer_serialization():
data, data_shape, label, label_shape = get_input()
mlp = MLP()
opt = SGD(mlp.parameters(), lr=0.01, momentum=0.9)
slots = TensorDict()
for param in mlp.parameters():
slots[param] = np.zeros(param.shape).astype(np.float32)
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.zero_grad()
opt.backward(loss)
opt.step()
for param in mlp.parameters():
slot = slots[param]
slot *= 0.9
slot -= param.grad.numpy() * 0.01
with BytesIO() as fout:
save(opt.state_dict(), fout)
fout.seek(0)
state_dict = load(fout)
opt1 = SGD(mlp.parameters(), lr=0.02, momentum=0.8)
opt1.load_state_dict(state_dict)
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt1.zero_grad()
opt1.backward(loss)
orig_params = TensorDict()
for param in mlp.parameters():
orig_params[param] = np.copy(param.numpy())
opt1.step()
for param in mlp.parameters():
orig_param = orig_params[param]
slot = slots[param]
slot *= 0.9
slot -= param.grad.numpy() * 0.01
assertTensorClose(param.numpy(), orig_param + slot)
|
[
"megengine.load",
"megengine.functional.grad",
"megengine.core.TensorDict",
"megengine.core.tensor"
] |
[((7391, 7420), 'helpers.graph_mode', 'graph_mode', (['"""eager"""', '"""static"""'], {}), "('eager', 'static')\n", (7401, 7420), False, 'from helpers import MLP, graph_mode\n'), ((829, 837), 'megengine.core.tensor', 'tensor', ([], {}), '()\n', (835, 837), False, 'from megengine.core import TensorDict, tensor\n'), ((850, 872), 'megengine.core.tensor', 'tensor', ([], {'dtype': 'np.int32'}), '(dtype=np.int32)\n', (856, 872), False, 'from megengine.core import TensorDict, tensor\n'), ((1138, 1143), 'helpers.MLP', 'MLP', ([], {}), '()\n', (1141, 1143), False, 'from helpers import MLP, graph_mode\n'), ((2172, 2177), 'helpers.MLP', 'MLP', ([], {}), '()\n', (2175, 2177), False, 'from helpers import MLP, graph_mode\n'), ((2245, 2257), 'megengine.core.TensorDict', 'TensorDict', ([], {}), '()\n', (2255, 2257), False, 'from megengine.core import TensorDict, tensor\n'), ((3258, 3263), 'helpers.MLP', 'MLP', ([], {}), '()\n', (3261, 3263), False, 'from helpers import MLP, graph_mode\n'), ((3501, 3513), 'megengine.core.TensorDict', 'TensorDict', ([], {}), '()\n', (3511, 3513), False, 'from megengine.core import TensorDict, tensor\n'), ((4357, 4362), 'helpers.MLP', 'MLP', ([], {}), '()\n', (4360, 4362), False, 'from helpers import MLP, graph_mode\n'), ((5432, 5437), 'helpers.MLP', 'MLP', ([], {}), '()\n', (5435, 5437), False, 'from helpers import MLP, graph_mode\n'), ((5572, 5584), 'megengine.core.TensorDict', 'TensorDict', ([], {}), '()\n', (5582, 5584), False, 'from megengine.core import TensorDict, tensor\n'), ((5599, 5611), 'megengine.core.TensorDict', 'TensorDict', ([], {}), '()\n', (5609, 5611), False, 'from megengine.core import TensorDict, tensor\n'), ((7522, 7527), 'helpers.MLP', 'MLP', ([], {}), '()\n', (7525, 7527), False, 'from helpers import MLP, graph_mode\n'), ((7595, 7607), 'megengine.core.TensorDict', 'TensorDict', ([], {}), '()\n', (7605, 7607), False, 'from megengine.core import TensorDict, tensor\n'), ((961, 998), 'numpy.random.randint', 'np.random.randint', (['(0)', '(10)', 'label_shape'], {}), '(0, 10, label_shape)\n', (978, 998), True, 'import numpy as np\n'), ((1578, 1590), 'megengine.core.TensorDict', 'TensorDict', ([], {}), '()\n', (1588, 1590), False, 'from megengine.core import TensorDict, tensor\n'), ((1613, 1625), 'megengine.core.TensorDict', 'TensorDict', ([], {}), '()\n', (1623, 1625), False, 'from megengine.core import TensorDict, tensor\n'), ((2670, 2682), 'megengine.core.TensorDict', 'TensorDict', ([], {}), '()\n', (2680, 2682), False, 'from megengine.core import TensorDict, tensor\n'), ((2699, 2711), 'megengine.core.TensorDict', 'TensorDict', ([], {}), '()\n', (2709, 2711), False, 'from megengine.core import TensorDict, tensor\n'), ((3808, 3820), 'megengine.core.TensorDict', 'TensorDict', ([], {}), '()\n', (3818, 3820), False, 'from megengine.core import TensorDict, tensor\n'), ((3837, 3849), 'megengine.core.TensorDict', 'TensorDict', ([], {}), '()\n', (3847, 3849), False, 'from megengine.core import TensorDict, tensor\n'), ((6666, 6678), 'megengine.core.TensorDict', 'TensorDict', ([], {}), '()\n', (6676, 6678), False, 'from megengine.core import TensorDict, tensor\n'), ((7066, 7078), 'megengine.core.TensorDict', 'TensorDict', ([], {}), '()\n', (7076, 7078), False, 'from megengine.core import TensorDict, tensor\n'), ((7975, 7984), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (7982, 7984), False, 'from io import BytesIO\n'), ((8073, 8083), 'megengine.load', 'load', (['fout'], {}), '(fout)\n', (8077, 8083), False, 'from megengine import load, save\n'), ((8478, 8490), 
'megengine.core.TensorDict', 'TensorDict', ([], {}), '()\n', (8488, 8490), False, 'from megengine.core import TensorDict, tensor\n'), ((1324, 1361), 'numpy.random.randint', 'np.random.randint', (['(0)', '(10)', 'label_shape'], {}), '(0, 10, label_shape)\n', (1341, 1361), True, 'import numpy as np\n'), ((1684, 1727), 'megengine.functional.grad', 'F.grad', (['loss', 'param'], {'use_virtual_grad': '(False)'}), '(loss, param, use_virtual_grad=False)\n', (1690, 1727), True, 'import megengine.functional as F\n'), ((2476, 2513), 'numpy.random.randint', 'np.random.randint', (['(0)', '(10)', 'label_shape'], {}), '(0, 10, label_shape)\n', (2493, 2513), True, 'import numpy as np\n'), ((4718, 4755), 'numpy.random.randint', 'np.random.randint', (['(0)', '(10)', 'label_shape'], {}), '(0, 10, label_shape)\n', (4735, 4755), True, 'import numpy as np\n'), ((4948, 4991), 'megengine.functional.grad', 'F.grad', (['loss', 'param'], {'use_virtual_grad': '(False)'}), '(loss, param, use_virtual_grad=False)\n', (4954, 4991), True, 'import megengine.functional as F\n'), ((6464, 6501), 'numpy.random.randint', 'np.random.randint', (['(0)', '(10)', 'label_shape'], {}), '(0, 10, label_shape)\n', (6481, 6501), True, 'import numpy as np\n'), ((8282, 8319), 'numpy.random.randint', 'np.random.randint', (['(0)', '(10)', 'label_shape'], {}), '(0, 10, label_shape)\n', (8299, 8319), True, 'import numpy as np\n'), ((892, 920), 'numpy.random.random', 'np.random.random', (['data_shape'], {}), '(data_shape)\n', (908, 920), True, 'import numpy as np\n'), ((2316, 2337), 'numpy.zeros', 'np.zeros', (['param.shape'], {}), '(param.shape)\n', (2324, 2337), True, 'import numpy as np\n'), ((3572, 3593), 'numpy.zeros', 'np.zeros', (['param.shape'], {}), '(param.shape)\n', (3580, 3593), True, 'import numpy as np\n'), ((5672, 5693), 'numpy.zeros', 'np.zeros', (['param.shape'], {}), '(param.shape)\n', (5680, 5693), True, 'import numpy as np\n'), ((5738, 5759), 'numpy.zeros', 'np.zeros', (['param.shape'], {}), '(param.shape)\n', (5746, 5759), True, 'import numpy as np\n'), ((7666, 7687), 'numpy.zeros', 'np.zeros', (['param.shape'], {}), '(param.shape)\n', (7674, 7687), True, 'import numpy as np\n'), ((1251, 1279), 'numpy.random.random', 'np.random.random', (['data_shape'], {}), '(data_shape)\n', (1267, 1279), True, 'import numpy as np\n'), ((2403, 2431), 'numpy.random.random', 'np.random.random', (['data_shape'], {}), '(data_shape)\n', (2419, 2431), True, 'import numpy as np\n'), ((3659, 3687), 'numpy.random.random', 'np.random.random', (['data_shape'], {}), '(data_shape)\n', (3675, 3687), True, 'import numpy as np\n'), ((3720, 3757), 'numpy.random.randint', 'np.random.randint', (['(0)', '(10)', 'label_shape'], {}), '(0, 10, label_shape)\n', (3737, 3757), True, 'import numpy as np\n'), ((4645, 4673), 'numpy.random.random', 'np.random.random', (['data_shape'], {}), '(data_shape)\n', (4661, 4673), True, 'import numpy as np\n'), ((6201, 6238), 'numpy.sqrt', 'np.sqrt', (['(v / (1 - beta1 ** step_size))'], {}), '(v / (1 - beta1 ** step_size))\n', (6208, 6238), True, 'import numpy as np\n'), ((6391, 6419), 'numpy.random.random', 'np.random.random', (['data_shape'], {}), '(data_shape)\n', (6407, 6419), True, 'import numpy as np\n'), ((7197, 7225), 'numpy.random.random', 'np.random.random', (['data_shape'], {}), '(data_shape)\n', (7213, 7225), True, 'import numpy as np\n'), ((7258, 7295), 'numpy.random.randint', 'np.random.randint', (['(0)', '(10)', 'label_shape'], {}), '(0, 10, label_shape)\n', (7275, 7295), True, 'import numpy as np\n'), ((8209, 
8237), 'numpy.random.random', 'np.random.random', (['data_shape'], {}), '(data_shape)\n', (8225, 8237), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
"""
Convert a mesh file from one SfePy-supported format to another.
Examples::
$ ./script/convert_mesh.py meshes/3d/cylinder.mesh new.vtk
$ ./script/convert_mesh.py meshes/3d/cylinder.mesh new.vtk -s2.5
$ ./script/convert_mesh.py meshes/3d/cylinder.mesh new.vtk -s0.5,2,1
$ ./script/convert_mesh.py meshes/3d/cylinder.mesh new.vtk -s0.5,2,1 -c 0
"""
import sys
sys.path.append('.')
from optparse import OptionParser
from sfepy.base.base import nm, output
from sfepy.discrete.fem import Mesh, FEDomain
from sfepy.discrete.fem.meshio import (output_mesh_formats, MeshIO,
supported_cell_types)
usage = '%prog [options] filename_in filename_out\n' + __doc__.rstrip()
help = {
'scale' : 'scale factor (float or comma-separated list for each axis)'
' [default: %default]',
'center' : 'center of the output mesh (0 for origin or'
' comma-separated list for each axis) applied after scaling'
' [default: %default]',
'refine' : 'uniform refinement level [default: %default]',
'format' : 'output mesh format (overrides filename_out extension)',
'list' : 'list supported readable/writable output mesh formats',
}
def _parse_val_or_vec(option, name, parser):
if option is not None:
try:
try:
option = float(option)
except ValueError:
option = [float(ii) for ii in option.split(',')]
option = nm.array(option, dtype=nm.float64, ndmin=1)
        except Exception:
output('bad %s! (%s)' % (name, option))
parser.print_help()
sys.exit(1)
return option
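# For illustration: '-s2.5' parses to array([2.5]) and '-s0.5,2,1' to
# array([0.5, 2., 1.]).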
def main():
parser = OptionParser(usage=usage)
parser.add_option('-s', '--scale', metavar='scale',
action='store', dest='scale',
default=None, help=help['scale'])
parser.add_option('-c', '--center', metavar='center',
action='store', dest='center',
default=None, help=help['center'])
parser.add_option('-r', '--refine', metavar='level',
action='store', type=int, dest='refine',
default=0, help=help['refine'])
parser.add_option('-f', '--format', metavar='format',
action='store', type='string', dest='format',
default=None, help=help['format'])
parser.add_option('-l', '--list', action='store_true',
dest='list', help=help['list'])
(options, args) = parser.parse_args()
if options.list:
output('Supported readable mesh formats:')
output('--------------------------------')
output_mesh_formats('r')
output('')
output('Supported writable mesh formats:')
output('--------------------------------')
output_mesh_formats('w')
sys.exit(0)
if len(args) != 2:
parser.print_help()
sys.exit(1)
scale = _parse_val_or_vec(options.scale, 'scale', parser)
center = _parse_val_or_vec(options.center, 'center', parser)
filename_in, filename_out = args
mesh = Mesh.from_file(filename_in)
if scale is not None:
if len(scale) == 1:
tr = nm.eye(mesh.dim, dtype=nm.float64) * scale
elif len(scale) == mesh.dim:
tr = nm.diag(scale)
else:
raise ValueError('bad scale! (%s)' % scale)
mesh.transform_coors(tr)
if center is not None:
cc = 0.5 * mesh.get_bounding_box().sum(0)
shift = center - cc
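        # tr = [I | shift] is an affine transform that translates the mesh
        # so its bounding box centre lands on the requested center.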
tr = nm.c_[nm.eye(mesh.dim, dtype=nm.float64), shift[:, None]]
mesh.transform_coors(tr)
if options.refine > 0:
domain = FEDomain(mesh.name, mesh)
output('initial mesh: %d nodes %d elements'
% (domain.shape.n_nod, domain.shape.n_el))
for ii in range(options.refine):
output('refine %d...' % ii)
domain = domain.refine()
output('... %d nodes %d elements'
% (domain.shape.n_nod, domain.shape.n_el))
mesh = domain.mesh
io = MeshIO.for_format(filename_out, format=options.format,
writable=True)
cell_types = ', '.join(supported_cell_types[io.format])
output('writing [%s] %s...' % (cell_types, filename_out))
mesh.write(filename_out, io=io)
output('...done')
if __name__ == '__main__':
main()
|
[
"sfepy.discrete.fem.meshio.output_mesh_formats",
"sfepy.base.base.nm.diag",
"sfepy.discrete.fem.Mesh.from_file",
"sfepy.base.base.nm.array",
"sfepy.base.base.nm.eye",
"sfepy.discrete.fem.meshio.MeshIO.for_format",
"sfepy.discrete.fem.FEDomain",
"sfepy.base.base.output"
] |
[((393, 413), 'sys.path.append', 'sys.path.append', (['"""."""'], {}), "('.')\n", (408, 413), False, 'import sys\n'), ((1680, 1705), 'optparse.OptionParser', 'OptionParser', ([], {'usage': 'usage'}), '(usage=usage)\n', (1692, 1705), False, 'from optparse import OptionParser\n'), ((3131, 3158), 'sfepy.discrete.fem.Mesh.from_file', 'Mesh.from_file', (['filename_in'], {}), '(filename_in)\n', (3145, 3158), False, 'from sfepy.discrete.fem import Mesh, FEDomain\n'), ((4102, 4171), 'sfepy.discrete.fem.meshio.MeshIO.for_format', 'MeshIO.for_format', (['filename_out'], {'format': 'options.format', 'writable': '(True)'}), '(filename_out, format=options.format, writable=True)\n', (4119, 4171), False, 'from sfepy.discrete.fem.meshio import output_mesh_formats, MeshIO, supported_cell_types\n'), ((4264, 4321), 'sfepy.base.base.output', 'output', (["('writing [%s] %s...' % (cell_types, filename_out))"], {}), "('writing [%s] %s...' % (cell_types, filename_out))\n", (4270, 4321), False, 'from sfepy.base.base import nm, output\n'), ((4362, 4379), 'sfepy.base.base.output', 'output', (['"""...done"""'], {}), "('...done')\n", (4368, 4379), False, 'from sfepy.base.base import nm, output\n'), ((2580, 2622), 'sfepy.base.base.output', 'output', (['"""Supported readable mesh formats:"""'], {}), "('Supported readable mesh formats:')\n", (2586, 2622), False, 'from sfepy.base.base import nm, output\n'), ((2631, 2673), 'sfepy.base.base.output', 'output', (['"""--------------------------------"""'], {}), "('--------------------------------')\n", (2637, 2673), False, 'from sfepy.base.base import nm, output\n'), ((2682, 2706), 'sfepy.discrete.fem.meshio.output_mesh_formats', 'output_mesh_formats', (['"""r"""'], {}), "('r')\n", (2701, 2706), False, 'from sfepy.discrete.fem.meshio import output_mesh_formats, MeshIO, supported_cell_types\n'), ((2715, 2725), 'sfepy.base.base.output', 'output', (['""""""'], {}), "('')\n", (2721, 2725), False, 'from sfepy.base.base import nm, output\n'), ((2734, 2776), 'sfepy.base.base.output', 'output', (['"""Supported writable mesh formats:"""'], {}), "('Supported writable mesh formats:')\n", (2740, 2776), False, 'from sfepy.base.base import nm, output\n'), ((2785, 2827), 'sfepy.base.base.output', 'output', (['"""--------------------------------"""'], {}), "('--------------------------------')\n", (2791, 2827), False, 'from sfepy.base.base import nm, output\n'), ((2836, 2860), 'sfepy.discrete.fem.meshio.output_mesh_formats', 'output_mesh_formats', (['"""w"""'], {}), "('w')\n", (2855, 2860), False, 'from sfepy.discrete.fem.meshio import output_mesh_formats, MeshIO, supported_cell_types\n'), ((2869, 2880), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (2877, 2880), False, 'import sys\n'), ((2941, 2952), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2949, 2952), False, 'import sys\n'), ((3701, 3726), 'sfepy.discrete.fem.FEDomain', 'FEDomain', (['mesh.name', 'mesh'], {}), '(mesh.name, mesh)\n', (3709, 3726), False, 'from sfepy.discrete.fem import Mesh, FEDomain\n'), ((3735, 3826), 'sfepy.base.base.output', 'output', (["('initial mesh: %d nodes %d elements' % (domain.shape.n_nod, domain.shape.n_el)\n )"], {}), "('initial mesh: %d nodes %d elements' % (domain.shape.n_nod, domain.\n shape.n_el))\n", (3741, 3826), False, 'from sfepy.base.base import nm, output\n'), ((1467, 1510), 'sfepy.base.base.nm.array', 'nm.array', (['option'], {'dtype': 'nm.float64', 'ndmin': '(1)'}), '(option, dtype=nm.float64, ndmin=1)\n', (1475, 1510), False, 'from sfepy.base.base import nm, output\n'), ((3891, 3918), 
'sfepy.base.base.output', 'output', (["('refine %d...' % ii)"], {}), "('refine %d...' % ii)\n", (3897, 3918), False, 'from sfepy.base.base import nm, output\n'), ((3968, 4044), 'sfepy.base.base.output', 'output', (["('... %d nodes %d elements' % (domain.shape.n_nod, domain.shape.n_el))"], {}), "('... %d nodes %d elements' % (domain.shape.n_nod, domain.shape.n_el))\n", (3974, 4044), False, 'from sfepy.base.base import nm, output\n'), ((1539, 1578), 'sfepy.base.base.output', 'output', (["('bad %s! (%s)' % (name, option))"], {}), "('bad %s! (%s)' % (name, option))\n", (1545, 1578), False, 'from sfepy.base.base import nm, output\n'), ((1623, 1634), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1631, 1634), False, 'import sys\n'), ((3231, 3265), 'sfepy.base.base.nm.eye', 'nm.eye', (['mesh.dim'], {'dtype': 'nm.float64'}), '(mesh.dim, dtype=nm.float64)\n', (3237, 3265), False, 'from sfepy.base.base import nm, output\n'), ((3328, 3342), 'sfepy.base.base.nm.diag', 'nm.diag', (['scale'], {}), '(scale)\n', (3335, 3342), False, 'from sfepy.base.base import nm, output\n'), ((3571, 3605), 'sfepy.base.base.nm.eye', 'nm.eye', (['mesh.dim'], {'dtype': 'nm.float64'}), '(mesh.dim, dtype=nm.float64)\n', (3577, 3605), False, 'from sfepy.base.base import nm, output\n')]
|
import numpy as np
from sfepy.base.goptions import goptions
from sfepy.discrete.fem import Field
try:
from sfepy.discrete.fem import FEDomain as Domain
except ImportError:
from sfepy.discrete.fem import Domain
from sfepy.discrete import (FieldVariable, Material, Integral, Function,
Equation, Equations, Problem)
from sfepy.terms import Term
from sfepy.discrete.conditions import Conditions, EssentialBC, PeriodicBC
from sfepy.solvers.ls import ScipyDirect
from sfepy.solvers.nls import Newton
import sfepy.discrete.fem.periodic as per
from sfepy.discrete import Functions
from sfepy.mesh.mesh_generators import gen_block_mesh
from sfepy.mechanics.matcoefs import ElasticConstants
from sfepy.base.base import output
from sfepy.discrete.conditions import LinearCombinationBC
goptions['verbose'] = False
output.set_output(quiet=True)
class ElasticFESimulation(object):
"""
Use SfePy to solve a linear strain problem in 2D with a varying
microstructure on a rectangular grid. The rectangle (cube) is held
at the negative edge (plane) and displaced by 1 on the positive x
edge (plane). Periodic boundary conditions are applied to the
other boundaries.
The microstructure is of shape (n_samples, n_x, n_y) or (n_samples, n_x,
n_y, n_z).
>>> X = np.zeros((1, 3, 3), dtype=int)
>>> X[0, :, 1] = 1
>>> sim = ElasticFESimulation(elastic_modulus=(1.0, 10.0),
... poissons_ratio=(0., 0.))
>>> sim.run(X)
>>> y = sim.strain
y is the strain with components as follows
>>> exx = y[..., 0]
>>> eyy = y[..., 1]
>>> exy = y[..., 2]
In this example, the strain is only in the x-direction and has a
uniform value of 1 since the displacement is always 1 and the size
of the domain is 1.
>>> assert np.allclose(exx, 1)
>>> assert np.allclose(eyy, 0)
>>> assert np.allclose(exy, 0)
The following example is for a system with contrast. It tests the
left/right periodic offset and the top/bottom periodicity.
>>> X = np.array([[[1, 0, 0, 1],
... [0, 1, 1, 1],
... [0, 0, 1, 1],
... [1, 0, 0, 1]]])
>>> n_samples, N, N = X.shape
>>> macro_strain = 0.1
>>> sim = ElasticFESimulation((10.0,1.0), (0.3,0.3), macro_strain=0.1)
>>> sim.run(X)
>>> u = sim.displacement[0]
Check that the offset for the left/right planes is `N *
macro_strain`.
>>> assert np.allclose(u[-1,:,0] - u[0,:,0], N * macro_strain)
Check that the left/right side planes are periodic in y.
>>> assert np.allclose(u[0,:,1], u[-1,:,1])
Check that the top/bottom planes are periodic in both x and y.
>>> assert np.allclose(u[:,0], u[:,-1])
"""
    def __init__(self, elastic_modulus, poissons_ratio, macro_strain=1.):
        """Instantiate an ElasticFESimulation.
Args:
elastic_modulus (1D array): array of elastic moduli for phases
            poissons_ratio (1D array): array of Poisson's ratios for phases
macro_strain (float, optional): Scalar for macroscopic strain
"""
self.macro_strain = macro_strain
self.dx = 1.0
self.elastic_modulus = elastic_modulus
self.poissons_ratio = poissons_ratio
if len(elastic_modulus) != len(poissons_ratio):
raise RuntimeError(
'elastic_modulus and poissons_ratio must be the same length')
def _convert_properties(self, dim):
"""
Convert from elastic modulus and Poisson's ratio to the Lame
parameter and shear modulus
>>> model = ElasticFESimulation(elastic_modulus=(1., 2.),
... poissons_ratio=(1., 1.))
>>> result = model._convert_properties(2)
>>> answer = np.array([[-0.5, 1. / 6.], [-1., 1. / 3.]])
>>> assert(np.allclose(result, answer))
Args:
dim (int): Scalar value for the dimension of the microstructure.
Returns:
array with the Lame parameter and the shear modulus for each phase.
"""
def _convert(E, nu):
ec = ElasticConstants(young=E, poisson=nu)
mu = dim / 3. * ec.mu
lame = ec.lam
return lame, mu
return np.array([_convert(E, nu) for E,
nu in zip(self.elastic_modulus, self.poissons_ratio)])
def _get_property_array(self, X):
"""
Generate property array with elastic_modulus and poissons_ratio for
each phase.
Test case for 2D with 3 phases.
>>> X2D = np.array([[[0, 1, 2, 1],
... [2, 1, 0, 0],
... [1, 0, 2, 2]]])
>>> model2D = ElasticFESimulation(elastic_modulus=(1., 2., 3.),
... poissons_ratio=(1., 1., 1.))
>>> lame = lame0, lame1, lame2 = -0.5, -1., -1.5
>>> mu = mu0, mu1, mu2 = 1. / 6, 1. / 3, 1. / 2
        >>> lm = list(zip(lame, mu))
>>> X2D_property = np.array([[lm[0], lm[1], lm[2], lm[1]],
... [lm[2], lm[1], lm[0], lm[0]],
... [lm[1], lm[0], lm[2], lm[2]]])
>>> assert(np.allclose(model2D._get_property_array(X2D), X2D_property))
Test case for 3D with 2 phases.
>>> model3D = ElasticFESimulation(elastic_modulus=(1., 2.),
... poissons_ratio=(1., 1.))
>>> X3D = np.array([[[0, 1],
... [0, 0]],
... [[1, 1],
... [0, 1]]])
>>> X3D_property = np.array([[[lm[0], lm[1]],
... [lm[0], lm[0]]],
... [[lm[1], lm[1]],
... [lm[0], lm[1]]]])
>>> assert(np.allclose(model3D._get_property_array(X3D), X3D_property))
"""
dim = len(X.shape) - 1
n_phases = len(self.elastic_modulus)
if not issubclass(X.dtype.type, np.integer):
raise TypeError("X must be an integer array")
if np.max(X) >= n_phases or np.min(X) < 0:
raise RuntimeError(
"X must be between 0 and {N}.".format(N=n_phases - 1))
if not (2 <= dim <= 3):
raise RuntimeError("the shape of X is incorrect")
return self._convert_properties(dim)[X]
def run(self, X):
"""
Run the simulation.
Args:
X (ND array): microstructure with shape (n_samples, n_x, ...)
"""
X_property = self._get_property_array(X)
strain = []
displacement = []
stress = []
for x in X_property:
strain_, displacement_, stress_ = self._solve(x)
strain.append(strain_)
displacement.append(displacement_)
stress.append(stress_)
self.strain = np.array(strain)
self.displacement = np.array(displacement)
self.stress = np.array(stress)
@property
def response(self):
return self.strain[..., 0]
def _get_material(self, property_array, domain):
"""
Creates an SfePy material from the material property fields for the
quadrature points.
Args:
property_array: array of the properties with shape (n_x, n_y, n_z, 2)
Returns:
an SfePy material
"""
min_xyz = domain.get_mesh_bounding_box()[0]
dims = domain.get_mesh_bounding_box().shape[1]
def _material_func_(ts, coors, mode=None, **kwargs):
if mode == 'qp':
ijk_out = np.empty_like(coors, dtype=int)
ijk = np.floor((coors - min_xyz[None]) / self.dx,
ijk_out, casting="unsafe")
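                # floor((x - x_min) / dx) maps each quadrature point to the
                # index of the property_array voxel containing it.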
ijk_tuple = tuple(ijk.swapaxes(0, 1))
property_array_qp = property_array[ijk_tuple]
lam = property_array_qp[..., 0]
mu = property_array_qp[..., 1]
lam = np.ascontiguousarray(lam.reshape((lam.shape[0], 1, 1)))
mu = np.ascontiguousarray(mu.reshape((mu.shape[0], 1, 1)))
from sfepy.mechanics.matcoefs import stiffness_from_lame
stiffness = stiffness_from_lame(dims, lam=lam, mu=mu)
return {'lam': lam, 'mu': mu, 'D': stiffness}
else:
return
material_func = Function('material_func', _material_func_)
return Material('m', function=material_func)
def _subdomain_func(self, x=(), y=(), z=(), max_x=None):
"""
        Creates a function to mask subdomains in SfePy.
Args:
x: tuple of lines or points to be masked in the x-plane
y: tuple of lines or points to be masked in the y-plane
z: tuple of lines or points to be masked in the z-plane
Returns:
array of masked location indices
"""
eps = 1e-3 * self.dx
def _func(coords, domain=None):
flag_x = len(x) == 0
flag_y = len(y) == 0
flag_z = len(z) == 0
for x_ in x:
flag = (coords[:, 0] < (x_ + eps)) & \
(coords[:, 0] > (x_ - eps))
flag_x = flag_x | flag
for y_ in y:
flag = (coords[:, 1] < (y_ + eps)) & \
(coords[:, 1] > (y_ - eps))
flag_y = flag_y | flag
for z_ in z:
flag = (coords[:, 2] < (z_ + eps)) & \
(coords[:, 2] > (z_ - eps))
flag_z = flag_z | flag
flag = flag_x & flag_y & flag_z
if max_x is not None:
flag = flag & (coords[:, 0] < (max_x - eps))
return np.where(flag)[0]
return _func
def _get_mesh(self, shape):
"""
        Generate an SfePy rectangular mesh
        Args:
            shape: shape of the voxel grid (n_x, n_y); the mesh has
                shape + 1 vertices along each axis
        Returns:
            SfePy mesh
"""
center = np.zeros_like(shape)
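        # dims == shape with shape + 1 vertices per axis yields unit-size
        # cells (matching self.dx = 1.0), centred at the origin.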
return gen_block_mesh(shape, np.array(shape) + 1, center,
verbose=False)
def _get_fixed_displacementsBCs(self, domain):
"""
Fix the left top and bottom points in x, y and z
Args:
            domain: an SfePy domain
        Returns:
            the SfePy boundary conditions
"""
min_xyz = domain.get_mesh_bounding_box()[0]
max_xyz = domain.get_mesh_bounding_box()[1]
kwargs = {}
fix_points_dict = {'u.0': 0.0, 'u.1': 0.0}
if len(min_xyz) == 3:
kwargs = {'z': (max_xyz[2], min_xyz[2])}
fix_points_dict['u.2'] = 0.0
fix_x_points_ = self._subdomain_func(x=(min_xyz[0],),
y=(max_xyz[1], min_xyz[1]),
**kwargs)
fix_x_points = Function('fix_x_points', fix_x_points_)
region_fix_points = domain.create_region(
'region_fix_points',
'vertices by fix_x_points',
'vertex',
functions=Functions([fix_x_points]))
return EssentialBC('fix_points_BC', region_fix_points, fix_points_dict)
def _get_shift_displacementsBCs(self, domain):
"""
        Shift the right top and bottom points in x by the macroscopic
        displacement, macro_strain * (max_x - min_x)
        Args:
            domain: an SfePy domain
        Returns:
            the SfePy boundary conditions
"""
min_xyz = domain.get_mesh_bounding_box()[0]
max_xyz = domain.get_mesh_bounding_box()[1]
kwargs = {}
if len(min_xyz) == 3:
kwargs = {'z': (max_xyz[2], min_xyz[2])}
displacement = self.macro_strain * (max_xyz[0] - min_xyz[0])
shift_points_dict = {'u.0': displacement}
shift_x_points_ = self._subdomain_func(x=(max_xyz[0],),
y=(max_xyz[1], min_xyz[1]),
**kwargs)
shift_x_points = Function('shift_x_points', shift_x_points_)
region_shift_points = domain.create_region(
'region_shift_points',
'vertices by shift_x_points',
'vertex',
functions=Functions([shift_x_points]))
return EssentialBC('shift_points_BC',
region_shift_points, shift_points_dict)
def _get_displacementBCs(self, domain):
shift_points_BC = self._get_shift_displacementsBCs(domain)
fix_points_BC = self._get_fixed_displacementsBCs(domain)
return Conditions([fix_points_BC, shift_points_BC])
def _get_linear_combinationBCs(self, domain):
"""
The right nodes are periodic with the left nodes but also displaced.
Args:
            domain: an SfePy domain
        Returns:
            the SfePy boundary conditions
"""
min_xyz = domain.get_mesh_bounding_box()[0]
max_xyz = domain.get_mesh_bounding_box()[1]
xplus_ = self._subdomain_func(x=(max_xyz[0],))
xminus_ = self._subdomain_func(x=(min_xyz[0],))
xplus = Function('xplus', xplus_)
xminus = Function('xminus', xminus_)
region_x_plus = domain.create_region('region_x_plus',
'vertices by xplus',
'facet',
functions=Functions([xplus]))
region_x_minus = domain.create_region('region_x_minus',
'vertices by xminus',
'facet',
functions=Functions([xminus]))
match_x_plane = Function('match_x_plane', per.match_x_plane)
def shift_(ts, coors, region):
return np.ones_like(coors[:, 0]) * \
self.macro_strain * (max_xyz[0] - min_xyz[0])
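        # This enforces u_x(right) = u_x(left) + macro_strain * L_x as a
        # "shifted periodic" linear combination boundary condition.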
shift = Function('shift', shift_)
lcbc = LinearCombinationBC(
'lcbc', [region_x_plus, region_x_minus], {
'u.0': 'u.0'}, match_x_plane, 'shifted_periodic',
arguments=(shift,))
return Conditions([lcbc])
def _get_periodicBC_X(self, domain, dim):
dim_dict = {1: ('y', per.match_y_plane),
2: ('z', per.match_z_plane)}
dim_string = dim_dict[dim][0]
match_plane = dim_dict[dim][1]
min_, max_ = domain.get_mesh_bounding_box()[:, dim]
min_x, max_x = domain.get_mesh_bounding_box()[:, 0]
plus_ = self._subdomain_func(max_x=max_x, **{dim_string: (max_,)})
minus_ = self._subdomain_func(max_x=max_x, **{dim_string: (min_,)})
plus_string = dim_string + 'plus'
minus_string = dim_string + 'minus'
plus = Function(plus_string, plus_)
minus = Function(minus_string, minus_)
region_plus = domain.create_region(
'region_{0}_plus'.format(dim_string),
'vertices by {0}'.format(
plus_string),
'facet',
functions=Functions([plus]))
region_minus = domain.create_region(
'region_{0}_minus'.format(dim_string),
'vertices by {0}'.format(
minus_string),
'facet',
functions=Functions([minus]))
match_plane = Function(
'match_{0}_plane'.format(dim_string), match_plane)
bc_dict = {'u.0': 'u.0'}
bc = PeriodicBC('periodic_{0}'.format(dim_string),
[region_plus, region_minus],
bc_dict,
match='match_{0}_plane'.format(dim_string))
return bc, match_plane
def _get_periodicBC_YZ(self, domain, dim):
dims = domain.get_mesh_bounding_box().shape[1]
dim_dict = {0: ('x', per.match_x_plane),
1: ('y', per.match_y_plane),
2: ('z', per.match_z_plane)}
dim_string = dim_dict[dim][0]
match_plane = dim_dict[dim][1]
min_, max_ = domain.get_mesh_bounding_box()[:, dim]
plus_ = self._subdomain_func(**{dim_string: (max_,)})
minus_ = self._subdomain_func(**{dim_string: (min_,)})
plus_string = dim_string + 'plus'
minus_string = dim_string + 'minus'
plus = Function(plus_string, plus_)
minus = Function(minus_string, minus_)
region_plus = domain.create_region(
'region_{0}_plus'.format(dim_string),
'vertices by {0}'.format(
plus_string),
'facet',
functions=Functions([plus]))
region_minus = domain.create_region(
'region_{0}_minus'.format(dim_string),
'vertices by {0}'.format(
minus_string),
'facet',
functions=Functions([minus]))
match_plane = Function(
'match_{0}_plane'.format(dim_string), match_plane)
bc_dict = {'u.1': 'u.1'}
if dims == 3:
bc_dict['u.2'] = 'u.2'
bc = PeriodicBC('periodic_{0}'.format(dim_string),
[region_plus, region_minus],
bc_dict,
match='match_{0}_plane'.format(dim_string))
return bc, match_plane
def _solve(self, property_array):
"""
        Solve the SfePy problem for one sample.
Args:
property_array: array of shape (n_x, n_y, 2) where the last
index is for Lame's parameter and shear modulus,
respectively.
Returns:
            the strain, displacement and stress fields; strain and stress
            are per-cell arrays of symmetric-tensor components (3 in 2D,
            6 in 3D) and displacement is per-node
"""
shape = property_array.shape[:-1]
mesh = self._get_mesh(shape)
domain = Domain('domain', mesh)
region_all = domain.create_region('region_all', 'all')
field = Field.from_args('fu', np.float64, 'vector', region_all,
approx_order=2)
u = FieldVariable('u', 'unknown', field)
v = FieldVariable('v', 'test', field, primary_var_name='u')
m = self._get_material(property_array, domain)
integral = Integral('i', order=4)
t1 = Term.new('dw_lin_elastic_iso(m.lam, m.mu, v, u)',
integral, region_all, m=m, v=v, u=u)
eq = Equation('balance_of_forces', t1)
eqs = Equations([eq])
epbcs, functions = self._get_periodicBCs(domain)
ebcs = self._get_displacementBCs(domain)
lcbcs = self._get_linear_combinationBCs(domain)
ls = ScipyDirect({})
pb = Problem('elasticity', equations=eqs, auto_solvers=None)
pb.time_update(
ebcs=ebcs, epbcs=epbcs, lcbcs=lcbcs, functions=functions)
ev = pb.get_evaluator()
nls = Newton({}, lin_solver=ls,
fun=ev.eval_residual, fun_grad=ev.eval_tangent_matrix)
try:
pb.set_solvers_instances(ls, nls)
except AttributeError:
pb.set_solver(nls)
vec = pb.solve()
u = vec.create_output_dict()['u'].data
u_reshape = np.reshape(u, (tuple(x + 1 for x in shape) + u.shape[-1:]))
dims = domain.get_mesh_bounding_box().shape[1]
strain = np.squeeze(
pb.evaluate(
'ev_cauchy_strain.{dim}.region_all(u)'.format(
dim=dims),
mode='el_avg',
copy_materials=False))
strain_reshape = np.reshape(strain, (shape + strain.shape[-1:]))
stress = np.squeeze(
pb.evaluate(
'ev_cauchy_stress.{dim}.region_all(m.D, u)'.format(
dim=dims),
mode='el_avg',
copy_materials=False))
stress_reshape = np.reshape(stress, (shape + stress.shape[-1:]))
return strain_reshape, u_reshape, stress_reshape
def _get_periodicBCs(self, domain):
dims = domain.get_mesh_bounding_box().shape[1]
bc_list_YZ, func_list_YZ = list(
zip(*[self._get_periodicBC_YZ(domain, i) for i in range(0, dims)]))
bc_list_X, func_list_X = list(
zip(*[self._get_periodicBC_X(domain, i) for i in range(1, dims)]))
return Conditions(
bc_list_YZ + bc_list_X), Functions(func_list_YZ + func_list_X)
|
[
"sfepy.discrete.conditions.EssentialBC",
"sfepy.base.base.output.set_output",
"sfepy.discrete.Integral",
"sfepy.solvers.ls.ScipyDirect",
"sfepy.discrete.Equations",
"sfepy.discrete.fem.Field.from_args",
"sfepy.discrete.Equation",
"sfepy.discrete.Function",
"sfepy.mechanics.matcoefs.stiffness_from_lame",
"sfepy.mechanics.matcoefs.ElasticConstants",
"sfepy.terms.Term.new",
"sfepy.discrete.conditions.Conditions",
"sfepy.discrete.FieldVariable",
"sfepy.discrete.Material",
"sfepy.discrete.conditions.LinearCombinationBC",
"sfepy.discrete.Problem",
"sfepy.discrete.Functions",
"sfepy.discrete.fem.Domain",
"sfepy.solvers.nls.Newton"
] |
[((839, 868), 'sfepy.base.base.output.set_output', 'output.set_output', ([], {'quiet': '(True)'}), '(quiet=True)\n', (856, 868), False, 'from sfepy.base.base import output\n'), ((6954, 6970), 'numpy.array', 'np.array', (['strain'], {}), '(strain)\n', (6962, 6970), True, 'import numpy as np\n'), ((6999, 7021), 'numpy.array', 'np.array', (['displacement'], {}), '(displacement)\n', (7007, 7021), True, 'import numpy as np\n'), ((7044, 7060), 'numpy.array', 'np.array', (['stress'], {}), '(stress)\n', (7052, 7060), True, 'import numpy as np\n'), ((8474, 8516), 'sfepy.discrete.Function', 'Function', (['"""material_func"""', '_material_func_'], {}), "('material_func', _material_func_)\n", (8482, 8516), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem\n'), ((8532, 8569), 'sfepy.discrete.Material', 'Material', (['"""m"""'], {'function': 'material_func'}), "('m', function=material_func)\n", (8540, 8569), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem\n'), ((10104, 10124), 'numpy.zeros_like', 'np.zeros_like', (['shape'], {}), '(shape)\n', (10117, 10124), True, 'import numpy as np\n'), ((10991, 11030), 'sfepy.discrete.Function', 'Function', (['"""fix_x_points"""', 'fix_x_points_'], {}), "('fix_x_points', fix_x_points_)\n", (10999, 11030), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem\n'), ((11240, 11304), 'sfepy.discrete.conditions.EssentialBC', 'EssentialBC', (['"""fix_points_BC"""', 'region_fix_points', 'fix_points_dict'], {}), "('fix_points_BC', region_fix_points, fix_points_dict)\n", (11251, 11304), False, 'from sfepy.discrete.conditions import Conditions, EssentialBC, PeriodicBC\n'), ((12097, 12140), 'sfepy.discrete.Function', 'Function', (['"""shift_x_points"""', 'shift_x_points_'], {}), "('shift_x_points', shift_x_points_)\n", (12105, 12140), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem\n'), ((12358, 12428), 'sfepy.discrete.conditions.EssentialBC', 'EssentialBC', (['"""shift_points_BC"""', 'region_shift_points', 'shift_points_dict'], {}), "('shift_points_BC', region_shift_points, shift_points_dict)\n", (12369, 12428), False, 'from sfepy.discrete.conditions import Conditions, EssentialBC, PeriodicBC\n'), ((12648, 12692), 'sfepy.discrete.conditions.Conditions', 'Conditions', (['[fix_points_BC, shift_points_BC]'], {}), '([fix_points_BC, shift_points_BC])\n', (12658, 12692), False, 'from sfepy.discrete.conditions import Conditions, EssentialBC, PeriodicBC\n'), ((13185, 13210), 'sfepy.discrete.Function', 'Function', (['"""xplus"""', 'xplus_'], {}), "('xplus', xplus_)\n", (13193, 13210), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem\n'), ((13228, 13255), 'sfepy.discrete.Function', 'Function', (['"""xminus"""', 'xminus_'], {}), "('xminus', xminus_)\n", (13236, 13255), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem\n'), ((13801, 13845), 'sfepy.discrete.Function', 'Function', (['"""match_x_plane"""', 'per.match_x_plane'], {}), "('match_x_plane', per.match_x_plane)\n", (13809, 13845), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem\n'), ((14013, 14038), 'sfepy.discrete.Function', 'Function', (['"""shift"""', 'shift_'], {}), "('shift', shift_)\n", (14021, 14038), False, 'from 
sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem\n'), ((14054, 14189), 'sfepy.discrete.conditions.LinearCombinationBC', 'LinearCombinationBC', (['"""lcbc"""', '[region_x_plus, region_x_minus]', "{'u.0': 'u.0'}", 'match_x_plane', '"""shifted_periodic"""'], {'arguments': '(shift,)'}), "('lcbc', [region_x_plus, region_x_minus], {'u.0': 'u.0'},\n match_x_plane, 'shifted_periodic', arguments=(shift,))\n", (14073, 14189), False, 'from sfepy.discrete.conditions import LinearCombinationBC\n'), ((14244, 14262), 'sfepy.discrete.conditions.Conditions', 'Conditions', (['[lcbc]'], {}), '([lcbc])\n', (14254, 14262), False, 'from sfepy.discrete.conditions import Conditions, EssentialBC, PeriodicBC\n'), ((14857, 14885), 'sfepy.discrete.Function', 'Function', (['plus_string', 'plus_'], {}), '(plus_string, plus_)\n', (14865, 14885), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem\n'), ((14902, 14932), 'sfepy.discrete.Function', 'Function', (['minus_string', 'minus_'], {}), '(minus_string, minus_)\n', (14910, 14932), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem\n'), ((16372, 16400), 'sfepy.discrete.Function', 'Function', (['plus_string', 'plus_'], {}), '(plus_string, plus_)\n', (16380, 16400), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem\n'), ((16417, 16447), 'sfepy.discrete.Function', 'Function', (['minus_string', 'minus_'], {}), '(minus_string, minus_)\n', (16425, 16447), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem\n'), ((17843, 17865), 'sfepy.discrete.fem.Domain', 'Domain', (['"""domain"""', 'mesh'], {}), "('domain', mesh)\n", (17849, 17865), False, 'from sfepy.discrete.fem import Domain\n'), ((17947, 18018), 'sfepy.discrete.fem.Field.from_args', 'Field.from_args', (['"""fu"""', 'np.float64', '"""vector"""', 'region_all'], {'approx_order': '(2)'}), "('fu', np.float64, 'vector', region_all, approx_order=2)\n", (17962, 18018), False, 'from sfepy.discrete.fem import Field\n'), ((18064, 18100), 'sfepy.discrete.FieldVariable', 'FieldVariable', (['"""u"""', '"""unknown"""', 'field'], {}), "('u', 'unknown', field)\n", (18077, 18100), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem\n'), ((18113, 18168), 'sfepy.discrete.FieldVariable', 'FieldVariable', (['"""v"""', '"""test"""', 'field'], {'primary_var_name': '"""u"""'}), "('v', 'test', field, primary_var_name='u')\n", (18126, 18168), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem\n'), ((18245, 18267), 'sfepy.discrete.Integral', 'Integral', (['"""i"""'], {'order': '(4)'}), "('i', order=4)\n", (18253, 18267), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem\n'), ((18282, 18372), 'sfepy.terms.Term.new', 'Term.new', (['"""dw_lin_elastic_iso(m.lam, m.mu, v, u)"""', 'integral', 'region_all'], {'m': 'm', 'v': 'v', 'u': 'u'}), "('dw_lin_elastic_iso(m.lam, m.mu, v, u)', integral, region_all, m=m,\n v=v, u=u)\n", (18290, 18372), False, 'from sfepy.terms import Term\n'), ((18404, 18437), 'sfepy.discrete.Equation', 'Equation', (['"""balance_of_forces"""', 't1'], {}), "('balance_of_forces', t1)\n", (18412, 18437), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, 
Equation, Equations, Problem\n'), ((18452, 18467), 'sfepy.discrete.Equations', 'Equations', (['[eq]'], {}), '([eq])\n', (18461, 18467), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem\n'), ((18645, 18660), 'sfepy.solvers.ls.ScipyDirect', 'ScipyDirect', (['{}'], {}), '({})\n', (18656, 18660), False, 'from sfepy.solvers.ls import ScipyDirect\n'), ((18675, 18730), 'sfepy.discrete.Problem', 'Problem', (['"""elasticity"""'], {'equations': 'eqs', 'auto_solvers': 'None'}), "('elasticity', equations=eqs, auto_solvers=None)\n", (18682, 18730), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem\n'), ((18873, 18958), 'sfepy.solvers.nls.Newton', 'Newton', (['{}'], {'lin_solver': 'ls', 'fun': 'ev.eval_residual', 'fun_grad': 'ev.eval_tangent_matrix'}), '({}, lin_solver=ls, fun=ev.eval_residual, fun_grad=ev.eval_tangent_matrix\n )\n', (18879, 18958), False, 'from sfepy.solvers.nls import Newton\n'), ((19550, 19595), 'numpy.reshape', 'np.reshape', (['strain', '(shape + strain.shape[-1:])'], {}), '(strain, shape + strain.shape[-1:])\n', (19560, 19595), True, 'import numpy as np\n'), ((19847, 19892), 'numpy.reshape', 'np.reshape', (['stress', '(shape + stress.shape[-1:])'], {}), '(stress, shape + stress.shape[-1:])\n', (19857, 19892), True, 'import numpy as np\n'), ((4180, 4217), 'sfepy.mechanics.matcoefs.ElasticConstants', 'ElasticConstants', ([], {'young': 'E', 'poisson': 'nu'}), '(young=E, poisson=nu)\n', (4196, 4217), False, 'from sfepy.mechanics.matcoefs import ElasticConstants\n'), ((20304, 20338), 'sfepy.discrete.conditions.Conditions', 'Conditions', (['(bc_list_YZ + bc_list_X)'], {}), '(bc_list_YZ + bc_list_X)\n', (20314, 20338), False, 'from sfepy.discrete.conditions import Conditions, EssentialBC, PeriodicBC\n'), ((20353, 20390), 'sfepy.discrete.Functions', 'Functions', (['(func_list_YZ + func_list_X)'], {}), '(func_list_YZ + func_list_X)\n', (20362, 20390), False, 'from sfepy.discrete import Functions\n'), ((6163, 6172), 'numpy.max', 'np.max', (['X'], {}), '(X)\n', (6169, 6172), True, 'import numpy as np\n'), ((6188, 6197), 'numpy.min', 'np.min', (['X'], {}), '(X)\n', (6194, 6197), True, 'import numpy as np\n'), ((7682, 7713), 'numpy.empty_like', 'np.empty_like', (['coors'], {'dtype': 'int'}), '(coors, dtype=int)\n', (7695, 7713), True, 'import numpy as np\n'), ((7736, 7806), 'numpy.floor', 'np.floor', (['((coors - min_xyz[None]) / self.dx)', 'ijk_out'], {'casting': '"""unsafe"""'}), "((coors - min_xyz[None]) / self.dx, ijk_out, casting='unsafe')\n", (7744, 7806), True, 'import numpy as np\n'), ((8304, 8345), 'sfepy.mechanics.matcoefs.stiffness_from_lame', 'stiffness_from_lame', (['dims'], {'lam': 'lam', 'mu': 'mu'}), '(dims, lam=lam, mu=mu)\n', (8323, 8345), False, 'from sfepy.mechanics.matcoefs import stiffness_from_lame\n'), ((9824, 9838), 'numpy.where', 'np.where', (['flag'], {}), '(flag)\n', (9832, 9838), True, 'import numpy as np\n'), ((10162, 10177), 'numpy.array', 'np.array', (['shape'], {}), '(shape)\n', (10170, 10177), True, 'import numpy as np\n'), ((11198, 11223), 'sfepy.discrete.Functions', 'Functions', (['[fix_x_points]'], {}), '([fix_x_points])\n', (11207, 11223), False, 'from sfepy.discrete import Functions\n'), ((12314, 12341), 'sfepy.discrete.Functions', 'Functions', (['[shift_x_points]'], {}), '([shift_x_points])\n', (12323, 12341), False, 'from sfepy.discrete import Functions\n'), ((13493, 13511), 'sfepy.discrete.Functions', 'Functions', (['[xplus]'], {}), 
'([xplus])\n', (13502, 13511), False, 'from sfepy.discrete import Functions\n'), ((13756, 13775), 'sfepy.discrete.Functions', 'Functions', (['[xminus]'], {}), '([xminus])\n', (13765, 13775), False, 'from sfepy.discrete import Functions\n'), ((15138, 15155), 'sfepy.discrete.Functions', 'Functions', (['[plus]'], {}), '([plus])\n', (15147, 15155), False, 'from sfepy.discrete import Functions\n'), ((15365, 15383), 'sfepy.discrete.Functions', 'Functions', (['[minus]'], {}), '([minus])\n', (15374, 15383), False, 'from sfepy.discrete import Functions\n'), ((16653, 16670), 'sfepy.discrete.Functions', 'Functions', (['[plus]'], {}), '([plus])\n', (16662, 16670), False, 'from sfepy.discrete import Functions\n'), ((16880, 16898), 'sfepy.discrete.Functions', 'Functions', (['[minus]'], {}), '([minus])\n', (16889, 16898), False, 'from sfepy.discrete import Functions\n'), ((13905, 13930), 'numpy.ones_like', 'np.ones_like', (['coors[:, 0]'], {}), '(coors[:, 0])\n', (13917, 13930), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
import os
from typing import Iterable, Union
from megengine import Parameter, tensor
from megengine.functional.inplace import _inplace_add_
from megengine.optimizer import Optimizer
class SGD(Optimizer):
r"""Implements stochastic gradient descent.
Nesterov momentum is based on the formula from
`"On the importance of initialization and momentum in deep learning"
<http://www.cs.toronto.edu/%7Ehinton/absps/momentum.pdf>`_.
Args:
params: iterable of parameters to optimize or dicts defining parameter groups.
lr: learning rate.
momentum: momentum factor. Default: ``0.0``
nesterov: enables Nesterov momentum. Default: ``False``
weight_decay: weight decay (L2 penalty). Default: ``0.0``
"""
def __init__(
self,
params: Union[Iterable[Parameter], dict],
lr: float,
momentum: float = 0.0,
nesterov: bool = False,
weight_decay: float = 0.0,
):
if lr < 0.0:
raise ValueError("Invalid learning rate: {}".format(lr))
if momentum < 0.0:
raise ValueError("Invalid momentum value: {}".format(momentum))
if weight_decay < 0.0:
raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
if nesterov and momentum <= 0:
raise ValueError("Nesterov momentum requires a momentum")
defaults = dict(lr=lr, momentum=momentum, weight_decay=weight_decay)
super().__init__(params, defaults)
self.nesterov = nesterov
self._disable_type_convert = True
def _create_state(self, param_group):
if param_group["momentum"] != 0.0:
for param in param_group["params"]:
self._add_state(param, "momentum_buffer")
def _updates(self, param_group):
lr = param_group["lr"]
weight_decay = param_group["weight_decay"]
momentum = param_group["momentum"]
        # since `convert_inputs` is disabled for param updates,
        # scalars must be explicitly converted to tensors
_lr = tensor(lr)
_weight_decay = tensor(weight_decay)
_momentum = tensor(momentum)
inplace_mode = int(os.getenv("MEGENGINE_INPLACE_UPDATE", "0"))
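        # MEGENGINE_INPLACE_UPDATE=1 selects the fused in-place path below,
        # mutating v and param via _inplace_add_ without temporaries.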
if inplace_mode:
_neg_lr = tensor(-lr)
c1 = tensor([1.0])
for param in param_group["params"]:
if param.grad is None:
continue
grad = param.grad
if weight_decay != 0.0:
grad = grad + param * _weight_decay
if inplace_mode:
if momentum != 0.0:
v = self._state[param]["momentum_buffer"]
_inplace_add_(v, grad, alpha=_momentum, beta=c1)
if self.nesterov:
grad = grad + v * _momentum
else:
grad = v
_inplace_add_(param, grad, alpha=c1, beta=_neg_lr)
continue
if momentum != 0.0:
v = self._state[param]["momentum_buffer"]
v *= _momentum
v += grad
if self.nesterov:
grad = grad + v * _momentum
else:
grad = v
param -= _lr * grad
|
[
"megengine.tensor",
"megengine.functional.inplace._inplace_add_"
] |
[((2154, 2164), 'megengine.tensor', 'tensor', (['lr'], {}), '(lr)\n', (2160, 2164), False, 'from megengine import Parameter, tensor\n'), ((2189, 2209), 'megengine.tensor', 'tensor', (['weight_decay'], {}), '(weight_decay)\n', (2195, 2209), False, 'from megengine import Parameter, tensor\n'), ((2230, 2246), 'megengine.tensor', 'tensor', (['momentum'], {}), '(momentum)\n', (2236, 2246), False, 'from megengine import Parameter, tensor\n'), ((2275, 2317), 'os.getenv', 'os.getenv', (['"""MEGENGINE_INPLACE_UPDATE"""', '"""0"""'], {}), "('MEGENGINE_INPLACE_UPDATE', '0')\n", (2284, 2317), False, 'import os\n'), ((2366, 2377), 'megengine.tensor', 'tensor', (['(-lr)'], {}), '(-lr)\n', (2372, 2377), False, 'from megengine import Parameter, tensor\n'), ((2395, 2408), 'megengine.tensor', 'tensor', (['[1.0]'], {}), '([1.0])\n', (2401, 2408), False, 'from megengine import Parameter, tensor\n'), ((2995, 3045), 'megengine.functional.inplace._inplace_add_', '_inplace_add_', (['param', 'grad'], {'alpha': 'c1', 'beta': '_neg_lr'}), '(param, grad, alpha=c1, beta=_neg_lr)\n', (3008, 3045), False, 'from megengine.functional.inplace import _inplace_add_\n'), ((2781, 2829), 'megengine.functional.inplace._inplace_add_', '_inplace_add_', (['v', 'grad'], {'alpha': '_momentum', 'beta': 'c1'}), '(v, grad, alpha=_momentum, beta=c1)\n', (2794, 2829), False, 'from megengine.functional.inplace import _inplace_add_\n')]
|
from typing import Union
from uuid import uuid4
import inject
from sqlmodel import Session, select
from src.core.events import EventDescription
from src.core.models import Context, File
from src.core.services import Storage, Streamer
from src.utils.miscellaneous import get_file_hash
@inject.params(streamer=Streamer, storage=Storage)
def get_or_create_file(
session: Session,
preffix: str,
filename: str,
file: Union[str, bytes],
context: Context,
streamer: Streamer,
storage: Storage,
) -> File:
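    """Return the ``File`` row for ``file``, deduplicating by content hash.
    The DB row is created and the payload uploaded to object storage only
    when missing; an UPLOAD_FILE event is emitted for each of those steps.
    """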
file_hash = get_file_hash(file)
file_obj = session.exec(select(File).where(File.hash == file_hash)).first()
extension = filename.split(".")[-1]
if not file_obj:
streamer.send_event(
EventDescription.UPLOAD_FILE, context=context, file={"filename": filename, "hash": file_hash}
)
file_obj = File(bucket_key=f"{preffix}-{uuid4()}.{extension}", hash=file_hash)
session.add(file_obj)
if not storage.check_file_exists(file_obj.bucket_key):
storage.upload_file(file, key=file_obj.bucket_key)
streamer.send_event(
EventDescription.UPLOAD_FILE,
context=context,
file={"filename": filename, "hash": file_hash, "bucket_key": file_obj.bucket_key},
)
return file_obj
@inject.params(storage=Storage)
def check_file_exists(bucket_key: str, storage: Storage) -> bool:
return storage.check_file_exists(bucket_key)
|
[
"sqlmodel.select"
] |
[((289, 338), 'inject.params', 'inject.params', ([], {'streamer': 'Streamer', 'storage': 'Storage'}), '(streamer=Streamer, storage=Storage)\n', (302, 338), False, 'import inject\n'), ((1318, 1348), 'inject.params', 'inject.params', ([], {'storage': 'Storage'}), '(storage=Storage)\n', (1331, 1348), False, 'import inject\n'), ((546, 565), 'src.utils.miscellaneous.get_file_hash', 'get_file_hash', (['file'], {}), '(file)\n', (559, 565), False, 'from src.utils.miscellaneous import get_file_hash\n'), ((594, 606), 'sqlmodel.select', 'select', (['File'], {}), '(File)\n', (600, 606), False, 'from sqlmodel import Session, select\n'), ((901, 908), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (906, 908), False, 'from uuid import uuid4\n')]
|
from datetime import date
from fastapi import FastAPI, Query
from sqlmodel import Session, create_engine, select
from .datatypes import ArtmuseumAddress, ArtmuseumTimeLabel
from .db.crud import init_db
from .db.models import ArtmuseumExhibition, PhilharmoniaConcert
sql_engine = create_engine(
"sqlite:///database.db", connect_args={"check_same_thread": False}
)
app = FastAPI(
title="Murmansk Culture API",
# description="",
version="0.0.1",
contact={
"name": "<NAME>",
"url": "https://github.com/anorlovsky",
"email": "<EMAIL>",
},
redoc_url="/",
docs_url=None,
)
@app.on_event("startup")
def on_startup():
init_db(sql_engine)
@app.get(
"/artmuseum",
response_model=list[ArtmuseumExhibition],
description="Возвращает список текущих и ближайших выставок [Мурманского областного художественного музея](https://artmmuseum.ru/)",
)
async def get_artmuseum_exhibitions(
time: ArtmuseumTimeLabel = Query(
None,
        description='Return only the current (`"now"`) or only the upcoming (`"soon"`) exhibitions',
)
):
with Session(sql_engine) as session:
if time is None:
stmt = select(ArtmuseumExhibition)
elif time == ArtmuseumTimeLabel.NOW:
stmt = select(ArtmuseumExhibition).where(
ArtmuseumExhibition.start_date <= date.today()
)
elif time == ArtmuseumTimeLabel.SOON:
stmt = select(ArtmuseumExhibition).where(
ArtmuseumExhibition.start_date > date.today()
)
return session.exec(stmt).all()
@app.get(
"/philharmonia",
response_model=list[PhilharmoniaConcert],
description="Возвращает список ближайших концертов [Мурманской областной филармонии](https://www.murmansound.ru)",
)
async def get_philharmonia_concerts():
with Session(sql_engine) as session:
return session.exec(select(PhilharmoniaConcert)).all()
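# Example requests (assuming the app is served locally, e.g. with uvicorn):
#   GET /artmuseum?time=now  -> exhibitions whose start_date has passed
#   GET /philharmonia        -> all stored concerts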
|
[
"sqlmodel.create_engine",
"sqlmodel.select",
"sqlmodel.Session"
] |
[((283, 369), 'sqlmodel.create_engine', 'create_engine', (['"""sqlite:///database.db"""'], {'connect_args': "{'check_same_thread': False}"}), "('sqlite:///database.db', connect_args={'check_same_thread': \n False})\n", (296, 369), False, 'from sqlmodel import Session, create_engine, select\n'), ((377, 557), 'fastapi.FastAPI', 'FastAPI', ([], {'title': '"""Murmansk Culture API"""', 'version': '"""0.0.1"""', 'contact': "{'name': '<NAME>', 'url': 'https://github.com/anorlovsky', 'email': '<EMAIL>'}", 'redoc_url': '"""/"""', 'docs_url': 'None'}), "(title='Murmansk Culture API', version='0.0.1', contact={'name':\n '<NAME>', 'url': 'https://github.com/anorlovsky', 'email': '<EMAIL>'},\n redoc_url='/', docs_url=None)\n", (384, 557), False, 'from fastapi import FastAPI, Query\n'), ((978, 1088), 'fastapi.Query', 'Query', (['None'], {'description': '"""Вернуть только текущие (`"now"`) или только ближайшие (`"soon"`) выставки"""'}), '(None, description=\n \'Вернуть только текущие (`"now"`) или только ближайшие (`"soon"`) выставки\'\n )\n', (983, 1088), False, 'from fastapi import FastAPI, Query\n'), ((1114, 1133), 'sqlmodel.Session', 'Session', (['sql_engine'], {}), '(sql_engine)\n', (1121, 1133), False, 'from sqlmodel import Session, create_engine, select\n'), ((1859, 1878), 'sqlmodel.Session', 'Session', (['sql_engine'], {}), '(sql_engine)\n', (1866, 1878), False, 'from sqlmodel import Session, create_engine, select\n'), ((1190, 1217), 'sqlmodel.select', 'select', (['ArtmuseumExhibition'], {}), '(ArtmuseumExhibition)\n', (1196, 1217), False, 'from sqlmodel import Session, create_engine, select\n'), ((1919, 1946), 'sqlmodel.select', 'select', (['PhilharmoniaConcert'], {}), '(PhilharmoniaConcert)\n', (1925, 1946), False, 'from sqlmodel import Session, create_engine, select\n'), ((1282, 1309), 'sqlmodel.select', 'select', (['ArtmuseumExhibition'], {}), '(ArtmuseumExhibition)\n', (1288, 1309), False, 'from sqlmodel import Session, create_engine, select\n'), ((1367, 1379), 'datetime.date.today', 'date.today', ([], {}), '()\n', (1377, 1379), False, 'from datetime import date\n'), ((1459, 1486), 'sqlmodel.select', 'select', (['ArtmuseumExhibition'], {}), '(ArtmuseumExhibition)\n', (1465, 1486), False, 'from sqlmodel import Session, create_engine, select\n'), ((1543, 1555), 'datetime.date.today', 'date.today', ([], {}), '()\n', (1553, 1555), False, 'from datetime import date\n')]
|
import numpy as nm
from sfepy.terms.terms import Term, terms
from sfepy.base.base import get_default
def grad_as_vector(grad):
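    # Reshape a (n_el, n_qp, dim, n_c) gradient into a (n_el, n_qp,
    # n_c * dim, 1) column vector; the transpose makes all spatial
    # derivatives of one component contiguous before flattening.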
grad = grad.transpose((0, 1, 3, 2))
sh = grad.shape
return grad.reshape((sh[0], sh[1], sh[2] * sh[3], 1))
class AdjDivGradTerm(Term):
r"""
Gateaux differential of :math:`\Psi(\ul{u}) = \int_{\Omega} \nu\
\nabla \ul{v} : \nabla \ul{u}` w.r.t. :math:`\ul{u}` in the direction
:math:`\ul{v}` or adjoint term to `dw_div_grad`.
:Definition:
.. math::
w \delta_{u} \Psi(\ul{u}) \circ \ul{v}
:Arguments:
- material_1 : :math:`w` (weight)
- material_2 : :math:`\nu` (viscosity)
- virtual : :math:`\ul{v}`
- state : :math:`\ul{u}`
"""
name = 'dw_adj_div_grad'
arg_types = ('material_1', 'material_2', 'virtual', 'parameter')
arg_shapes = {'material_1' : '1, 1', 'material_2' : '1, 1',
'virtual' : ('D', None), 'parameter' : 'D'}
function = staticmethod(terms.term_ns_asm_div_grad)
def get_fargs(self, mat1, mat2, virtual, state,
mode=None, term_mode=None, diff_var=None, **kwargs):
vg, _ = self.get_mapping(state)
if diff_var is None:
grad = grad_as_vector(self.get(state, 'grad'))
fmode = 0
else:
grad = nm.array([0], ndmin=4, dtype=nm.float64)
fmode = 1
return grad, mat1 * mat2, vg, fmode
class AdjConvect1Term(Term):
r"""
The first adjoint term to nonlinear convective term `dw_convect`.
:Definition:
.. math::
\int_{\Omega} ((\ul{v} \cdot \nabla) \ul{u}) \cdot \ul{w}
:Arguments:
- virtual : :math:`\ul{v}`
- state : :math:`\ul{w}`
- parameter : :math:`\ul{u}`
"""
name = 'dw_adj_convect1'
arg_types = ('virtual', 'state', 'parameter' )
arg_shapes = {'virtual' : ('D', 'state'), 'state' : 'D', 'parameter' : 'D'}
function = staticmethod(terms.dw_adj_convect1)
def get_fargs(self, virtual, state, parameter,
mode=None, term_mode=None, diff_var=None, **kwargs):
vg, _ = self.get_mapping(state)
val_w = self.get(state, 'val')
grad_u = self.get(parameter, 'grad') # No transposition here!
fmode = diff_var is not None
return val_w, grad_u, vg, fmode
class AdjConvect2Term(Term):
r"""
The second adjoint term to nonlinear convective term `dw_convect`.
:Definition:
.. math::
\int_{\Omega} ((\ul{u} \cdot \nabla) \ul{v}) \cdot \ul{w}
:Arguments:
- virtual : :math:`\ul{v}`
- state : :math:`\ul{w}`
- parameter : :math:`\ul{u}`
"""
name = 'dw_adj_convect2'
arg_types = ('virtual', 'state', 'parameter' )
arg_shapes = {'virtual' : ('D', 'state'), 'state' : 'D', 'parameter' : 'D'}
function = staticmethod(terms.dw_adj_convect2)
def get_fargs(self, virtual, state, parameter,
mode=None, term_mode=None, diff_var=None, **kwargs):
vg, _ = self.get_mapping(state)
val_w = self.get(state, 'val')
val_u = self.get(parameter, 'val')
fmode = diff_var is not None
return val_w, val_u, vg, fmode
class SUPGCAdjStabilizationTerm(Term):
r"""
Adjoint term to SUPG stabilization term `dw_st_supg_c`.
:Definition:
.. math::
\sum_{K \in \Ical_h}\int_{T_K} \delta_K\ [ ((\ul{v} \cdot \nabla)
\ul{u}) ((\ul{u} \cdot \nabla) \ul{w}) + ((\ul{u} \cdot \nabla)
\ul{u}) ((\ul{v} \cdot \nabla) \ul{w}) ]
:Arguments:
- material : :math:`\delta_K`
- virtual : :math:`\ul{v}`
- state : :math:`\ul{w}`
- parameter : :math:`\ul{u}`
"""
name = 'dw_st_adj_supg_c'
arg_types = ('material', 'virtual', 'parameter', 'state')
arg_shapes = {'material' : '1, 1', 'virtual' : ('D', 'state'),
'state' : 'D', 'parameter' : 'D'}
function = staticmethod(terms.dw_st_adj_supg_c)
def get_fargs(self, mat, virtual, state, parameter,
mode=None, term_mode=None, diff_var=None, **kwargs):
vg, _ = self.get_mapping(state)
val_u = self.get(parameter, 'val')
grad_u = self.get(parameter, 'grad').transpose((0, 1, 3, 2)).copy()
conn = state.field.get_connectivity(self.region, self.integration)
fmode = diff_var is not None
return state(), val_u, grad_u, mat, vg, conn, fmode
class SUPGPAdj1StabilizationTerm(Term):
r"""
The first adjoint term to SUPG stabilization term `dw_st_supg_p`.
:Definition:
.. math::
\sum_{K \in \Ical_h}\int_{T_K} \delta_K\ \nabla p (\ul{v} \cdot
\nabla \ul{w})
:Arguments:
- material : :math:`\delta_K`
- virtual : :math:`\ul{v}`
- state : :math:`\ul{w}`
- parameter : :math:`p`
"""
name = 'dw_st_adj1_supg_p'
arg_types = ('material', 'virtual', 'state', 'parameter')
arg_shapes = {'material' : '1, 1', 'virtual' : ('D', 'state'),
'state' : 'D', 'parameter' : 1}
function = staticmethod(terms.dw_st_adj1_supg_p)
def get_fargs(self, mat, virtual, state, parameter,
mode=None, term_mode=None, diff_var=None, **kwargs):
vg_w, _ = self.get_mapping(state)
grad_p = self.get(parameter, 'grad')
conn_w = state.field.get_connectivity(self.region, self.integration)
fmode = diff_var is not None
return state(), grad_p, mat, vg_w, conn_w, fmode
class SUPGPAdj2StabilizationTerm(Term):
r"""
The second adjoint term to SUPG stabilization term `dw_st_supg_p`
as well as adjoint term to PSPG stabilization term `dw_st_pspg_c`.
:Definition:
.. math::
\sum_{K \in \Ical_h}\int_{T_K} \tau_K\ \nabla r (\ul{v} \cdot \nabla
\ul{u})
:Arguments:
- material : :math:`\tau_K`
- virtual : :math:`\ul{v}`
- parameter : :math:`\ul{u}`
- state : :math:`r`
"""
name = 'dw_st_adj2_supg_p'
arg_types = ('material', 'virtual', 'parameter', 'state')
arg_shapes = {'material' : '1, 1', 'virtual' : ('D', 'state'),
'state' : 1, 'parameter' : 'D'}
function = staticmethod(terms.dw_st_adj2_supg_p)
def get_fargs(self, mat, virtual, parameter, state,
mode=None, term_mode=None, diff_var=None, **kwargs):
vg_r, _ = self.get_mapping(state)
vg_u, _ = self.get_mapping(parameter)
grad_u = self.get(parameter, 'grad').transpose((0, 1, 3, 2)).copy()
conn_r = state.field.get_connectivity(self.region, self.integration)
fmode = diff_var is not None
return grad_u, state(), mat, vg_u, vg_r, conn_r, fmode
class SDDotVolumeTerm(Term):
r"""
Sensitivity (shape derivative) of dot product of scalars or vectors.
:Definition:
.. math::
\int_{\Omega} p q (\nabla \cdot \ul{\Vcal}) \mbox{ , }
\int_{\Omega} (\ul{u} \cdot \ul{w}) (\nabla \cdot \ul{\Vcal})
:Arguments:
- parameter_1 : :math:`p` or :math:`\ul{u}`
- parameter_2 : :math:`q` or :math:`\ul{w}`
- parameter_mv : :math:`\ul{\Vcal}`
"""
name = 'd_sd_volume_dot'
arg_types = ('parameter_1', 'parameter_2', 'parameter_mv')
arg_shapes = [{'parameter_1' : 'D', 'parameter_2' : 'D',
'parameter_mv' : 'D'},
{'parameter_1' : 1, 'parameter_2' : 1}]
function = staticmethod(terms.d_sd_volume_dot)
def get_fargs(self, par1, par2, par_mv,
mode=None, term_mode=None, diff_var=None, **kwargs):
vg, _ = self.get_mapping(par1)
val1 = self.get(par1, 'val')
val2 = self.get(par2, 'val')
div_mv = self.get(par_mv, 'div')
return val1, val2, div_mv, vg, get_default(term_mode, 1)
def get_eval_shape(self, par1, par2, par_mv,
mode=None, term_mode=None, diff_var=None, **kwargs):
n_el, n_qp, dim, n_en, n_c = self.get_data_shape(par1)
return (n_el, 1, 1, 1), par1.dtype
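# Naming reminder: the 'd_' prefix marks evaluation-only terms such as this
# one and those below - get_eval_shape() returns one scalar per element,
# (n_el, 1, 1, 1) - while the 'dw_' terms above assemble weak-form
# vectors/matrices.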
class SDDivTerm(Term):
r"""
Sensitivity (shape derivative) of Stokes term `dw_stokes` in 'div' mode.
Supports the following term modes: 1 (sensitivity) or 0 (original term
value).
:Definition:
.. math::
\int_{\Omega} p [ (\nabla \cdot \ul{w}) (\nabla \cdot \ul{\Vcal})
- \pdiff{\Vcal_k}{x_i} \pdiff{w_i}{x_k} ]
:Arguments:
- parameter_u : :math:`\ul{u}`
- parameter_p : :math:`p`
- parameter_mv : :math:`\ul{\Vcal}`
"""
name = 'd_sd_div'
arg_types = ('parameter_u', 'parameter_p', 'parameter_mv')
arg_shapes = {'parameter_u' : 'D', 'parameter_p' : 1,
'parameter_mv' : 'D'}
function = staticmethod(terms.d_sd_div)
def get_fargs(self, par_u, par_p, par_mv,
mode=None, term_mode=None, diff_var=None, **kwargs):
vg, _ = self.get_mapping(par_u)
div_u = self.get(par_u, 'div')
grad_u = grad_as_vector(self.get(par_u, 'grad'))
val_p = self.get(par_p, 'val')
div_mv = self.get(par_mv, 'div')
grad_mv = grad_as_vector(self.get(par_mv, 'grad'))
return (div_u, grad_u, val_p, div_mv, grad_mv, vg,
get_default(term_mode, 1))
def get_eval_shape(self, par_u, par_p, par_mv,
mode=None, term_mode=None, diff_var=None, **kwargs):
n_el, n_qp, dim, n_en, n_c = self.get_data_shape(par_u)
return (n_el, 1, 1, 1), par_u.dtype
class SDDivGradTerm(Term):
r"""
Sensitivity (shape derivative) of diffusion term `dw_div_grad`.
Supports the following term modes: 1 (sensitivity) or 0 (original term
value).
:Definition:
.. math::
w \nu \int_{\Omega} [ \pdiff{u_i}{x_k} \pdiff{w_i}{x_k}
(\nabla \cdot \ul{\Vcal})
- \pdiff{\Vcal_j}{x_k} \pdiff{u_i}{x_j} \pdiff{w_i}{x_k}
        - \pdiff{u_i}{x_k} \pdiff{\Vcal_l}{x_k} \pdiff{w_i}{x_l} ]
:Arguments:
- material : :math:`\nu` (viscosity, optional)
- parameter_u : :math:`\ul{u}`
- parameter_w : :math:`\ul{w}`
- parameter_mv : :math:`\ul{\Vcal}`
"""
name = 'd_sd_div_grad'
arg_types = ('opt_material', 'parameter_u', 'parameter_w',
'parameter_mv')
arg_shapes = {'opt_material' : '1, 1',
'parameter_u' : 'D', 'parameter_w' : 'D',
'parameter_mv' : 'D'}
function = staticmethod(terms.d_sd_div_grad)
def get_fargs(self, mat, par_u, par_w, par_mv,
mode=None, term_mode=None, diff_var=None, **kwargs):
vg, _ = self.get_mapping(par_u)
grad_u = grad_as_vector(self.get(par_u, 'grad'))
grad_w = grad_as_vector(self.get(par_w, 'grad'))
div_mv = self.get(par_mv, 'div')
grad_mv = grad_as_vector(self.get(par_mv, 'grad'))
return (grad_u, grad_w, div_mv, grad_mv, mat, vg,
get_default(term_mode, 1))
def get_eval_shape(self, mat, par_u, par_w, par_mv,
mode=None, term_mode=None, diff_var=None, **kwargs):
n_el, n_qp, dim, n_en, n_c = self.get_data_shape(par_u)
return (n_el, 1, 1, 1), par_u.dtype
class SDConvectTerm(Term):
r"""
Sensitivity (shape derivative) of convective term `dw_convect`.
Supports the following term modes: 1 (sensitivity) or 0 (original term
value).
:Definition:
.. math::
\int_{\Omega} [ u_k \pdiff{u_i}{x_k} w_i (\nabla \cdot \Vcal)
- u_k \pdiff{\Vcal_j}{x_k} \pdiff{u_i}{x_j} w_i ]
:Arguments:
- parameter_u : :math:`\ul{u}`
- parameter_w : :math:`\ul{w}`
- parameter_mv : :math:`\ul{\Vcal}`
"""
name = 'd_sd_convect'
arg_types = ('parameter_u', 'parameter_w', 'parameter_mv')
arg_shapes = {'parameter_u' : 'D', 'parameter_w' : 'D',
'parameter_mv' : 'D'}
function = staticmethod(terms.d_sd_convect)
def get_fargs(self, par_u, par_w, par_mv,
mode=None, term_mode=None, diff_var=None, **kwargs):
vg, _ = self.get_mapping(par_u)
val_u = self.get(par_u, 'val')
grad_u = grad_as_vector(self.get(par_u, 'grad'))
val_w = self.get(par_w, 'val')
div_mv = self.get(par_mv, 'div')
grad_mv = grad_as_vector(self.get(par_mv, 'grad'))
return (val_u, grad_u, val_w, div_mv, grad_mv, vg,
get_default(term_mode, 1))
def get_eval_shape(self, par_u, par_w, par_mv,
mode=None, term_mode=None, diff_var=None, **kwargs):
n_el, n_qp, dim, n_en, n_c = self.get_data_shape(par_u)
return (n_el, 1, 1, 1), par_u.dtype
class NSOFMinGradTerm(Term):
name = 'd_of_ns_min_grad'
arg_types = ('material_1', 'material_2', 'parameter')
arg_shapes = {'material_1' : '1, 1', 'material_2' : '1, 1',
'parameter' : 1}
function = staticmethod(terms.d_of_nsMinGrad)
def get_fargs(self, weight, mat, parameter,
mode=None, term_mode=None, diff_var=None, **kwargs):
vg, _ = self.get_mapping(parameter)
grad = grad_as_vector(self.get(parameter, 'grad'))
return grad, weight * mat, vg
def get_eval_shape(self, weight, mat, parameter,
mode=None, term_mode=None, diff_var=None, **kwargs):
return (1, 1, 1, 1), parameter.dtype
class NSOFSurfMinDPressTerm(Term):
r"""
Sensitivity of :math:`\Psi(p)`.
:Definition:
.. math::
\delta \Psi(p) = \delta \left( \int_{\Gamma_{in}}p -
\int_{\Gamma_{out}}bpress \right)
:Arguments:
- material_1 : :math:`w` (weight)
- material_2 : :math:`bpress` (given pressure)
- parameter : :math:`p`
"""
name = 'd_of_ns_surf_min_d_press'
arg_types = ('material_1', 'material_2', 'parameter')
arg_shapes = {'material_1' : 1, 'material_2' : 1,
'parameter' : 1}
integration = 'surface'
function = staticmethod(terms.d_of_nsSurfMinDPress)
def get_fargs(self, weight, bpress, parameter,
mode=None, term_mode=None, diff_var=None, **kwargs):
sg, _ = self.get_mapping(parameter)
val_p = self.get(parameter, 'val')
return val_p, weight, bpress, sg, 0
def get_eval_shape(self, weight, bpress, parameter,
mode=None, term_mode=None, diff_var=None, **kwargs):
return (1, 1, 1, 1), parameter.dtype
class NSOFSurfMinDPressDiffTerm(NSOFSurfMinDPressTerm):
r"""
Gateaux differential of :math:`\Psi(p)` w.r.t. :math:`p` in the
direction :math:`q`.
:Definition:
.. math::
w \delta_{p} \Psi(p) \circ q
:Arguments:
- material : :math:`w` (weight)
- virtual : :math:`q`
"""
name = 'dw_of_ns_surf_min_d_press_diff'
arg_types = ('material', 'virtual')
arg_shapes = {'material' : 1, 'virtual' : (1, None)}
def get_fargs(self, weight, virtual,
mode=None, term_mode=None, diff_var=None, **kwargs):
sg, _ = self.get_mapping(virtual)
aux = nm.array([0], ndmin=4, dtype=nm.float64)
return aux, weight, 0.0, sg, 1
class SDGradDivStabilizationTerm(Term):
r"""
Sensitivity (shape derivative) of stabilization term `dw_st_grad_div`.
:Definition:
.. math::
\gamma \int_{\Omega} [ (\nabla \cdot \ul{u}) (\nabla \cdot \ul{w})
(\nabla \cdot \ul{\Vcal})
- \pdiff{u_i}{x_k} \pdiff{\Vcal_k}{x_i} (\nabla \cdot \ul{w})
- (\nabla \cdot \ul{u}) \pdiff{w_i}{x_k} \pdiff{\Vcal_k}{x_i} ]
:Arguments:
- material : :math:`\gamma`
- parameter_u : :math:`\ul{u}`
- parameter_w : :math:`\ul{w}`
- parameter_mv : :math:`\ul{\Vcal}`
- mode : 1 (sensitivity) or 0 (original term value)
"""
name = 'd_sd_st_grad_div'
arg_types = ('material', 'parameter_u', 'parameter_w',
'parameter_mv')
arg_shapes = {'material' : '1, 1',
'parameter_u' : 'D', 'parameter_w' : 'D',
'parameter_mv' : 'D'}
function = staticmethod(terms.d_sd_st_grad_div)
def get_fargs(self, mat, par_u, par_w, par_mv,
mode=None, term_mode=None, diff_var=None, **kwargs):
vg, _ = self.get_mapping(par_u)
div_u = self.get(par_u, 'div')
grad_u = grad_as_vector(self.get(par_u, 'grad'))
div_w = self.get(par_w, 'div')
grad_w = grad_as_vector(self.get(par_w, 'grad'))
div_mv = self.get(par_mv, 'div')
grad_mv = grad_as_vector(self.get(par_mv, 'grad'))
return (div_u, grad_u, div_w, grad_w, div_mv, grad_mv, mat, vg,
get_default(term_mode, 1))
def get_eval_shape(self, mat, par_u, par_w, par_mv,
mode=None, term_mode=None, diff_var=None, **kwargs):
n_el, n_qp, dim, n_en, n_c = self.get_data_shape(par_u)
return (n_el, 1, 1, 1), par_u.dtype
class SDSUPGCStabilizationTerm(Term):
r"""
Sensitivity (shape derivative) of stabilization term `dw_st_supg_c`.
:Definition:
.. math::
\sum_{K \in \Ical_h}\int_{T_K} \delta_K\ [ (\ul{b} \cdot \nabla u_k)
(\ul{b} \cdot \nabla w_k) (\nabla \cdot \Vcal) -
(\ul{b} \cdot \nabla \Vcal_i) \pdiff{u_k}{x_i}
(\ul{b} \cdot \nabla w_k) - (\ul{u} \cdot \nabla u_k)
(\ul{b} \cdot \nabla \Vcal_i) \pdiff{w_k}{x_i} ]
:Arguments:
- material : :math:`\delta_K`
- parameter_b : :math:`\ul{b}`
- parameter_u : :math:`\ul{u}`
- parameter_w : :math:`\ul{w}`
- parameter_mv : :math:`\ul{\Vcal}`
- mode : 1 (sensitivity) or 0 (original term value)
"""
name = 'd_sd_st_supg_c'
arg_types = ('material', 'parameter_b', 'parameter_u', 'parameter_w',
'parameter_mv')
arg_shapes = {'material' : '1, 1',
'parameter_b' : 'D', 'parameter_u' : 'D', 'parameter_w' : 'D',
'parameter_mv' : 'D'}
function = staticmethod(terms.d_sd_st_supg_c)
def get_fargs(self, mat, par_b, par_u, par_w, par_mv,
mode=None, term_mode=None, diff_var=None, **kwargs):
vg, _ = self.get_mapping(par_u)
val_b = self.get(par_b, 'val')
grad_u = self.get(par_u, 'grad').transpose((0, 1, 3, 2)).copy()
grad_w = self.get(par_w, 'grad').transpose((0, 1, 3, 2)).copy()
div_mv = self.get(par_mv, 'div')
grad_mv = self.get(par_mv, 'grad').transpose((0, 1, 3, 2)).copy()
return (val_b, grad_u, grad_w, div_mv, grad_mv, mat, vg,
get_default(term_mode, 1))
def get_eval_shape(self, mat, par_b, par_u, par_w, par_mv,
mode=None, term_mode=None, diff_var=None, **kwargs):
n_el, n_qp, dim, n_en, n_c = self.get_data_shape(par_u)
return (n_el, 1, 1, 1), par_u.dtype
class SDPSPGCStabilizationTerm(Term):
r"""
Sensitivity (shape derivative) of stabilization terms `dw_st_supg_p` or
`dw_st_pspg_c`.
:Definition:
.. math::
\sum_{K \in \Ical_h}\int_{T_K} \delta_K\
[ \pdiff{r}{x_i} (\ul{b} \cdot \nabla u_i) (\nabla \cdot \Vcal) -
\pdiff{r}{x_k} \pdiff{\Vcal_k}{x_i} (\ul{b} \cdot \nabla u_i)
- \pdiff{r}{x_k} (\ul{b} \cdot \nabla \Vcal_k) \pdiff{u_i}{x_k} ]
:Arguments:
- material : :math:`\delta_K`
- parameter_b : :math:`\ul{b}`
- parameter_u : :math:`\ul{u}`
- parameter_r : :math:`r`
- parameter_mv : :math:`\ul{\Vcal}`
- mode : 1 (sensitivity) or 0 (original term value)
"""
name = 'd_sd_st_pspg_c'
arg_types = ('material', 'parameter_b', 'parameter_u', 'parameter_r',
'parameter_mv')
arg_shapes = {'material' : '1, 1',
'parameter_b' : 'D', 'parameter_u' : 'D', 'parameter_r' : 1,
'parameter_mv' : 'D'}
function = staticmethod(terms.d_sd_st_pspg_c)
def get_fargs(self, mat, par_b, par_u, par_r, par_mv,
mode=None, term_mode=None, diff_var=None, **kwargs):
vg, _ = self.get_mapping(par_u)
val_b = self.get(par_b, 'val')
grad_u = self.get(par_u, 'grad').transpose((0, 1, 3, 2)).copy()
grad_r = self.get(par_r, 'grad')
div_mv = self.get(par_mv, 'div')
grad_mv = self.get(par_mv, 'grad').transpose((0, 1, 3, 2)).copy()
return (val_b, grad_u, grad_r, div_mv, grad_mv, mat, vg,
get_default(term_mode, 1))
def get_eval_shape(self, mat, par_b, par_u, par_r, par_mv,
mode=None, term_mode=None, diff_var=None, **kwargs):
n_el, n_qp, dim, n_en, n_c = self.get_data_shape(par_u)
return (n_el, 1, 1, 1), par_u.dtype
class SDPSPGPStabilizationTerm(Term):
r"""
Sensitivity (shape derivative) of stabilization term `dw_st_pspg_p`.
:Definition:
.. math::
\sum_{K \in \Ical_h}\int_{T_K} \tau_K\ [ (\nabla r \cdot \nabla p)
(\nabla \cdot \Vcal) - \pdiff{r}{x_k} (\nabla \Vcal_k \cdot \nabla p) -
(\nabla r \cdot \nabla \Vcal_k) \pdiff{p}{x_k} ]
:Arguments:
- material : :math:`\tau_K`
- parameter_r : :math:`r`
- parameter_p : :math:`p`
- parameter_mv : :math:`\ul{\Vcal}`
- mode : 1 (sensitivity) or 0 (original term value)
"""
name = 'd_sd_st_pspg_p'
arg_types = ('material', 'parameter_r', 'parameter_p',
'parameter_mv')
arg_shapes = {'material' : '1, 1',
'parameter_r' : 1, 'parameter_p' : 1,
'parameter_mv' : 'D'}
function = staticmethod(terms.d_sd_st_pspg_p)
def get_fargs(self, mat, par_r, par_p, par_mv,
mode=None, term_mode=None, diff_var=None, **kwargs):
vg, _ = self.get_mapping(par_p)
grad_r = self.get(par_r, 'grad')
grad_p = self.get(par_p, 'grad')
div_mv = self.get(par_mv, 'div')
grad_mv = self.get(par_mv, 'grad').transpose((0, 1, 3, 2)).copy()
return (grad_r, grad_p, div_mv, grad_mv, mat, vg,
get_default(term_mode, 1))
def get_eval_shape(self, mat, par_r, par_p, par_mv,
mode=None, term_mode=None, diff_var=None, **kwargs):
n_el, n_qp, dim, n_en, n_c = self.get_data_shape(par_p)
return (n_el, 1, 1, 1), par_p.dtype
|
[
"sfepy.base.base.get_default"
] |
[((15137, 15177), 'numpy.array', 'nm.array', (['[0]'], {'ndmin': '(4)', 'dtype': 'nm.float64'}), '([0], ndmin=4, dtype=nm.float64)\n', (15145, 15177), True, 'import numpy as nm\n'), ((1341, 1381), 'numpy.array', 'nm.array', (['[0]'], {'ndmin': '(4)', 'dtype': 'nm.float64'}), '([0], ndmin=4, dtype=nm.float64)\n', (1349, 1381), True, 'import numpy as nm\n'), ((7835, 7860), 'sfepy.base.base.get_default', 'get_default', (['term_mode', '(1)'], {}), '(term_mode, 1)\n', (7846, 7860), False, 'from sfepy.base.base import get_default\n'), ((9288, 9313), 'sfepy.base.base.get_default', 'get_default', (['term_mode', '(1)'], {}), '(term_mode, 1)\n', (9299, 9313), False, 'from sfepy.base.base import get_default\n'), ((10979, 11004), 'sfepy.base.base.get_default', 'get_default', (['term_mode', '(1)'], {}), '(term_mode, 1)\n', (10990, 11004), False, 'from sfepy.base.base import get_default\n'), ((12456, 12481), 'sfepy.base.base.get_default', 'get_default', (['term_mode', '(1)'], {}), '(term_mode, 1)\n', (12467, 12481), False, 'from sfepy.base.base import get_default\n'), ((16739, 16764), 'sfepy.base.base.get_default', 'get_default', (['term_mode', '(1)'], {}), '(term_mode, 1)\n', (16750, 16764), False, 'from sfepy.base.base import get_default\n'), ((18659, 18684), 'sfepy.base.base.get_default', 'get_default', (['term_mode', '(1)'], {}), '(term_mode, 1)\n', (18670, 18684), False, 'from sfepy.base.base import get_default\n'), ((20530, 20555), 'sfepy.base.base.get_default', 'get_default', (['term_mode', '(1)'], {}), '(term_mode, 1)\n', (20541, 20555), False, 'from sfepy.base.base import get_default\n'), ((22155, 22180), 'sfepy.base.base.get_default', 'get_default', (['term_mode', '(1)'], {}), '(term_mode, 1)\n', (22166, 22180), False, 'from sfepy.base.base import get_default\n')]
|
import time
from datetime import datetime, timedelta, date
import typer
from sqlalchemy.exc import NoResultFound, OperationalError
from sqlmodel import Session, select, func
from tabulate import tabulate
from . import edit
from . import reports
from .database import create_db_and_tables, engine
from .functions_aux import Status, make_table_view, pop_up_msg
from .tables import ToDo, Timer
app = typer.Typer()
app.add_typer(reports.app, name='report', help='Print customized reports.')
app.add_typer(edit.app, name='edit', help='Edit records.')
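# Example CLI usage, as a sketch only -- the console-script name depends on
# how the package is installed (assumed here to be `todo`):
#   todo add "write report" -p work -d 2022-12-31 -r 2022-12-24
#   todo start 1 -d 25        # run a 25 minute timer on task 1
#   todo stop -r "first draft done"
#   todo view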
@app.command()
def add(task: str, project: str = typer.Option(None, '--project', '-p'),
due_date: datetime = typer.Option(None, '--due-date', '-d',
formats=['%Y-%m-%d']),
reminder: datetime = typer.Option(None, '--reminder', '-r',
formats=['%Y-%m-%d']),
status: Status = typer.Option(Status.to_do, '--status', '-s'),
tag: str = typer.Option(None, '--tag', '-t')):
"""Add task to the to-do list."""
try:
today = datetime.today()
if due_date is not None and due_date <= today:
            typer.secho(f'\ndue date must be greater than {today.date()}\n',
fg=typer.colors.RED)
raise typer.Exit(code=1)
if reminder is not None and reminder <= today:
            typer.secho(f'\nreminder must be greater than {today.date()}\n',
fg=typer.colors.RED)
raise typer.Exit(code=1)
if due_date is not None and reminder is not None and \
reminder >= due_date:
typer.secho(f'\nreminder must be smaller than {due_date.date()}\n',
fg=typer.colors.RED)
raise typer.Exit(code=1)
with Session(engine) as session:
if project is not None:
new_project = session.exec(select(ToDo).where(
ToDo.project == project)).first()
if new_project is not None:
ongoing_project = session.exec(select(ToDo).where(
ToDo.project == project,
ToDo.status != 'done')).first()
if ongoing_project is None:
typer.secho(f'\nTasks already done in the project\n',
fg=typer.colors.RED)
raise typer.Exit(code=1)
new_entry = ToDo(task=task, project=project,
due_date=due_date, reminder=reminder,
status=status, tag=tag)
session.add(new_entry)
session.commit()
new_id = session.exec(select(func.max(ToDo.id))).one()
typer.secho(f'Add {task}. Task id: {new_id}\n',
fg=typer.colors.GREEN)
except OperationalError:
create_db_and_tables()
add(task=task, project=project, due_date=due_date, reminder=reminder,
status=status, tag=tag)
@app.command()
def start(task_id: int, duration: int = typer.Option(None, '--duration', '-d',
help='Duration in minutes')):
"""Start Timer for a given open task."""
with Session(engine) as session:
try:
session.exec(select(Timer).where(Timer.end == None)).one()
typer.secho('\nThe Timer must be stopped first\n',
fg=typer.colors.RED)
raise typer.Exit(code=1)
except NoResultFound:
pass
try:
query = session.get(ToDo, task_id)
if not query.status == 'done':
if query.status == 'to do':
query.status = 'doing'
session.add(query)
if duration is not None:
duration = timedelta(minutes=duration)
if duration <= timedelta(minutes=0):
typer.secho(
                            f'\nDuration must be greater than 0\n',
fg=typer.colors.RED)
raise typer.Exit(code=1)
total_seconds = int(duration.total_seconds())
session.add(Timer(id_todo=task_id))
session.commit()
new_id = session.exec(select(func.max(Timer.id))).one()
typer.secho(
                        f'\nStart task {task_id}. Timer id: {new_id}\n',
fg=typer.colors.GREEN)
with typer.progressbar(length=total_seconds) as progress:
end = datetime.utcnow() + duration
while datetime.utcnow() < end:
time.sleep(1)
progress.update(1)
else:
typer.secho('\n\nYour Time is over! Well done!\n',
blink=True,
fg=typer.colors.BRIGHT_GREEN)
pop_up_msg()
remark = typer.confirm("Any remark?")
if remark:
remark = typer.prompt('Enter your remarks.')
else:
remark = None
stop(remarks=remark)
                            raise typer.Exit()
else:
session.add(Timer(id_todo=task_id))
session.commit()
new_id = session.exec(select(func.max(Timer.id))).one()
typer.secho(
f'\nStart task {task_id}. Timer id: {new_id}\n',
fg=typer.colors.GREEN)
else:
typer.secho(f'\nTask already done\n',
fg=typer.colors.RED)
raise typer.Exit(code=1)
except AttributeError:
typer.secho(f'\nInvalid task id\n',
fg=typer.colors.RED)
raise typer.Exit(code=1)
@app.command()
def stop(remarks: str = typer.Option(None, '--remarks', '-r')):
"""Stop Timer."""
with Session(engine) as session:
try:
query_timer = session.exec(
select(Timer).where(Timer.end == None)).one()
query_timer.end = datetime.utcnow()
query_timer.duration = query_timer.end - query_timer.start
session.add(query_timer)
query = session.get(ToDo, query_timer.id_todo)
check = typer.confirm('Is the task done?')
if not check and not remarks:
pass
else:
if check:
query.status = 'done'
query.date_end = query_timer.end.date()
if remarks:
query.remarks = remarks
session.add(query)
session.commit()
new_id = session.exec(select(func.max(Timer.id))).one()
typer.secho(
f'\nStop task ({query.id}). Timer id: {new_id}\n',
fg=typer.colors.GREEN)
except NoResultFound:
typer.secho(f'\nNo task running\n', fg=typer.colors.RED)
raise typer.Exit(code=1)
@app.command()
def view(due_date: datetime = typer.Option(datetime.today() +
timedelta(weeks=1),
formats=['%Y-%m-%d'])):
"""Print to-do list view."""
overdue = select(ToDo).where(ToDo.due_date < date.today(),
ToDo.status != 'done').order_by(ToDo.due_date)
reminders = select(ToDo).where(ToDo.reminder <= date.today(),
ToDo.status != 'done').order_by(
ToDo.due_date)
due_in = select(ToDo).where(
ToDo.due_date < due_date, ToDo.due_date >= date.today(),
ToDo.status != 'done').order_by(ToDo.due_date)
no_due = select(ToDo).where(
ToDo.due_date == None, ToDo.status != 'done',
ToDo.reminder == None).order_by(ToDo.date_init)
if len(make_table_view(engine, overdue)) > 1:
typer.secho(f'\nOVERDUE\n', fg=typer.colors.BRIGHT_RED,
bold=True)
typer.secho(tabulate(make_table_view(engine, overdue),
headers="firstrow"), fg=typer.colors.BRIGHT_WHITE)
if len(make_table_view(engine, reminders)) > 1:
typer.secho(f'\nREMINDERS\n', fg=typer.colors.BRIGHT_YELLOW, bold=True)
typer.secho(tabulate(make_table_view(engine, reminders),
headers="firstrow"), fg=typer.colors.BRIGHT_WHITE)
if len(make_table_view(engine, due_in)) > 1:
typer.secho(f'\nDUE IN {due_date.date()}\n',
fg=typer.colors.BRIGHT_GREEN, bold=True)
typer.secho(tabulate(make_table_view(engine, due_in),
headers="firstrow"), fg=typer.colors.BRIGHT_WHITE)
if len(make_table_view(engine, no_due)) > 1:
typer.secho(f'\nNO DUE\n', fg=typer.colors.BRIGHT_BLUE, bold=True)
typer.secho(tabulate(make_table_view(engine, no_due),
headers="firstrow"), fg=typer.colors.BRIGHT_WHITE)
print('\n')
|
[
"sqlmodel.select",
"sqlmodel.Session",
"sqlmodel.func.max"
] |
[((400, 413), 'typer.Typer', 'typer.Typer', ([], {}), '()\n', (411, 413), False, 'import typer\n'), ((600, 637), 'typer.Option', 'typer.Option', (['None', '"""--project"""', '"""-p"""'], {}), "(None, '--project', '-p')\n", (612, 637), False, 'import typer\n'), ((668, 728), 'typer.Option', 'typer.Option', (['None', '"""--due-date"""', '"""-d"""'], {'formats': "['%Y-%m-%d']"}), "(None, '--due-date', '-d', formats=['%Y-%m-%d'])\n", (680, 728), False, 'import typer\n'), ((801, 861), 'typer.Option', 'typer.Option', (['None', '"""--reminder"""', '"""-r"""'], {'formats': "['%Y-%m-%d']"}), "(None, '--reminder', '-r', formats=['%Y-%m-%d'])\n", (813, 861), False, 'import typer\n'), ((930, 974), 'typer.Option', 'typer.Option', (['Status.to_do', '"""--status"""', '"""-s"""'], {}), "(Status.to_do, '--status', '-s')\n", (942, 974), False, 'import typer\n'), ((995, 1028), 'typer.Option', 'typer.Option', (['None', '"""--tag"""', '"""-t"""'], {}), "(None, '--tag', '-t')\n", (1007, 1028), False, 'import typer\n'), ((3098, 3164), 'typer.Option', 'typer.Option', (['None', '"""--duration"""', '"""-d"""'], {'help': '"""Duration in minutes"""'}), "(None, '--duration', '-d', help='Duration in minutes')\n", (3110, 3164), False, 'import typer\n'), ((6176, 6213), 'typer.Option', 'typer.Option', (['None', '"""--remarks"""', '"""-r"""'], {}), "(None, '--remarks', '-r')\n", (6188, 6213), False, 'import typer\n'), ((1094, 1110), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (1108, 1110), False, 'from datetime import datetime, timedelta, date\n'), ((3274, 3289), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (3281, 3289), False, 'from sqlmodel import Session, select, func\n'), ((6247, 6262), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (6254, 6262), False, 'from sqlmodel import Session, select, func\n'), ((8242, 8308), 'typer.secho', 'typer.secho', (['f"""\nOVERDUE\n"""'], {'fg': 'typer.colors.BRIGHT_RED', 'bold': '(True)'}), "(f'\\nOVERDUE\\n', fg=typer.colors.BRIGHT_RED, bold=True)\n", (8253, 8308), False, 'import typer\n'), ((8533, 8604), 'typer.secho', 'typer.secho', (['f"""\nREMINDERS\n"""'], {'fg': 'typer.colors.BRIGHT_YELLOW', 'bold': '(True)'}), "(f'\\nREMINDERS\\n', fg=typer.colors.BRIGHT_YELLOW, bold=True)\n", (8544, 8604), False, 'import typer\n'), ((9114, 9180), 'typer.secho', 'typer.secho', (['f"""\nNO DUE\n"""'], {'fg': 'typer.colors.BRIGHT_BLUE', 'bold': '(True)'}), "(f'\\nNO DUE\\n', fg=typer.colors.BRIGHT_BLUE, bold=True)\n", (9125, 9180), False, 'import typer\n'), ((1306, 1324), 'typer.Exit', 'typer.Exit', ([], {'code': '(1)'}), '(code=1)\n', (1316, 1324), False, 'import typer\n'), ((1520, 1538), 'typer.Exit', 'typer.Exit', ([], {'code': '(1)'}), '(code=1)\n', (1530, 1538), False, 'import typer\n'), ((1784, 1802), 'typer.Exit', 'typer.Exit', ([], {'code': '(1)'}), '(code=1)\n', (1794, 1802), False, 'import typer\n'), ((1817, 1832), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (1824, 1832), False, 'from sqlmodel import Session, select, func\n'), ((2772, 2842), 'typer.secho', 'typer.secho', (['f"""Add {task}. Task id: {new_id}\n"""'], {'fg': 'typer.colors.GREEN'}), "(f'Add {task}. Task id: {new_id}\\n', fg=typer.colors.GREEN)\n", (2783, 2842), False, 'import typer\n'), ((3398, 3471), 'typer.secho', 'typer.secho', (['"""\nThe Timer must be stopped first\n"""'], {'fg': 'typer.colors.RED'}), '("""\nThe Timer must be stopped first\n""", fg=typer.colors.RED)\n', (3409, 3471), False, 'import typer\n'), ((3512, 3530), 'typer.Exit', 'typer.Exit', ([], {'code': '(1)'}), '(code=1)\n', (3522, 3530), False, 'import typer\n'), ((6420, 6437), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (6435, 6437), False, 'from datetime import datetime, timedelta, date\n'), ((6627, 6661), 'typer.confirm', 'typer.confirm', (['"""Is the task done?"""'], {}), "('Is the task done?')\n", (6640, 6661), False, 'import typer\n'), ((7086, 7178), 'typer.secho', 'typer.secho', (['f"""\nStop task ({query.id}). Timer id: {new_id}\n"""'], {'fg': 'typer.colors.GREEN'}), '(f"""\nStop task ({query.id}). Timer id: {new_id}\n""", fg=typer.\n colors.GREEN)\n', (7097, 7178), False, 'import typer\n'), ((7402, 7418), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (7416, 7418), False, 'from datetime import datetime, timedelta, date\n'), ((7464, 7482), 'datetime.timedelta', 'timedelta', ([], {'weeks': '(1)'}), '(weeks=1)\n', (7473, 7482), False, 'from datetime import datetime, timedelta, date\n'), ((5845, 5905), 'typer.secho', 'typer.secho', (['f"""\nTask already done\n"""'], {'fg': 'typer.colors.RED'}), '(f"""\nTask already done\n""", fg=typer.colors.RED)\n', (5856, 5905), False, 'import typer\n'), ((5954, 5972), 'typer.Exit', 'typer.Exit', ([], {'code': '(1)'}), '(code=1)\n', (5964, 5972), False, 'import typer\n'), ((6017, 6075), 'typer.secho', 'typer.secho', (['f"""\nInvalid task id\n"""'], {'fg': 'typer.colors.RED'}), '(f"""\nInvalid task id\n""", fg=typer.colors.RED)\n', (6028, 6075), False, 'import typer\n'), ((6116, 6134), 'typer.Exit', 'typer.Exit', ([], {'code': '(1)'}), '(code=1)\n', (6126, 6134), False, 'import typer\n'), ((7248, 7306), 'typer.secho', 'typer.secho', (['f"""\nNo task running\n"""'], {'fg': 'typer.colors.RED'}), '(f"""\nNo task running\n""", fg=typer.colors.RED)\n', (7259, 7306), False, 'import typer\n'), ((7323, 7341), 'typer.Exit', 'typer.Exit', ([], {'code': '(1)'}), '(code=1)\n', (7333, 7341), False, 'import typer\n'), ((3880, 3907), 'datetime.timedelta', 'timedelta', ([], {'minutes': 'duration'}), '(minutes=duration)\n', (3889, 3907), False, 'from datetime import datetime, timedelta, date\n'), ((4422, 4517), 'typer.secho', 'typer.secho', (['f"""\nTask Start task {task_id}. Timer id: {new_id}\n"""'], {'fg': 'typer.colors.GREEN'}), '(f"""\nTask Start task {task_id}. Timer id: {new_id}\n""", fg=\n typer.colors.GREEN)\n', (4433, 4517), False, 'import typer\n'), ((5677, 5767), 'typer.secho', 'typer.secho', (['f"""\nStart task {task_id}. Timer id: {new_id}\n"""'], {'fg': 'typer.colors.GREEN'}), '(f"""\nStart task {task_id}. Timer id: {new_id}\n""", fg=typer.\n colors.GREEN)\n', (5688, 5767), False, 'import typer\n'), ((7598, 7610), 'sqlmodel.select', 'select', (['ToDo'], {}), '(ToDo)\n', (7604, 7610), False, 'from sqlmodel import Session, select, func\n'), ((7633, 7645), 'datetime.date.today', 'date.today', ([], {}), '()\n', (7643, 7645), False, 'from datetime import datetime, timedelta, date\n'), ((7744, 7756), 'sqlmodel.select', 'select', (['ToDo'], {}), '(ToDo)\n', (7750, 7756), False, 'from sqlmodel import Session, select, func\n'), ((7780, 7792), 'datetime.date.today', 'date.today', ([], {}), '()\n', (7790, 7792), False, 'from datetime import datetime, timedelta, date\n'), ((7899, 7911), 'sqlmodel.select', 'select', (['ToDo'], {}), '(ToDo)\n', (7905, 7911), False, 'from sqlmodel import Session, select, func\n'), ((7970, 7982), 'datetime.date.today', 'date.today', ([], {}), '()\n', (7980, 7982), False, 'from datetime import datetime, timedelta, date\n'), ((8053, 8065), 'sqlmodel.select', 'select', (['ToDo'], {}), '(ToDo)\n', (8059, 8065), False, 'from sqlmodel import Session, select, func\n'), ((2290, 2366), 'typer.secho', 'typer.secho', (['f"""\nTasks already done in the project\n"""'], {'fg': 'typer.colors.RED'}), '(f"""\nTasks already done in the project\n""", fg=typer.colors.RED)\n', (2301, 2366), False, 'import typer\n'), ((2431, 2449), 'typer.Exit', 'typer.Exit', ([], {'code': '(1)'}), '(code=1)\n', (2441, 2449), False, 'import typer\n'), ((3943, 3963), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(0)'}), '(minutes=0)\n', (3952, 3963), False, 'from datetime import datetime, timedelta, date\n'), ((3989, 4062), 'typer.secho', 'typer.secho', (['f"""\nDuration must be grater than 0\n"""'], {'fg': 'typer.colors.RED'}), '(f"""\nDuration must be grater than 0\n""", fg=typer.colors.RED)\n', (4000, 4062), False, 'import typer\n'), ((4148, 4166), 'typer.Exit', 'typer.Exit', ([], {'code': '(1)'}), '(code=1)\n', (4158, 4166), False, 'import typer\n'), ((4585, 4624), 'typer.progressbar', 'typer.progressbar', ([], {'length': 'total_seconds'}), '(length=total_seconds)\n', (4602, 4624), False, 'import typer\n'), ((2734, 2751), 'sqlmodel.func.max', 'func.max', (['ToDo.id'], {}), '(ToDo.id)\n', (2742, 2751), False, 'from sqlmodel import Session, select, func\n'), ((4668, 4685), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (4683, 4685), False, 'from datetime import datetime, timedelta, date\n'), ((4727, 4744), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (4742, 4744), False, 'from datetime import datetime, timedelta, date\n'), ((4780, 4793), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (4790, 4793), False, 'import time\n'), ((4899, 4997), 'typer.secho', 'typer.secho', (['"""\n\nYour Time is over! Well done!\n"""'], {'blink': '(True)', 'fg': 'typer.colors.BRIGHT_GREEN'}), '("""\n\nYour Time is over! Well done!\n""", blink=True, fg=typer.\n colors.BRIGHT_GREEN)\n', (4910, 4997), False, 'import typer\n'), ((5150, 5178), 'typer.confirm', 'typer.confirm', (['"""Any remark?"""'], {}), "('Any remark?')\n", (5163, 5178), False, 'import typer\n'), ((5452, 5464), 'typer.Exit', 'typer.Exit', ([], {}), '()\n', (5462, 5464), False, 'import typer\n'), ((7047, 7065), 'sqlmodel.func.max', 'func.max', (['Timer.id'], {}), '(Timer.id)\n', (7055, 7065), False, 'from sqlmodel import Session, select, func\n'), ((3340, 3353), 'sqlmodel.select', 'select', (['Timer'], {}), '(Timer)\n', (3346, 3353), False, 'from sqlmodel import Session, select, func\n'), ((5259, 5294), 'typer.prompt', 'typer.prompt', (['"""Enter your remarks."""'], {}), "('Enter your remarks.')\n", (5271, 5294), False, 'import typer\n'), ((6344, 6357), 'sqlmodel.select', 'select', (['Timer'], {}), '(Timer)\n', (6350, 6357), False, 'from sqlmodel import Session, select, func\n'), ((1924, 1936), 'sqlmodel.select', 'select', (['ToDo'], {}), '(ToDo)\n', (1930, 1936), False, 'from sqlmodel import Session, select, func\n'), ((4375, 4393), 'sqlmodel.func.max', 'func.max', (['Timer.id'], {}), '(Timer.id)\n', (4383, 4393), False, 'from sqlmodel import Session, select, func\n'), ((5630, 5648), 'sqlmodel.func.max', 'func.max', (['Timer.id'], {}), '(Timer.id)\n', (5638, 5648), False, 'from sqlmodel import Session, select, func\n'), ((2093, 2105), 'sqlmodel.select', 'select', (['ToDo'], {}), '(ToDo)\n', (2099, 2105), False, 'from sqlmodel import Session, select, func\n')]
|
from __future__ import absolute_import
from sfepy.discrete.fem import Mesh, FEDomain
import scipy.sparse as sps
import numpy as nm
from sfepy.base.compat import factorial
from sfepy.base.base import output
from six.moves import range
def elems_q2t(el):
nel, nnd = el.shape
if nnd > 4:
q2t = nm.array([[0, 2, 3, 6],
[0, 3, 7, 6],
[0, 7, 4, 6],
[0, 5, 6, 4],
[1, 5, 6, 0],
[1, 6, 2, 0]])
else:
q2t = nm.array([[0, 1, 2],
[0, 2, 3]])
ns, nn = q2t.shape
nel *= ns
    out = nm.zeros((nel, nn), dtype=nm.int32)
for ii in range(ns):
idxs = nm.arange(ii, nel, ns)
out[idxs,:] = el[:, q2t[ii,:]]
return nm.ascontiguousarray(out)
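# Example: the quads [[0, 1, 2, 3], [4, 5, 6, 7]] become the triangles
# [[0, 1, 2], [0, 2, 3], [4, 5, 6], [4, 6, 7]] - the sub-elements of each
# original cell land on consecutive rows of the output.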
def triangulate(mesh, verbose=False):
"""
Triangulate a 2D or 3D tensor product mesh: quadrilaterals->triangles,
hexahedrons->tetrahedrons.
Parameters
----------
mesh : Mesh
The input mesh.
Returns
-------
mesh : Mesh
The triangulated mesh.
"""
conns = None
for k, new_desc in [('3_8', '3_4'), ('2_4', '2_3')]:
if k in mesh.descs:
conns = mesh.get_conn(k)
break
if conns is not None:
nelo = conns.shape[0]
output('initial mesh: %d elements' % nelo, verbose=verbose)
new_conns = elems_q2t(conns)
nn = new_conns.shape[0] // nelo
new_cgroups = nm.repeat(mesh.cmesh.cell_groups, nn)
output('new mesh: %d elements' % new_conns.shape[0], verbose=verbose)
mesh = Mesh.from_data(mesh.name, mesh.coors,
mesh.cmesh.vertex_groups,
[new_conns], [new_cgroups], [new_desc])
return mesh
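# Minimal usage sketch (hypothetical file names):
#
#     from sfepy.discrete.fem import Mesh
#     mesh = Mesh.from_file('rect_quads.mesh')  # '2_4' cells
#     tri = triangulate(mesh, verbose=True)     # -> '2_3' cells
#     tri.write('rect_tris.mesh')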
def smooth_mesh(mesh, n_iter=4, lam=0.6307, mu=-0.6347,
weights=None, bconstr=True,
volume_corr=False):
"""
FE mesh smoothing.
Based on:
[1] <NAME>, <NAME>, Smooth surface meshing for automated
finite element model generation from 3D image data, Journal of
Biomechanics, Volume 39, Issue 7, 2006, Pages 1287-1295,
ISSN 0021-9290, 10.1016/j.jbiomech.2005.03.006.
(http://www.sciencedirect.com/science/article/pii/S0021929005001442)
Parameters
----------
mesh : mesh
FE mesh.
n_iter : integer, optional
Number of iteration steps.
lam : float, optional
Smoothing factor, see [1].
mu : float, optional
Unshrinking factor, see [1].
weights : array, optional
Edge weights, see [1].
bconstr: logical, optional
Boundary constraints, if True only surface smoothing performed.
volume_corr: logical, optional
Correct volume after smoothing process.
Returns
-------
coors : array
Coordinates of mesh nodes.
"""
def laplacian(coors, weights):
n_nod = coors.shape[0]
displ = (weights - sps.identity(n_nod)) * coors
return displ
def taubin(coors0, weights, lam, mu, n_iter):
coors = coors0.copy()
for ii in range(n_iter):
displ = laplacian(coors, weights)
if nm.mod(ii, 2) == 0:
coors += lam * displ
else:
coors += mu * displ
return coors
def get_volume(el, nd):
from sfepy.linalg.utils import dets_fast
dim = nd.shape[1]
nnd = el.shape[1]
etype = '%d_%d' % (dim, nnd)
if etype == '2_4' or etype == '3_8':
el = elems_q2t(el)
nel = el.shape[0]
#bc = nm.zeros((dim, ), dtype=nm.double)
mul = 1.0 / factorial(dim)
if dim == 3:
mul *= -1.0
mtx = nm.ones((nel, dim + 1, dim + 1), dtype=nm.double)
mtx[:,:,:-1] = nd[el,:]
vols = mul * dets_fast(mtx)
vol = vols.sum()
bc = nm.dot(vols, mtx.sum(1)[:,:-1] / nnd)
bc /= vol
return vol, bc
from sfepy.base.timing import Timer
output('smoothing...')
timer = Timer(start=True)
if weights is None:
n_nod = mesh.n_nod
domain = FEDomain('mesh', mesh)
cmesh = domain.cmesh
# initiate all vertices as inner - hierarchy = 2
node_group = nm.ones((n_nod,), dtype=nm.int16) * 2
# boundary vertices - set hierarchy = 4
if bconstr:
# get "vertices of surface"
facets = cmesh.get_surface_facets()
f_verts = cmesh.get_incident(0, facets, cmesh.dim - 1)
node_group[f_verts] = 4
# generate costs matrix
e_verts = cmesh.get_conn(1, 0).indices
fc1, fc2 = e_verts[0::2], e_verts[1::2]
idxs = nm.where(node_group[fc2] >= node_group[fc1])
rows1 = fc1[idxs]
cols1 = fc2[idxs]
idxs = nm.where(node_group[fc1] >= node_group[fc2])
rows2 = fc2[idxs]
cols2 = fc1[idxs]
crows = nm.concatenate((rows1, rows2))
ccols = nm.concatenate((cols1, cols2))
costs = sps.coo_matrix((nm.ones_like(crows), (crows, ccols)),
shape=(n_nod, n_nod),
dtype=nm.double)
# generate weights matrix
idxs = list(range(n_nod))
aux = sps.coo_matrix((1.0 / nm.asarray(costs.sum(1)).squeeze(),
(idxs, idxs)),
shape=(n_nod, n_nod),
dtype=nm.double)
#aux.setdiag(1.0 / costs.sum(1))
weights = (aux.tocsc() * costs.tocsc()).tocsr()
coors = taubin(mesh.coors, weights, lam, mu, n_iter)
output('...done in %.2f s' % timer.stop())
if volume_corr:
output('rescaling...')
timer.start()
volume0, bc = get_volume(mesh.conns[0], mesh.coors)
volume, _ = get_volume(mesh.conns[0], coors)
scale = volume0 / volume
output('scale factor: %.2f' % scale)
coors = (coors - bc) * scale + bc
output('...done in %.2f s' % timer.stop())
return coors
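# Usage sketch: only new coordinates are returned, so the caller updates
# the mesh explicitly, e.g.
#
#     coors = smooth_mesh(mesh, n_iter=10, volume_corr=True)
#     mesh.coors[:] = coors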
def expand2d(mesh2d, dist, rep):
"""
Expand 2D planar mesh into 3D volume,
convert triangular/quad mesh to tetrahedrons/hexahedrons.
Parameters
----------
mesh2d : Mesh
The 2D mesh.
dist : float
The elements size in the 3rd direction.
rep : int
The number of elements in the 3rd direction.
Returns
-------
mesh3d : Mesh
The 3D mesh.
"""
if len(mesh2d.descs) > 1:
raise ValueError('More than one cell type (%s). Not supported!'
% ', '.join(mesh2d.descs))
nel = mesh2d.n_el
nnd = mesh2d.n_nod
et = mesh2d.descs[0]
coors = mesh2d.coors
conn = mesh2d.get_conn(et)
zcoor = nm.arange(rep + 1) * dist
coors3d = nm.hstack([nm.tile(coors, (rep + 1, 1)),
nm.tile(zcoor, (nnd,1)).T.flatten()[:,nm.newaxis]])
ngroups = nm.tile(mesh2d.cmesh.vertex_groups, (rep + 1,))
if et == '2_4':
descs3d = '3_8'
conn3d = nm.zeros((nel * rep, 8), dtype=nm.int32)
mats3d = nm.tile(mesh2d.cmesh.cell_groups, (1, rep)).squeeze()
elif et == '2_3':
descs3d = '3_4'
conn3d = nm.zeros((3 * nel * rep, 4), dtype=nm.int32)
mats3d = nm.tile(mesh2d.cmesh.cell_groups, (1, 3 * rep)).squeeze()
for ii in range(rep):
bgn0 = nnd * ii
bgn1 = bgn0 + nnd
if et == '2_4':
bge0 = nel * ii
bge1 = bge0 + nel
conn3d[bge0:bge1,:4] = conn + bgn0
conn3d[bge0:bge1,4:] = conn + bgn1
elif et == '2_3':
# 0 1 2 5
bge0 = 3 * nel * ii
bge1 = bge0 + nel
conn3d[bge0:bge1,:] = nm.array([conn[:,0] + bgn0,
conn[:,1] + bgn0,
conn[:,2] + bgn0,
conn[:,2] + bgn1]).T
# 0 1 5 4
bge0 += nel
bge1 += nel
conn3d[bge0:bge1,:] = nm.array([conn[:,0] + bgn0,
conn[:,1] + bgn0,
conn[:,2] + bgn1,
conn[:,1] + bgn1]).T
# 0 4 5 3
bge0 += nel
bge1 += nel
conn3d[bge0:bge1,:] = nm.array([conn[:,0] + bgn0,
conn[:,1] + bgn1,
conn[:,2] + bgn1,
conn[:,0] + bgn1]).T
mesh3d = Mesh.from_data('mesh', coors3d, ngroups, [conn3d],
[mats3d], [descs3d])
return mesh3d
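# Usage sketch: extrude a planar mesh into a slab of total thickness 1.0
# built from 10 element layers (0.1 per layer in the z direction):
#
#     mesh3d = expand2d(mesh2d, 0.1, 10)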
|
[
"sfepy.base.compat.factorial",
"sfepy.linalg.utils.dets_fast",
"sfepy.base.timing.Timer",
"sfepy.discrete.fem.Mesh.from_data",
"sfepy.discrete.fem.FEDomain",
"sfepy.base.base.output"
] |
[((655, 690), 'numpy.zeros', 'nm.zeros', (['(nel, nn)'], {'dtype': 'nm.int32'}), '((nel, nn), dtype=nm.int32)\n', (663, 690), True, 'import numpy as nm\n'), ((707, 716), 'six.moves.range', 'range', (['ns'], {}), '(ns)\n', (712, 716), False, 'from six.moves import range\n'), ((808, 833), 'numpy.ascontiguousarray', 'nm.ascontiguousarray', (['out'], {}), '(out)\n', (828, 833), True, 'import numpy as nm\n'), ((4073, 4095), 'sfepy.base.base.output', 'output', (['"""smoothing..."""'], {}), "('smoothing...')\n", (4079, 4095), False, 'from sfepy.base.base import output\n'), ((4108, 4125), 'sfepy.base.timing.Timer', 'Timer', ([], {'start': '(True)'}), '(start=True)\n', (4113, 4125), False, 'from sfepy.base.timing import Timer\n'), ((6992, 7039), 'numpy.tile', 'nm.tile', (['mesh2d.cmesh.vertex_groups', '(rep + 1,)'], {}), '(mesh2d.cmesh.vertex_groups, (rep + 1,))\n', (6999, 7039), True, 'import numpy as nm\n'), ((7413, 7423), 'six.moves.range', 'range', (['rep'], {}), '(rep)\n', (7418, 7423), False, 'from six.moves import range\n'), ((8669, 8740), 'sfepy.discrete.fem.Mesh.from_data', 'Mesh.from_data', (['"""mesh"""', 'coors3d', 'ngroups', '[conn3d]', '[mats3d]', '[descs3d]'], {}), "('mesh', coors3d, ngroups, [conn3d], [mats3d], [descs3d])\n", (8683, 8740), False, 'from sfepy.discrete.fem import Mesh, FEDomain\n'), ((309, 407), 'numpy.array', 'nm.array', (['[[0, 2, 3, 6], [0, 3, 7, 6], [0, 7, 4, 6], [0, 5, 6, 4], [1, 5, 6, 0], [1, \n 6, 2, 0]]'], {}), '([[0, 2, 3, 6], [0, 3, 7, 6], [0, 7, 4, 6], [0, 5, 6, 4], [1, 5, 6,\n 0], [1, 6, 2, 0]])\n', (317, 407), True, 'import numpy as nm\n'), ((549, 581), 'numpy.array', 'nm.array', (['[[0, 1, 2], [0, 2, 3]]'], {}), '([[0, 1, 2], [0, 2, 3]])\n', (557, 581), True, 'import numpy as nm\n'), ((733, 755), 'numpy.arange', 'nm.arange', (['ii', 'nel', 'ns'], {}), '(ii, nel, ns)\n', (742, 755), True, 'import numpy as nm\n'), ((1360, 1419), 'sfepy.base.base.output', 'output', (["('initial mesh: %d elements' % nelo)"], {'verbose': 'verbose'}), "('initial mesh: %d elements' % nelo, verbose=verbose)\n", (1366, 1419), False, 'from sfepy.base.base import output\n'), ((1520, 1557), 'numpy.repeat', 'nm.repeat', (['mesh.cmesh.cell_groups', 'nn'], {}), '(mesh.cmesh.cell_groups, nn)\n', (1529, 1557), True, 'import numpy as nm\n'), ((1567, 1636), 'sfepy.base.base.output', 'output', (["('new mesh: %d elements' % new_conns.shape[0])"], {'verbose': 'verbose'}), "('new mesh: %d elements' % new_conns.shape[0], verbose=verbose)\n", (1573, 1636), False, 'from sfepy.base.base import output\n'), ((1652, 1759), 'sfepy.discrete.fem.Mesh.from_data', 'Mesh.from_data', (['mesh.name', 'mesh.coors', 'mesh.cmesh.vertex_groups', '[new_conns]', '[new_cgroups]', '[new_desc]'], {}), '(mesh.name, mesh.coors, mesh.cmesh.vertex_groups, [new_conns],\n [new_cgroups], [new_desc])\n', (1666, 1759), False, 'from sfepy.discrete.fem import Mesh, FEDomain\n'), ((3164, 3177), 'six.moves.range', 'range', (['n_iter'], {}), '(n_iter)\n', (3169, 3177), False, 'from six.moves import range\n'), ((3790, 3839), 'numpy.ones', 'nm.ones', (['(nel, dim + 1, dim + 1)'], {'dtype': 'nm.double'}), '((nel, dim + 1, dim + 1), dtype=nm.double)\n', (3797, 3839), True, 'import numpy as nm\n'), ((4195, 4217), 'sfepy.discrete.fem.FEDomain', 'FEDomain', (['"""mesh"""', 'mesh'], {}), "('mesh', mesh)\n", (4203, 4217), False, 'from sfepy.discrete.fem import Mesh, FEDomain\n'), ((4766, 4810), 'numpy.where', 'nm.where', (['(node_group[fc2] >= node_group[fc1])'], {}), '(node_group[fc2] >= node_group[fc1])\n', (4774, 4810), True, 'import numpy as nm\n'), ((4878, 4922), 'numpy.where', 'nm.where', (['(node_group[fc1] >= node_group[fc2])'], {}), '(node_group[fc1] >= node_group[fc2])\n', (4886, 4922), True, 'import numpy as nm\n'), ((4991, 5021), 'numpy.concatenate', 'nm.concatenate', (['(rows1, rows2)'], {}), '((rows1, rows2))\n', (5005, 5021), True, 'import numpy as nm\n'), ((5038, 5068), 'numpy.concatenate', 'nm.concatenate', (['(cols1, cols2)'], {}), '((cols1, cols2))\n', (5052, 5068), True, 'import numpy as nm\n'), ((5756, 5778), 'sfepy.base.base.output', 'output', (['"""rescaling..."""'], {}), "('rescaling...')\n", (5762, 5778), False, 'from sfepy.base.base import output\n'), ((5956, 5992), 'sfepy.base.base.output', 'output', (["('scale factor: %.2f' % scale)"], {}), "('scale factor: %.2f' % scale)\n", (5962, 5992), False, 'from sfepy.base.base import output\n'), ((6820, 6838), 'numpy.arange', 'nm.arange', (['(rep + 1)'], {}), '(rep + 1)\n', (6829, 6838), True, 'import numpy as nm\n'), ((7102, 7142), 'numpy.zeros', 'nm.zeros', (['(nel * rep, 8)'], {'dtype': 'nm.int32'}), '((nel * rep, 8), dtype=nm.int32)\n', (7110, 7142), True, 'import numpy as nm\n'), ((3715, 3729), 'sfepy.base.compat.factorial', 'factorial', (['dim'], {}), '(dim)\n', (3724, 3729), False, 'from sfepy.base.compat import factorial\n'), ((3893, 3907), 'sfepy.linalg.utils.dets_fast', 'dets_fast', (['mtx'], {}), '(mtx)\n', (3902, 3907), False, 'from sfepy.linalg.utils import dets_fast\n'), ((4326, 4359), 'numpy.ones', 'nm.ones', (['(n_nod,)'], {'dtype': 'nm.int16'}), '((n_nod,), dtype=nm.int16)\n', (4333, 4359), True, 'import numpy as nm\n'), ((5295, 5307), 'six.moves.range', 'range', (['n_nod'], {}), '(n_nod)\n', (5300, 5307), False, 'from six.moves import range\n'), ((6871, 6899), 'numpy.tile', 'nm.tile', (['coors', '(rep + 1, 1)'], {}), '(coors, (rep + 1, 1))\n', (6878, 6899), True, 'import numpy as nm\n'), ((7278, 7322), 'numpy.zeros', 'nm.zeros', (['(3 * nel * rep, 4)'], {'dtype': 'nm.int32'}), '((3 * nel * rep, 4), dtype=nm.int32)\n', (7286, 7322), True, 'import numpy as nm\n'), ((3012, 3031), 'scipy.sparse.identity', 'sps.identity', (['n_nod'], {}), '(n_nod)\n', (3024, 3031), True, 'import scipy.sparse as sps\n'), ((3240, 3253), 'numpy.mod', 'nm.mod', (['ii', '(2)'], {}), '(ii, 2)\n', (3246, 3253), True, 'import numpy as nm\n'), ((5101, 5120), 'numpy.ones_like', 'nm.ones_like', (['crows'], {}), '(crows)\n', (5113, 5120), True, 'import numpy as nm\n'), ((7160, 7203), 'numpy.tile', 'nm.tile', (['mesh2d.cmesh.cell_groups', '(1, rep)'], {}), '(mesh2d.cmesh.cell_groups, (1, rep))\n', (7167, 7203), True, 'import numpy as nm\n'), ((7340, 7387), 'numpy.tile', 'nm.tile', (['mesh2d.cmesh.cell_groups', '(1, 3 * rep)'], {}), '(mesh2d.cmesh.cell_groups, (1, 3 * rep))\n', (7347, 7387), True, 'import numpy as nm\n'), ((7796, 7887), 'numpy.array', 'nm.array', (['[conn[:, 0] + bgn0, conn[:, 1] + bgn0, conn[:, 2] + bgn0, conn[:, 2] + bgn1]'], {}), '([conn[:, 0] + bgn0, conn[:, 1] + bgn0, conn[:, 2] + bgn0, conn[:, \n 2] + bgn1])\n', (7804, 7887), True, 'import numpy as nm\n'), ((8117, 8208), 'numpy.array', 'nm.array', (['[conn[:, 0] + bgn0, conn[:, 1] + bgn0, conn[:, 2] + bgn1, conn[:, 1] + bgn1]'], {}), '([conn[:, 0] + bgn0, conn[:, 1] + bgn0, conn[:, 2] + bgn1, conn[:, \n 1] + bgn1])\n', (8125, 8208), True, 'import numpy as nm\n'), ((8438, 8529), 'numpy.array', 'nm.array', (['[conn[:, 0] + bgn0, conn[:, 1] + bgn1, conn[:, 2] + bgn1, conn[:, 0] + bgn1]'], {}), '([conn[:, 0] + bgn0, conn[:, 1] + bgn1, conn[:, 2] + bgn1, conn[:, \n 0] + bgn1])\n', (8446, 8529), True, 'import numpy as nm\n'), ((6926, 6950), 'numpy.tile', 'nm.tile', (['zcoor', '(nnd, 1)'], {}), '(zcoor, (nnd, 1))\n', (6933, 6950), True, 'import numpy as nm\n')]
|
import os
import math
import numpy as np
import six
import megengine._internal as mgb
from enum import Enum
from py_proto import mace_pb2
from transform import base_converter
from transform.base_converter import PoolingType
from transform.base_converter import ActivationType
from transform.base_converter import EltwiseType
from transform.base_converter import FrameworkType
from transform.base_converter import ReduceType
from transform.base_converter import DataFormat
from transform.base_converter import MaceOp
from transform.base_converter import MaceKeyword
from transform.base_converter import ConverterUtil
from transform.base_converter import RoundMode
from utils.util import mace_check
mge_kernel_h_str = "window_h"
mge_kernel_w_str = "window_w"
mge_stride_h_str = "stride_h"
mge_stride_w_str = "stride_w"
mge_pad_h_str = "pad_h"
mge_pad_w_str = "pad_w"
mge_dilate_h_str = "dilate_h"
mge_dilate_w_str = "dilate_w"
MGESupportedOps = [
"AxisAddRemove",
"BatchNormForward",
"Concat",
"ConvolutionForward",
"ConvolutionBackwardData",
"Dimshuffle",
"Elemwise",
"GetVarShape",
"Host2DeviceCopy",
"Identity",
"MarkNoBroadcastElemwise",
"MatrixMul",
"PoolingForward",
"Reduce",
"Reshape",
"SharedDeviceTensor",
"Subtensor",
]
MGEOpType = Enum("MGEOpType", [(op, op) for op in MGESupportedOps], type=str)
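# Thanks to the str mixin, each member compares equal to its own name,
# e.g. MGEOpType.Reshape == "Reshape" is True; the converter below keys
# its dispatch table by MGEOpType.<op>.name.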
def get_symvar_value(mge_symvar):
if mge_symvar.inferred_value is not None:
val = mge_symvar.inferred_value
else:
cg = mge_symvar.owner_graph
func = cg.compile_outonly(mge_symvar)
val = func()
return val
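# Note: when megengine has not inferred the value statically, the fallback
# above compiles a one-off graph that outputs just this var and runs it;
# slow, but typically only hit for constant/weight tensors during
# conversion.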
def is_consumer_group_conv(mge_symvar, var2oprs, map_oprs):
consumer_ids = var2oprs[mge_symvar.id]
n_consumers = len(consumer_ids)
for consumer_id in consumer_ids:
consumer_op = map_oprs[consumer_id[0]]
if (mgb.cgtools.get_opr_type(consumer_op)
in ("ConvolutionForward", "ConvolutionBackwardData")
and consumer_op.params["sparse"] == "GROUP"):
mace_check(n_consumers == 1,
"This tensor should only feed depthwise conv/deconv")
return True
return False
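# A GROUP-sparse conv/deconv weight in megengine is 5D, i.e.
# (groups, out_ch_per_group, in_ch_per_group, kh, kw); convert_tensors()
# below relies on this predicate to reshape such a weight to the 4D
# (1, groups, kh, kw) layout used for depthwise kernels.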
class MegengineConverter(base_converter.ConverterInterface):
"""A class for convert megengine dumped model to mace model."""
compute_format_type = {
"NCHW": DataFormat.NCHW,
"NHWC": DataFormat.NHWC,
"DEFAULT": DataFormat.NCHW,
}
reduce_math_type = {
"SUM": ReduceType.SUM,
"PROD": ReduceType.PROD,
"MIN": ReduceType.MIN,
"MAX": ReduceType.MAX,
}
    # SQE_DIFF, CLIP, SIGN may be needed
eltwise_type = {
"ADD": EltwiseType.SUM,
"SUB": EltwiseType.SUB,
"MUL": EltwiseType.PROD,
"TRUE_DIV": EltwiseType.DIV,
"MIN": EltwiseType.MIN,
"MAX": EltwiseType.MAX,
"NEGATE": EltwiseType.NEG,
"ABS": EltwiseType.ABS,
"POW": EltwiseType.POW,
"EQ": EltwiseType.EQUAL,
"FLOOR_DIV": EltwiseType.FLOOR_DIV,
"EXP": EltwiseType.POW,
}
activation_type = {
"RELU": ActivationType.RELU,
"TANH": ActivationType.TANH,
"SIGMOID": ActivationType.SIGMOID,
}
def __init__(self, option, src_model_file):
self._op_converters = {
MGEOpType.AxisAddRemove.name: self.convert_axisaddrm,
MGEOpType.BatchNormForward.name: self.convert_batchnorm,
MGEOpType.Concat.name: self.convert_concat,
MGEOpType.ConvolutionForward.name: self.convert_conv2d,
MGEOpType.ConvolutionBackwardData.name: self.convert_deconv2d,
MGEOpType.Dimshuffle.name: self.convert_dimshuffle,
MGEOpType.Elemwise.name: self.convert_elemwise,
MGEOpType.GetVarShape.name: self.convert_shape,
MGEOpType.Host2DeviceCopy.name: self.convert_nop,
MGEOpType.Identity.name: self.convert_identity,
MGEOpType.MarkNoBroadcastElemwise.name: self.convert_identity,
MGEOpType.MatrixMul.name: self.convert_matmul,
MGEOpType.PoolingForward.name: self.convert_pooling,
MGEOpType.Reduce.name: self.convert_reduce,
MGEOpType.Reshape.name: self.convert_reshape,
MGEOpType.SharedDeviceTensor.name: self.convert_nop,
MGEOpType.Subtensor.name: self.convert_subtensor,
}
self._option = option
self._mace_net_def = mace_pb2.NetDef()
ConverterUtil.set_filter_format(self._mace_net_def, DataFormat.OIHW)
ConverterUtil.add_data_format_arg(self._mace_net_def, DataFormat.NCHW)
cg, _, outputs = mgb.load_comp_graph_from_file(src_model_file)
map_oprs, _, var2oprs, *_ = mgb.cgtools.graph_traversal(outputs)
        # prune the second input of reshape ops, because it introduces
        # several extra ops and may increase the overhead
operators = mgb.cgtools.get_oprs_seq(outputs, prune_reshape=True)
self._mge_cg = cg
self._mge_operators = operators
self._mge_map_oprs = map_oprs
self._mge_var2oprs = var2oprs
self._skip_tensors = set()
self._bn_statistis_tensors = {}
def run(self):
self.convert_ops()
self.replace_input_output_tensor_name()
return self._mace_net_def
    # only change the input/output tensor names for the whole model
def replace_input_output_tensor_name(self):
for op in self._mace_net_def.op:
for i in six.moves.range(len(op.input)):
if "," in op.input[i]:
op_name = op.input[i]
op_name = op_name.replace(",", "#")
if (op_name in self._option.input_nodes or
op_name in self._option.output_nodes):
op.input[i] = op_name
for i in six.moves.range(len(op.output)):
if "," in op.output[i]:
op_name = op.output[i]
op_name = op_name.replace(",", "#")
if op_name in self._option.output_nodes:
op.output[i] = op_name
# this method will be called by convert_conv2d/deconv2d and convert_pooling
@staticmethod
def add_stride_pad_kernel_arg(params, op_def):
stride = [params[mge_stride_h_str], params[mge_stride_w_str]]
pad = [params[mge_pad_h_str] * 2, params[mge_pad_w_str] * 2]
strides_arg = op_def.arg.add()
strides_arg.name = MaceKeyword.mace_strides_str
strides_arg.ints.extend(stride)
padding_arg = op_def.arg.add()
padding_arg.name = MaceKeyword.mace_padding_values_str
padding_arg.ints.extend(pad)
if op_def.type == MaceOp.Pooling.name:
kernel = [params[mge_kernel_h_str], params[mge_kernel_w_str]]
kernels_arg = op_def.arg.add()
kernels_arg.name = MaceKeyword.mace_kernel_str
kernels_arg.ints.extend(kernel)
if op_def.type in (MaceOp.Conv2D.name, MaceOp.DepthwiseConv2d.name,
MaceOp.Deconv2D.name,
MaceOp.DepthwiseDeconv2d.name):
dilation = [params[mge_dilate_h_str], params[mge_dilate_w_str]]
dilation_arg = op_def.arg.add()
dilation_arg.name = MaceKeyword.mace_dilations_str
dilation_arg.ints.extend(dilation)
def convert_ops(self):
for mge_op in self._mge_operators:
opr_type = mgb.cgtools.get_opr_type(mge_op)
# some reshape operators provide data for batchnorm
if opr_type == "Reshape":
output = mge_op.outputs[0]
next_ops = self._mge_var2oprs[output.id]
if len(next_ops) == 1:
(next_op_id, _) = next_ops[0]
next_op = self._mge_map_oprs[next_op_id]
if mgb.cgtools.get_opr_type(next_op) == "BatchNormForward":
self._skip_tensors.update(
[inp.name for inp in mge_op.inputs])
# using output name to address input symbol var
self._bn_statistis_tensors[mge_op.outputs[0].name] = \
mge_op.inputs[0]
# skip this reshape op
continue
self._op_converters[opr_type](mge_op)
self.convert_tensors()
def add_tensor(self, name, shape, data_type, value):
tensor = self._mace_net_def.tensors.add()
tensor.name = name
tensor.dims.extend(list(shape))
tensor.data_type = data_type
if data_type == mace_pb2.DT_INT32:
tensor.int32_data.extend(value)
else:
tensor.float_data.extend(value)
# convert all pre-calculated and constant tensors
def convert_tensors(self):
for mge_op in self._mge_operators:
type_opr = mgb.cgtools.get_opr_type(mge_op)
# all tensors generated by SharedDeviceTensor op
if type_opr == "SharedDeviceTensor":
output = mge_op.outputs[0]
if output.name not in self._skip_tensors:
nshape = output.imm_shape
# tensor used for depthwise conv/deconv should be reshaped
for_group_conv = is_consumer_group_conv(
output, self._mge_var2oprs, self._mge_map_oprs
)
if for_group_conv:
nshape = (
1,
output.imm_shape[0],
output.imm_shape[3],
output.imm_shape[4],
)
self.add_tensor(
output.name,
nshape,
mace_pb2.DT_FLOAT,
get_symvar_value(output).flatten())
else:
# handle all constant values
for const_tensor in mge_op.inputs:
if (const_tensor.inferred_value is not None
and const_tensor.name not in self._skip_tensors):
self.add_tensor(
const_tensor.name,
const_tensor.imm_shape,
mace_pb2.DT_INT32,
const_tensor.inferred_value.flatten())
def convert_nop(self, mge_op):
pass
def convert_general_op(self, mge_op):
op = self._mace_net_def.op.add()
op.name = mge_op.name
op.type = mgb.cgtools.get_opr_type(mge_op)
op.input.extend([mge_input.name for mge_input in mge_op.inputs])
op.output.extend([mge_output.name for mge_output in mge_op.outputs])
for mge_output in mge_op.outputs:
output_shape = op.output_shape.add()
output_shape.dims.extend(mge_output.imm_shape)
data_type_arg = op.arg.add()
data_type_arg.name = "T"
data_type_arg.i = self._option.data_type
framework_type_arg = op.arg.add()
framework_type_arg.name = MaceKeyword.mace_framework_type_str
framework_type_arg.i = FrameworkType.MEGENGINE.value
# check compute format of megengine
compute_format = DataFormat.NCHW
try:
if "format" in mge_op.params.keys():
compute_format = self.compute_format_type[
mge_op.params["format"]
]
except AttributeError:
compute_format = DataFormat.NCHW
ConverterUtil.add_data_format_arg(op, compute_format)
return op
def convert_identity(self, mge_op):
op = self.convert_general_op(mge_op)
op.type = MaceOp.Identity.name
def convert_conv2d(self, mge_op):
op = self.convert_general_op(mge_op)
if mge_op.params["sparse"] == "GROUP":
# weight shape in group conv2d:
# (groups, out_channel//groups, in_channels//groups, *kernel_size)
groups_divisible = mge_op.inputs[1].imm_shape[2]
mace_check(
groups_divisible == 1,
"Mace does not support group convolution yet",
)
op.type = MaceOp.DepthwiseConv2d.name
elif mge_op.params["sparse"] == "DENSE":
op.type = MaceOp.Conv2D.name
else:
raise Exception("Unknown sparse mode")
mace_check(
mge_op.params["mode"] != "CONVOLUTION",
"Mace does not support CONVOLUTION computation mode yet",
)
self.add_stride_pad_kernel_arg(mge_op.params, op)
del op.output[1:]
del op.output_shape[1:]
def convert_deconv2d(self, mge_op):
op = self.convert_general_op(mge_op)
if mge_op.params["sparse"] == "GROUP":
            # weight shape in group deconv2d:
            # (groups, out_channel//groups, in_channels//groups, *kernel_size)
groups_divisible = mge_op.inputs[0].imm_shape[2]
mace_check(
groups_divisible == 1,
"Mace does not support group deconvolution yet",
)
op.type = MaceOp.DepthwiseConv2d.name
elif mge_op.params["sparse"] == "DENSE":
op.type = MaceOp.Deconv2D.name
else:
mace_check(False, "Unknown sparse mode")
mace_check(
mge_op.params["mode"] != "CONVOLUTION",
"Mace does not support CONVOLUTION computation mode yet",
)
self.add_stride_pad_kernel_arg(mge_op.params, op)
        # MegEngine orders the deconv inputs differently from what MACE
        # expects; swap the first two inputs to match
        swapped_list = [op.input[1], op.input[0]]
        del op.input[:]
        op.input.extend(swapped_list)
del op.output[1:]
del op.output_shape[1:]
def convert_dimshuffle(self, mge_op):
op = self.convert_general_op(mge_op)
op.type = MaceOp.Transpose.name
dims_arg = op.arg.add()
dims_arg.name = MaceKeyword.mace_dims_str
dims_arg.ints.extend(mge_op.params["pattern"])
def convert_math_elemwise(self, mge_op):
op = self.convert_general_op(mge_op)
op.type = MaceOp.Eltwise.name
type_arg = op.arg.add()
type_arg.name = MaceKeyword.mace_element_type_str
type_arg.i = self.eltwise_type[mge_op.params["mode"]].value
        # EXP in MegEngine always uses np.e as the base
if mge_op.params["mode"] == "EXP":
exp_tensor_name = mge_op.name + "_exp_base"
exp_shape = mge_op.outputs[0].imm_shape
exp_value = (np.e * np.ones(exp_shape)).flatten()
self.add_tensor(
exp_tensor_name, exp_shape, mace_pb2.DT_FLOAT, exp_value
)
del op.input[0]
op.input.extend([exp_tensor_name, mge_op.inputs[0].name])
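            # exp(x) is thus expressed with the constant base e as the
            # first operand, i.e. pow(e, x) (assuming EXP maps to a
            # power-style eltwise)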
def convert_activation(self, mge_op):
op = self.convert_general_op(mge_op)
op.type = MaceOp.Activation.name
type_arg = op.arg.add()
type_arg.name = MaceKeyword.mace_activation_type_str
type_arg.s = six.b(self.activation_type[mge_op.params["mode"]].name)
def convert_elemwise(self, mge_op):
mode = mge_op.params["mode"]
if mode in self.eltwise_type:
self.convert_math_elemwise(mge_op)
else:
self.convert_activation(mge_op)
def convert_pooling(self, mge_op):
op = self.convert_general_op(mge_op)
op.type = MaceOp.Pooling.name
pool_type_arg = op.arg.add()
pool_type_arg.name = MaceKeyword.mace_pooling_type_str
round_mode_arg = op.arg.add()
round_mode_arg.name = MaceKeyword.mace_round_mode_str
round_mode_arg.i = RoundMode.FLOOR.value
# check the case of counting include padding
mode = mge_op.params["mode"]
if mode == "AVERAGE_COUNT_EXCLUDE_PADDING" or \
(mode == "AVERAGE" and mge_op.params["pad_w"] == 0 and
mge_op.params["pad_h"] == 0):
pool_type_arg.i = PoolingType.AVG.value
elif mode == "MAX":
pool_type_arg.i = PoolingType.MAX.value
else:
mace_check(False,
"AVERAGE pooling should not count padding values")
self.add_stride_pad_kernel_arg(mge_op.params, op)
# delete workspace output, it's useless
del op.output[1:]
del op.output_shape[1:]
def convert_matmul(self, mge_op):
op = self.convert_general_op(mge_op)
op.type = MaceOp.MatMul.name
transpose_a = mge_op.params["transposeA"]
transpose_a_arg = op.arg.add()
transpose_a_arg.name = MaceKeyword.mace_transpose_a_str
transpose_a_arg.i = int(transpose_a)
transpose_b = mge_op.params["transposeB"]
transpose_b_arg = op.arg.add()
transpose_b_arg.name = MaceKeyword.mace_transpose_b_str
transpose_b_arg.i = int(transpose_b)
del op.output[1:]
del op.output_shape[1:]
def convert_reshape(self, mge_op):
op = self.convert_general_op(mge_op)
op.type = MaceOp.Reshape.name
# just use the output shape
del op.input[1]
t_shape = list(mge_op.outputs[0].imm_shape)
shape_tensor_name = mge_op.name + "_dest_shape"
self.add_tensor(
shape_tensor_name, [len(t_shape)], mace_pb2.DT_INT32, t_shape
)
op.input.extend([shape_tensor_name])
    # AxisAddRemove usually follows a reduce operator to drop dimensions
    # of size 1. Following the operator literally is hard: axis-add and
    # axis-remove may appear in the same operator, so for complicated
    # use-cases a plain reshape to the known output shape is easier.
def convert_axisaddrm(self, mge_op):
op = self.convert_general_op(mge_op)
if mge_op.params["nr_desc"] == 1:
if mge_op.params["desc"][0]["method"] == 0:
op.type = MaceOp.ExpandDims.name
else:
op.type = MaceOp.Squeeze.name
axis_arg = op.arg.add()
axis_arg.name = MaceKeyword.mace_axis_str
axis_arg.i = mge_op.params["desc"][0]["axisnum"]
else:
op.type = MaceOp.Reshape.name
dest_shape_tensor_name = op.name + "_dest_shape"
dest_shape = mge_op.outputs[0].imm_shape
self.add_tensor(
dest_shape_tensor_name,
(len(dest_shape),),
mace_pb2.DT_INT32,
dest_shape,
)
op.input.extend([dest_shape_tensor_name])
def convert_reduce(self, mge_op):
op = self.convert_general_op(mge_op)
op.type = MaceOp.Reduce.name
reduce_type_arg = op.arg.add()
reduce_type_arg.name = MaceKeyword.mace_reduce_type_str
reduce_type_arg.i = self.reduce_math_type[mge_op.params["mode"]].value
        # in MegEngine, axis is always a single int, never a list
axis_arg = op.arg.add()
axis_arg.name = MaceKeyword.mace_axis_str
axis_arg.ints.append(mge_op.params["axis"])
# megengine will always keep dims in Reduce operator
# dim removal will be done by operator AxisAddRemove
keep_dims_arg = op.arg.add()
keep_dims_arg.name = MaceKeyword.mace_keepdims_str
keep_dims_arg.i = 1
del op.output[1:]
del op.output_shape[1:]
def convert_concat(self, mge_op):
op = self.convert_general_op(mge_op)
op.type = MaceOp.Concat.name
axis_arg = op.arg.add()
axis_arg.name = MaceKeyword.mace_axis_str
axis_arg.i = mge_op.params["axis"]
def convert_batchnorm(self, mge_op):
op = self.convert_general_op(mge_op)
op.type = MaceOp.BatchNorm.name
gamma_value = get_symvar_value(
self._bn_statistis_tensors[mge_op.inputs[1].name]
).flatten()
beta_value = get_symvar_value(
self._bn_statistis_tensors[mge_op.inputs[2].name]
).flatten()
mean_value = get_symvar_value(mge_op.inputs[3]).flatten()
var_value = get_symvar_value(mge_op.inputs[4]).flatten()
epsilon_value = 1e-5
scale_name = mge_op.name + "_scale"
offset_name = mge_op.name + "_offset"
scale_value = (1.0 / np.vectorize(math.sqrt)(
var_value + epsilon_value)) * gamma_value
offset_value = (-mean_value * scale_value) + beta_value
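        # folded batchnorm: y = gamma * (x - mean) / sqrt(var + eps) + beta
        #                     = scale * x + offset with the values above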
self.add_tensor(
scale_name, scale_value.shape, mace_pb2.DT_FLOAT, scale_value
)
self.add_tensor(
offset_name, offset_value.shape, mace_pb2.DT_FLOAT, offset_value
)
self._skip_tensors.update([inp.name for inp in mge_op.inputs][1:])
del op.input[1:]
op.input.extend([scale_name, offset_name])
# outputs[4] is the correct output
del op.output[-1:]
del op.output_shape[-1:]
del op.output[:4]
del op.output_shape[:4]
def convert_shape(self, mge_op):
op = self.convert_general_op(mge_op)
op.type = MaceOp.Shape.name
op.output_type.extend([mace_pb2.DT_INT32])
    # the axes of a subtensor must be constants
    # subtensor in MegEngine implements numpy-like indexing
def convert_subtensor(self, mge_op):
op1 = self.convert_general_op(mge_op)
op1.type = MaceOp.StridedSlice.name
axis = mge_op.inputs[1].inferred_value
t_shape = list(mge_op.inputs[0].imm_shape)
begin_tensor_name = mge_op.name + "_begin"
end_tensor_name = mge_op.name + "_end"
stride_tensor_name = mge_op.name + "_stride"
begin_tensor_shape = (len(t_shape),)
end_tensor_shape = (len(t_shape),)
stride_tensor_shape = (len(t_shape),)
begin_vals = [0] * len(t_shape)
end_vals = [shapei for shapei in t_shape]
stride_vals = [1] * len(t_shape)
def check_val(sym_var):
try:
val = sym_var.inferred_value[0]
except TypeError:
mace_check(
False, "you should feed const values for subtensor axis"
)
return val
squeeze_dims = []
idx = len(mge_op.inputs) - 1
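        # walk the constant index inputs backwards, matching each against
        # the per-axis descriptors in the order step, end, begin, idx --
        # the reverse of how MegEngine packs the Subtensor operands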
while idx:
val = check_val(mge_op.inputs[idx])
for ai in mge_op.params[::-1]:
ai_idx = ai["axis"]
if ai["step"] > 0:
stride_vals[ai_idx] = val
idx -= 1
if idx == 0:
break
val = check_val(mge_op.inputs[idx])
if ai["end"] > 0:
if val < 0:
val = t_shape[ai_idx] + val
end_vals[ai_idx] = val
idx -= 1
if idx == 0:
break
val = check_val(mge_op.inputs[idx])
if ai["begin"] > 0:
if val < 0:
val = t_shape[ai_idx] + val
begin_vals[ai_idx] = val
idx -= 1
if idx == 0:
break
val = check_val(mge_op.inputs[idx])
if ai["idx"] > 0:
if val < 0:
val = t_shape[ai_idx] + val
squeeze_dims.append(ai_idx)
begin_vals[ai_idx] = val
end_vals[ai_idx] = val + 1
idx -= 1
if idx == 0:
break
val = check_val(mge_op.inputs[idx])
for ai_idx in range(len(t_shape)):
t_shape[ai_idx] = math.ceil(
(end_vals[ai_idx] - begin_vals[ai_idx]) / stride_vals[ai_idx]
)
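            # e.g. (hypothetical) begin 1, end 7, stride 2 on an axis of
            # length 7 gives ceil((7 - 1) / 2) = 3 remaining elements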
self.add_tensor(
begin_tensor_name,
begin_tensor_shape,
mace_pb2.DT_INT32,
begin_vals,
)
self.add_tensor(
end_tensor_name, end_tensor_shape, mace_pb2.DT_INT32, end_vals
)
self.add_tensor(
stride_tensor_name,
stride_tensor_shape,
mace_pb2.DT_INT32,
stride_vals,
)
del op1.input[1:]
op1.input.extend(
[begin_tensor_name, end_tensor_name, stride_tensor_name]
)
if len(squeeze_dims) > 0:
# create squeeze op to remove shape=1 dims
mid_output_name = mge_op.name + "_mid_reshape"
del op1.output[0]
op1.output.extend([mid_output_name])
output_shape = op1.output_shape[0]
del output_shape.dims[:]
output_shape.dims.extend(t_shape)
op2 = self._mace_net_def.op.add()
op2.type = MaceOp.Squeeze.name
op2.name = mge_op.name + "_squeeze"
data_type_arg = op2.arg.add()
data_type_arg.name = "T"
data_type_arg.i = self._option.data_type
framework_type_arg = op2.arg.add()
framework_type_arg.name = MaceKeyword.mace_framework_type_str
framework_type_arg.i = FrameworkType.MEGENGINE.value
ConverterUtil.add_data_format_arg(op2, DataFormat.NCHW)
op2.input.extend([mid_output_name])
op2.output.extend([mge_op.outputs[0].name])
output_shape = op2.output_shape.add()
output_shape.dims.extend(mge_op.outputs[0].imm_shape)
axis_arg = op2.arg.add()
axis_arg.name = MaceKeyword.mace_axis_str
axis_arg.ints.extend(squeeze_dims)
|
[
"megengine._internal.cgtools.graph_traversal",
"megengine._internal.cgtools.get_opr_type",
"megengine._internal.cgtools.get_oprs_seq",
"megengine._internal.load_comp_graph_from_file"
] |
[((1313, 1378), 'enum.Enum', 'Enum', (['"""MGEOpType"""', '[(op, op) for op in MGESupportedOps]'], {'type': 'str'}), "('MGEOpType', [(op, op) for op in MGESupportedOps], type=str)\n", (1317, 1378), False, 'from enum import Enum\n'), ((4473, 4490), 'py_proto.mace_pb2.NetDef', 'mace_pb2.NetDef', ([], {}), '()\n', (4488, 4490), False, 'from py_proto import mace_pb2\n'), ((4499, 4567), 'transform.base_converter.ConverterUtil.set_filter_format', 'ConverterUtil.set_filter_format', (['self._mace_net_def', 'DataFormat.OIHW'], {}), '(self._mace_net_def, DataFormat.OIHW)\n', (4530, 4567), False, 'from transform.base_converter import ConverterUtil\n'), ((4576, 4646), 'transform.base_converter.ConverterUtil.add_data_format_arg', 'ConverterUtil.add_data_format_arg', (['self._mace_net_def', 'DataFormat.NCHW'], {}), '(self._mace_net_def, DataFormat.NCHW)\n', (4609, 4646), False, 'from transform.base_converter import ConverterUtil\n'), ((4673, 4718), 'megengine._internal.load_comp_graph_from_file', 'mgb.load_comp_graph_from_file', (['src_model_file'], {}), '(src_model_file)\n', (4702, 4718), True, 'import megengine._internal as mgb\n'), ((4755, 4791), 'megengine._internal.cgtools.graph_traversal', 'mgb.cgtools.graph_traversal', (['outputs'], {}), '(outputs)\n', (4782, 4791), True, 'import megengine._internal as mgb\n'), ((4923, 4976), 'megengine._internal.cgtools.get_oprs_seq', 'mgb.cgtools.get_oprs_seq', (['outputs'], {'prune_reshape': '(True)'}), '(outputs, prune_reshape=True)\n', (4947, 4976), True, 'import megengine._internal as mgb\n'), ((10640, 10672), 'megengine._internal.cgtools.get_opr_type', 'mgb.cgtools.get_opr_type', (['mge_op'], {}), '(mge_op)\n', (10664, 10672), True, 'import megengine._internal as mgb\n'), ((11620, 11673), 'transform.base_converter.ConverterUtil.add_data_format_arg', 'ConverterUtil.add_data_format_arg', (['op', 'compute_format'], {}), '(op, compute_format)\n', (11653, 11673), False, 'from transform.base_converter import ConverterUtil\n'), ((12488, 12600), 'utils.util.mace_check', 'mace_check', (["(mge_op.params['mode'] != 'CONVOLUTION')", '"""Mace does not support CONVOLUTION computation mode yet"""'], {}), "(mge_op.params['mode'] != 'CONVOLUTION',\n 'Mace does not support CONVOLUTION computation mode yet')\n", (12498, 12600), False, 'from utils.util import mace_check\n'), ((13428, 13540), 'utils.util.mace_check', 'mace_check', (["(mge_op.params['mode'] != 'CONVOLUTION')", '"""Mace does not support CONVOLUTION computation mode yet"""'], {}), "(mge_op.params['mode'] != 'CONVOLUTION',\n 'Mace does not support CONVOLUTION computation mode yet')\n", (13438, 13540), False, 'from utils.util import mace_check\n'), ((15136, 15191), 'six.b', 'six.b', (["self.activation_type[mge_op.params['mode']].name"], {}), "(self.activation_type[mge_op.params['mode']].name)\n", (15141, 15191), False, 'import six\n'), ((2048, 2134), 'utils.util.mace_check', 'mace_check', (['(n_consumers == 1)', '"""This tensor should only feed depthwise conv/deconv"""'], {}), "(n_consumers == 1,\n 'This tensor should only feed depthwise conv/deconv')\n", (2058, 2134), False, 'from utils.util import mace_check\n'), ((7486, 7518), 'megengine._internal.cgtools.get_opr_type', 'mgb.cgtools.get_opr_type', (['mge_op'], {}), '(mge_op)\n', (7510, 7518), True, 'import megengine._internal as mgb\n'), ((8937, 8969), 'megengine._internal.cgtools.get_opr_type', 'mgb.cgtools.get_opr_type', (['mge_op'], {}), '(mge_op)\n', (8961, 8969), True, 'import megengine._internal as mgb\n'), ((12146, 12231), 'utils.util.mace_check', 
'mace_check', (['(groups_divisible == 1)', '"""Mace does not support group convolution yet"""'], {}), "(groups_divisible == 1, 'Mace does not support group convolution yet'\n )\n", (12156, 12231), False, 'from utils.util import mace_check\n'), ((13080, 13166), 'utils.util.mace_check', 'mace_check', (['(groups_divisible == 1)', '"""Mace does not support group deconvolution yet"""'], {}), "(groups_divisible == 1,\n 'Mace does not support group deconvolution yet')\n", (13090, 13166), False, 'from utils.util import mace_check\n'), ((23684, 23756), 'math.ceil', 'math.ceil', (['((end_vals[ai_idx] - begin_vals[ai_idx]) / stride_vals[ai_idx])'], {}), '((end_vals[ai_idx] - begin_vals[ai_idx]) / stride_vals[ai_idx])\n', (23693, 23756), False, 'import math\n'), ((25169, 25224), 'transform.base_converter.ConverterUtil.add_data_format_arg', 'ConverterUtil.add_data_format_arg', (['op2', 'DataFormat.NCHW'], {}), '(op2, DataFormat.NCHW)\n', (25202, 25224), False, 'from transform.base_converter import ConverterUtil\n'), ((1867, 1904), 'megengine._internal.cgtools.get_opr_type', 'mgb.cgtools.get_opr_type', (['consumer_op'], {}), '(consumer_op)\n', (1891, 1904), True, 'import megengine._internal as mgb\n'), ((13378, 13418), 'utils.util.mace_check', 'mace_check', (['(False)', '"""Unknown sparse mode"""'], {}), "(False, 'Unknown sparse mode')\n", (13388, 13418), False, 'from utils.util import mace_check\n'), ((16210, 16278), 'utils.util.mace_check', 'mace_check', (['(False)', '"""AVERAGE pooling should not count padding values"""'], {}), "(False, 'AVERAGE pooling should not count padding values')\n", (16220, 16278), False, 'from utils.util import mace_check\n'), ((20280, 20303), 'numpy.vectorize', 'np.vectorize', (['math.sqrt'], {}), '(math.sqrt)\n', (20292, 20303), True, 'import numpy as np\n'), ((22013, 22081), 'utils.util.mace_check', 'mace_check', (['(False)', '"""you should feed const values for subtensor axis"""'], {}), "(False, 'you should feed const values for subtensor axis')\n", (22023, 22081), False, 'from utils.util import mace_check\n'), ((7896, 7929), 'megengine._internal.cgtools.get_opr_type', 'mgb.cgtools.get_opr_type', (['next_op'], {}), '(next_op)\n', (7920, 7929), True, 'import megengine._internal as mgb\n'), ((14648, 14666), 'numpy.ones', 'np.ones', (['exp_shape'], {}), '(exp_shape)\n', (14655, 14666), True, 'import numpy as np\n')]
|
"""
Notes
-----
Important attributes of continuous (order > 0) :class:`Field` and
:class:`SurfaceField` instances:
- `vertex_remap` : `econn[:, :n_vertex] = vertex_remap[conn]`
- `vertex_remap_i` : `conn = vertex_remap_i[econn[:, :n_vertex]]`
where `conn` is the mesh vertex connectivity, `econn` is the
region-local field connectivity.
"""
import time
import numpy as nm
from sfepy.base.base import output, assert_
import fea
from sfepy.discrete.fem.utils import prepare_remap
from sfepy.discrete.common.dof_info import expand_nodes_to_dofs
from sfepy.discrete.fem.global_interp import get_ref_coors
from sfepy.discrete.fem.facets import get_facet_dof_permutations
from sfepy.discrete.fem.fields_base import (FEField, VolumeField, SurfaceField,
H1Mixin)
from sfepy.discrete.fem.extmods.bases import evaluate_in_rc
class H1NodalMixin(H1Mixin):
def _setup_facet_orientations(self):
order = self.approx_order
self.node_desc = self.interp.describe_nodes()
edge_nodes = self.node_desc.edge_nodes
if edge_nodes is not None:
n_fp = self.gel.edges.shape[1]
self.edge_dof_perms = get_facet_dof_permutations(n_fp, self.igs,
order)
face_nodes = self.node_desc.face_nodes
if face_nodes is not None:
n_fp = self.gel.faces.shape[1]
self.face_dof_perms = get_facet_dof_permutations(n_fp, self.igs,
order)
def _setup_edge_dofs(self):
"""
Setup edge DOF connectivity.
"""
if self.node_desc.edge is None:
return 0, None, None
return self._setup_facet_dofs(1, self.node_desc.edge,
self.edge_dof_perms,
self.n_vertex_dof)
def _setup_face_dofs(self):
"""
Setup face DOF connectivity.
"""
if self.node_desc.face is None:
return 0, None, None
return self._setup_facet_dofs(self.domain.shape.tdim - 1,
self.node_desc.face,
self.face_dof_perms,
self.n_vertex_dof + self.n_edge_dof)
def _setup_facet_dofs(self, dim, facet_desc, facet_perms, offset):
"""
Helper function to setup facet DOF connectivity, works for both
edges and faces.
"""
facet_desc = nm.array(facet_desc)
n_dof_per_facet = facet_desc.shape[1]
cmesh = self.domain.cmesh
facets = self.region.entities[dim]
ii = nm.arange(facets.shape[0], dtype=nm.int32)
all_dofs = offset + expand_nodes_to_dofs(ii, n_dof_per_facet)
# Prepare global facet id remapping to field-local numbering.
remap = prepare_remap(facets, cmesh.num[dim])
cconn = self.region.domain.cmesh.get_conn(self.region.tdim, dim)
offs = cconn.offsets
n_f = self.gel.edges.shape[0] if dim == 1 else self.gel.faces.shape[0]
oris = cmesh.get_orientations(dim)
for ig, ap in self.aps.iteritems():
gcells = self.region.get_cells(ig, offset=False)
n_el = gcells.shape[0]
indices = cconn.indices[offs[gcells[0]]:offs[gcells[-1]+1]]
facets_of_cells = remap[indices]
ori = oris[offs[gcells[0]]:offs[gcells[-1]+1]]
perms = facet_perms[ig][ori]
# Define global facet dof numbers.
gdofs = offset + expand_nodes_to_dofs(facets_of_cells,
n_dof_per_facet)
# Elements of facets.
iel = nm.arange(n_el, dtype=nm.int32).repeat(n_f)
ies = nm.tile(nm.arange(n_f, dtype=nm.int32), n_el)
# DOF columns in econn for each facet.
iep = facet_desc[ies]
iaux = nm.arange(gdofs.shape[0], dtype=nm.int32)
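            # scatter: for each facet instance, write its (orientation-
            # permuted) global DOFs into the matching econn columns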
ap.econn[iel[:, None], iep] = gdofs[iaux[:, None], perms]
n_dof = n_dof_per_facet * facets.shape[0]
assert_(n_dof == nm.prod(all_dofs.shape))
return n_dof, all_dofs, remap
def _setup_bubble_dofs(self):
"""
Setup bubble DOF connectivity.
"""
if self.node_desc.bubble is None:
return 0, None, None
offset = self.n_vertex_dof + self.n_edge_dof + self.n_face_dof
n_dof = 0
n_dof_per_cell = self.node_desc.bubble.shape[0]
all_dofs = {}
remaps = {}
for ig, ap in self.aps.iteritems():
ii = self.region.get_cells(ig)
n_cell = ii.shape[0]
nd = n_dof_per_cell * n_cell
group = self.domain.groups[ig]
remaps[ig] = prepare_remap(ii, group.shape.n_el)
aux = nm.arange(offset + n_dof, offset + n_dof + nd,
dtype=nm.int32)
aux.shape = (n_cell, n_dof_per_cell)
iep = self.node_desc.bubble[0]
ap.econn[:,iep:] = aux
all_dofs[ig] = aux
n_dof += nd
return n_dof, all_dofs, remaps
def set_dofs(self, fun=0.0, region=None, dpn=None, warn=None):
"""
Set the values of DOFs in a given region using a function of space
coordinates or value `fun`.
"""
if region is None:
region = self.region
if dpn is None:
dpn = self.n_components
aux = self.get_dofs_in_region(region, clean=True, warn=warn)
nods = nm.unique(nm.hstack(aux))
if callable(fun):
vals = fun(self.get_coor(nods))
elif nm.isscalar(fun):
vals = nm.repeat([fun], nods.shape[0] * dpn)
elif isinstance(fun, nm.ndarray):
assert_(len(fun) == dpn)
vals = nm.repeat(fun, nods.shape[0])
else:
raise ValueError('unknown function/value type! (%s)' % type(fun))
return nods, vals
def evaluate_at(self, coors, source_vals, strategy='kdtree',
close_limit=0.1, cache=None, ret_cells=False,
ret_status=False, ret_ref_coors=False, verbose=True):
"""
Evaluate source DOF values corresponding to the field in the given
coordinates using the field interpolation.
Parameters
----------
coors : array
The coordinates the source values should be interpolated into.
source_vals : array
The source DOF values corresponding to the field.
strategy : str, optional
The strategy for finding the elements that contain the
coordinates. Only 'kdtree' is supported for the moment.
close_limit : float, optional
The maximum limit distance of a point from the closest
element allowed for extrapolation.
cache : Struct, optional
To speed up a sequence of evaluations, the field mesh, the inverse
connectivity of the field mesh and the KDTree instance can
be cached as `cache.mesh`, `cache.offsets`, `cache.iconn` and
`cache.kdtree`. Optionally, the cache can also contain the
reference element coordinates as `cache.ref_coors`,
`cache.cells` and `cache.status`, if the evaluation occurs
in the same coordinates repeatedly. In that case the KDTree
related data are ignored.
ret_cells : bool, optional
If True, return also the cell indices the coordinates are in.
ret_status : bool, optional
If True, return also the status for each point: 0 is
success, 1 is extrapolation within `close_limit`, 2 is
extrapolation outside `close_limit`, 3 is failure.
ret_ref_coors : bool, optional
If True, return also the found reference element coordinates.
verbose : bool
If False, reduce verbosity.
Returns
-------
vals : array
The interpolated values.
cells : array
The cell indices, if `ret_cells` or `ret_status` are True.
status : array
The status, if `ret_status` is True.
"""
output('evaluating in %d points...' % coors.shape[0], verbose=verbose)
ref_coors, cells, status = get_ref_coors(self, coors,
strategy=strategy,
close_limit=close_limit,
cache=cache,
verbose=verbose)
tt = time.clock()
vertex_coorss, nodess, orders, mtx_is = [], [], [], []
conns = []
for ap in self.aps.itervalues():
ps = ap.interp.poly_spaces['v']
vertex_coorss.append(ps.geometry.coors)
nodess.append(ps.nodes)
mtx_is.append(ps.get_mtx_i())
orders.append(ps.order)
conns.append(ap.econn)
orders = nm.array(orders, dtype=nm.int32)
# Interpolate to the reference coordinates.
vals = nm.empty((coors.shape[0], source_vals.shape[1]),
dtype=source_vals.dtype)
evaluate_in_rc(vals, ref_coors, cells, status, source_vals,
conns, vertex_coorss, nodess, orders, mtx_is,
1e-15)
        output('interpolation: %f s' % (time.clock() - tt), verbose=verbose)
        output('...done', verbose=verbose)
if ret_ref_coors:
return vals, ref_coors, cells, status
elif ret_status:
return vals, cells, status
elif ret_cells:
return vals, cells
else:
return vals
class H1NodalVolumeField(H1NodalMixin, VolumeField):
family_name = 'volume_H1_lagrange'
def interp_v_vals_to_n_vals(self, vec):
"""
Interpolate a function defined by vertex DOF values using the FE
geometry base (P1 or Q1) into the extra nodes, i.e. define the
extra DOF values.
"""
if not self.node_desc.has_extra_nodes():
enod_vol_val = vec.copy()
else:
dim = vec.shape[1]
enod_vol_val = nm.zeros((self.n_nod, dim), dtype=nm.float64)
for ig, ap in self.aps.iteritems():
group = self.domain.groups[ig]
econn = ap.econn
coors = ap.interp.poly_spaces['v'].node_coors
ginterp = ap.interp.gel.interp
bf = ginterp.poly_spaces['v'].eval_base(coors)
bf = bf[:,0,:].copy()
evec = nm.dot(bf, vec[group.conn])
enod_vol_val[econn] = nm.swapaxes(evec, 0, 1)
return enod_vol_val
def set_basis(self, maps, methods):
"""
This function along with eval_basis supports the general term IntFE.
It sets parameters and basis functions at reference element
according to its method (val, grad, div, etc).
Parameters
----------
maps : class
It provides information about mapping between reference and real
element. Quadrature points stored in maps.qp_coor are used here.
        methods : list of str
            It stores the methods for variable evaluation. The first entry
            is one of val, grad, or div.
self.bfref : numpy.array of shape = (n_qp, n_basis) + basis_shape
An array that stores basis functions evaluated at quadrature
points. Here n_qp is number of quadrature points, n_basis is
number of basis functions, and basis_shape is a shape of basis
function, i.e. (1,) for scalar-valued, (dim,) for vector-valued,
(dim, dim) for matrix-valued, etc.
Returns
-------
self.bfref : numpy.array
It stores a basis functions at quadrature points of shape according
to proceeded methods.
self.n_basis : int
number of basis functions
"""
self.eval_method = methods
def get_grad(maps, shape):
bfref0 = eval_base(maps.qp_coor, diff=True).swapaxes(1, 2)
if shape == (1,): # scalar variable
bfref = bfref0
elif len(shape) == 1: # vector variable
vec_shape = nm.array(bfref0.shape + shape)
vec_shape[1] *= shape[0]
bfref = nm.zeros(vec_shape)
for ii in nm.arange(shape[0]):
slc = slice(ii*bfref0.shape[1], (ii+1)*bfref0.shape[1])
bfref[:, slc, ii] = bfref0
else: # higher-order tensors variable
msg = "Evaluation of basis has not been implemented \
for higher-order tensors yet."
raise NotImplementedError(msg)
return bfref
def get_val(maps, shape):
bfref0 = eval_base(maps.qp_coor, diff=False).swapaxes(1, 2)
            if shape == (1,): # scalar variable
bfref = bfref0
elif len(shape) == 1:
vec_shape = nm.array(bfref0.shape)
vec_shape[1:3] *= shape[0]
bfref = nm.zeros(vec_shape)
for ii in nm.arange(shape[0]):
slc = slice(ii*bfref0.shape[1], (ii+1)*bfref0.shape[1])
bfref[:, slc] = bfref0
else: # higher-order tensors variable
msg = "Evaluation of basis has not been implemented \
for higher-order tensors yet."
raise NotImplementedError(msg)
return bfref
eval_base = self.interp.poly_spaces['v'].eval_base
if self.eval_method[0] == 'val':
bfref = get_val(maps, self.shape)
elif self.eval_method[0] == 'grad':
bfref = get_grad(maps, self.shape)
elif self.eval_method[0] == 'div':
bfref = get_grad(maps, self.shape)
else:
raise NotImplementedError("The method '%s' is not implemented" \
% (self.eval_method))
self.bfref = bfref
self.n_basis = self.bfref.shape[1]
def eval_basis(self, maps):
"""
It returns basis functions evaluated at quadrature points and mapped
at reference element according to real element.
"""
if self.eval_method == ['grad']:
val = nm.tensordot(self.bfref, maps.inv_jac, axes=(-1, 0))
return val
elif self.eval_method == ['val']:
return self.bfref
elif self.eval_method == ['div']:
val = nm.tensordot(self.bfref, maps.inv_jac, axes=(-1, 0))
val = nm.atleast_3d(nm.einsum('ijkk', val))
return val
elif self.eval_method == ['grad', 'sym', 'Man']:
val = nm.tensordot(self.bfref, maps.inv_jac, axes=(-1, 0))
from sfepy.terms.terms_general import proceed_methods
val = proceed_methods(val, self.eval_method[1:])
return val
else:
msg = "Improper method '%s' for evaluation of basis functions" \
% (self.eval_method)
raise NotImplementedError(msg)
class H1DiscontinuousField(H1NodalMixin, VolumeField):
family_name = 'volume_H1_lagrange_discontinuous'
def _setup_approximations(self):
self.aps = {}
self.aps_by_name = {}
for ig in self.igs:
name = self.interp.name + '_%s_ig%d' % (self.region.name, ig)
ap = fea.DiscontinuousApproximation(name, self.interp,
self.region, ig)
self.aps[ig] = ap
self.aps_by_name[ap.name] = ap
def _setup_global_base(self):
"""
Setup global DOF/base function indices and connectivity of the field.
"""
self._setup_facet_orientations()
self._init_econn()
n_dof = 0
all_dofs = {}
remaps = {}
for ig, ap in self.aps.iteritems():
ii = self.region.get_cells(ig)
nd = nm.prod(ap.econn.shape)
group = self.domain.groups[ig]
remaps[ig] = prepare_remap(ii, group.shape.n_el)
aux = nm.arange(n_dof, n_dof + nd, dtype=nm.int32)
aux.shape = ap.econn.shape
ap.econn[:] = aux
all_dofs[ig] = aux
n_dof += nd
self.n_nod = n_dof
self.n_bubble_dof = n_dof
self.bubble_dofs = all_dofs
self.bubble_remaps = remaps
self.n_vertex_dof = self.n_edge_dof = self.n_face_dof = 0
self._setup_esurface()
def extend_dofs(self, dofs, fill_value=None):
"""
Extend DOFs to the whole domain using the `fill_value`, or the
smallest value in `dofs` if `fill_value` is None.
"""
if self.approx_order != 0:
dofs = self.average_to_vertices(dofs)
new_dofs = FEField.extend_dofs(self, dofs)
return new_dofs
def remove_extra_dofs(self, dofs):
"""
Remove DOFs defined in higher order nodes (order > 1).
"""
if self.approx_order != 0:
dofs = self.average_to_vertices(dofs)
new_dofs = FEField.remove_extra_dofs(self, dofs)
return new_dofs
def average_to_vertices(self, dofs):
"""
Average DOFs of the discontinuous field into the field region
vertices.
"""
data_qp, integral = self.interp_to_qp(dofs)
vertex_dofs = self.average_qp_to_vertices(data_qp, integral)
return vertex_dofs
class H1NodalSurfaceField(H1NodalMixin, SurfaceField):
"""
A field defined on a surface region.
"""
family_name = 'surface_H1_lagrange'
def interp_v_vals_to_n_vals(self, vec):
"""
Interpolate a function defined by vertex DOF values using the FE
surface geometry base (P1 or Q1) into the extra nodes, i.e. define the
extra DOF values.
"""
if not self.node_desc.has_extra_nodes():
enod_vol_val = vec.copy()
else:
msg = 'surface nodal fields do not support higher order nodes yet!'
raise NotImplementedError(msg)
return enod_vol_val
|
[
"sfepy.discrete.fem.fields_base.FEField.remove_extra_dofs",
"sfepy.discrete.fem.fields_base.FEField.extend_dofs",
"sfepy.discrete.fem.extmods.bases.evaluate_in_rc",
"sfepy.discrete.common.dof_info.expand_nodes_to_dofs",
"sfepy.discrete.fem.global_interp.get_ref_coors",
"sfepy.base.base.output",
"sfepy.discrete.fem.facets.get_facet_dof_permutations",
"sfepy.terms.terms_general.proceed_methods",
"sfepy.discrete.fem.utils.prepare_remap"
] |
[((2552, 2572), 'numpy.array', 'nm.array', (['facet_desc'], {}), '(facet_desc)\n', (2560, 2572), True, 'import numpy as nm\n'), ((2711, 2753), 'numpy.arange', 'nm.arange', (['facets.shape[0]'], {'dtype': 'nm.int32'}), '(facets.shape[0], dtype=nm.int32)\n', (2720, 2753), True, 'import numpy as nm\n'), ((2911, 2948), 'sfepy.discrete.fem.utils.prepare_remap', 'prepare_remap', (['facets', 'cmesh.num[dim]'], {}), '(facets, cmesh.num[dim])\n', (2924, 2948), False, 'from sfepy.discrete.fem.utils import prepare_remap\n'), ((8295, 8365), 'sfepy.base.base.output', 'output', (["('evaluating in %d points...' % coors.shape[0])"], {'verbose': 'verbose'}), "('evaluating in %d points...' % coors.shape[0], verbose=verbose)\n", (8301, 8365), False, 'from sfepy.base.base import output, assert_\n'), ((8402, 8506), 'sfepy.discrete.fem.global_interp.get_ref_coors', 'get_ref_coors', (['self', 'coors'], {'strategy': 'strategy', 'close_limit': 'close_limit', 'cache': 'cache', 'verbose': 'verbose'}), '(self, coors, strategy=strategy, close_limit=close_limit,\n cache=cache, verbose=verbose)\n', (8415, 8506), False, 'from sfepy.discrete.fem.global_interp import get_ref_coors\n'), ((8713, 8725), 'time.clock', 'time.clock', ([], {}), '()\n', (8723, 8725), False, 'import time\n'), ((9114, 9146), 'numpy.array', 'nm.array', (['orders'], {'dtype': 'nm.int32'}), '(orders, dtype=nm.int32)\n', (9122, 9146), True, 'import numpy as nm\n'), ((9215, 9288), 'numpy.empty', 'nm.empty', (['(coors.shape[0], source_vals.shape[1])'], {'dtype': 'source_vals.dtype'}), '((coors.shape[0], source_vals.shape[1]), dtype=source_vals.dtype)\n', (9223, 9288), True, 'import numpy as nm\n'), ((9322, 9438), 'sfepy.discrete.fem.extmods.bases.evaluate_in_rc', 'evaluate_in_rc', (['vals', 'ref_coors', 'cells', 'status', 'source_vals', 'conns', 'vertex_coorss', 'nodess', 'orders', 'mtx_is', '(1e-15)'], {}), '(vals, ref_coors, cells, status, source_vals, conns,\n vertex_coorss, nodess, orders, mtx_is, 1e-15)\n', (9336, 9438), False, 'from sfepy.discrete.fem.extmods.bases import evaluate_in_rc\n'), ((9564, 9598), 'sfepy.base.base.output', 'output', (['"""...done"""'], {'verbose': 'verbose'}), "('...done', verbose=verbose)\n", (9570, 9598), False, 'from sfepy.base.base import output, assert_\n'), ((17089, 17120), 'sfepy.discrete.fem.fields_base.FEField.extend_dofs', 'FEField.extend_dofs', (['self', 'dofs'], {}), '(self, dofs)\n', (17108, 17120), False, 'from sfepy.discrete.fem.fields_base import FEField, VolumeField, SurfaceField, H1Mixin\n'), ((17378, 17415), 'sfepy.discrete.fem.fields_base.FEField.remove_extra_dofs', 'FEField.remove_extra_dofs', (['self', 'dofs'], {}), '(self, dofs)\n', (17403, 17415), False, 'from sfepy.discrete.fem.fields_base import FEField, VolumeField, SurfaceField, H1Mixin\n'), ((1183, 1232), 'sfepy.discrete.fem.facets.get_facet_dof_permutations', 'get_facet_dof_permutations', (['n_fp', 'self.igs', 'order'], {}), '(n_fp, self.igs, order)\n', (1209, 1232), False, 'from sfepy.discrete.fem.facets import get_facet_dof_permutations\n'), ((1454, 1503), 'sfepy.discrete.fem.facets.get_facet_dof_permutations', 'get_facet_dof_permutations', (['n_fp', 'self.igs', 'order'], {}), '(n_fp, self.igs, order)\n', (1480, 1503), False, 'from sfepy.discrete.fem.facets import get_facet_dof_permutations\n'), ((2782, 2823), 'sfepy.discrete.common.dof_info.expand_nodes_to_dofs', 'expand_nodes_to_dofs', (['ii', 'n_dof_per_facet'], {}), '(ii, n_dof_per_facet)\n', (2802, 2823), False, 'from sfepy.discrete.common.dof_info import expand_nodes_to_dofs\n'), 
((3984, 4025), 'numpy.arange', 'nm.arange', (['gdofs.shape[0]'], {'dtype': 'nm.int32'}), '(gdofs.shape[0], dtype=nm.int32)\n', (3993, 4025), True, 'import numpy as nm\n'), ((4827, 4862), 'sfepy.discrete.fem.utils.prepare_remap', 'prepare_remap', (['ii', 'group.shape.n_el'], {}), '(ii, group.shape.n_el)\n', (4840, 4862), False, 'from sfepy.discrete.fem.utils import prepare_remap\n'), ((4882, 4944), 'numpy.arange', 'nm.arange', (['(offset + n_dof)', '(offset + n_dof + nd)'], {'dtype': 'nm.int32'}), '(offset + n_dof, offset + n_dof + nd, dtype=nm.int32)\n', (4891, 4944), True, 'import numpy as nm\n'), ((5615, 5629), 'numpy.hstack', 'nm.hstack', (['aux'], {}), '(aux)\n', (5624, 5629), True, 'import numpy as nm\n'), ((5716, 5732), 'numpy.isscalar', 'nm.isscalar', (['fun'], {}), '(fun)\n', (5727, 5732), True, 'import numpy as nm\n'), ((10327, 10372), 'numpy.zeros', 'nm.zeros', (['(self.n_nod, dim)'], {'dtype': 'nm.float64'}), '((self.n_nod, dim), dtype=nm.float64)\n', (10335, 10372), True, 'import numpy as nm\n'), ((14557, 14609), 'numpy.tensordot', 'nm.tensordot', (['self.bfref', 'maps.inv_jac'], {'axes': '(-1, 0)'}), '(self.bfref, maps.inv_jac, axes=(-1, 0))\n', (14569, 14609), True, 'import numpy as nm\n'), ((15668, 15734), 'fea.DiscontinuousApproximation', 'fea.DiscontinuousApproximation', (['name', 'self.interp', 'self.region', 'ig'], {}), '(name, self.interp, self.region, ig)\n', (15698, 15734), False, 'import fea\n'), ((16227, 16250), 'numpy.prod', 'nm.prod', (['ap.econn.shape'], {}), '(ap.econn.shape)\n', (16234, 16250), True, 'import numpy as nm\n'), ((16320, 16355), 'sfepy.discrete.fem.utils.prepare_remap', 'prepare_remap', (['ii', 'group.shape.n_el'], {}), '(ii, group.shape.n_el)\n', (16333, 16355), False, 'from sfepy.discrete.fem.utils import prepare_remap\n'), ((16375, 16419), 'numpy.arange', 'nm.arange', (['n_dof', '(n_dof + nd)'], {'dtype': 'nm.int32'}), '(n_dof, n_dof + nd, dtype=nm.int32)\n', (16384, 16419), True, 'import numpy as nm\n'), ((3612, 3666), 'sfepy.discrete.common.dof_info.expand_nodes_to_dofs', 'expand_nodes_to_dofs', (['facets_of_cells', 'n_dof_per_facet'], {}), '(facets_of_cells, n_dof_per_facet)\n', (3632, 3666), False, 'from sfepy.discrete.common.dof_info import expand_nodes_to_dofs\n'), ((3840, 3870), 'numpy.arange', 'nm.arange', (['n_f'], {'dtype': 'nm.int32'}), '(n_f, dtype=nm.int32)\n', (3849, 3870), True, 'import numpy as nm\n'), ((4172, 4195), 'numpy.prod', 'nm.prod', (['all_dofs.shape'], {}), '(all_dofs.shape)\n', (4179, 4195), True, 'import numpy as nm\n'), ((5753, 5790), 'numpy.repeat', 'nm.repeat', (['[fun]', '(nods.shape[0] * dpn)'], {}), '([fun], nods.shape[0] * dpn)\n', (5762, 5790), True, 'import numpy as nm\n'), ((10737, 10764), 'numpy.dot', 'nm.dot', (['bf', 'vec[group.conn]'], {}), '(bf, vec[group.conn])\n', (10743, 10764), True, 'import numpy as nm\n'), ((10803, 10826), 'numpy.swapaxes', 'nm.swapaxes', (['evec', '(0)', '(1)'], {}), '(evec, 0, 1)\n', (10814, 10826), True, 'import numpy as nm\n'), ((3770, 3801), 'numpy.arange', 'nm.arange', (['n_el'], {'dtype': 'nm.int32'}), '(n_el, dtype=nm.int32)\n', (3779, 3801), True, 'import numpy as nm\n'), ((5890, 5919), 'numpy.repeat', 'nm.repeat', (['fun', 'nods.shape[0]'], {}), '(fun, nods.shape[0])\n', (5899, 5919), True, 'import numpy as nm\n'), ((9521, 9533), 'time.clock', 'time.clock', ([], {}), '()\n', (9531, 9533), False, 'import time\n'), ((12453, 12483), 'numpy.array', 'nm.array', (['(bfref0.shape + shape)'], {}), '(bfref0.shape + shape)\n', (12461, 12483), True, 'import numpy as nm\n'), 
((12549, 12568), 'numpy.zeros', 'nm.zeros', (['vec_shape'], {}), '(vec_shape)\n', (12557, 12568), True, 'import numpy as nm\n'), ((12595, 12614), 'numpy.arange', 'nm.arange', (['shape[0]'], {}), '(shape[0])\n', (12604, 12614), True, 'import numpy as nm\n'), ((13236, 13258), 'numpy.array', 'nm.array', (['bfref0.shape'], {}), '(bfref0.shape)\n', (13244, 13258), True, 'import numpy as nm\n'), ((13326, 13345), 'numpy.zeros', 'nm.zeros', (['vec_shape'], {}), '(vec_shape)\n', (13334, 13345), True, 'import numpy as nm\n'), ((13372, 13391), 'numpy.arange', 'nm.arange', (['shape[0]'], {}), '(shape[0])\n', (13381, 13391), True, 'import numpy as nm\n'), ((14767, 14819), 'numpy.tensordot', 'nm.tensordot', (['self.bfref', 'maps.inv_jac'], {'axes': '(-1, 0)'}), '(self.bfref, maps.inv_jac, axes=(-1, 0))\n', (14779, 14819), True, 'import numpy as nm\n'), ((14852, 14874), 'numpy.einsum', 'nm.einsum', (['"""ijkk"""', 'val'], {}), "('ijkk', val)\n", (14861, 14874), True, 'import numpy as nm\n'), ((14975, 15027), 'numpy.tensordot', 'nm.tensordot', (['self.bfref', 'maps.inv_jac'], {'axes': '(-1, 0)'}), '(self.bfref, maps.inv_jac, axes=(-1, 0))\n', (14987, 15027), True, 'import numpy as nm\n'), ((15112, 15154), 'sfepy.terms.terms_general.proceed_methods', 'proceed_methods', (['val', 'self.eval_method[1:]'], {}), '(val, self.eval_method[1:])\n', (15127, 15154), False, 'from sfepy.terms.terms_general import proceed_methods\n')]
|
from sqlmodel import Session, select
from db import BaseDBModel
from service.base_crud import BaseCRUD
class TestTable(BaseDBModel, table=True):
test_str: str
test_int: int
TEST_ROW_DATA1 = {'test_str': 'str1', 'test_int': 1}
TEST_ROW_DATA2 = {'test_str': 'str2', 'test_int': 2}
class TestCRUD(BaseCRUD):
model = TestTable
test_crud = TestCRUD()
def test_get_returns_none_for_not_existing_rows(session: Session):
row = TestTable(**TEST_ROW_DATA1)
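    # the row is never added to the session, so its id is None and the
    # lookup cannot match any persisted row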
result_row = test_crud.get(session, row.id)
assert result_row is None
def test_get_returns_existing_row(session: Session):
row = TestTable(**TEST_ROW_DATA1)
session.add(row)
session.commit()
result_row: TestTable = test_crud.get(session, row.id)
assert row == result_row
def test_get_multiple_values_returns_empty_array_for_not_existing_rows(session: Session):
result_arr = test_crud.get_multiple_values(session)
assert len(result_arr) == 0
def create_and_return_multiple_rows(db_session: Session):
row1 = TestTable(**TEST_ROW_DATA1)
row2 = TestTable(**TEST_ROW_DATA2)
db_session.add(row1)
db_session.add(row2)
db_session.commit()
return [row1, row2]
def test_get_multiple_values_returns_existing_rows(session: Session):
rows = create_and_return_multiple_rows(session)
result_arr = test_crud.get_multiple_values(session)
assert result_arr == rows
def test_get_multiple_values_returns_limited_rows(session: Session):
rows = create_and_return_multiple_rows(session)
result_arr = test_crud.get_multiple_values(session, limit=1)
assert result_arr == [rows[0]]
def test_get_multiple_values_returns_offset_rows(session: Session):
rows = create_and_return_multiple_rows(session)
result_arr = test_crud.get_multiple_values(session, offset=1)
assert result_arr == [rows[1]]
def test_create_row_returns_inserted_row(session: Session):
row = TestTable(**TEST_ROW_DATA1)
inserted_row: TestTable = test_crud.create(session, row)
assert inserted_row == row
def test_create_all_does_not_throw_errors(session: Session):
rows = [TestTable(**TEST_ROW_DATA1), TestTable(**TEST_ROW_DATA2)]
test_crud.create_all(session, rows)
inserted_rows = session.exec(select(TestTable)).all()
assert inserted_rows == rows
def test_update_returns_updated_row(session: Session):
row = TestTable(**TEST_ROW_DATA1)
session.add(row)
session.commit()
new_item = TestTable.from_orm(row)
new_item.test_int = 10
updated_row: TestTable = test_crud.update(session, row, new_item)
assert new_item.test_int == updated_row.test_int
def test_delete_row_does_not_throw_error(session: Session):
row = TestTable(**TEST_ROW_DATA1)
session.add(row)
session.commit()
test_crud.delete(session, row.id)
inserted_row = session.exec(
select(TestTable).where(TestTable.id == row.id)).first()
assert inserted_row is None
def test_model_attribute_returns_test_model():
assert test_crud.model == TestTable
def test_model_in_base_crud_returns_none_when_not_implemented():
BaseCRUD.__abstractmethods__ = set()
class MockCRUD(BaseCRUD):
string: str
mock_crud = MockCRUD()
assert mock_crud.model is None
|
[
"sqlmodel.select"
] |
[((2249, 2266), 'sqlmodel.select', 'select', (['TestTable'], {}), '(TestTable)\n', (2255, 2266), False, 'from sqlmodel import Session, select\n'), ((2857, 2874), 'sqlmodel.select', 'select', (['TestTable'], {}), '(TestTable)\n', (2863, 2874), False, 'from sqlmodel import Session, select\n')]
|
# Copyright (c) Megvii, Inc. and its affiliates.
import math
import megengine.functional as F
import megengine.module as M
class LogitsFullyConnected(M.Module):
"""single fully connected layer, mapping embedding to logits with normalized weight
"""
def __init__(self, num_class, feature_dim):
super().__init__()
fc = M.Linear(feature_dim, num_class, bias=False)
self.weight = fc.weight
M.init.msra_uniform_(self.weight, a=math.sqrt(5))
def forward(self, embedding):
w = F.normalize(self.weight, axis=1)
x = embedding # embedding has been normalized already
logits = F.matmul(x, w.transpose(1, 0))
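        # with both operands unit-normalized, each logit is the cosine of
        # the angle between the embedding and a class weight vector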
return logits
class AdditiveMarginSoftmax(M.Module):
"""additive margin softmax from
`"Additive Margin Softmax for Face Verification" <https://arxiv.org/pdf/1801.05599.pdf>`_
and
`"CosFace: Large Margin Cosine Loss for Deep Face Recognition" <https://arxiv.org/pdf/1801.09414.pdf>`_
"""
def __init__(self, num_class, scale, m1, m2, m3, feature_dim=512):
assert m1 == 1.0, f"m1 expected to be 1.0 in AdditiveMarginSoftmax, got {m1}"
assert m2 == 0.0, f"m2 expected to be 0.0 in AdditiveMarginSoftmax, got {m2}"
super().__init__()
self.fc = LogitsFullyConnected(num_class, feature_dim)
self.num_class = num_class
self.scale = scale
self.margin = m3
def forward(self, embedding, target):
origin_logits = self.fc(embedding)
one_hot_target = F.one_hot(target, self.num_class)
# get how much to decrease
delta_one_hot_target = one_hot_target * self.margin
# apply the decrease
logits = origin_logits - delta_one_hot_target
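        # i.e. cos(theta) - m on the target class, cos(theta) elsewhere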
logits = logits * self.scale
loss = F.loss.cross_entropy(logits, target)
accuracy = F.topk_accuracy(origin_logits, target, topk=1)
return loss, accuracy
class AdditiveAngularMarginSoftmax(M.Module):
"""additive angular margin softmax from
`"ArcFace: Additive Angular Margin Loss for Deep Face Recognition" <https://arxiv.org/pdf/1801.07698.pdf>`_
"""
def __init__(self, num_class, scale, m1, m2, m3, feature_dim=512):
assert m1 == 1.0, f"m1 expected to be 1.0 in AdditiveAngularMarginSoftmax, got {m1}"
assert m3 == 0.0, f"m3 expected to be 0.0 in AdditiveAngularMarginSoftmax, got {m3}"
super().__init__()
self.fc = LogitsFullyConnected(num_class, feature_dim)
self.num_class = num_class
self.scale = scale
self.margin = m2
def forward(self, embedding, target):
origin_logits = self.fc(embedding)
one_hot_target = F.one_hot(target, self.num_class).astype("bool")
large_margined_logit = F.cos(F.acos(origin_logits) + self.margin)
small_margined_logit = origin_logits
margined_logit = F.where(origin_logits >= 0, large_margined_logit, small_margined_logit)
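        # easy-margin style fallback: the angular margin is applied only
        # where cos(theta) >= 0, presumably to keep the margined logit
        # monotonic in theta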
logits = F.where(one_hot_target, margined_logit, origin_logits)
logits = logits * self.scale
loss = F.loss.cross_entropy(logits, target)
accuracy = F.topk_accuracy(origin_logits, target, topk=1)
return loss, accuracy
def get_loss(name):
"""get loss class by name
Args:
        name (str): custom name of loss
Returns:
M.Module: corresponding loss class
"""
mapping = {
"cosface": AdditiveMarginSoftmax,
"arcface": AdditiveAngularMarginSoftmax,
}
assert name in mapping, f"head {name} is not found, choose one from {mapping.keys()}"
return mapping[name]
|
[
"megengine.module.Linear",
"megengine.functional.normalize",
"megengine.functional.topk_accuracy",
"megengine.functional.where",
"megengine.functional.acos",
"megengine.functional.one_hot",
"megengine.functional.loss.cross_entropy"
] |
[((350, 394), 'megengine.module.Linear', 'M.Linear', (['feature_dim', 'num_class'], {'bias': '(False)'}), '(feature_dim, num_class, bias=False)\n', (358, 394), True, 'import megengine.module as M\n'), ((532, 564), 'megengine.functional.normalize', 'F.normalize', (['self.weight'], {'axis': '(1)'}), '(self.weight, axis=1)\n', (543, 564), True, 'import megengine.functional as F\n'), ((1527, 1560), 'megengine.functional.one_hot', 'F.one_hot', (['target', 'self.num_class'], {}), '(target, self.num_class)\n', (1536, 1560), True, 'import megengine.functional as F\n'), ((1793, 1829), 'megengine.functional.loss.cross_entropy', 'F.loss.cross_entropy', (['logits', 'target'], {}), '(logits, target)\n', (1813, 1829), True, 'import megengine.functional as F\n'), ((1849, 1895), 'megengine.functional.topk_accuracy', 'F.topk_accuracy', (['origin_logits', 'target'], {'topk': '(1)'}), '(origin_logits, target, topk=1)\n', (1864, 1895), True, 'import megengine.functional as F\n'), ((2878, 2949), 'megengine.functional.where', 'F.where', (['(origin_logits >= 0)', 'large_margined_logit', 'small_margined_logit'], {}), '(origin_logits >= 0, large_margined_logit, small_margined_logit)\n', (2885, 2949), True, 'import megengine.functional as F\n'), ((2967, 3021), 'megengine.functional.where', 'F.where', (['one_hot_target', 'margined_logit', 'origin_logits'], {}), '(one_hot_target, margined_logit, origin_logits)\n', (2974, 3021), True, 'import megengine.functional as F\n'), ((3074, 3110), 'megengine.functional.loss.cross_entropy', 'F.loss.cross_entropy', (['logits', 'target'], {}), '(logits, target)\n', (3094, 3110), True, 'import megengine.functional as F\n'), ((3130, 3176), 'megengine.functional.topk_accuracy', 'F.topk_accuracy', (['origin_logits', 'target'], {'topk': '(1)'}), '(origin_logits, target, topk=1)\n', (3145, 3176), True, 'import megengine.functional as F\n'), ((471, 483), 'math.sqrt', 'math.sqrt', (['(5)'], {}), '(5)\n', (480, 483), False, 'import math\n'), ((2685, 2718), 'megengine.functional.one_hot', 'F.one_hot', (['target', 'self.num_class'], {}), '(target, self.num_class)\n', (2694, 2718), True, 'import megengine.functional as F\n'), ((2771, 2792), 'megengine.functional.acos', 'F.acos', (['origin_logits'], {}), '(origin_logits)\n', (2777, 2792), True, 'import megengine.functional as F\n')]
|
import os
from pathlib import Path
from app import MyApp
from dotenv import load_dotenv
from sqlmodel import Session, SQLModel, create_engine
load_dotenv()
MOVIES_PATH = Path(os.getenv("MOVIES_FILEPATH", None))
dbfile = Path("database.db")
engine = create_engine("sqlite:///database.db", echo=False)
def create_db_and_tables():
SQLModel.metadata.create_all(engine)
def main():
if not dbfile.exists():
create_db_and_tables()
with Session(engine) as session:
MyApp.run(
title="Media Organizer",
log="textual.log",
path=str(MOVIES_PATH.absolute()),
session=session,
)
if __name__ == "__main__":
main()
|
[
"sqlmodel.SQLModel.metadata.create_all",
"sqlmodel.Session",
"sqlmodel.create_engine"
] |
[((144, 157), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (155, 157), False, 'from dotenv import load_dotenv\n'), ((224, 243), 'pathlib.Path', 'Path', (['"""database.db"""'], {}), "('database.db')\n", (228, 243), False, 'from pathlib import Path\n'), ((253, 303), 'sqlmodel.create_engine', 'create_engine', (['"""sqlite:///database.db"""'], {'echo': '(False)'}), "('sqlite:///database.db', echo=False)\n", (266, 303), False, 'from sqlmodel import Session, SQLModel, create_engine\n'), ((178, 212), 'os.getenv', 'os.getenv', (['"""MOVIES_FILEPATH"""', 'None'], {}), "('MOVIES_FILEPATH', None)\n", (187, 212), False, 'import os\n'), ((338, 374), 'sqlmodel.SQLModel.metadata.create_all', 'SQLModel.metadata.create_all', (['engine'], {}), '(engine)\n', (366, 374), False, 'from sqlmodel import Session, SQLModel, create_engine\n'), ((458, 473), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (465, 473), False, 'from sqlmodel import Session, SQLModel, create_engine\n')]
|
import math
import numpy
import megengine as mge
import megengine.module as M
import megengine.functional as F
from .layer_norm import LayerNorm
class DecoderLayer(M.Module):
"""Single decoder layer module."""
def __init__(
self,
size,
self_attn,
src_attn,
feed_forward,
dropout_rate,
normalize_before=True,
):
"""Construct an DecoderLayer object."""
super(DecoderLayer, self).__init__()
self.size = size
self.self_attn = self_attn
self.src_attn = src_attn
self.feed_forward = feed_forward
self.norm1 = LayerNorm(size)
self.norm2 = LayerNorm(size)
self.norm3 = LayerNorm(size)
self.dropout = M.dropout.Dropout(dropout_rate)
self.normalize_before = normalize_before
def forward(self, tgt, tgt_mask, memory, memory_mask, cache=None):
"""Compute decoded features.
Args:
tgt (megengine.Tensor): decoded previous target features (batch, max_time_out, size)
tgt_mask (megengine.Tensor): mask for x (batch, max_time_out)
memory (megengine.Tensor): encoded source features (batch, max_time_in, size)
memory_mask (megengine.Tensor): mask for memory (batch, max_time_in)
cache (megengine.Tensor): cached output (batch, max_time_out-1, size)
"""
residual = tgt
if self.normalize_before:
tgt = self.norm1(tgt)
if cache is None:
tgt_q = tgt
tgt_q_mask = tgt_mask
else:
# compute only the last frame query keeping dim: max_time_out -> 1
assert cache.shape == (
tgt.shape[0],
tgt.shape[1] - 1,
self.size,
), f"{cache.shape} == {(tgt.shape[0], tgt.shape[1] - 1, self.size)}"
tgt_q = tgt[:, -1:, :]
residual = residual[:, -1:, :]
tgt_q_mask = None
if tgt_mask is not None:
tgt_q_mask = tgt_mask[:, -1:, :]
x = residual + self.dropout(self.self_attn(tgt_q, tgt, tgt, tgt_q_mask))
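        # with a cache, queries cover only the newest frame while keys and
        # values in self-attention still span the whole target sequence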
if not self.normalize_before:
x = self.norm1(x)
residual = x
if self.normalize_before:
x = self.norm2(x)
x = residual + self.dropout(self.src_attn(x, memory, memory, memory_mask))
if not self.normalize_before:
x = self.norm2(x)
residual = x
if self.normalize_before:
x = self.norm3(x)
x = residual + self.dropout(self.feed_forward(x))
if not self.normalize_before:
x = self.norm3(x)
if cache is not None:
x = F.concat([cache, x], axis=1)
return x, tgt_mask, memory, memory_mask
|
[
"megengine.module.dropout.Dropout",
"megengine.functional.concat"
] |
[((744, 775), 'megengine.module.dropout.Dropout', 'M.dropout.Dropout', (['dropout_rate'], {}), '(dropout_rate)\n', (761, 775), True, 'import megengine.module as M\n'), ((2705, 2733), 'megengine.functional.concat', 'F.concat', (['[cache, x]'], {'axis': '(1)'}), '([cache, x], axis=1)\n', (2713, 2733), True, 'import megengine.functional as F\n')]
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""Test int8 quantizated model on ImageNet.
Note:
* QAT simulate int8 with fp32, gpu only.
* Quantized use real int8, cpu only, a bit slow.
* Results may be slightly different between qat and quantized mode.
"""
import argparse
import multiprocessing as mp
import time
import megengine as mge
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.jit as jit
import megengine.quantization as Q
import models
logger = mge.get_logger(__name__)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-a", "--arch", default="resnet18", type=str)
parser.add_argument("-d", "--data", default=None, type=str)
parser.add_argument("-s", "--save", default="/data/models", type=str)
parser.add_argument("-c", "--checkpoint", default=None, type=str,
help="pretrained model to finetune")
parser.add_argument("-m", "--mode", default="qat", type=str,
choices=["normal", "qat", "quantized"],
help="Quantization Mode\n"
"normal: no quantization, using float32\n"
"qat: quantization aware training, simulate int8\n"
"quantized: convert mode to int8 quantized, inference only")
parser.add_argument("-n", "--ngpus", default=None, type=int)
parser.add_argument("-w", "--workers", default=4, type=int)
parser.add_argument("--report-freq", default=50, type=int)
args = parser.parse_args()
world_size = mge.get_device_count("gpu") if args.ngpus is None else args.ngpus
if args.mode == "quantized":
world_size = 1
args.report_freq = 1 # test is slow on cpu
mge.set_default_device("cpux")
logger.warning("quantized mode use cpu only")
if world_size > 1:
        # start distributed evaluation, dispatch sub-processes
mp.set_start_method("spawn")
processes = []
for rank in range(world_size):
p = mp.Process(target=worker, args=(rank, world_size, args))
p.start()
processes.append(p)
for p in processes:
p.join()
else:
worker(0, 1, args)
def worker(rank, world_size, args):
# pylint: disable=too-many-statements
if world_size > 1:
# Initialize distributed process group
logger.info("init distributed process group {} / {}".format(rank, world_size))
dist.init_process_group(
master_ip="localhost",
master_port=23456,
world_size=world_size,
rank=rank,
dev=rank,
)
model = models.__dict__[args.arch]()
if args.mode != "normal":
Q.quantize_qat(model, Q.ema_fakequant_qconfig)
if args.checkpoint:
logger.info("Load pretrained weights from %s", args.checkpoint)
ckpt = mge.load(args.checkpoint)
ckpt = ckpt["state_dict"] if "state_dict" in ckpt else ckpt
model.load_state_dict(ckpt, strict=False)
if args.mode == "quantized":
Q.quantize(model)
# Define valid graph
@jit.trace(symbolic=True)
def valid_func(image, label):
model.eval()
logits = model(image)
loss = F.cross_entropy_with_softmax(logits, label, label_smooth=0.1)
acc1, acc5 = F.accuracy(logits, label, (1, 5))
if dist.is_distributed(): # all_reduce_mean
loss = dist.all_reduce_sum(loss, "valid_loss") / dist.get_world_size()
acc1 = dist.all_reduce_sum(acc1, "valid_acc1") / dist.get_world_size()
acc5 = dist.all_reduce_sum(acc5, "valid_acc5") / dist.get_world_size()
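            # dividing the all-reduced sums by the world size yields the
            # mean across workers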
return loss, acc1, acc5
# Build valid datasets
logger.info("preparing dataset..")
valid_dataset = data.dataset.ImageNet(args.data, train=False)
valid_sampler = data.SequentialSampler(
valid_dataset, batch_size=100, drop_last=False
)
valid_queue = data.DataLoader(
valid_dataset,
sampler=valid_sampler,
transform=T.Compose(
[
T.Resize(256),
T.CenterCrop(224),
T.Normalize(mean=128),
T.ToMode("CHW"),
]
),
num_workers=args.workers,
)
_, valid_acc, valid_acc5 = infer(valid_func, valid_queue, args)
logger.info("TEST %f, %f", valid_acc, valid_acc5)
def infer(model, data_queue, args):
objs = AverageMeter("Loss")
top1 = AverageMeter("Acc@1")
top5 = AverageMeter("Acc@5")
total_time = AverageMeter("Time")
t = time.time()
for step, (image, label) in enumerate(data_queue):
n = image.shape[0]
image = image.astype("float32") # convert np.uint8 to float32
label = label.astype("int32")
loss, acc1, acc5 = model(image, label)
objs.update(loss.numpy()[0], n)
top1.update(100 * acc1.numpy()[0], n)
top5.update(100 * acc5.numpy()[0], n)
total_time.update(time.time() - t)
t = time.time()
if step % args.report_freq == 0 and dist.get_rank() == 0:
logger.info("Step %d, %s %s %s %s",
step, objs, top1, top5, total_time)
return objs.avg, top1.avg, top5.avg
class AverageMeter:
"""Computes and stores the average and current value"""
def __init__(self, name, fmt=":.3f"):
self.name = name
self.fmt = fmt
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __str__(self):
fmtstr = "{name} {val" + self.fmt + "} ({avg" + self.fmt + "})"
return fmtstr.format(**self.__dict__)
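# Minimal usage sketch for AverageMeter (values are illustrative):
#   meter = AverageMeter("Acc@1")
#   meter.update(75.0, n=32)  # batch accuracy, weighted by batch size
#   print(meter)              # -> Acc@1 75.000 (75.000)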
if __name__ == "__main__":
main()
|
[
"megengine.jit.trace",
"megengine.distributed.init_process_group",
"megengine.distributed.is_distributed",
"megengine.functional.cross_entropy_with_softmax",
"megengine.distributed.get_rank",
"megengine.distributed.get_world_size",
"megengine.data.transform.CenterCrop",
"megengine.get_device_count",
"megengine.load",
"megengine.data.transform.Resize",
"megengine.set_default_device",
"megengine.quantization.quantize_qat",
"megengine.get_logger",
"megengine.data.SequentialSampler",
"megengine.data.transform.Normalize",
"megengine.data.transform.ToMode",
"megengine.functional.accuracy",
"megengine.data.dataset.ImageNet",
"megengine.quantization.quantize",
"megengine.distributed.all_reduce_sum"
] |
[((909, 933), 'megengine.get_logger', 'mge.get_logger', (['__name__'], {}), '(__name__)\n', (923, 933), True, 'import megengine as mge\n'), ((961, 986), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (984, 986), False, 'import argparse\n'), ((3464, 3488), 'megengine.jit.trace', 'jit.trace', ([], {'symbolic': '(True)'}), '(symbolic=True)\n', (3473, 3488), True, 'import megengine.jit as jit\n'), ((4127, 4172), 'megengine.data.dataset.ImageNet', 'data.dataset.ImageNet', (['args.data'], {'train': '(False)'}), '(args.data, train=False)\n', (4148, 4172), True, 'import megengine.data as data\n'), ((4193, 4263), 'megengine.data.SequentialSampler', 'data.SequentialSampler', (['valid_dataset'], {'batch_size': '(100)', 'drop_last': '(False)'}), '(valid_dataset, batch_size=100, drop_last=False)\n', (4215, 4263), True, 'import megengine.data as data\n'), ((4919, 4930), 'time.time', 'time.time', ([], {}), '()\n', (4928, 4930), False, 'import time\n'), ((1896, 1923), 'megengine.get_device_count', 'mge.get_device_count', (['"""gpu"""'], {}), "('gpu')\n", (1916, 1923), True, 'import megengine as mge\n'), ((2079, 2109), 'megengine.set_default_device', 'mge.set_default_device', (['"""cpux"""'], {}), "('cpux')\n", (2101, 2109), True, 'import megengine as mge\n'), ((2257, 2285), 'multiprocessing.set_start_method', 'mp.set_start_method', (['"""spawn"""'], {}), "('spawn')\n", (2276, 2285), True, 'import multiprocessing as mp\n'), ((2808, 2921), 'megengine.distributed.init_process_group', 'dist.init_process_group', ([], {'master_ip': '"""localhost"""', 'master_port': '(23456)', 'world_size': 'world_size', 'rank': 'rank', 'dev': 'rank'}), "(master_ip='localhost', master_port=23456,\n world_size=world_size, rank=rank, dev=rank)\n", (2831, 2921), True, 'import megengine.distributed as dist\n'), ((3070, 3116), 'megengine.quantization.quantize_qat', 'Q.quantize_qat', (['model', 'Q.ema_fakequant_qconfig'], {}), '(model, Q.ema_fakequant_qconfig)\n', (3084, 3116), True, 'import megengine.quantization as Q\n'), ((3229, 3254), 'megengine.load', 'mge.load', (['args.checkpoint'], {}), '(args.checkpoint)\n', (3237, 3254), True, 'import megengine as mge\n'), ((3415, 3432), 'megengine.quantization.quantize', 'Q.quantize', (['model'], {}), '(model)\n', (3425, 3432), True, 'import megengine.quantization as Q\n'), ((3589, 3650), 'megengine.functional.cross_entropy_with_softmax', 'F.cross_entropy_with_softmax', (['logits', 'label'], {'label_smooth': '(0.1)'}), '(logits, label, label_smooth=0.1)\n', (3617, 3650), True, 'import megengine.functional as F\n'), ((3672, 3705), 'megengine.functional.accuracy', 'F.accuracy', (['logits', 'label', '(1, 5)'], {}), '(logits, label, (1, 5))\n', (3682, 3705), True, 'import megengine.functional as F\n'), ((3717, 3738), 'megengine.distributed.is_distributed', 'dist.is_distributed', ([], {}), '()\n', (3736, 3738), True, 'import megengine.distributed as dist\n'), ((5358, 5369), 'time.time', 'time.time', ([], {}), '()\n', (5367, 5369), False, 'import time\n'), ((2364, 2420), 'multiprocessing.Process', 'mp.Process', ([], {'target': 'worker', 'args': '(rank, world_size, args)'}), '(target=worker, args=(rank, world_size, args))\n', (2374, 2420), True, 'import multiprocessing as mp\n'), ((3778, 3817), 'megengine.distributed.all_reduce_sum', 'dist.all_reduce_sum', (['loss', '"""valid_loss"""'], {}), "(loss, 'valid_loss')\n", (3797, 3817), True, 'import megengine.distributed as dist\n'), ((3820, 3841), 'megengine.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', 
(3839, 3841), True, 'import megengine.distributed as dist\n'), ((3861, 3900), 'megengine.distributed.all_reduce_sum', 'dist.all_reduce_sum', (['acc1', '"""valid_acc1"""'], {}), "(acc1, 'valid_acc1')\n", (3880, 3900), True, 'import megengine.distributed as dist\n'), ((3903, 3924), 'megengine.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (3922, 3924), True, 'import megengine.distributed as dist\n'), ((3944, 3983), 'megengine.distributed.all_reduce_sum', 'dist.all_reduce_sum', (['acc5', '"""valid_acc5"""'], {}), "(acc5, 'valid_acc5')\n", (3963, 3983), True, 'import megengine.distributed as dist\n'), ((3986, 4007), 'megengine.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (4005, 4007), True, 'import megengine.distributed as dist\n'), ((5329, 5340), 'time.time', 'time.time', ([], {}), '()\n', (5338, 5340), False, 'import time\n'), ((5415, 5430), 'megengine.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (5428, 5430), True, 'import megengine.distributed as dist\n'), ((4426, 4439), 'megengine.data.transform.Resize', 'T.Resize', (['(256)'], {}), '(256)\n', (4434, 4439), True, 'import megengine.data.transform as T\n'), ((4457, 4474), 'megengine.data.transform.CenterCrop', 'T.CenterCrop', (['(224)'], {}), '(224)\n', (4469, 4474), True, 'import megengine.data.transform as T\n'), ((4492, 4513), 'megengine.data.transform.Normalize', 'T.Normalize', ([], {'mean': '(128)'}), '(mean=128)\n', (4503, 4513), True, 'import megengine.data.transform as T\n'), ((4531, 4546), 'megengine.data.transform.ToMode', 'T.ToMode', (['"""CHW"""'], {}), "('CHW')\n", (4539, 4546), True, 'import megengine.data.transform as T\n')]
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import contextlib
import functools
import itertools
import os
from typing import Callable, Tuple, Union
import numpy as np
import megengine._internal as mgb
from megengine._internal.plugin import CompGraphProfiler
from ..core import Tensor, graph, tensor
from .sublinear_memory_config import SublinearMemoryConfig
def sideeffect(f):
# during eager tracing, wrapped function is called with proxy inputs
# during static tracing, wrapped function will not be called at all
@functools.wraps(f)
def wrapper(*args, **kwargs): # pylint: disable=inconsistent-return-statements
if not trace._active_instance:
return f(*args, **kwargs)
tensors = {}
for i, x in itertools.chain(enumerate(args), kwargs.items()):
if isinstance(x, Tensor):
tensors[i] = x
if tensors:
_keys, tensors = zip(*tensors.items())
else:
_keys, tensors = (), ()
def callback(*tensors, f=f, keys=_keys, args=args, kwargs=kwargs):
replace = dict(zip(keys, tensors))
args = tuple(replace.get(i, x) for i, x in enumerate(args))
kwargs = {i: replace.get(i, x) for i, x in kwargs.items()}
if f(*args, **kwargs) is not None:
raise TypeError("a sideeffect function should return None")
# TODO: clear memory
trace._active_instance._register_callback(callback, tensors)
return wrapper
def mark_impure(x):
if not trace._active_instance:
return x
return trace._active_instance._mark_impure(x)
def barrier(x):
if not trace._active_instance:
return x
return trace._active_instance._insert_barrier(x)
def _dummy():
return mgb.make_immutable(*graph._use_default_if_none(None, None), 0)
class unset:
pass
class trace:
"""
Wrap a callable and provide:
* tracing via :meth:`.trace` and :meth:`.dump`
    * accelerated evaluation via :meth:`.__call__`
:param func: Positional only argument.
:param symbolic: Whether to use symbolic tensor. Default: False
:param opt_level: Optimization level for compiling trace.
:param log_level: Log level.
    :param sublinear_memory_config: Configuration for sublinear memory optimization.
        If not None, it enables sublinear memory optimization with the given setting.
    :param allreduce_pack_max_size: Maximum size of an allreduce pack in MB.
        If not None, multiple gradients will be packed and synchronized together.
:param profiling: Whether to profile compiled trace. Default: False
"""
_active_instance = None
enabled = not os.getenv("MGE_DISABLE_TRACE")
_UNSTARTED = "unstarted"
_STARTED = "started"
_FINISHED = "finished"
def __new__(cls, *args, **kwargs):
if not args:
return functools.partial(cls, **kwargs)
return super().__new__(cls)
def __init__(
self,
func: Callable[..., Union[None, Tensor, Tuple[Tensor]]],
*,
symbolic: bool = False,
opt_level: int = None,
log_level: int = None,
sublinear_memory_config: SublinearMemoryConfig = None,
allreduce_pack_max_size: int = None,
profiling: bool = False
):
self.__wrapped__ = func
self._symbolic = symbolic
self._graph_opt_level = opt_level
self._log_level = log_level
self._sublinear_memory_config = sublinear_memory_config
self._allreduce_pack_max_size = allreduce_pack_max_size
self._status = self._UNSTARTED
self._args = None
self._kwargs = None
self._outputs = unset
self._sym_outputs = unset
self._outspec = None
self._checkpoint = None
self._compiled_func = None
self._profiling = profiling
self._profiler = None
@property
def _active(self):
c1 = self._status == self._STARTED
c2 = type(self)._active_instance is self
assert c1 == c2
return c1
def _register_callback(self, f, args=()):
assert self._active
assert isinstance(args, (tuple, list))
proxies = self._make_proxies(args)
self._forward(args, proxies, checkpoint=True)
# NOTE: under eager graph callback will fire immediately
job = mgb.opr.callback_injector(
self._insert_barrier(_dummy()), lambda _: f(*proxies)
)
self._insert_checkpoint(job)
self._outspec.append(job)
def _insert_barrier(self, x):
assert self._active
if self._checkpoint is None:
return x
if isinstance(x, Tensor):
x = x._symvar
wrap = True
else:
wrap = False
if not isinstance(x, mgb.SymbolVar):
raise TypeError
x = mgb.opr.virtual_dep([x, self._checkpoint])
if wrap:
x = Tensor(x)
return x
def _insert_checkpoint(self, *args, no_barrier=False):
assert self._active
if not args:
return
args = tuple(x._symvar if isinstance(x, Tensor) else x for x in args)
for x in args:
if not isinstance(x, mgb.SymbolVar):
raise TypeError
if not no_barrier and self._checkpoint is not None:
# normally no need to _insert_barrier here, but if
            # someone forgets to call _insert_barrier beforehand,
# this can make things less broken
args += (self._checkpoint,)
if len(args) == 1:
self._checkpoint = args[0]
else:
self._checkpoint = mgb.opr.virtual_dep(args)
def _mark_impure(self, x):
assert self._active
ret = x
if isinstance(x, Tensor):
x = x._symvar
if not isinstance(x, mgb.SymbolVar):
raise TypeError
self._outspec.append(x)
self._insert_checkpoint(x)
return ret
def _make_proxies(self, args):
assert isinstance(args, (tuple, list))
for x in args:
assert isinstance(x, Tensor)
return tuple(tensor(dtype=x.dtype, device=x.device) for x in args)
def _forward(self, srcs, dests, checkpoint=True):
# pseudo-op: does not run under static graph; traced
# TODO: use shared memory
assert len(srcs) == len(dests)
if not self._active:
for s, d in zip(srcs, dests):
d.set_value(s, share=False)
return
jobs = []
for s, d in zip(srcs, dests):
def callback(value, dest=d):
dest.set_value(value, share=False)
s = self._insert_barrier(s._symvar)
# NOTE: callback immediately fire in eager graph
jobs.append(mgb.opr.callback_injector(s, callback))
self._outspec.extend(jobs)
if checkpoint:
self._insert_checkpoint(*jobs, no_barrier=True)
def _forward_inputs(self, *args, **kwargs):
if self._kwargs is None:
self._kwargs = kwargs
elif self._kwargs != kwargs:
raise ValueError("kwargs must not change between invocations")
if self._args is None:
self._args = []
for i in args:
if isinstance(i, Tensor):
self._args.append(tensor(dtype=i.dtype, device=i.device))
self._args[-1].set_value(i, share=False)
else:
self._args.append(tensor(i))
else:
if not len(args) == len(self._args):
raise TypeError
for i, proxy in zip(args, self._args):
proxy.set_value(i, share=False)
# XXX: sync?
def _make_outputs(self, outputs):
if outputs is None:
self._outputs = None
return
if isinstance(outputs, Tensor):
# no one is able to call barrier after this, so no need to checkpoint
# but checkpoint do little harm anyway
(self._outputs,) = self._make_proxies([outputs])
return
if not isinstance(outputs, (tuple, list)):
raise TypeError("should return (tuple of) tensor")
for i in outputs:
if not isinstance(i, Tensor):
raise TypeError("should return (tuple of) tensor")
self._outputs = self._make_proxies(outputs)
    def _forward_outputs(self, outputs):
# pseudo-op: does not run under static graph; traced
if self._outputs is unset:
self._make_outputs(outputs)
if self._outputs is None:
if outputs is not None:
raise TypeError("should return None")
elif isinstance(self._outputs, Tensor):
if not isinstance(outputs, Tensor):
raise TypeError("should return a tensor")
self._forward([outputs], [self._outputs])
else:
assert isinstance(self._outputs, tuple)
def check():
if not isinstance(outputs, (tuple, list)):
return False
if len(self._outputs) != len(outputs):
return False
for x in outputs:
if not isinstance(x, Tensor):
return False
return True
if not check():
raise TypeError(
"should return tuple of %d tensors" % len(self._outputs)
)
self._forward(outputs, self._outputs)
def _apply_graph_options(self, cg):
# graph opt level
if self._graph_opt_level is not None:
cg.set_option("graph_opt_level", self._graph_opt_level)
# log level
if self._log_level is not None:
cg.set_option("log_level", self._log_level)
# sublinear
if self._sublinear_memory_config is not None:
cg.set_option("enable_sublinear_memory_opt", True)
cg.set_option(
"sublinear_mem_cofig.lb_memory",
self._sublinear_memory_config.lb_memory,
)
cg.set_option(
"sublinear_mem_cofig.genetic_nr_iter",
self._sublinear_memory_config.genetic_nr_iter,
)
cg.set_option(
"sublinear_mem_cofig.genetic_pool_size",
self._sublinear_memory_config.genetic_pool_size,
)
cg.set_option(
"sublinear_mem_cofig.thresh_nr_try",
self._sublinear_memory_config.thresh_nr_try,
)
cg.set_option(
"sublinear_mem_cofig.num_worker",
self._sublinear_memory_config.num_worker,
)
# pack allreduce
if self._allreduce_pack_max_size is not None:
cg.set_option("allreduce_pack_max_size", self._allreduce_pack_max_size)
# profile
if self._profiling:
self._profiler = CompGraphProfiler(cg)
def _get_graph(self, eager):
if eager:
if not hasattr(self, "_eager_graph"):
# pylint: disable=attribute-defined-outside-init
self._eager_graph = graph.Graph(eager_evaluation=True)
self._apply_graph_options(self._eager_graph)
return self._eager_graph
else:
if not hasattr(self, "_static_graph"):
# pylint: disable=attribute-defined-outside-init
self._static_graph = graph.Graph(eager_evaluation=False)
self._apply_graph_options(self._static_graph)
return self._static_graph
@contextlib.contextmanager
def _prepare(self, args, kwargs, enable):
# prepare for execution
self._forward_inputs(*args, **kwargs)
if not enable:
# XXX: use our own graph here?
cg = None
elif self._status == self._FINISHED:
cg = None
elif self._symbolic:
cg = self._get_graph(eager=False)
else:
cg = self._get_graph(eager=True)
try:
# NOTE: always trace in a new graph, so capturing an undetached tensor
# will never work (would work if tracing in default graph)
if cg is None:
yield
else:
with cg:
yield
finally:
# XXX: properly release memory
if cg:
cg.clear_device_memory()
@contextlib.contextmanager
def _activate(self):
# prepare for tracing
if self._status != self._UNSTARTED:
raise RuntimeError("cannot trace a second time")
if type(self)._active_instance is not None:
raise RuntimeError("nested trace is unsupported")
self._status = self._STARTED
type(self)._active_instance = self
self._user_cache = {}
try:
yield
finally:
self._status = self._FINISHED
self._user_cache = None
type(self)._active_instance = None
def _run_wrapped(self):
outputs = self.__wrapped__(*self._args, **self._kwargs)
        self._forward_outputs(outputs)
return outputs
def _do_trace(self):
with self._activate():
self._outspec = []
outputs = self._run_wrapped()
if outputs is None:
self._sym_outputs = None
else:
if isinstance(outputs, Tensor):
outputs = [outputs]
# _run_wrapped has checked validity of outputs
self._sym_outputs = tuple(i._symvar for i in outputs)
mgb.comp_graph_tools.set_priority_to_id(self._outspec)
self._compiled_func = graph.get_default_graph().compile(None, self._outspec)
def trace(self, *args: Tensor, **kwargs):
"""
Trace wrapped callable with provided arguments.
"""
with self._prepare(args, kwargs, enable=True):
self._do_trace()
return self
def __call__(self, *args: Tensor, **kwargs):
"""
Evaluate on provided arguments, using compiled trace
instead of the original callable if applicable.
        :return: ``None`` or :class:`~.Tensor` or tuple of :class:`~.Tensor`, depending on
            the return value of the wrapped callable.
"""
with self._prepare(args, kwargs, enable=self.enabled):
if not self.enabled:
self._run_wrapped()
elif self._status == self._FINISHED:
self._compiled_func()
else:
if self._status == self._UNSTARTED:
self._do_trace()
if self._symbolic:
self._compiled_func()
return self._outputs
def dump(
self,
fpath,
*,
arg_names=None,
append=False,
optimize_for_inference=False,
**kwargs
):
"""
Serialize trace to file system.
:param fpath: positional only argument. Path of output file.
:param arg_names: names of the input tensors in the traced function.
:param append: whether output is appended to ``fpath``.
:param optimize_for_inference: whether to enable optimize_for_inference
pass before dump.
:param enable_io16xc32: whether to use float16 for I/O between oprs and use
float32 as internal computation precision. Note the output var would be
changed to float16.
:param enable_ioc16: whether to use float16 for both I/O and computation
precision.
        :param enable_hwcd4: whether to use NHWCD4 data layout. This is faster on some
            OpenCL backends.
        :param enable_nchw88: whether to use NCHW88 data layout. It is currently
            used in the x86 AVX backend.
        :param enable_nchw44: whether to use NCHW44 data layout. It is currently
            used in the ARM backend.
        :param enable_nchw44_dot: whether to use NCHW44_dot data layout. It is currently
            used in the ARMv8.2+dotprod backend.
        :param enable_nchw4: whether to use NCHW4 data layout. It is currently
            used in the NVIDIA backend (based on cuDNN).
        :param enable_nchw32: whether to use NCHW32 data layout. It is currently
            used in the NVIDIA backend with TensorCore (based on cuDNN).
        :param enable_chwn4: whether to use CHWN4 data layout. It is currently
            used in the NVIDIA backend with TensorCore.
        :param enable_fuse_conv_bias_nonlinearity: whether to fuse conv+bias+nonlinearity
            into one opr.
        :param enable_fuse_conv_bias_with_z: whether to fuse conv_bias with z
            input for inference on the NVIDIA backend (this optimization pass
            will cause the output precision of training and inference to mismatch)
"""
if self._status != self._FINISHED:
raise ValueError("not traced")
assert isinstance(self._sym_outputs, (tuple, type(None)))
if not self._sym_outputs:
raise ValueError("not outputs")
if arg_names is None:
arg_names = ["arg_%d" % i for i in range(len(self._args))]
elif len(arg_names) != len(self._args):
raise ValueError(
"len(arg_names) should be {}, got {}".format(
len(self._args), len(arg_names)
)
)
optimize_for_inference_args_map = {
"enable_io16xc32": "f16_io_f32_comp",
"enable_ioc16": "f16_io_comp",
"enable_hwcd4": "use_nhwcd4",
"enable_nchw4": "use_nchw4",
"enable_nchw88": "use_nchw88",
"enable_nchw32": "use_nchw32",
"enable_nchw44": "use_nchw44",
"enable_nchw44_dot": "use_nchw44_dot",
"enable_chwn4": "use_chwn4",
"enable_fuse_conv_bias_nonlinearity": "fuse_conv_bias_nonlinearity",
"enable_fuse_conv_bias_with_z": "fuse_conv_bias_with_z",
}
if optimize_for_inference:
optimize_for_inference_kwargs = {}
for k, v in optimize_for_inference_args_map.items():
if kwargs.pop(k, False):
optimize_for_inference_kwargs[v] = True
else:
for k in optimize_for_inference_args_map:
if kwargs.get(k, False):
raise ValueError(
"cannot set %s when optimize_for_inference is not set" % k
)
if kwargs:
raise ValueError("unknown options: %s" % list(kwargs))
cg = self._sym_outputs[0].owner_graph
replace = {}
for t, name in zip(self._args, arg_names):
# relies on symvar dedup
s = t.__mgb_symvar__(comp_graph=cg)
replace[s] = mgb.make_arg(
t.device, cg, dtype=t.dtype, shape=t.shape, name=name
)
# Convert VolatileSharedDeviceTensor to SharedDeviceTensor,
# otherwise some optimizations would not work. The conversion is
# safe because there simply is no way (using builtin ops) to make
# a VolatileSharedDeviceTensor actually volatile.
for s in mgb.cgtools.get_dep_vars(
self._sym_outputs, "VolatileSharedDeviceTensor"
):
if s in replace:
continue # is an input
replace[s] = mgb.SharedND._from_symvar(s).symvar(
cg, name=s.name, volatile=False
)
sym_outputs = mgb.cgtools.replace_vars(self._sym_outputs, replace)
sym_outputs = list(sym_outputs)
if optimize_for_inference:
sym_outputs = mgb.optimize_for_inference(
sym_outputs, **optimize_for_inference_kwargs
)
mgb.serialize_comp_graph_to_file(fpath, sym_outputs, append=append)
def get_profile(self):
"""
Get profiling result for compiled trace.
:return: a json compatible object.
"""
if not self._profiler:
raise RuntimeError("trace is not set with profiling=True")
return self._profiler.get()
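# Hedged usage sketch based on the docstrings above; the network `net`, its
# input `inp`, and the dump path are illustrative assumptions:
#
#   @trace(symbolic=True, profiling=True)
#   def step(x):
#       return net(x)
#
#   y = step(inp)               # first call traces and compiles the graph
#   step.dump("net.mge", arg_names=["data"], optimize_for_inference=True)
#   stats = step.get_profile()  # json-compatible profiling result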
|
[
"megengine._internal.cgtools.get_dep_vars",
"megengine._internal.opr.callback_injector",
"megengine._internal.make_arg",
"megengine._internal.plugin.CompGraphProfiler",
"megengine._internal.serialize_comp_graph_to_file",
"megengine._internal.optimize_for_inference",
"megengine._internal.comp_graph_tools.set_priority_to_id",
"megengine._internal.opr.virtual_dep",
"megengine._internal.SharedND._from_symvar",
"megengine._internal.cgtools.replace_vars"
] |
[((865, 883), 'functools.wraps', 'functools.wraps', (['f'], {}), '(f)\n', (880, 883), False, 'import functools\n'), ((3018, 3048), 'os.getenv', 'os.getenv', (['"""MGE_DISABLE_TRACE"""'], {}), "('MGE_DISABLE_TRACE')\n", (3027, 3048), False, 'import os\n'), ((5194, 5236), 'megengine._internal.opr.virtual_dep', 'mgb.opr.virtual_dep', (['[x, self._checkpoint]'], {}), '([x, self._checkpoint])\n', (5213, 5236), True, 'import megengine._internal as mgb\n'), ((19650, 19723), 'megengine._internal.cgtools.get_dep_vars', 'mgb.cgtools.get_dep_vars', (['self._sym_outputs', '"""VolatileSharedDeviceTensor"""'], {}), "(self._sym_outputs, 'VolatileSharedDeviceTensor')\n", (19674, 19723), True, 'import megengine._internal as mgb\n'), ((19962, 20014), 'megengine._internal.cgtools.replace_vars', 'mgb.cgtools.replace_vars', (['self._sym_outputs', 'replace'], {}), '(self._sym_outputs, replace)\n', (19986, 20014), True, 'import megengine._internal as mgb\n'), ((20227, 20294), 'megengine._internal.serialize_comp_graph_to_file', 'mgb.serialize_comp_graph_to_file', (['fpath', 'sym_outputs'], {'append': 'append'}), '(fpath, sym_outputs, append=append)\n', (20259, 20294), True, 'import megengine._internal as mgb\n'), ((3211, 3243), 'functools.partial', 'functools.partial', (['cls'], {}), '(cls, **kwargs)\n', (3228, 3243), False, 'import functools\n'), ((5993, 6018), 'megengine._internal.opr.virtual_dep', 'mgb.opr.virtual_dep', (['args'], {}), '(args)\n', (6012, 6018), True, 'import megengine._internal as mgb\n'), ((11341, 11362), 'megengine._internal.plugin.CompGraphProfiler', 'CompGraphProfiler', (['cg'], {}), '(cg)\n', (11358, 11362), False, 'from megengine._internal.plugin import CompGraphProfiler\n'), ((14050, 14104), 'megengine._internal.comp_graph_tools.set_priority_to_id', 'mgb.comp_graph_tools.set_priority_to_id', (['self._outspec'], {}), '(self._outspec)\n', (14089, 14104), True, 'import megengine._internal as mgb\n'), ((19262, 19329), 'megengine._internal.make_arg', 'mgb.make_arg', (['t.device', 'cg'], {'dtype': 't.dtype', 'shape': 't.shape', 'name': 'name'}), '(t.device, cg, dtype=t.dtype, shape=t.shape, name=name)\n', (19274, 19329), True, 'import megengine._internal as mgb\n'), ((20116, 20188), 'megengine._internal.optimize_for_inference', 'mgb.optimize_for_inference', (['sym_outputs'], {}), '(sym_outputs, **optimize_for_inference_kwargs)\n', (20142, 20188), True, 'import megengine._internal as mgb\n'), ((7142, 7180), 'megengine._internal.opr.callback_injector', 'mgb.opr.callback_injector', (['s', 'callback'], {}), '(s, callback)\n', (7167, 7180), True, 'import megengine._internal as mgb\n'), ((19841, 19869), 'megengine._internal.SharedND._from_symvar', 'mgb.SharedND._from_symvar', (['s'], {}), '(s)\n', (19866, 19869), True, 'import megengine._internal as mgb\n')]
|
#!/usr/bin/env python3
from dataset import SIDDValData
from model import UNetD
import megengine.data as data
from utils import batch_PSNR
from tqdm import tqdm
import argparse
import pickle
import megengine
def test(args):
valid_dataset = SIDDValData(args.data)
valid_sampler = data.SequentialSampler(
valid_dataset, batch_size=1, drop_last=False
)
valid_dataloader = data.DataLoader(
valid_dataset,
sampler=valid_sampler,
num_workers=8,
)
model = UNetD(3)
with open(args.checkpoint, "rb") as f:
state = pickle.load(f)
model.load_state_dict(state["state_dict"])
model.eval()
def valid_step(image, label):
pred = model(image)
pred = image - pred
psnr_it = batch_PSNR(pred, label)
return psnr_it
def valid(func, data_queue):
psnr_v = 0.
for step, (image, label) in tqdm(enumerate(data_queue)):
image = megengine.tensor(image)
label = megengine.tensor(label)
psnr_it = func(image, label)
psnr_v += psnr_it
psnr_v /= step + 1
return psnr_v
psnr_v = valid(valid_step, valid_dataloader)
print("PSNR: {:.3f}".format(psnr_v.item()) )
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="MegEngine NBNet")
parser.add_argument("-d", "--data", default="/data/sidd", metavar="DIR", help="path to imagenet dataset")
parser.add_argument("-c", "--checkpoint", help="path to checkpoint")
args = parser.parse_args()
test(args)
# vim: ts=4 sw=4 sts=4 expandtab
|
[
"megengine.data.DataLoader",
"megengine.tensor",
"megengine.data.SequentialSampler"
] |
[((245, 267), 'dataset.SIDDValData', 'SIDDValData', (['args.data'], {}), '(args.data)\n', (256, 267), False, 'from dataset import SIDDValData\n'), ((288, 356), 'megengine.data.SequentialSampler', 'data.SequentialSampler', (['valid_dataset'], {'batch_size': '(1)', 'drop_last': '(False)'}), '(valid_dataset, batch_size=1, drop_last=False)\n', (310, 356), True, 'import megengine.data as data\n'), ((394, 462), 'megengine.data.DataLoader', 'data.DataLoader', (['valid_dataset'], {'sampler': 'valid_sampler', 'num_workers': '(8)'}), '(valid_dataset, sampler=valid_sampler, num_workers=8)\n', (409, 462), True, 'import megengine.data as data\n'), ((506, 514), 'model.UNetD', 'UNetD', (['(3)'], {}), '(3)\n', (511, 514), False, 'from model import UNetD\n'), ((1276, 1330), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""MegEngine NBNet"""'}), "(description='MegEngine NBNet')\n", (1299, 1330), False, 'import argparse\n'), ((574, 588), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (585, 588), False, 'import pickle\n'), ((762, 785), 'utils.batch_PSNR', 'batch_PSNR', (['pred', 'label'], {}), '(pred, label)\n', (772, 785), False, 'from utils import batch_PSNR\n'), ((948, 971), 'megengine.tensor', 'megengine.tensor', (['image'], {}), '(image)\n', (964, 971), False, 'import megengine\n'), ((992, 1015), 'megengine.tensor', 'megengine.tensor', (['label'], {}), '(label)\n', (1008, 1015), False, 'import megengine\n')]
|
from typing import Optional, List
from fastapi import FastAPI, File, UploadFile, Request
from sqlmodel import Field, Session, SQLModel, create_engine, select
from pydantic import BaseModel
from network import Network
import requests
from PIL import Image as ImagePIL
import torchvision as tv
app = FastAPI()
network = Network()
network.model.eval()
class Image(SQLModel, table=True):
key: Optional[int] = Field(default=None, primary_key=True)
image_name: str
label: str
image_url: str
engine = create_engine("sqlite:///image.db")
@app.get("/")
def read_images():
with Session(engine) as session:
statement = select(Image)
images = session.exec(statement).all()
return images
class Item(BaseModel):
key: int
label: str = ""
@app.post("/")
def update_image(item: Item):
with Session(engine) as session:
statement = select(Image).where(Image.key == item.key)
results = session.exec(statement)
image = results.one()
image.label = item.label
session.add(image)
session.commit()
@app.get("/predict/{item}")
def predict(item: int):
import torch
import numpy as np
with Session(engine) as session:
statement = select(Image).where(Image.key == item)
results = session.exec(statement)
image = results.one()
image_url = image.image_url
img = ImagePIL.open(requests.get(image_url, stream=True).raw)
img = tv.transforms.functional.pil_to_tensor(img).float().unsqueeze(0)
with torch.no_grad():
result = network.model(img)
        result = torch.nn.functional.softmax(result, dim=1)
ret = {
'1' : float(result[0][0]), '2': float(result[0][1])
}
return str(ret)
@app.post("/upload_image")
async def upload_image(files: List[UploadFile]):
# return {"filenames": [file.filename for file in files]}
from minio import Minio
import io
with Session(engine) as session:
client = Minio(
"localhost:9001",
secure=False,
access_key="<KEY>",
secret_key="<KEY>"
)
for item in files:
image_name = item.filename
cont = await item.read()
            content = io.BytesIO(cont)
            length = len(cont)  # object size in bytes; avoids consuming the stream
client.put_object(
"image", image_name, content, length
)
image_url = f"http://localhost:9001/image/{image_name}"
data = Image(image_name=image_name[:-4], label="", image_url=image_url)
session.add(data)
session.commit()
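# Hedged usage sketch for the endpoints above (host/port and the file name are
# illustrative; run the app with e.g. `uvicorn main:app`):
#   curl -F "files=@cat.jpg" http://localhost:8000/upload_image
#   curl http://localhost:8000/predict/1
#   curl -X POST http://localhost:8000/ -H "Content-Type: application/json" \
#        -d '{"key": 1, "label": "cat"}'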
|
[
"sqlmodel.create_engine",
"sqlmodel.select",
"sqlmodel.Session",
"sqlmodel.Field"
] |
[((299, 308), 'fastapi.FastAPI', 'FastAPI', ([], {}), '()\n', (306, 308), False, 'from fastapi import FastAPI, File, UploadFile, Request\n'), ((319, 328), 'network.Network', 'Network', ([], {}), '()\n', (326, 328), False, 'from network import Network\n'), ((514, 549), 'sqlmodel.create_engine', 'create_engine', (['"""sqlite:///image.db"""'], {}), "('sqlite:///image.db')\n", (527, 549), False, 'from sqlmodel import Field, Session, SQLModel, create_engine, select\n'), ((412, 449), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (417, 449), False, 'from sqlmodel import Field, Session, SQLModel, create_engine, select\n'), ((593, 608), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (600, 608), False, 'from sqlmodel import Field, Session, SQLModel, create_engine, select\n'), ((641, 654), 'sqlmodel.select', 'select', (['Image'], {}), '(Image)\n', (647, 654), False, 'from sqlmodel import Field, Session, SQLModel, create_engine, select\n'), ((837, 852), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (844, 852), False, 'from sqlmodel import Field, Session, SQLModel, create_engine, select\n'), ((1187, 1202), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (1194, 1202), False, 'from sqlmodel import Field, Session, SQLModel, create_engine, select\n'), ((1979, 1994), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (1986, 1994), False, 'from sqlmodel import Field, Session, SQLModel, create_engine, select\n'), ((2024, 2101), 'minio.Minio', 'Minio', (['"""localhost:9001"""'], {'secure': '(False)', 'access_key': '"""<KEY>"""', 'secret_key': '"""<KEY>"""'}), "('localhost:9001', secure=False, access_key='<KEY>', secret_key='<KEY>')\n", (2029, 2101), False, 'from minio import Minio\n'), ((1544, 1559), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1557, 1559), False, 'import torch\n'), ((1622, 1657), 'torch.nn.functional.softmax', 'torch.nn.functional.softmax', (['result'], {}), '(result)\n', (1649, 1657), False, 'import torch\n'), ((2286, 2302), 'io.BytesIO', 'io.BytesIO', (['cont'], {}), '(cont)\n', (2296, 2302), False, 'import io\n'), ((2366, 2382), 'io.BytesIO', 'io.BytesIO', (['cont'], {}), '(cont)\n', (2376, 2382), False, 'import io\n'), ((885, 898), 'sqlmodel.select', 'select', (['Image'], {}), '(Image)\n', (891, 898), False, 'from sqlmodel import Field, Session, SQLModel, create_engine, select\n'), ((1235, 1248), 'sqlmodel.select', 'select', (['Image'], {}), '(Image)\n', (1241, 1248), False, 'from sqlmodel import Field, Session, SQLModel, create_engine, select\n'), ((1410, 1446), 'requests.get', 'requests.get', (['image_url'], {'stream': '(True)'}), '(image_url, stream=True)\n', (1422, 1446), False, 'import requests\n'), ((1466, 1509), 'torchvision.transforms.functional.pil_to_tensor', 'tv.transforms.functional.pil_to_tensor', (['img'], {}), '(img)\n', (1504, 1509), True, 'import torchvision as tv\n')]
|
from typing import Callable
from sqlmodel import select, Session
from . import BaseRepository, engine
from ..models import ProfileDB, UserDB
from app.shared.exc import (
EmailAlreadyTakenError,
UserDoesNotExist,
UsernameAlreadyTakenError)
class UserRepository(BaseRepository):
model = UserDB
@classmethod
def create_with_profile(cls, **kwargs) -> UserDB:
user = cls.model(**kwargs)
user.profile = ProfileDB()
user.profile.create_gravatar()
user.save()
return user
@classmethod
def get_all_ilike_username(cls,
search_for: str
) -> list[UserDB]:
with Session(engine) as session:
return session.exec(select(cls.model)
.where(cls.model.username.like(f"%{search_for}%"))
).unique().all()
@classmethod
def get_model_by_username(cls, username: str) -> UserDB | None:
return cls.get_model_by_attr(username=username)
@classmethod
def get_model_by_email(cls, email: str) -> UserDB | None:
return cls.get_model_by_attr(email=email)
@classmethod
def is_credentials_valid(cls,
username: str,
password: str,
verify_hash_func: Callable
) -> bool:
if not verify_hash_func:
            raise NameError("Hash function is not defined.")
if not (user := cls.get_model_by_username(username)):
return False
return verify_hash_func(password, user.hashed_password)
@classmethod
def change_username(cls, old_username: str, new_username: str) -> UserDB | None:
        # If the new username is the same as the user's current one
        # OR is already taken by another user
if old_username == new_username or cls.get_model_by_username(new_username):
raise UsernameAlreadyTakenError("Please try a different username.")
if not (user := cls.get_model_by_username(old_username)):
raise UserDoesNotExist("User not found.")
user.username = new_username
user.save()
return user
@classmethod
def change_email(cls, old_email: str, new_email: str) -> UserDB | None:
        # If the new email is the same as the user's current one
        # OR is already taken by another user
if old_email == new_email or cls.get_model_by_email(new_email):
            raise EmailAlreadyTakenError("Please try a different email address.")
if not (user := cls.get_model_by_email(old_email)):
raise UserDoesNotExist("User not found.")
user.email = new_email
user.save()
return user
@classmethod
def change_password(cls, user_id: int, new_password: str) -> bool:
pass
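# Hedged usage sketch: `pwd_context` below is an assumed passlib CryptContext,
# not part of this repository; any callable (password, hash) -> bool works.
#
#   from passlib.context import CryptContext
#   pwd_context = CryptContext(schemes=["bcrypt"], deprecated="auto")
#   ok = UserRepository.is_credentials_valid(
#       "alice", "s3cret", verify_hash_func=pwd_context.verify)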
|
[
"sqlmodel.Session",
"sqlmodel.select"
] |
[((697, 712), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (704, 712), False, 'from sqlmodel import select, Session\n'), ((1973, 2034), 'app.shared.exc.UsernameAlreadyTakenError', 'UsernameAlreadyTakenError', (['"""Please try a different username."""'], {}), "('Please try a different username.')\n", (1998, 2034), False, 'from app.shared.exc import EmailAlreadyTakenError, UserDoesNotExist, UsernameAlreadyTakenError\n'), ((2120, 2155), 'app.shared.exc.UserDoesNotExist', 'UserDoesNotExist', (['"""User not found."""'], {}), "('User not found.')\n", (2136, 2155), False, 'from app.shared.exc import EmailAlreadyTakenError, UserDoesNotExist, UsernameAlreadyTakenError\n'), ((2521, 2579), 'app.shared.exc.EmailAlreadyTakenError', 'EmailAlreadyTakenError', (['"""Please try a different username."""'], {}), "('Please try a different username.')\n", (2543, 2579), False, 'from app.shared.exc import EmailAlreadyTakenError, UserDoesNotExist, UsernameAlreadyTakenError\n'), ((2659, 2694), 'app.shared.exc.UserDoesNotExist', 'UserDoesNotExist', (['"""User not found."""'], {}), "('User not found.')\n", (2675, 2694), False, 'from app.shared.exc import EmailAlreadyTakenError, UserDoesNotExist, UsernameAlreadyTakenError\n'), ((757, 774), 'sqlmodel.select', 'select', (['cls.model'], {}), '(cls.model)\n', (763, 774), False, 'from sqlmodel import select, Session\n')]
|
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from functools import partial
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
import megengine.module as Float
import megengine.module.qat as QAT
import megengine.module.quantized as Q
from megengine import Parameter, Tensor
from megengine.core.tensor import dtype
from megengine.quantization import (
FakeQuantize,
MinMaxObserver,
QConfig,
QuantMode,
create_qparams,
)
from megengine.quantization.quantize import (
disable_fake_quant,
disable_observer,
propagate_qconfig,
)
min_max_fakequant_qconfig = QConfig(
weight_observer=partial(MinMaxObserver, dtype="qint8_narrow"),
act_observer=partial(MinMaxObserver, dtype="qint8"),
weight_fake_quant=partial(FakeQuantize, dtype="qint8_narrow"),
act_fake_quant=partial(FakeQuantize, dtype="qint8"),
)
def gen_inp_scale():
return np.float32(np.random.rand() + 1)
min_val = np.random.randint(-127, 0, size=(2,)).astype("float32")
max_val = np.random.randint(1, 127, size=(2,)).astype("float32")
weight_scale = np.float32(np.max([-min_val[0], max_val[0]]) / 254 * 2)
act_scale = np.float32(np.max([-min_val[1], max_val[1]]) / 255 * 2)
def quant(x, scale):
inp_dtype = dtype.qint8(scale)
return x.astype(inp_dtype)
def fake_quant(x, scale, qmin, qmax):
x = x / scale
x = F.round(x)
x = F.clip(x, qmin, qmax)
x = x * scale
return x
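# Worked example (illustrative numbers): with scale=0.5, qmin=-128, qmax=127,
# fake_quant(1.3) computes 1.3 / 0.5 = 2.6 -> round = 3 -> clip = 3 ->
# 3 * 0.5 = 1.5, i.e. the value is snapped to the nearest quantized level.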
fake_quant_act = partial(fake_quant, qmin=-128, qmax=127)
fake_quant_weight = partial(fake_quant, qmin=-127, qmax=127)
fake_quant_bias = partial(fake_quant, qmin=-(2 ** 31), qmax=2 ** 31 - 1)
def init_qat_net(net):
if net.with_weight:
net.weight_observer.min_val[...] = Tensor(min_val[0])
net.weight_observer.max_val[...] = Tensor(max_val[0])
if net.with_act:
net.act_observer.min_val[...] = Tensor(min_val[1])
net.act_observer.max_val[...] = Tensor(max_val[1])
def test_quant_stub():
normal_net = Float.QuantStub()
normal_net.eval()
qat_from_float = QAT.QuantStub.from_float_module(normal_net)
qat_from_float.eval()
disable_observer(qat_from_float)
disable_fake_quant(qat_from_float)
qat_net = QAT.QuantStub()
qat_net.eval()
disable_observer(qat_net)
propagate_qconfig(qat_net, min_max_fakequant_qconfig)
init_qat_net(qat_net)
q_net = Q.QuantStub.from_qat_module(qat_net)
q_net.eval()
x = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
normal = normal_net(x)
qat_without_fakequant = qat_from_float(x)
fake_quant_normal = fake_quant_act(normal_net(x), act_scale)
qat = qat_net(x)
q = q_net(x).numpy() * act_scale
np.testing.assert_allclose(qat_without_fakequant, normal)
np.testing.assert_allclose(qat, fake_quant_normal)
np.testing.assert_allclose(q, fake_quant_normal.numpy())
def test_dequant_stub():
normal_net = Float.DequantStub()
normal_net.eval()
qat_from_float = QAT.DequantStub.from_float_module(normal_net)
qat_from_float.eval()
disable_fake_quant(qat_from_float)
disable_observer(qat_from_float)
qat_net = QAT.DequantStub()
qat_net.eval()
disable_observer(qat_net)
propagate_qconfig(qat_net, min_max_fakequant_qconfig)
init_qat_net(qat_net)
q_net = Q.DequantStub.from_qat_module(qat_net)
q_net.eval()
x = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
inp_scale = gen_inp_scale()
x = fake_quant_act(x, inp_scale)
x.qparams.scale = inp_scale
normal = normal_net(x)
qat_without_fakequant = qat_from_float(x)
fake_quant_normal = normal_net(x)
qat = qat_net(x)
q = q_net(quant(x, inp_scale)).numpy()
np.testing.assert_allclose(qat_without_fakequant, normal)
np.testing.assert_allclose(qat, fake_quant_normal)
np.testing.assert_allclose(q, fake_quant_normal.numpy())
@pytest.mark.parametrize("kind", ["cos", "relu", "add", "mul", "fuse_add_relu"])
def test_elemwise(kind):
normal_net = Float.Elemwise(kind)
normal_net.eval()
qat_from_float = QAT.Elemwise.from_float_module(normal_net)
qat_from_float.eval()
disable_observer(qat_from_float)
disable_fake_quant(qat_from_float)
qat_net = QAT.Elemwise(kind)
qat_net.eval()
disable_observer(qat_net)
propagate_qconfig(qat_net, min_max_fakequant_qconfig)
init_qat_net(qat_net)
q_net = Q.Elemwise.from_qat_module(qat_net)
q_net.eval()
x1_scale = np.float32(np.random.rand() + 1)
x1 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x1 = fake_quant_act(x1, x1_scale)
x1.qparams.scale = x1_scale
x2_scale = np.float32(np.random.rand() + 1)
x2 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x2 = fake_quant_act(x2, x2_scale)
x2.qparams.scale = x2_scale
x1_int8 = quant(x1, x1_scale)
x2_int8 = quant(x2, x2_scale)
# test correctness of `Float`, `QAT` and `Quantized`
if kind in ("add", "mul", "fuse_add_relu"):
normal = normal_net(x1, x2)
qat_without_fakequant = qat_from_float(x1, x2)
fake_quant_normal = fake_quant_act(normal_net(x1, x2), act_scale)
qat = qat_net(x1, x2)
q = q_net(x1_int8, x2_int8).numpy() * act_scale
else:
normal = normal_net(x1)
qat_without_fakequant = qat_from_float(x1)
fake_quant_normal = fake_quant_act(normal_net(x1), act_scale)
qat = qat_net(x1)
q = q_net(x1_int8).numpy() * act_scale
np.testing.assert_allclose(qat_without_fakequant, normal)
np.testing.assert_allclose(qat, fake_quant_normal)
np.testing.assert_allclose(q, fake_quant_normal.numpy())
def test_linear():
normal_net = Float.Linear(3, 3, bias=True)
normal_net.eval()
qat_net = QAT.Linear(3, 3, bias=True)
qat_net.eval()
disable_observer(qat_net)
propagate_qconfig(qat_net, min_max_fakequant_qconfig)
init_qat_net(qat_net)
x = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
inp_scale = gen_inp_scale()
x = fake_quant_act(x, inp_scale)
x.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", inp_scale))
x_int8 = quant(x, inp_scale)
weight = np.random.normal(size=(3, 3)).astype("float32")
bias = np.random.normal(size=(3,)).astype("float32")
normal_net.weight[...] = fake_quant_weight(weight, weight_scale)
normal_net.bias[...] = fake_quant_bias(bias, inp_scale * weight_scale)
qat_net.weight[...] = Parameter(weight)
qat_net.bias[...] = Parameter(bias)
qat_from_float = QAT.Linear.from_float_module(normal_net)
qat_from_float.eval()
disable_fake_quant(qat_from_float)
disable_observer(qat_from_float)
q_net = Q.Linear.from_qat_module(qat_net)
q_net.eval()
normal = normal_net(x)
qat_without_fakequant = qat_from_float(x)
fake_quant_normal = fake_quant_act(normal_net(x), act_scale)
qat = qat_net(x)
q = q_net(x_int8).numpy() * act_scale
np.testing.assert_allclose(qat_without_fakequant, normal)
np.testing.assert_allclose(qat, fake_quant_normal.numpy())
np.testing.assert_allclose(q, fake_quant_normal.numpy())
@pytest.mark.parametrize("module", ["Conv2d", "ConvBn2d", "ConvBnRelu2d"])
def test_conv(module):
normal_net = getattr(Float, module)(3, 3, 3, 1, 1, 1, bias=True)
normal_net.eval()
qat_net = getattr(QAT, module)(3, 3, 3, 1, 1, 1, bias=True)
qat_net.eval()
disable_observer(qat_net)
propagate_qconfig(qat_net, min_max_fakequant_qconfig)
init_qat_net(qat_net)
x = mge.tensor(np.random.normal(size=(1, 3, 3, 3)).astype("float32"))
inp_scale = gen_inp_scale()
x = fake_quant_act(x, inp_scale)
x.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", inp_scale))
x_int8 = quant(x, inp_scale)
weight = np.random.normal(size=(3, 3, 3, 3)).astype("float32")
bias = np.random.normal(size=(1, 3, 1, 1)).astype("float32")
if module in ("ConvBn2d", "ConvBnRelu2d"):
normal_net.conv.weight[...] = fake_quant_weight(weight, weight_scale)
normal_net.conv.bias[...] = fake_quant_bias(bias, inp_scale * weight_scale)
qat_net.conv.weight[...] = Parameter(weight)
qat_net.conv.bias[...] = Parameter(bias)
else:
normal_net.weight[...] = fake_quant_weight(weight, weight_scale)
normal_net.bias[...] = fake_quant_bias(bias, inp_scale * weight_scale)
qat_net.weight[...] = Parameter(weight)
qat_net.bias[...] = Parameter(bias)
qat_from_float = getattr(QAT, module).from_float_module(normal_net)
qat_from_float.eval()
disable_observer(qat_from_float)
disable_fake_quant(qat_from_float)
q_net = getattr(Q, module).from_qat_module(qat_net)
q_net.eval()
normal = normal_net(x)
qat_without_fakequant = qat_from_float(x)
fake_quant_normal = fake_quant_act(normal_net(x), act_scale)
qat = qat_net(x)
q = q_net(x_int8).numpy() * act_scale
np.testing.assert_allclose(qat_without_fakequant, normal, atol=1e-5)
np.testing.assert_allclose(qat, fake_quant_normal, atol=act_scale)
np.testing.assert_allclose(q, fake_quant_normal.numpy(), atol=act_scale)
def test_concat():
normal_net = Float.Concat()
normal_net.eval()
qat_net = QAT.Concat()
qat_net.eval()
disable_observer(qat_net)
propagate_qconfig(qat_net, min_max_fakequant_qconfig)
init_qat_net(qat_net)
inps = []
inps_int8 = []
for i in range(3):
inp_scale = gen_inp_scale()
inps.append(mge.tensor(np.random.normal(size=(3, 3)).astype("float32")))
inps[i] = fake_quant_act(inps[i], inp_scale)
inps[i].qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", inp_scale))
inps_int8.append(quant(inps[i], inp_scale))
qat_from_float = QAT.Concat.from_float_module(normal_net)
qat_from_float.eval()
disable_fake_quant(qat_from_float)
disable_observer(qat_from_float)
q_net = Q.Concat.from_qat_module(qat_net)
q_net.eval()
normal = normal_net(inps)
qat_without_fakequant = qat_from_float(inps)
fake_quant_normal = fake_quant_act(normal_net(inps), act_scale)
qat = qat_net(inps)
q = q_net(inps_int8).numpy() * act_scale
np.testing.assert_allclose(qat_without_fakequant, normal)
np.testing.assert_allclose(qat, fake_quant_normal.numpy())
np.testing.assert_allclose(q, fake_quant_normal.numpy())
|
[
"megengine.quantization.quantize.propagate_qconfig",
"megengine.module.Elemwise",
"megengine.module.quantized.Elemwise.from_qat_module",
"megengine.module.qat.Linear",
"megengine.core.tensor.dtype.qint8",
"megengine.module.DequantStub",
"megengine.module.qat.Linear.from_float_module",
"megengine.module.qat.Elemwise",
"megengine.module.quantized.QuantStub.from_qat_module",
"megengine.module.qat.QuantStub",
"megengine.module.quantized.Linear.from_qat_module",
"megengine.module.quantized.DequantStub.from_qat_module",
"megengine.module.qat.Concat",
"megengine.module.qat.Elemwise.from_float_module",
"megengine.Parameter",
"megengine.module.qat.Concat.from_float_module",
"megengine.module.QuantStub",
"megengine.module.qat.DequantStub",
"megengine.module.quantized.Concat.from_qat_module",
"megengine.quantization.quantize.disable_observer",
"megengine.functional.round",
"megengine.module.Linear",
"megengine.module.Concat",
"megengine.module.qat.QuantStub.from_float_module",
"megengine.functional.clip",
"megengine.quantization.quantize.disable_fake_quant",
"megengine.module.qat.DequantStub.from_float_module",
"megengine.Tensor",
"megengine.quantization.create_qparams"
] |
[((1778, 1818), 'functools.partial', 'partial', (['fake_quant'], {'qmin': '(-128)', 'qmax': '(127)'}), '(fake_quant, qmin=-128, qmax=127)\n', (1785, 1818), False, 'from functools import partial\n'), ((1839, 1879), 'functools.partial', 'partial', (['fake_quant'], {'qmin': '(-127)', 'qmax': '(127)'}), '(fake_quant, qmin=-127, qmax=127)\n', (1846, 1879), False, 'from functools import partial\n'), ((1898, 1950), 'functools.partial', 'partial', (['fake_quant'], {'qmin': '(-2 ** 31)', 'qmax': '(2 ** 31 - 1)'}), '(fake_quant, qmin=-2 ** 31, qmax=2 ** 31 - 1)\n', (1905, 1950), False, 'from functools import partial\n'), ((4210, 4289), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""kind"""', "['cos', 'relu', 'add', 'mul', 'fuse_add_relu']"], {}), "('kind', ['cos', 'relu', 'add', 'mul', 'fuse_add_relu'])\n", (4233, 4289), False, 'import pytest\n'), ((7476, 7549), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""module"""', "['Conv2d', 'ConvBn2d', 'ConvBnRelu2d']"], {}), "('module', ['Conv2d', 'ConvBn2d', 'ConvBnRelu2d'])\n", (7499, 7549), False, 'import pytest\n'), ((1571, 1589), 'megengine.core.tensor.dtype.qint8', 'dtype.qint8', (['scale'], {}), '(scale)\n', (1582, 1589), False, 'from megengine.core.tensor import dtype\n'), ((1687, 1697), 'megengine.functional.round', 'F.round', (['x'], {}), '(x)\n', (1694, 1697), True, 'import megengine.functional as F\n'), ((1706, 1727), 'megengine.functional.clip', 'F.clip', (['x', 'qmin', 'qmax'], {}), '(x, qmin, qmax)\n', (1712, 1727), True, 'import megengine.functional as F\n'), ((2307, 2324), 'megengine.module.QuantStub', 'Float.QuantStub', ([], {}), '()\n', (2322, 2324), True, 'import megengine.module as Float\n'), ((2369, 2412), 'megengine.module.qat.QuantStub.from_float_module', 'QAT.QuantStub.from_float_module', (['normal_net'], {}), '(normal_net)\n', (2400, 2412), True, 'import megengine.module.qat as QAT\n'), ((2443, 2475), 'megengine.quantization.quantize.disable_observer', 'disable_observer', (['qat_from_float'], {}), '(qat_from_float)\n', (2459, 2475), False, 'from megengine.quantization.quantize import disable_fake_quant, disable_observer, propagate_qconfig\n'), ((2480, 2514), 'megengine.quantization.quantize.disable_fake_quant', 'disable_fake_quant', (['qat_from_float'], {}), '(qat_from_float)\n', (2498, 2514), False, 'from megengine.quantization.quantize import disable_fake_quant, disable_observer, propagate_qconfig\n'), ((2530, 2545), 'megengine.module.qat.QuantStub', 'QAT.QuantStub', ([], {}), '()\n', (2543, 2545), True, 'import megengine.module.qat as QAT\n'), ((2569, 2594), 'megengine.quantization.quantize.disable_observer', 'disable_observer', (['qat_net'], {}), '(qat_net)\n', (2585, 2594), False, 'from megengine.quantization.quantize import disable_fake_quant, disable_observer, propagate_qconfig\n'), ((2600, 2653), 'megengine.quantization.quantize.propagate_qconfig', 'propagate_qconfig', (['qat_net', 'min_max_fakequant_qconfig'], {}), '(qat_net, min_max_fakequant_qconfig)\n', (2617, 2653), False, 'from megengine.quantization.quantize import disable_fake_quant, disable_observer, propagate_qconfig\n'), ((2693, 2729), 'megengine.module.quantized.QuantStub.from_qat_module', 'Q.QuantStub.from_qat_module', (['qat_net'], {}), '(qat_net)\n', (2720, 2729), True, 'import megengine.module.quantized as Q\n'), ((3017, 3074), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['qat_without_fakequant', 'normal'], {}), '(qat_without_fakequant, normal)\n', (3043, 3074), True, 'import numpy as np\n'), ((3079, 3129), 
'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['qat', 'fake_quant_normal'], {}), '(qat, fake_quant_normal)\n', (3105, 3129), True, 'import numpy as np\n'), ((3235, 3254), 'megengine.module.DequantStub', 'Float.DequantStub', ([], {}), '()\n', (3252, 3254), True, 'import megengine.module as Float\n'), ((3299, 3344), 'megengine.module.qat.DequantStub.from_float_module', 'QAT.DequantStub.from_float_module', (['normal_net'], {}), '(normal_net)\n', (3332, 3344), True, 'import megengine.module.qat as QAT\n'), ((3375, 3409), 'megengine.quantization.quantize.disable_fake_quant', 'disable_fake_quant', (['qat_from_float'], {}), '(qat_from_float)\n', (3393, 3409), False, 'from megengine.quantization.quantize import disable_fake_quant, disable_observer, propagate_qconfig\n'), ((3414, 3446), 'megengine.quantization.quantize.disable_observer', 'disable_observer', (['qat_from_float'], {}), '(qat_from_float)\n', (3430, 3446), False, 'from megengine.quantization.quantize import disable_fake_quant, disable_observer, propagate_qconfig\n'), ((3462, 3479), 'megengine.module.qat.DequantStub', 'QAT.DequantStub', ([], {}), '()\n', (3477, 3479), True, 'import megengine.module.qat as QAT\n'), ((3503, 3528), 'megengine.quantization.quantize.disable_observer', 'disable_observer', (['qat_net'], {}), '(qat_net)\n', (3519, 3528), False, 'from megengine.quantization.quantize import disable_fake_quant, disable_observer, propagate_qconfig\n'), ((3534, 3587), 'megengine.quantization.quantize.propagate_qconfig', 'propagate_qconfig', (['qat_net', 'min_max_fakequant_qconfig'], {}), '(qat_net, min_max_fakequant_qconfig)\n', (3551, 3587), False, 'from megengine.quantization.quantize import disable_fake_quant, disable_observer, propagate_qconfig\n'), ((3627, 3665), 'megengine.module.quantized.DequantStub.from_qat_module', 'Q.DequantStub.from_qat_module', (['qat_net'], {}), '(qat_net)\n', (3656, 3665), True, 'import megengine.module.quantized as Q\n'), ((4033, 4090), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['qat_without_fakequant', 'normal'], {}), '(qat_without_fakequant, normal)\n', (4059, 4090), True, 'import numpy as np\n'), ((4095, 4145), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['qat', 'fake_quant_normal'], {}), '(qat, fake_quant_normal)\n', (4121, 4145), True, 'import numpy as np\n'), ((4332, 4352), 'megengine.module.Elemwise', 'Float.Elemwise', (['kind'], {}), '(kind)\n', (4346, 4352), True, 'import megengine.module as Float\n'), ((4397, 4439), 'megengine.module.qat.Elemwise.from_float_module', 'QAT.Elemwise.from_float_module', (['normal_net'], {}), '(normal_net)\n', (4427, 4439), True, 'import megengine.module.qat as QAT\n'), ((4470, 4502), 'megengine.quantization.quantize.disable_observer', 'disable_observer', (['qat_from_float'], {}), '(qat_from_float)\n', (4486, 4502), False, 'from megengine.quantization.quantize import disable_fake_quant, disable_observer, propagate_qconfig\n'), ((4507, 4541), 'megengine.quantization.quantize.disable_fake_quant', 'disable_fake_quant', (['qat_from_float'], {}), '(qat_from_float)\n', (4525, 4541), False, 'from megengine.quantization.quantize import disable_fake_quant, disable_observer, propagate_qconfig\n'), ((4557, 4575), 'megengine.module.qat.Elemwise', 'QAT.Elemwise', (['kind'], {}), '(kind)\n', (4569, 4575), True, 'import megengine.module.qat as QAT\n'), ((4599, 4624), 'megengine.quantization.quantize.disable_observer', 'disable_observer', (['qat_net'], {}), '(qat_net)\n', (4615, 4624), False, 'from 
megengine.quantization.quantize import disable_fake_quant, disable_observer, propagate_qconfig\n'), ((4630, 4683), 'megengine.quantization.quantize.propagate_qconfig', 'propagate_qconfig', (['qat_net', 'min_max_fakequant_qconfig'], {}), '(qat_net, min_max_fakequant_qconfig)\n', (4647, 4683), False, 'from megengine.quantization.quantize import disable_fake_quant, disable_observer, propagate_qconfig\n'), ((4723, 4758), 'megengine.module.quantized.Elemwise.from_qat_module', 'Q.Elemwise.from_qat_module', (['qat_net'], {}), '(qat_net)\n', (4749, 4758), True, 'import megengine.module.quantized as Q\n'), ((5818, 5875), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['qat_without_fakequant', 'normal'], {}), '(qat_without_fakequant, normal)\n', (5844, 5875), True, 'import numpy as np\n'), ((5880, 5930), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['qat', 'fake_quant_normal'], {}), '(qat, fake_quant_normal)\n', (5906, 5930), True, 'import numpy as np\n'), ((6030, 6059), 'megengine.module.Linear', 'Float.Linear', (['(3)', '(3)'], {'bias': '(True)'}), '(3, 3, bias=True)\n', (6042, 6059), True, 'import megengine.module as Float\n'), ((6097, 6124), 'megengine.module.qat.Linear', 'QAT.Linear', (['(3)', '(3)'], {'bias': '(True)'}), '(3, 3, bias=True)\n', (6107, 6124), True, 'import megengine.module.qat as QAT\n'), ((6148, 6173), 'megengine.quantization.quantize.disable_observer', 'disable_observer', (['qat_net'], {}), '(qat_net)\n', (6164, 6173), False, 'from megengine.quantization.quantize import disable_fake_quant, disable_observer, propagate_qconfig\n'), ((6179, 6232), 'megengine.quantization.quantize.propagate_qconfig', 'propagate_qconfig', (['qat_net', 'min_max_fakequant_qconfig'], {}), '(qat_net, min_max_fakequant_qconfig)\n', (6196, 6232), False, 'from megengine.quantization.quantize import disable_fake_quant, disable_observer, propagate_qconfig\n'), ((6798, 6815), 'megengine.Parameter', 'Parameter', (['weight'], {}), '(weight)\n', (6807, 6815), False, 'from megengine import Parameter, Tensor\n'), ((6840, 6855), 'megengine.Parameter', 'Parameter', (['bias'], {}), '(bias)\n', (6849, 6855), False, 'from megengine import Parameter, Tensor\n'), ((6878, 6918), 'megengine.module.qat.Linear.from_float_module', 'QAT.Linear.from_float_module', (['normal_net'], {}), '(normal_net)\n', (6906, 6918), True, 'import megengine.module.qat as QAT\n'), ((6949, 6983), 'megengine.quantization.quantize.disable_fake_quant', 'disable_fake_quant', (['qat_from_float'], {}), '(qat_from_float)\n', (6967, 6983), False, 'from megengine.quantization.quantize import disable_fake_quant, disable_observer, propagate_qconfig\n'), ((6988, 7020), 'megengine.quantization.quantize.disable_observer', 'disable_observer', (['qat_from_float'], {}), '(qat_from_float)\n', (7004, 7020), False, 'from megengine.quantization.quantize import disable_fake_quant, disable_observer, propagate_qconfig\n'), ((7034, 7067), 'megengine.module.quantized.Linear.from_qat_module', 'Q.Linear.from_qat_module', (['qat_net'], {}), '(qat_net)\n', (7058, 7067), True, 'import megengine.module.quantized as Q\n'), ((7291, 7348), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['qat_without_fakequant', 'normal'], {}), '(qat_without_fakequant, normal)\n', (7317, 7348), True, 'import numpy as np\n'), ((7752, 7777), 'megengine.quantization.quantize.disable_observer', 'disable_observer', (['qat_net'], {}), '(qat_net)\n', (7768, 7777), False, 'from megengine.quantization.quantize import disable_fake_quant, disable_observer, 
propagate_qconfig\n'), ((7783, 7836), 'megengine.quantization.quantize.propagate_qconfig', 'propagate_qconfig', (['qat_net', 'min_max_fakequant_qconfig'], {}), '(qat_net, min_max_fakequant_qconfig)\n', (7800, 7836), False, 'from megengine.quantization.quantize import disable_fake_quant, disable_observer, propagate_qconfig\n'), ((8920, 8952), 'megengine.quantization.quantize.disable_observer', 'disable_observer', (['qat_from_float'], {}), '(qat_from_float)\n', (8936, 8952), False, 'from megengine.quantization.quantize import disable_fake_quant, disable_observer, propagate_qconfig\n'), ((8957, 8991), 'megengine.quantization.quantize.disable_fake_quant', 'disable_fake_quant', (['qat_from_float'], {}), '(qat_from_float)\n', (8975, 8991), False, 'from megengine.quantization.quantize import disable_fake_quant, disable_observer, propagate_qconfig\n'), ((9272, 9341), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['qat_without_fakequant', 'normal'], {'atol': '(1e-05)'}), '(qat_without_fakequant, normal, atol=1e-05)\n', (9298, 9341), True, 'import numpy as np\n'), ((9345, 9411), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['qat', 'fake_quant_normal'], {'atol': 'act_scale'}), '(qat, fake_quant_normal, atol=act_scale)\n', (9371, 9411), True, 'import numpy as np\n'), ((9527, 9541), 'megengine.module.Concat', 'Float.Concat', ([], {}), '()\n', (9539, 9541), True, 'import megengine.module as Float\n'), ((9579, 9591), 'megengine.module.qat.Concat', 'QAT.Concat', ([], {}), '()\n', (9589, 9591), True, 'import megengine.module.qat as QAT\n'), ((9615, 9640), 'megengine.quantization.quantize.disable_observer', 'disable_observer', (['qat_net'], {}), '(qat_net)\n', (9631, 9640), False, 'from megengine.quantization.quantize import disable_fake_quant, disable_observer, propagate_qconfig\n'), ((9646, 9699), 'megengine.quantization.quantize.propagate_qconfig', 'propagate_qconfig', (['qat_net', 'min_max_fakequant_qconfig'], {}), '(qat_net, min_max_fakequant_qconfig)\n', (9663, 9699), False, 'from megengine.quantization.quantize import disable_fake_quant, disable_observer, propagate_qconfig\n'), ((10115, 10155), 'megengine.module.qat.Concat.from_float_module', 'QAT.Concat.from_float_module', (['normal_net'], {}), '(normal_net)\n', (10143, 10155), True, 'import megengine.module.qat as QAT\n'), ((10186, 10220), 'megengine.quantization.quantize.disable_fake_quant', 'disable_fake_quant', (['qat_from_float'], {}), '(qat_from_float)\n', (10204, 10220), False, 'from megengine.quantization.quantize import disable_fake_quant, disable_observer, propagate_qconfig\n'), ((10225, 10257), 'megengine.quantization.quantize.disable_observer', 'disable_observer', (['qat_from_float'], {}), '(qat_from_float)\n', (10241, 10257), False, 'from megengine.quantization.quantize import disable_fake_quant, disable_observer, propagate_qconfig\n'), ((10271, 10304), 'megengine.module.quantized.Concat.from_qat_module', 'Q.Concat.from_qat_module', (['qat_net'], {}), '(qat_net)\n', (10295, 10304), True, 'import megengine.module.quantized as Q\n'), ((10543, 10600), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['qat_without_fakequant', 'normal'], {}), '(qat_without_fakequant, normal)\n', (10569, 10600), True, 'import numpy as np\n'), ((963, 1008), 'functools.partial', 'partial', (['MinMaxObserver'], {'dtype': '"""qint8_narrow"""'}), "(MinMaxObserver, dtype='qint8_narrow')\n", (970, 1008), False, 'from functools import partial\n'), ((1027, 1065), 'functools.partial', 'partial', (['MinMaxObserver'], 
{'dtype': '"""qint8"""'}), "(MinMaxObserver, dtype='qint8')\n", (1034, 1065), False, 'from functools import partial\n'), ((1089, 1132), 'functools.partial', 'partial', (['FakeQuantize'], {'dtype': '"""qint8_narrow"""'}), "(FakeQuantize, dtype='qint8_narrow')\n", (1096, 1132), False, 'from functools import partial\n'), ((1153, 1189), 'functools.partial', 'partial', (['FakeQuantize'], {'dtype': '"""qint8"""'}), "(FakeQuantize, dtype='qint8')\n", (1160, 1189), False, 'from functools import partial\n'), ((1272, 1309), 'numpy.random.randint', 'np.random.randint', (['(-127)', '(0)'], {'size': '(2,)'}), '(-127, 0, size=(2,))\n', (1289, 1309), True, 'import numpy as np\n'), ((1338, 1374), 'numpy.random.randint', 'np.random.randint', (['(1)', '(127)'], {'size': '(2,)'}), '(1, 127, size=(2,))\n', (1355, 1374), True, 'import numpy as np\n'), ((2045, 2063), 'megengine.Tensor', 'Tensor', (['min_val[0]'], {}), '(min_val[0])\n', (2051, 2063), False, 'from megengine import Parameter, Tensor\n'), ((2107, 2125), 'megengine.Tensor', 'Tensor', (['max_val[0]'], {}), '(max_val[0])\n', (2113, 2125), False, 'from megengine import Parameter, Tensor\n'), ((2187, 2205), 'megengine.Tensor', 'Tensor', (['min_val[1]'], {}), '(min_val[1])\n', (2193, 2205), False, 'from megengine import Parameter, Tensor\n'), ((2246, 2264), 'megengine.Tensor', 'Tensor', (['max_val[1]'], {}), '(max_val[1])\n', (2252, 2264), False, 'from megengine import Parameter, Tensor\n'), ((6418, 6473), 'megengine.quantization.create_qparams', 'create_qparams', (['QuantMode.SYMMERTIC', '"""qint8"""', 'inp_scale'], {}), "(QuantMode.SYMMERTIC, 'qint8', inp_scale)\n", (6432, 6473), False, 'from megengine.quantization import FakeQuantize, MinMaxObserver, QConfig, QuantMode, create_qparams\n'), ((8028, 8083), 'megengine.quantization.create_qparams', 'create_qparams', (['QuantMode.SYMMERTIC', '"""qint8"""', 'inp_scale'], {}), "(QuantMode.SYMMERTIC, 'qint8', inp_scale)\n", (8042, 8083), False, 'from megengine.quantization import FakeQuantize, MinMaxObserver, QConfig, QuantMode, create_qparams\n'), ((8496, 8513), 'megengine.Parameter', 'Parameter', (['weight'], {}), '(weight)\n', (8505, 8513), False, 'from megengine import Parameter, Tensor\n'), ((8547, 8562), 'megengine.Parameter', 'Parameter', (['bias'], {}), '(bias)\n', (8556, 8562), False, 'from megengine import Parameter, Tensor\n'), ((8755, 8772), 'megengine.Parameter', 'Parameter', (['weight'], {}), '(weight)\n', (8764, 8772), False, 'from megengine import Parameter, Tensor\n'), ((8801, 8816), 'megengine.Parameter', 'Parameter', (['bias'], {}), '(bias)\n', (8810, 8816), False, 'from megengine import Parameter, Tensor\n'), ((1238, 1254), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (1252, 1254), True, 'import numpy as np\n'), ((1419, 1452), 'numpy.max', 'np.max', (['[-min_val[0], max_val[0]]'], {}), '([-min_val[0], max_val[0]])\n', (1425, 1452), True, 'import numpy as np\n'), ((1487, 1520), 'numpy.max', 'np.max', (['[-min_val[1], max_val[1]]'], {}), '([-min_val[1], max_val[1]])\n', (1493, 1520), True, 'import numpy as np\n'), ((4803, 4819), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (4817, 4819), True, 'import numpy as np\n'), ((4991, 5007), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (5005, 5007), True, 'import numpy as np\n'), ((6523, 6552), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(3, 3)'}), '(size=(3, 3))\n', (6539, 6552), True, 'import numpy as np\n'), ((6582, 6609), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(3,)'}), 
'(size=(3,))\n', (6598, 6609), True, 'import numpy as np\n'), ((8133, 8168), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(3, 3, 3, 3)'}), '(size=(3, 3, 3, 3))\n', (8149, 8168), True, 'import numpy as np\n'), ((8198, 8233), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(1, 3, 1, 1)'}), '(size=(1, 3, 1, 1))\n', (8214, 8233), True, 'import numpy as np\n'), ((9984, 10039), 'megengine.quantization.create_qparams', 'create_qparams', (['QuantMode.SYMMERTIC', '"""qint8"""', 'inp_scale'], {}), "(QuantMode.SYMMERTIC, 'qint8', inp_scale)\n", (9998, 10039), False, 'from megengine.quantization import FakeQuantize, MinMaxObserver, QConfig, QuantMode, create_qparams\n'), ((2767, 2796), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(3, 3)'}), '(size=(3, 3))\n', (2783, 2796), True, 'import numpy as np\n'), ((3703, 3732), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(3, 3)'}), '(size=(3, 3))\n', (3719, 3732), True, 'import numpy as np\n'), ((4845, 4874), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(3, 3)'}), '(size=(3, 3))\n', (4861, 4874), True, 'import numpy as np\n'), ((5033, 5062), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(3, 3)'}), '(size=(3, 3))\n', (5049, 5062), True, 'import numpy as np\n'), ((6279, 6308), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(3, 3)'}), '(size=(3, 3))\n', (6295, 6308), True, 'import numpy as np\n'), ((7883, 7918), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(1, 3, 3, 3)'}), '(size=(1, 3, 3, 3))\n', (7899, 7918), True, 'import numpy as np\n'), ((9850, 9879), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(3, 3)'}), '(size=(3, 3))\n', (9866, 9879), True, 'import numpy as np\n')]
|
import pytest
import megengine as mge
import megengine.functional as F
from megengine.core._imperative_rt.core2 import config_async_level, get_async_level
def test_basic():
config_async_level(2)
assert get_async_level() == 2
with pytest.raises(RuntimeError):
config_async_level(3)
def test_level1_infer_value():
config_async_level(1)
a = mge.tensor([[1, 2], [2, 3], [3, 4]], dtype="float32")
b = mge.tensor([1, 1], dtype="float32")
# make DepType::VALUE unknown
c = b * 2
with pytest.raises(RuntimeError):
d = F.reshape(a, c)
def test_level1_infer_shape_with_unknown():
config_async_level(2)
a = mge.tensor([[1, 2, 2, 3]], dtype="float32")
b = mge.tensor([1, 1])
c = b * 2
# make DepType::SHAPE unknown
d = F.reshape(a, c)
config_async_level(1)
e = mge.tensor([[1, 2]], dtype="float32")
with pytest.raises(RuntimeError):
f = F.matmul(d, e)
|
[
"megengine.core._imperative_rt.core2.get_async_level",
"megengine.core._imperative_rt.core2.config_async_level",
"megengine.tensor",
"megengine.functional.matmul",
"megengine.functional.reshape"
] |
[((180, 201), 'megengine.core._imperative_rt.core2.config_async_level', 'config_async_level', (['(2)'], {}), '(2)\n', (198, 201), False, 'from megengine.core._imperative_rt.core2 import config_async_level, get_async_level\n'), ((341, 362), 'megengine.core._imperative_rt.core2.config_async_level', 'config_async_level', (['(1)'], {}), '(1)\n', (359, 362), False, 'from megengine.core._imperative_rt.core2 import config_async_level, get_async_level\n'), ((371, 424), 'megengine.tensor', 'mge.tensor', (['[[1, 2], [2, 3], [3, 4]]'], {'dtype': '"""float32"""'}), "([[1, 2], [2, 3], [3, 4]], dtype='float32')\n", (381, 424), True, 'import megengine as mge\n'), ((433, 468), 'megengine.tensor', 'mge.tensor', (['[1, 1]'], {'dtype': '"""float32"""'}), "([1, 1], dtype='float32')\n", (443, 468), True, 'import megengine as mge\n'), ((633, 654), 'megengine.core._imperative_rt.core2.config_async_level', 'config_async_level', (['(2)'], {}), '(2)\n', (651, 654), False, 'from megengine.core._imperative_rt.core2 import config_async_level, get_async_level\n'), ((663, 706), 'megengine.tensor', 'mge.tensor', (['[[1, 2, 2, 3]]'], {'dtype': '"""float32"""'}), "([[1, 2, 2, 3]], dtype='float32')\n", (673, 706), True, 'import megengine as mge\n'), ((715, 733), 'megengine.tensor', 'mge.tensor', (['[1, 1]'], {}), '([1, 1])\n', (725, 733), True, 'import megengine as mge\n'), ((790, 805), 'megengine.functional.reshape', 'F.reshape', (['a', 'c'], {}), '(a, c)\n', (799, 805), True, 'import megengine.functional as F\n'), ((810, 831), 'megengine.core._imperative_rt.core2.config_async_level', 'config_async_level', (['(1)'], {}), '(1)\n', (828, 831), False, 'from megengine.core._imperative_rt.core2 import config_async_level, get_async_level\n'), ((840, 877), 'megengine.tensor', 'mge.tensor', (['[[1, 2]]'], {'dtype': '"""float32"""'}), "([[1, 2]], dtype='float32')\n", (850, 877), True, 'import megengine as mge\n'), ((213, 230), 'megengine.core._imperative_rt.core2.get_async_level', 'get_async_level', ([], {}), '()\n', (228, 230), False, 'from megengine.core._imperative_rt.core2 import config_async_level, get_async_level\n'), ((245, 272), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (258, 272), False, 'import pytest\n'), ((282, 303), 'megengine.core._imperative_rt.core2.config_async_level', 'config_async_level', (['(3)'], {}), '(3)\n', (300, 303), False, 'from megengine.core._imperative_rt.core2 import config_async_level, get_async_level\n'), ((526, 553), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (539, 553), False, 'import pytest\n'), ((567, 582), 'megengine.functional.reshape', 'F.reshape', (['a', 'c'], {}), '(a, c)\n', (576, 582), True, 'import megengine.functional as F\n'), ((887, 914), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (900, 914), False, 'import pytest\n'), ((928, 942), 'megengine.functional.matmul', 'F.matmul', (['d', 'e'], {}), '(d, e)\n', (936, 942), True, 'import megengine.functional as F\n')]
|
# -*- coding: utf-8 -*-
# Copyright 2019 - present, Facebook, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ---------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2020 Megvii Inc. All rights reserved.
# ---------------------------------------------------------------------
import numpy as np
import megengine.functional as F
import megengine.module as M
from megengine import Parameter
class FrozenBatchNorm2d(M.Module):
"""
    BatchNorm2d in which the weight, bias, running_mean and running_var
    are immutable.
"""
def __init__(self, num_features, eps=1e-5):
super().__init__()
self.num_features = num_features
self.eps = eps
self.weight = Parameter(np.ones(num_features, dtype=np.float32))
self.bias = Parameter(np.zeros(num_features, dtype=np.float32))
self.running_mean = Parameter(np.zeros((1, num_features, 1, 1), dtype=np.float32))
self.running_var = Parameter(np.ones((1, num_features, 1, 1), dtype=np.float32))
def forward(self, x):
scale = self.weight.reshape(1, -1, 1, 1) * (
1.0 / F.sqrt(self.running_var + self.eps)
)
bias = self.bias.reshape(1, -1, 1, 1) - self.running_mean * scale
return x * scale.detach() + bias.detach()
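# Hedged usage sketch (not part of the original module; the random input is
# illustrative): with the default (untrained) statistics the frozen BN
# reduces to y = x / sqrt(1 + eps), since scale == 1/sqrt(1 + eps) and
# bias == 0:
#
#     import megengine as mge
#     fbn = FrozenBatchNorm2d(num_features=4)
#     x = mge.tensor(np.random.randn(2, 4, 8, 8).astype("float32"))
#     y = fbn(x)  # scale/bias are detached, so no gradients reach them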
class GroupNorm(M.Module):
def __init__(self, num_groups, num_channels, eps=1e-5, affine=True):
super().__init__()
self.num_groups = num_groups
self.num_channels = num_channels
self.eps = eps
self.affine = affine
if self.affine:
self.weight = Parameter(np.ones(num_channels, dtype=np.float32))
self.bias = Parameter(np.zeros(num_channels, dtype=np.float32))
else:
self.weight = None
self.bias = None
self.reset_parameters()
def reset_parameters(self):
if self.affine:
M.init.ones_(self.weight)
M.init.zeros_(self.bias)
def forward(self, x):
output = x.reshape(x.shape[0], self.num_groups, -1)
mean = F.mean(output, axis=2, keepdims=True)
mean2 = F.mean(output ** 2, axis=2, keepdims=True)
var = mean2 - mean * mean
output = (output - mean) / F.sqrt(var + self.eps)
output = output.reshape(x.shape)
if self.affine:
output = self.weight.reshape(1, -1, 1, 1) * output + \
self.bias.reshape(1, -1, 1, 1)
return output
def get_norm(norm):
"""
Args:
norm (str): currently support "BN", "SyncBN", "FrozenBN" and "GN"
Returns:
M.Module or None: the normalization layer
"""
if norm is None:
return None
norm = {
"BN": M.BatchNorm2d,
"SyncBN": M.SyncBatchNorm,
"FrozenBN": FrozenBatchNorm2d,
"GN": GroupNorm,
}[norm]
return norm
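# Hedged usage sketch, not part of the original file: get_norm returns a
# class, so it still has to be instantiated with the channel arguments.
if __name__ == "__main__":
    frozen = get_norm("FrozenBN")(num_features=16)
    gn = get_norm("GN")(num_groups=4, num_channels=16)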
|
[
"megengine.functional.mean",
"megengine.functional.sqrt",
"megengine.module.init.zeros_",
"megengine.module.init.ones_"
] |
[((3029, 3066), 'megengine.functional.mean', 'F.mean', (['output'], {'axis': '(2)', 'keepdims': '(True)'}), '(output, axis=2, keepdims=True)\n', (3035, 3066), True, 'import megengine.functional as F\n'), ((3083, 3125), 'megengine.functional.mean', 'F.mean', (['(output ** 2)'], {'axis': '(2)', 'keepdims': '(True)'}), '(output ** 2, axis=2, keepdims=True)\n', (3089, 3125), True, 'import megengine.functional as F\n'), ((1691, 1730), 'numpy.ones', 'np.ones', (['num_features'], {'dtype': 'np.float32'}), '(num_features, dtype=np.float32)\n', (1698, 1730), True, 'import numpy as np\n'), ((1762, 1802), 'numpy.zeros', 'np.zeros', (['num_features'], {'dtype': 'np.float32'}), '(num_features, dtype=np.float32)\n', (1770, 1802), True, 'import numpy as np\n'), ((1843, 1894), 'numpy.zeros', 'np.zeros', (['(1, num_features, 1, 1)'], {'dtype': 'np.float32'}), '((1, num_features, 1, 1), dtype=np.float32)\n', (1851, 1894), True, 'import numpy as np\n'), ((1933, 1983), 'numpy.ones', 'np.ones', (['(1, num_features, 1, 1)'], {'dtype': 'np.float32'}), '((1, num_features, 1, 1), dtype=np.float32)\n', (1940, 1983), True, 'import numpy as np\n'), ((2864, 2889), 'megengine.module.init.ones_', 'M.init.ones_', (['self.weight'], {}), '(self.weight)\n', (2876, 2889), True, 'import megengine.module as M\n'), ((2902, 2926), 'megengine.module.init.zeros_', 'M.init.zeros_', (['self.bias'], {}), '(self.bias)\n', (2915, 2926), True, 'import megengine.module as M\n'), ((3196, 3218), 'megengine.functional.sqrt', 'F.sqrt', (['(var + self.eps)'], {}), '(var + self.eps)\n', (3202, 3218), True, 'import megengine.functional as F\n'), ((2083, 2118), 'megengine.functional.sqrt', 'F.sqrt', (['(self.running_var + self.eps)'], {}), '(self.running_var + self.eps)\n', (2089, 2118), True, 'import megengine.functional as F\n'), ((2572, 2611), 'numpy.ones', 'np.ones', (['num_channels'], {'dtype': 'np.float32'}), '(num_channels, dtype=np.float32)\n', (2579, 2611), True, 'import numpy as np\n'), ((2647, 2687), 'numpy.zeros', 'np.zeros', (['num_channels'], {'dtype': 'np.float32'}), '(num_channels, dtype=np.float32)\n', (2655, 2687), True, 'import numpy as np\n')]
|
# Imports from standard library
import os
from typing import Optional, Union
import asyncio
import time
import sqlite3
import hashlib
# Import these utilities
from utils.merkletree import MerkleTree, MerkleError
# Import the HTTP app server
from fastapi import FastAPI, BackgroundTasks
##################################
from sqlmodel import Field, SQLModel, create_engine
class MerkleTable(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
timestamp: int
ngsi_id_hash: str
ngsi_value_hash: str
ngsi_receipt: Optional[str] = None
# The table scripts
drop_table_script = """
DROP TABLE IF EXISTS testing;
"""
create_table_script = """
CREATE TABLE IF NOT EXISTS testing (
id INTEGER PRIMARY KEY,
timestamp INTEGER NOT NULL,
ngsi_id_hash TEXT NOT NULL,
ngsi_value_hash TEXT NOT NULL,
ngsi_receipt TEXT
);
"""
set_journal_wal = 'PRAGMA journal_mode=WAL'
query_journal_mode = """PRAGMA journal_mode"""
class MerkleBuffer:
def __init__(self,
db_name: str = 'mkbuffer.db', # Name of the database
db_max_elements: int = 10000, # Maximum size of database, in number of records
maxLeaves: int = 1024, # Maximum number of leaves of the Merkle Tree to notarize
maxInterval: int = 60, # Notarize every maxInterval (seconds) even if not enough leaves received yet
durability: int = 10 # Commit database every durability seconds, to make data permanent
) -> None:
self.db_name = db_name
self.maxLeaves = maxLeaves
self.db_max_elements = db_max_elements
self.maxInterval = maxInterval
self.durability = durability
self.next_record = 1
self.leaves = 0
self.last_notarization = time.time()
print(f'MaxLeaves: {maxLeaves}')
self.open()
        # Create a background task which commits the db every
        # min(durability, maxInterval) seconds, and notarizes the Merkle Tree
        # even if not enough entries have been received yet
if durability > 0 or maxInterval > 0:
self.commit_task = asyncio.create_task(self.commit_background_task(min(durability, maxInterval)))
def open(self):
sqlite_file_name = "sqlmodel.db"
sqlite_url = f"sqlite:///{sqlite_file_name}"
engine = create_engine(sqlite_url, echo=True)
        # Connect to db: put(), commit() and the background task all rely on
        # self.db, so the connection must actually be opened here.
        self.db = sqlite3.connect(self.db_name,
                             detect_types=sqlite3.PARSE_DECLTYPES,)
        self.db.row_factory = sqlite3.Row
        # Make sure the `testing` table exists (no-op when already created).
        self.db.executescript(create_table_script)
def open_erase(self):
# Erase the database
if os.path.exists(self.db_name):
os.remove(self.db_name)
# Connect to db
self.db = sqlite3.connect(self.db_name,
detect_types=sqlite3.PARSE_DECLTYPES,)
self.db.row_factory = sqlite3.Row
# Create the table, dropping it before.
self.db.executescript(drop_table_script)
self.db.executescript(create_table_script)
# Set the db to WAL mode for better performance
self.db.execute(set_journal_wal)
def commit(self):
self.db.commit()
def close(self):
self.next_record = 1
self.leaves = 0
        # The background task only exists when durability or maxInterval > 0.
        if getattr(self, 'commit_task', None) is not None:
            self.commit_task.cancel()
self.db.close()
def _hash(self, text: Union[str, bytes]) -> bytes:
if isinstance(text, str):
text = bytes(text, "utf-8")
h = hashlib.sha256()
h.update(text)
d = h.digest()
return d
def put(self, id: str, value: str):
need_process_batch = False
# calculate hashes
id_hash = self._hash(id)
value_hash = self._hash(value)
# Insert the record
try:
# Execute the INSERT or REPLACE
self.db.execute(
'''insert or replace into testing
(id, timestamp, ngsi_id_hash, ngsi_value_hash) values (?, ?, ?, ?)''',
(self.next_record, time.time_ns(), id_hash, value_hash))
# Increment the record number
self.next_record += 1
self.leaves += 1
except Exception as e:
raise e
# Check if we should create the Merkle Tree and notarize
if self.leaves >= self.maxLeaves:
# Process the batch of records, possibly asynchronously
need_process_batch = True
# self.processBatch()
self.leaves = 0
# Check if the database size has reached the maximum and start reusing rows
if self.next_record > self.db_max_elements:
print("Rotate the database")
self.next_record = 1
return need_process_batch
def processBatch(self, db):
# stmt = 'select * from testing where ngsi_receipt is null limit 100'
stmt = 'select * from testing'
result = db.execute(stmt)
rows = result.fetchall()
print(f'Rows: {len(rows)}')
# for row in rows:
# print(f'{row["timestamp"]}-{row["ngsi_id_hash"].hex()}-{row["ngsi_value_hash"].hex()}')
# Update last notarization
self.last_notarization = time.time()
# Create a background task to make sure commit is called for the last put,
# even if no more puts are coming
async def commit_background_task(self, frequency: int):
while True:
await asyncio.sleep(frequency)
print("BKG Task: committing")
# Commit the database
self.db.commit()
            # Check if we must notarize even though not enough records have arrived
now = time.time()
if now - self.last_notarization > self.maxInterval:
self.processBatch(self.db)
self.last_notarization = now
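# Hedged usage sketch (illustrative only, not part of the original flow):
# with durability=0 and maxInterval=0 no asyncio task is created, so the
# buffer can be exercised synchronously:
#
#     buf = MerkleBuffer(db_name='demo.db', maxLeaves=4,
#                        maxInterval=0, durability=0)
#     buf.open_erase()                  # (re)create the `testing` table
#     for i in range(4):
#         if buf.put(f'urn:ngsi-ld:{i}', f'value-{i}'):
#             buf.processBatch(buf.db)  # batch full -> notarize
#     buf.commit()
#     buf.close()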
f: MerkleBuffer = None
app = FastAPI()
def processBatch():
global f
print(f'In return from call process')
# Connect to db
db = sqlite3.connect(f.db_name,
detect_types=sqlite3.PARSE_DECLTYPES,)
db.row_factory = sqlite3.Row
f.processBatch(db)
@app.on_event("startup")
async def startup_event():
global f
f = MerkleBuffer()
@app.on_event("shutdown")
def shutdown_event():
print("SHUTDOWN: closing the database")
global f
f.close()
@app.get("/store/initialize")
async def store_initialize():
global f
f = MerkleBuffer(maxLeaves=4)
return {"result": "OK"}
@app.get("/store/{item_id}/{value_id}")
async def store_item(item_id: str = "Hello", value_id: str = "Pepe", background_tasks: BackgroundTasks = None):
global f
    result = f.put(item_id, value_id)
if result:
background_tasks.add_task(processBatch)
return {"result": "OK"}
import uvicorn
if __name__ == "__main__":
uvicorn.run("lserver:app", host="127.0.0.1", port=8000, log_level="warning")
|
[
"sqlmodel.create_engine",
"sqlmodel.Field"
] |
[((6014, 6023), 'fastapi.FastAPI', 'FastAPI', ([], {}), '()\n', (6021, 6023), False, 'from fastapi import FastAPI, BackgroundTasks\n'), ((443, 480), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (448, 480), False, 'from sqlmodel import Field, SQLModel, create_engine\n'), ((6129, 6193), 'sqlite3.connect', 'sqlite3.connect', (['f.db_name'], {'detect_types': 'sqlite3.PARSE_DECLTYPES'}), '(f.db_name, detect_types=sqlite3.PARSE_DECLTYPES)\n', (6144, 6193), False, 'import sqlite3\n'), ((6950, 7026), 'uvicorn.run', 'uvicorn.run', (['"""lserver:app"""'], {'host': '"""127.0.0.1"""', 'port': '(8000)', 'log_level': '"""warning"""'}), "('lserver:app', host='127.0.0.1', port=8000, log_level='warning')\n", (6961, 7026), False, 'import uvicorn\n'), ((1790, 1801), 'time.time', 'time.time', ([], {}), '()\n', (1799, 1801), False, 'import time\n'), ((2390, 2426), 'sqlmodel.create_engine', 'create_engine', (['sqlite_url'], {'echo': '(True)'}), '(sqlite_url, echo=True)\n', (2403, 2426), False, 'from sqlmodel import Field, SQLModel, create_engine\n'), ((2843, 2871), 'os.path.exists', 'os.path.exists', (['self.db_name'], {}), '(self.db_name)\n', (2857, 2871), False, 'import os\n'), ((2952, 3019), 'sqlite3.connect', 'sqlite3.connect', (['self.db_name'], {'detect_types': 'sqlite3.PARSE_DECLTYPES'}), '(self.db_name, detect_types=sqlite3.PARSE_DECLTYPES)\n', (2967, 3019), False, 'import sqlite3\n'), ((3645, 3661), 'hashlib.sha256', 'hashlib.sha256', ([], {}), '()\n', (3659, 3661), False, 'import hashlib\n'), ((5359, 5370), 'time.time', 'time.time', ([], {}), '()\n', (5368, 5370), False, 'import time\n'), ((2885, 2908), 'os.remove', 'os.remove', (['self.db_name'], {}), '(self.db_name)\n', (2894, 2908), False, 'import os\n'), ((5818, 5829), 'time.time', 'time.time', ([], {}), '()\n', (5827, 5829), False, 'import time\n'), ((5587, 5611), 'asyncio.sleep', 'asyncio.sleep', (['frequency'], {}), '(frequency)\n', (5600, 5611), False, 'import asyncio\n'), ((4190, 4204), 'time.time_ns', 'time.time_ns', ([], {}), '()\n', (4202, 4204), False, 'import time\n')]
|
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from functools import partial
import numpy as np
import tabulate
import megengine as mge
import megengine._internal as mgb
import megengine.module as m
import megengine.module.qat as qatm
import megengine.module.quantized as qm
try:
mge.logger.MegEngineLogFormatter.max_lines = float("inf")
except AttributeError as e:
raise ValueError("set logger max lines failed")
logger = mge.get_logger(__name__)
CALC_FLOPS = {}
def _register_modules(*modules):
def callback(impl):
for module in modules:
CALC_FLOPS[module] = impl
return impl
return callback
@_register_modules(
m.Conv2d,
m.ConvTranspose2d,
m.LocalConv2d,
qm.Conv2d,
qm.ConvRelu2d,
qm.ConvBn2d,
qm.ConvBnRelu2d,
qatm.Conv2d,
qatm.ConvRelu2d,
qatm.ConvBn2d,
qatm.ConvBnRelu2d,
)
def count_convNd(module, input, output):
bias = 1 if module.bias is not None else 0
group = module.groups
ic = input[0].shape[1]
oc = output[0].shape[1]
goc = oc // group
gic = ic // group
N = output[0].shape[0]
HW = np.prod(output[0].shape[2:])
# N x Cout x H x W x (Cin x Kw x Kh + bias)
return N * HW * goc * (gic * np.prod(module.kernel_size) + bias)
# Note: this re-registration overrides the count_convNd entry above for
# m.ConvTranspose2d, since the later CALC_FLOPS assignment wins.
@_register_modules(m.ConvTranspose2d)
def count_deconvNd(module, input, output):
return np.prod(input[0].shape) * output[0].shape[1] * np.prod(module.kernel_size)
@_register_modules(m.Linear, qatm.Linear, qm.Linear)
def count_linear(module, input, output):
return np.prod(output[0].shape) * module.in_features
# does not need import qat and quantized module since they inherit from float module.
hook_modules = (
m.Conv2d,
m.ConvTranspose2d,
m.LocalConv2d,
m.BatchNorm2d,
m.Linear,
)
def net_stats(model, input_size, bar_length_max=20, log_params=True, log_flops=True):
def dict2table(list_of_dict, header):
table_data = [header]
for d in list_of_dict:
row = []
for h in header:
v = ""
if h in d:
v = d[h]
row.append(v)
table_data.append(row)
return table_data
def sizeof_fmt(num, suffix="B"):
for unit in ["", "Ki", "Mi", "Gi", "Ti", "Pi", "Ei", "Zi"]:
if abs(num) < 1024.0:
return "{:3.3f} {}{}".format(num, unit, suffix)
num /= 1024.0
sign_str = "-" if num < 0 else ""
return "{}{:.1f} {}{}".format(sign_str, num, "Yi", suffix)
def get_byteswidth(tensor):
dtype = tensor.dtype
if mgb.dtype.is_quantize(dtype):
return 1
elif mgb.dtype.is_bfloat16(dtype):
return 2
else:
return 4
def print_flops_stats(flops):
flops_list = [i["flops_num"] for i in flops]
max_flops_num = max(flops_list + [0])
# calc total flops and set flops_cum
total_flops_num = 0
for d in flops:
total_flops_num += int(d["flops_num"])
d["flops_cum"] = sizeof_fmt(total_flops_num, suffix="OPs")
for i in flops:
f = i["flops_num"]
i["flops"] = sizeof_fmt(f, suffix="OPs")
r = i["ratio"] = f / total_flops_num
i["percentage"] = "{:.2f}%".format(r * 100)
bar_length = int(f / max_flops_num * bar_length_max)
i["bar"] = "#" * bar_length
header = [
"name",
"class_name",
"input_shapes",
"output_shapes",
"flops",
"flops_cum",
"percentage",
"bar",
]
total_flops_str = sizeof_fmt(total_flops_num, suffix="OPs")
total_var_size = sum(sum(s[1] for s in i["output_shapes"]) for i in flops)
flops.append(
dict(name="total", flops=total_flops_str, output_shapes=total_var_size)
)
logger.info(
"flops stats: \n" + tabulate.tabulate(dict2table(flops, header=header))
)
return total_flops_num
def print_params_stats(params):
total_param_dims, total_param_size = 0, 0
for d in params:
total_param_dims += int(d["param_dim"])
total_param_size += int(d["size"])
d["size"] = sizeof_fmt(d["size"])
d["size_cum"] = sizeof_fmt(total_param_size)
for d in params:
ratio = d["param_dim"] / total_param_dims
d["ratio"] = ratio
d["percentage"] = "{:.2f}%".format(ratio * 100)
# construct bar
max_ratio = max([d["ratio"] for d in params])
for d in params:
bar_length = int(d["ratio"] / max_ratio * bar_length_max)
d["size_bar"] = "#" * bar_length
param_size = sizeof_fmt(total_param_size)
params.append(dict(name="total", param_dim=total_param_dims, size=param_size,))
header = [
"name",
"shape",
"mean",
"std",
"param_dim",
"bits",
"size",
"size_cum",
"percentage",
"size_bar",
]
logger.info(
"param stats: \n" + tabulate.tabulate(dict2table(params, header=header))
)
return total_param_size
def net_stats_hook(module, input, output, name=""):
class_name = str(module.__class__).split(".")[-1].split("'")[0]
flops_fun = CALC_FLOPS.get(type(module))
if callable(flops_fun):
flops_num = flops_fun(module, input, output)
if not isinstance(output, (list, tuple)):
output = [output]
flops.append(
dict(
name=name,
class_name=class_name,
input_shapes=[i.shape for i in input],
output_shapes=[o.shape for o in output],
flops_num=flops_num,
flops_cum=0,
)
)
if hasattr(module, "weight") and module.weight is not None:
w = module.weight
value = w.numpy()
param_dim = np.prod(w.shape)
param_bytes = get_byteswidth(w)
params.append(
dict(
name=name + "-w",
shape=w.shape,
param_dim=param_dim,
bits=param_bytes * 8,
size=param_dim * param_bytes,
size_cum=0,
mean="{:.2g}".format(value.mean()),
std="{:.2g}".format(value.std()),
)
)
if hasattr(module, "bias") and module.bias is not None:
b = module.bias
value = b.numpy()
param_dim = np.prod(b.shape)
param_bytes = get_byteswidth(b)
params.append(
dict(
name=name + "-b",
shape=b.shape,
param_dim=param_dim,
bits=param_bytes * 8,
size=param_dim * param_bytes,
size_cum=0,
mean="{:.2g}".format(value.mean()),
std="{:.2g}".format(value.std()),
)
)
# multiple inputs to the network
if not isinstance(input_size[0], tuple):
input_size = [input_size]
params = []
flops = []
hooks = []
for (name, module) in model.named_modules():
if isinstance(module, hook_modules):
hooks.append(
module.register_forward_hook(partial(net_stats_hook, name=name))
)
inputs = [mge.zeros(in_size, dtype=np.float32) for in_size in input_size]
model.eval()
model(*inputs)
for h in hooks:
h.remove()
total_flops, total_params = 0, 0
if log_params:
total_params = print_params_stats(params)
if log_flops:
total_flops = print_flops_stats(flops)
return total_params, total_flops
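# Hedged usage sketch, not from the original repository; the toy conv layer
# and the input size are assumptions chosen to exercise the hooks above.
if __name__ == "__main__":
    net = m.Conv2d(3, 8, 3)
    total_params, total_flops = net_stats(net, input_size=(1, 3, 32, 32))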
|
[
"megengine._internal.dtype.is_quantize",
"megengine._internal.dtype.is_bfloat16",
"megengine.zeros",
"megengine.get_logger"
] |
[((741, 765), 'megengine.get_logger', 'mge.get_logger', (['__name__'], {}), '(__name__)\n', (755, 765), True, 'import megengine as mge\n'), ((1434, 1462), 'numpy.prod', 'np.prod', (['output[0].shape[2:]'], {}), '(output[0].shape[2:])\n', (1441, 1462), True, 'import numpy as np\n'), ((1722, 1749), 'numpy.prod', 'np.prod', (['module.kernel_size'], {}), '(module.kernel_size)\n', (1729, 1749), True, 'import numpy as np\n'), ((1857, 1881), 'numpy.prod', 'np.prod', (['output[0].shape'], {}), '(output[0].shape)\n', (1864, 1881), True, 'import numpy as np\n'), ((2922, 2950), 'megengine._internal.dtype.is_quantize', 'mgb.dtype.is_quantize', (['dtype'], {}), '(dtype)\n', (2943, 2950), True, 'import megengine._internal as mgb\n'), ((8007, 8043), 'megengine.zeros', 'mge.zeros', (['in_size'], {'dtype': 'np.float32'}), '(in_size, dtype=np.float32)\n', (8016, 8043), True, 'import megengine as mge\n'), ((1675, 1698), 'numpy.prod', 'np.prod', (['input[0].shape'], {}), '(input[0].shape)\n', (1682, 1698), True, 'import numpy as np\n'), ((2986, 3014), 'megengine._internal.dtype.is_bfloat16', 'mgb.dtype.is_bfloat16', (['dtype'], {}), '(dtype)\n', (3007, 3014), True, 'import megengine._internal as mgb\n'), ((6485, 6501), 'numpy.prod', 'np.prod', (['w.shape'], {}), '(w.shape)\n', (6492, 6501), True, 'import numpy as np\n'), ((7122, 7138), 'numpy.prod', 'np.prod', (['b.shape'], {}), '(b.shape)\n', (7129, 7138), True, 'import numpy as np\n'), ((1545, 1572), 'numpy.prod', 'np.prod', (['module.kernel_size'], {}), '(module.kernel_size)\n', (1552, 1572), True, 'import numpy as np\n'), ((7942, 7976), 'functools.partial', 'partial', (['net_stats_hook'], {'name': 'name'}), '(net_stats_hook, name=name)\n', (7949, 7976), False, 'from functools import partial\n')]
|
"""
Query related functions.
"""
from datetime import datetime, timezone
from typing import List, Tuple
import sqlparse
from sqlalchemy import text
from sqlmodel import Session, create_engine
from datajunction.config import Settings
from datajunction.models.query import (
ColumnMetadata,
Query,
QueryResults,
QueryState,
QueryWithResults,
StatementResults,
)
from datajunction.typing import ColumnType, Description, SQLADialect, Stream, TypeEnum
def get_columns_from_description(
description: Description,
dialect: SQLADialect,
) -> List[ColumnMetadata]:
"""
Extract column metadata from the cursor description.
    For now this uses the information from the cursor description, which only allows us to
distinguish between 4 types (see ``TypeEnum``). In the future we should use a type
inferrer to determine the types based on the query.
"""
type_map = {
TypeEnum.STRING: ColumnType.STR,
TypeEnum.BINARY: ColumnType.BYTES,
TypeEnum.NUMBER: ColumnType.FLOAT,
TypeEnum.DATETIME: ColumnType.DATETIME,
}
columns = []
for column in description or []:
name, native_type = column[:2]
for dbapi_type in TypeEnum:
if native_type == getattr(dialect.dbapi, dbapi_type.value, None):
type_ = type_map[dbapi_type]
break
else:
# fallback to string
type_ = ColumnType.STR
columns.append(ColumnMetadata(name=name, type=type_))
return columns
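# Hedged illustration (not in the original module): a DB-API cursor
# description is a sequence of 7-tuples, of which only (name, type_code) are
# consumed above; assuming `dialect` is a SQLAlchemy dialect whose dbapi
# exposes the standard NUMBER type object:
#
#     description = [("id", dialect.dbapi.NUMBER, None, None, None, None, None)]
#     get_columns_from_description(description, dialect)
#     # -> [ColumnMetadata(name="id", type=ColumnType.FLOAT)]
#
# Any type_code matching no TypeEnum attribute falls back to ColumnType.STR.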
def run_query(query: Query) -> List[Tuple[str, List[ColumnMetadata], Stream]]:
"""
Run a query and return its results.
For each statement we return a tuple with the statement SQL, a description of the
columns (name and type) and a stream of rows (tuples).
"""
engine = create_engine(query.database.URI, **query.database.extra_params)
connection = engine.connect()
output: List[Tuple[str, List[ColumnMetadata], Stream]] = []
statements = sqlparse.parse(query.executed_query)
for statement in statements:
# Druid doesn't like statements that end in a semicolon...
sql = str(statement).strip().rstrip(";")
results = connection.execute(text(sql))
stream = (tuple(row) for row in results)
columns = get_columns_from_description(
results.cursor.description,
engine.dialect,
)
output.append((sql, columns, stream))
return output
def process_query(
session: Session,
settings: Settings,
query: Query,
) -> QueryWithResults:
"""
Process a query.
"""
query.scheduled = datetime.now(timezone.utc)
query.state = QueryState.SCHEDULED
query.executed_query = query.submitted_query
errors = []
query.started = datetime.now(timezone.utc)
try:
root = []
for sql, columns, stream in run_query(query):
rows = list(stream)
root.append(
StatementResults(
sql=sql,
columns=columns,
rows=rows,
row_count=len(rows),
),
)
results = QueryResults(__root__=root)
query.state = QueryState.FINISHED
query.progress = 1.0
except Exception as ex: # pylint: disable=broad-except
results = QueryResults(__root__=[])
query.state = QueryState.FAILED
errors = [str(ex)]
query.finished = datetime.now(timezone.utc)
session.add(query)
session.commit()
session.refresh(query)
settings.results_backend.add(str(query.id), results.json())
return QueryWithResults(results=results, errors=errors, **query.dict())
|
[
"sqlmodel.create_engine"
] |
[((1834, 1898), 'sqlmodel.create_engine', 'create_engine', (['query.database.URI'], {}), '(query.database.URI, **query.database.extra_params)\n', (1847, 1898), False, 'from sqlmodel import Session, create_engine\n'), ((2015, 2051), 'sqlparse.parse', 'sqlparse.parse', (['query.executed_query'], {}), '(query.executed_query)\n', (2029, 2051), False, 'import sqlparse\n'), ((2657, 2683), 'datetime.datetime.now', 'datetime.now', (['timezone.utc'], {}), '(timezone.utc)\n', (2669, 2683), False, 'from datetime import datetime, timezone\n'), ((2809, 2835), 'datetime.datetime.now', 'datetime.now', (['timezone.utc'], {}), '(timezone.utc)\n', (2821, 2835), False, 'from datetime import datetime, timezone\n'), ((3490, 3516), 'datetime.datetime.now', 'datetime.now', (['timezone.utc'], {}), '(timezone.utc)\n', (3502, 3516), False, 'from datetime import datetime, timezone\n'), ((3197, 3224), 'datajunction.models.query.QueryResults', 'QueryResults', ([], {'__root__': 'root'}), '(__root__=root)\n', (3209, 3224), False, 'from datajunction.models.query import ColumnMetadata, Query, QueryResults, QueryState, QueryWithResults, StatementResults\n'), ((1479, 1516), 'datajunction.models.query.ColumnMetadata', 'ColumnMetadata', ([], {'name': 'name', 'type': 'type_'}), '(name=name, type=type_)\n', (1493, 1516), False, 'from datajunction.models.query import ColumnMetadata, Query, QueryResults, QueryState, QueryWithResults, StatementResults\n'), ((2239, 2248), 'sqlalchemy.text', 'text', (['sql'], {}), '(sql)\n', (2243, 2248), False, 'from sqlalchemy import text\n'), ((3375, 3400), 'datajunction.models.query.QueryResults', 'QueryResults', ([], {'__root__': '[]'}), '(__root__=[])\n', (3387, 3400), False, 'from datajunction.models.query import ColumnMetadata, Query, QueryResults, QueryState, QueryWithResults, StatementResults\n')]
|
import numpy as nm
from sfepy.terms.terms import Term, terms
_msg_missing_data = 'missing family data!'
class HyperElasticBase(Term):
"""
Base class for all hyperelastic terms in TL/UL formulation.
`HyperElasticBase.__call__()` computes element contributions given either
    stress (-> residual) or tangent modulus (-> tangent stiffness matrix),
i.e. constitutive relation type (CRT) related data. The CRT data are
computed in subclasses implementing particular CRT (e.g. neo-Hookean
material), in self.compute_crt_data().
Modes:
- 0: total formulation
- 1: updated formulation
Notes
-----
This is not a proper Term!
"""
arg_types = ('material', 'virtual', 'state')
arg_shapes = {'material' : '1, 1', 'virtual' : ('D', 'state'),
'state' : 'D'}
@staticmethod
def integrate(out, val_qp, vg, fmode):
if fmode == 2:
out[:] = val_qp
status = 0
else:
status = vg.integrate(out, val_qp, fmode)
return status
@staticmethod
def function(out, fun, *args):
return fun(out, *args)
def __init__(self, *args, **kwargs):
Term.__init__(self, *args, **kwargs)
self.stress_cache = None
def get_family_data(self, state, cache_name, data_names):
"""
Notes
-----
`data_names` argument is ignored for now.
"""
name = state.name
step_cache = state.evaluate_cache.setdefault(cache_name, {})
cache = step_cache.setdefault(self.arg_steps[name], {})
vg, _, key = self.get_mapping(state, return_key=True)
data_key = key + (self.arg_derivatives[name],)
if data_key in cache:
out = cache[data_key]
else:
out = self.compute_family_data(state)
cache[data_key] = out
return out
def compute_stress(self, mat, family_data, **kwargs):
out = nm.empty_like(family_data.green_strain)
get = family_data.get
fargs = [get(name, msg_if_none=_msg_missing_data)
for name in self.family_data_names]
self.stress_function(out, mat, *fargs, **kwargs)
return out
def compute_tan_mod(self, mat, family_data, **kwargs):
shape = list(family_data.green_strain.shape)
shape[-1] = shape[-2]
out = nm.empty(shape, dtype=nm.float64)
get = family_data.get
fargs = [get(name, msg_if_none=_msg_missing_data)
for name in self.family_data_names]
self.tan_mod_function(out, mat, *fargs, **kwargs)
return out
def get_fargs(self, mat, virtual, state,
mode=None, term_mode=None, diff_var=None, **kwargs):
vg, _ = self.get_mapping(state)
fd = self.get_family_data(state, self.fd_cache_name,
self.family_data_names)
if mode == 'weak':
if diff_var is None:
stress = self.compute_stress(mat, fd, **kwargs)
self.stress_cache = stress
tan_mod = nm.array([0], ndmin=4, dtype=nm.float64)
fmode = 0
else:
stress = self.stress_cache
if stress is None:
stress = self.compute_stress(mat, fd, **kwargs)
tan_mod = self.compute_tan_mod(mat, fd, **kwargs)
fmode = 1
return (self.weak_function,
stress, tan_mod, fd.mtx_f, fd.det_f, vg, fmode,
self.hyperelastic_mode)
elif mode in ('el_avg', 'qp'):
if term_mode == 'strain':
out_qp = fd.green_strain
elif term_mode == 'stress':
out_qp = self.compute_stress(mat, fd, **kwargs)
else:
raise ValueError('unsupported term mode in %s! (%s)'
% (self.name, term_mode))
fmode = {'el_avg' : 1, 'qp' : 2}[mode]
return self.integrate, out_qp, vg, fmode
else:
raise ValueError('unsupported evaluation mode in %s! (%s)'
% (self.name, mode))
def get_eval_shape(self, mat, virtual, state,
mode=None, term_mode=None, diff_var=None, **kwargs):
n_el, n_qp, dim, n_en, n_c = self.get_data_shape(state)
        sym = dim * (dim + 1) // 2  # integer division: sym is used in a shape
if mode != 'qp':
n_qp = 1
return (n_el, n_qp, sym, 1), state.dtype
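# Hedged sketch (not part of sfepy) of what a concrete CRT subclass supplies;
# the attribute names follow their uses in compute_stress(),
# compute_tan_mod() and get_fargs(), while the values are illustrative:
#
#     class ExampleHyperElasticTerm(HyperElasticBase):
#         name = 'dw_example_he'
#         family_data_names = ['det_f', 'sym_c']  # keys into family data
#         stress_function = staticmethod(...)     # fills the stress in `out`
#         tan_mod_function = staticmethod(...)    # fills the tangent modulus
#         hyperelastic_mode = 0                   # 0: total, 1: updated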
class DeformationGradientTerm(Term):
r"""
Deformation gradient :math:`\ull{F}` in quadrature points for
`term_mode='def_grad'` (default) or the jacobian :math:`J` if
`term_mode='jacobian'`.
Supports 'eval', 'el_avg' and 'qp' evaluation modes.
:Definition:
.. math::
\ull{F} = \pdiff{\ul{x}}{\ul{X}}|_{qp}
= \ull{I} + \pdiff{\ul{u}}{\ul{X}}|_{qp} \;, \\
\ul{x} = \ul{X} + \ul{u} \;, J = \det{(\ull{F})}
:Arguments:
- parameter : :math:`\ul{u}`
"""
name = 'ev_def_grad'
arg_types = ('parameter',)
arg_shapes = {'parameter' : 'D'}
@staticmethod
def function(out, vec, vg, econn, term_mode, fmode):
d = 1 if term_mode == 'jacobian' else vg.dim
out_qp = nm.empty((out.shape[0], vg.n_qp, d, d), dtype=out.dtype)
mode = 1 if term_mode == 'jacobian' else 0
terms.dq_def_grad(out_qp, vec, vg, econn, mode)
if fmode == 2:
out[:] = out_qp
status = 0
else:
status = vg.integrate(out, out_qp, fmode)
return status
def get_fargs(self, parameter,
mode=None, term_mode=None, diff_var=None, **kwargs):
ap, vg = self.get_approximation(parameter)
vec = self.get_vector(parameter)
fmode = {'eval' : 0, 'el_avg' : 1, 'qp' : 2}.get(mode, 1)
return vec, vg, ap.econn, term_mode, fmode
def get_eval_shape(self, parameter,
mode=None, term_mode=None, diff_var=None, **kwargs):
n_el, n_qp, dim, n_en, n_c = self.get_data_shape(parameter)
if mode != 'qp':
n_qp = 1
if term_mode == 'jacobian':
return (n_el, n_qp, 1, 1), parameter.dtype
else: # 'def_grad'
return (n_el, n_qp, dim, dim), parameter.dtype
|
[
"sfepy.terms.terms.Term.__init__",
"sfepy.terms.terms.terms.dq_def_grad"
] |
[((1195, 1231), 'sfepy.terms.terms.Term.__init__', 'Term.__init__', (['self', '*args'], {}), '(self, *args, **kwargs)\n', (1208, 1231), False, 'from sfepy.terms.terms import Term, terms\n'), ((1967, 2006), 'numpy.empty_like', 'nm.empty_like', (['family_data.green_strain'], {}), '(family_data.green_strain)\n', (1980, 2006), True, 'import numpy as nm\n'), ((2384, 2417), 'numpy.empty', 'nm.empty', (['shape'], {'dtype': 'nm.float64'}), '(shape, dtype=nm.float64)\n', (2392, 2417), True, 'import numpy as nm\n'), ((5283, 5339), 'numpy.empty', 'nm.empty', (['(out.shape[0], vg.n_qp, d, d)'], {'dtype': 'out.dtype'}), '((out.shape[0], vg.n_qp, d, d), dtype=out.dtype)\n', (5291, 5339), True, 'import numpy as nm\n'), ((5400, 5447), 'sfepy.terms.terms.terms.dq_def_grad', 'terms.dq_def_grad', (['out_qp', 'vec', 'vg', 'econn', 'mode'], {}), '(out_qp, vec, vg, econn, mode)\n', (5417, 5447), False, 'from sfepy.terms.terms import Term, terms\n'), ((3110, 3150), 'numpy.array', 'nm.array', (['[0]'], {'ndmin': '(4)', 'dtype': 'nm.float64'}), '([0], ndmin=4, dtype=nm.float64)\n', (3118, 3150), True, 'import numpy as nm\n')]
|
r"""
Piezo-elasticity problem - linear elastic material with piezoelectric
effects.
Find :math:`\ul{u}`, :math:`\phi` such that:
.. math::
- \omega^2 \int_{Y} \rho\ \ul{v} \cdot \ul{u}
+ \int_{Y} D_{ijkl}\ e_{ij}(\ul{v}) e_{kl}(\ul{u})
- \int_{Y_2} g_{kij}\ e_{ij}(\ul{v}) \nabla_k \phi
= 0
\;, \quad \forall \ul{v} \;,
\int_{Y_2} g_{kij}\ e_{ij}(\ul{u}) \nabla_k \psi
+ \int_{Y} K_{ij} \nabla_i \psi \nabla_j \phi
= 0
\;, \quad \forall \psi \;,
where
.. math::
D_{ijkl} = \mu (\delta_{ik} \delta_{jl}+\delta_{il} \delta_{jk}) +
\lambda \ \delta_{ij} \delta_{kl}
\;.
"""
import os
import numpy as nm
from sfepy import data_dir
from sfepy.discrete.fem import MeshIO
filename_mesh = data_dir + '/meshes/2d/special/circle_in_square.mesh'
## filename_mesh = data_dir + '/meshes/2d/special/circle_in_square_small.mesh'
## filename_mesh = data_dir + '/meshes/3d/special/cube_sphere.mesh'
## filename_mesh = data_dir + '/meshes/2d/special/cube_cylinder.mesh'
omega = 1
omega_squared = omega**2
conf_dir = os.path.dirname(__file__)
io = MeshIO.any_from_filename(filename_mesh, prefix_dir=conf_dir)
bbox, dim = io.read_bounding_box( ret_dim = True )
geom = {3 : '3_4', 2 : '2_3'}[dim]
x_left, x_right = bbox[:,0]
regions = {
'Y' : 'all',
'Y1' : 'cells of group 1',
'Y2' : 'cells of group 2',
'Y2_Surface': ('r.Y1 *v r.Y2', 'facet'),
'Left' : ('vertices in (x < %f)' % (x_left + 1e-3), 'facet'),
'Right' : ('vertices in (x > %f)' % (x_right - 1e-3), 'facet'),
}
material_2 = {
'name' : 'inclusion',
# epoxy
'function' : 'get_inclusion_pars',
}
def get_inclusion_pars(ts, coor, mode=None, **kwargs):
"""TODO: implement proper 3D -> 2D transformation of constitutive
matrices."""
if mode == 'qp':
n_nod, dim = coor.shape
        sym = (dim + 1) * dim // 2  # integer division: sym is used in a shape
dielectric = nm.eye( dim, dtype = nm.float64 )
# !!!
coupling = nm.ones( (dim, sym), dtype = nm.float64 )
# coupling[0,1] = 0.2
out = {
# Lame coefficients in 1e+10 Pa.
'lam' : 0.1798,
'mu' : 0.148,
# dielectric tensor
'dielectric' : dielectric,
# piezoelectric coupling
'coupling' : coupling,
'density' : 0.1142, # in 1e4 kg/m3
}
        for key, val in out.items():  # Python 3: dict has no iteritems()
out[key] = nm.tile(val, (coor.shape[0], 1, 1))
return out
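# Hedged illustration (not in the original example file): in 'qp' mode every
# material parameter must carry one value per evaluation point, which is what
# the nm.tile() above arranges, e.g. for 5 points in 2D:
#
#     out = get_inclusion_pars(None, nm.zeros((5, 2)), mode='qp')
#     out['dielectric'].shape   # -> (5, 2, 2)
#     out['coupling'].shape     # -> (5, 2, 3), sym = dim*(dim+1)//2 = 3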
functions = {
'get_inclusion_pars' : (get_inclusion_pars,),
}
field_0 = {
'name' : 'displacement',
'dtype' : nm.float64,
'shape' : dim,
'region' : 'Y',
'approx_order' : 1,
}
field_2 = {
'name' : 'potential',
'dtype' : nm.float64,
'shape' : (1,),
'region' : 'Y',
'approx_order' : 1,
}
variables = {
'u' : ('unknown field', 'displacement', 0),
'v' : ('test field', 'displacement', 'u'),
'phi' : ('unknown field', 'potential', 1),
'psi' : ('test field', 'potential', 'phi'),
}
ebcs = {
'u1' : ('Left', {'u.all' : 0.0}),
'u2' : ('Right', {'u.0' : 0.1}),
'phi' : ('Y2_Surface', {'phi.all' : 0.0}),
}
integral_1 = {
'name' : 'i',
'order' : 2,
}
equations = {
'1' : """- %f * dw_volume_dot.i.Y( inclusion.density, v, u )
+ dw_lin_elastic_iso.i.Y( inclusion.lam, inclusion.mu, v, u )
- dw_piezo_coupling.i.Y2( inclusion.coupling, v, phi )
= 0""" % omega_squared,
'2' : """dw_piezo_coupling.i.Y2( inclusion.coupling, u, psi )
+ dw_diffusion.i.Y( inclusion.dielectric, psi, phi )
= 0""",
}
##
# Solvers etc.
solver_0 = {
'name' : 'ls',
'kind' : 'ls.scipy_direct',
}
solver_1 = {
'name' : 'newton',
'kind' : 'nls.newton',
'i_max' : 1,
'eps_a' : 1e-10,
'eps_r' : 1.0,
'macheps' : 1e-16,
'lin_red' : 1e-2, # Linear system error < (eps_a * lin_red).
'ls_red' : 0.1,
'ls_red_warp': 0.001,
'ls_on' : 1.1,
'ls_min' : 1e-5,
'check' : 0,
'delta' : 1e-6,
'problem' : 'nonlinear', # 'nonlinear' or 'linear' (ignore i_max)
}
|
[
"sfepy.discrete.fem.MeshIO.any_from_filename"
] |
[((1055, 1080), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1070, 1080), False, 'import os\n'), ((1086, 1146), 'sfepy.discrete.fem.MeshIO.any_from_filename', 'MeshIO.any_from_filename', (['filename_mesh'], {'prefix_dir': 'conf_dir'}), '(filename_mesh, prefix_dir=conf_dir)\n', (1110, 1146), False, 'from sfepy.discrete.fem import MeshIO\n'), ((1884, 1913), 'numpy.eye', 'nm.eye', (['dim'], {'dtype': 'nm.float64'}), '(dim, dtype=nm.float64)\n', (1890, 1913), True, 'import numpy as nm\n'), ((1951, 1988), 'numpy.ones', 'nm.ones', (['(dim, sym)'], {'dtype': 'nm.float64'}), '((dim, sym), dtype=nm.float64)\n', (1958, 1988), True, 'import numpy as nm\n'), ((2407, 2442), 'numpy.tile', 'nm.tile', (['val', '(coor.shape[0], 1, 1)'], {}), '(val, (coor.shape[0], 1, 1))\n', (2414, 2442), True, 'import numpy as nm\n')]
|
# -*- coding: utf-8 -*-
from datetime import datetime
from typing import Optional
from sqlmodel import Column, DateTime, Field, Relationship, SQLModel
class Device(SQLModel, table=True): # type: ignore
"""The Device model.
It is used to model a device.
"""
# setup the primary key of the table
uuid: str = Field(primary_key=True)
# setup the columns / properties
name: Optional[str]
type: Optional[str]
arch: Optional[str]
cpid: Optional[str]
cpu_type: Optional[str]
cpu_architecture: Optional[str]
cpu_features: Optional[str]
processor_count: Optional[int]
coprocessor_count: Optional[int]
product_name: Optional[str]
floating_point_speed: Optional[float]
integer_speed: Optional[float]
total_disk_space: Optional[float]
free_disk_space: Optional[float]
swap_space: Optional[float]
domain_name: Optional[str]
operating_system_version: Optional[str]
boinc_version: Optional[str]
scitizen_version: Optional[str]
platform: Optional[str]
# setup the pseudo-columns (metadata related to the record)
created_at: Optional[datetime] = Field(
sa_column=Column(DateTime(), default=datetime.utcnow, name="_created_at")
)
updated_at: Optional[datetime] = Field(
sa_column=Column(DateTime(), onupdate=datetime.utcnow, name="_updated_at")
)
class Project(SQLModel, table=True): # type: ignore
"""The Project model.
It is used to model a project.
"""
# setup the primary key of the table
uuid: str = Field(primary_key=True)
# setup the columns / properties
name: Optional[str]
avatar: Optional[str]
description: Optional[str]
general_area: Optional[str]
home: Optional[str]
image: Optional[str]
is_active: Optional[bool] = True
keywords: Optional[str]
name: Optional[str] # type: ignore
platforms: Optional[str]
specific_area: Optional[str]
summary: Optional[str]
url: Optional[str]
web_url: Optional[str]
weak_authenticator: Optional[str]
# setup the pseudo-columns (metadata related to the record)
created_at: Optional[datetime] = Field(
sa_column=Column(DateTime(), default=datetime.utcnow, name="_created_at")
)
updated_at: Optional[datetime] = Field(
sa_column=Column(DateTime(), onupdate=datetime.utcnow, name="_updated_at")
)
# setup the relationship
task: Optional["Task"] = Relationship(back_populates="project_rel")
class Task(SQLModel, table=True): # type: ignore
"""The Task model.
It is used to model a task.
"""
# setup the primary key of the table
uuid: str = Field(primary_key=True)
# setup the columns / properties
active_task_state: Optional[str]
app_version_num: Optional[float]
bytes_received: Optional[float]
bytes_sent: Optional[float]
checkpoint_cpu_time: Optional[float]
completed_at: Optional[datetime]
current_cpu_time: Optional[float]
elapsed_time: Optional[float]
estimated_cpu_time_remaining: Optional[float]
exit_code: Optional[float]
exit_statement: Optional[str]
fraction_done: Optional[float]
name: Optional[str]
page_fault_rate: Optional[float]
pid: Optional[float]
plan_class: Optional[str]
platform: Optional[str]
progress_rate: Optional[float]
received_at: Optional[datetime]
report_deadline_at: Optional[datetime]
scheduler_state: Optional[str]
set_size: Optional[float]
slot: Optional[float]
slot_path: Optional[str]
state: Optional[str]
swap_size: Optional[float]
version_num: Optional[float]
wu_name: Optional[str]
# setup the pseudo-columns (metadata related to the record)
created_at: Optional[datetime] = Field(
sa_column=Column(DateTime(), default=datetime.utcnow, name="_created_at")
)
updated_at: Optional[datetime] = Field(
sa_column=Column(DateTime(), onupdate=datetime.utcnow, name="_updated_at")
)
# setup the relationship
project_url: Optional[str] = Field(foreign_key="project.url")
project_rel: Optional[Project] = Relationship(back_populates="task")
class TaskWithProject(Task):
"""The TaskWithProject model.
It is used to model a task linked to a project,
so the API will be able in one GET to fetch a task with the linked project.
"""
# setup the columns / properties
project: Optional[Project]
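# Hedged usage sketch; the engine URL and the query below are illustrative,
# not part of this module:
if __name__ == "__main__":
    from sqlmodel import Session, create_engine, select
    engine = create_engine("sqlite:///scitizen.db")
    with Session(engine) as session:
        task = session.exec(select(Task)).first()
        if task is not None:
            merged = TaskWithProject(**task.dict(), project=task.project_rel)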
|
[
"sqlmodel.Field",
"sqlmodel.DateTime",
"sqlmodel.Relationship"
] |
[((333, 356), 'sqlmodel.Field', 'Field', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (338, 356), False, 'from sqlmodel import Column, DateTime, Field, Relationship, SQLModel\n'), ((1560, 1583), 'sqlmodel.Field', 'Field', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (1565, 1583), False, 'from sqlmodel import Column, DateTime, Field, Relationship, SQLModel\n'), ((2455, 2497), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""project_rel"""'}), "(back_populates='project_rel')\n", (2467, 2497), False, 'from sqlmodel import Column, DateTime, Field, Relationship, SQLModel\n'), ((2672, 2695), 'sqlmodel.Field', 'Field', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (2677, 2695), False, 'from sqlmodel import Column, DateTime, Field, Relationship, SQLModel\n'), ((4063, 4095), 'sqlmodel.Field', 'Field', ([], {'foreign_key': '"""project.url"""'}), "(foreign_key='project.url')\n", (4068, 4095), False, 'from sqlmodel import Column, DateTime, Field, Relationship, SQLModel\n'), ((4133, 4168), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""task"""'}), "(back_populates='task')\n", (4145, 4168), False, 'from sqlmodel import Column, DateTime, Field, Relationship, SQLModel\n'), ((1181, 1191), 'sqlmodel.DateTime', 'DateTime', ([], {}), '()\n', (1189, 1191), False, 'from sqlmodel import Column, DateTime, Field, Relationship, SQLModel\n'), ((1313, 1323), 'sqlmodel.DateTime', 'DateTime', ([], {}), '()\n', (1321, 1323), False, 'from sqlmodel import Column, DateTime, Field, Relationship, SQLModel\n'), ((2200, 2210), 'sqlmodel.DateTime', 'DateTime', ([], {}), '()\n', (2208, 2210), False, 'from sqlmodel import Column, DateTime, Field, Relationship, SQLModel\n'), ((2332, 2342), 'sqlmodel.DateTime', 'DateTime', ([], {}), '()\n', (2340, 2342), False, 'from sqlmodel import Column, DateTime, Field, Relationship, SQLModel\n'), ((3804, 3814), 'sqlmodel.DateTime', 'DateTime', ([], {}), '()\n', (3812, 3814), False, 'from sqlmodel import Column, DateTime, Field, Relationship, SQLModel\n'), ((3936, 3946), 'sqlmodel.DateTime', 'DateTime', ([], {}), '()\n', (3944, 3946), False, 'from sqlmodel import Column, DateTime, Field, Relationship, SQLModel\n')]
|
"""
Quantum oscillator.
See :ref:`quantum-quantum_common`.
"""
from __future__ import absolute_import
from sfepy.linalg import norm_l2_along_axis
from examples.quantum.quantum_common import common
def get_exact(n_eigs, box_size, dim):
if dim == 2:
eigs = [1] + [2]*2 + [3]*3 + [4]*4 + [5]*5 + [6]*6
elif dim == 3:
eigs = [float(1)/2 + x for x in [1] + [2]*3 + [3]*6 + [4]*10]
return eigs
def fun_v(ts, coor, mode=None, **kwargs):
if not mode == 'qp': return
out = {}
C = 0.5
val = C * norm_l2_along_axis(coor, axis=1, squared=True)
val.shape = (val.shape[0], 1, 1)
out['V'] = val
return out
def define(n_eigs=20, tau=0.0):
l = common(fun_v, get_exact=get_exact, n_eigs=n_eigs, tau=tau)
return l
|
[
"sfepy.linalg.norm_l2_along_axis"
] |
[((696, 754), 'examples.quantum.quantum_common.common', 'common', (['fun_v'], {'get_exact': 'get_exact', 'n_eigs': 'n_eigs', 'tau': 'tau'}), '(fun_v, get_exact=get_exact, n_eigs=n_eigs, tau=tau)\n', (702, 754), False, 'from examples.quantum.quantum_common import common\n'), ((536, 582), 'sfepy.linalg.norm_l2_along_axis', 'norm_l2_along_axis', (['coor'], {'axis': '(1)', 'squared': '(True)'}), '(coor, axis=1, squared=True)\n', (554, 582), False, 'from sfepy.linalg import norm_l2_along_axis\n')]
|
import os
import numpy as nm
try:
from enthought.tvtk.api import tvtk
from enthought.mayavi.sources.vtk_data_source import VTKDataSource
from enthought.pyface.timer.api import Timer
except ImportError:
from tvtk.api import tvtk
from mayavi.sources.vtk_data_source import VTKDataSource
from pyface.timer.api import Timer
from dataset_manager import DatasetManager
from sfepy.base.base import Struct, basestr
from sfepy.postprocess.utils import mlab
from sfepy.discrete.fem import Mesh
from sfepy.discrete.fem.meshio import MeshIO, vtk_cell_types, supported_formats
def create_file_source(filename, watch=False, offscreen=True):
"""Factory function to create a file source corresponding to the
given file format."""
kwargs = {'watch' : watch, 'offscreen' : offscreen}
if isinstance(filename, basestr):
fmt = os.path.splitext(filename)[1]
is_sequence = False
else: # A sequence.
fmt = os.path.splitext(filename[0])[1]
is_sequence = True
fmt = fmt.lower()
if fmt == '.vtk':
# VTK is supported directly by Mayavi, no need to use MeshIO.
if is_sequence:
return VTKSequenceFileSource(filename, **kwargs)
else:
return VTKFileSource(filename, **kwargs)
elif fmt in supported_formats.keys():
if is_sequence:
if fmt == '.h5':
raise ValueError('format .h5 does not support file sequences!')
else:
return GenericSequenceFileSource(filename, **kwargs)
else:
return GenericFileSource(filename, **kwargs)
else:
raise ValueError('unknown file format! (%s)' % fmt)
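# Added usage sketch, not part of the original module; the file name is a
# hypothetical placeholder:
#   file_source = create_file_source('output.vtk', watch=False)
#   vis_source = file_source()  # the Mayavi-compatible source object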
class FileSource(Struct):
"""General file source."""
def __init__(self, filename, watch=False, offscreen=True):
"""Create a file source using the given file name."""
mlab.options.offscreen = offscreen
self.watch = watch
self.filename = filename
self.reset()
def __call__(self, step=0):
"""Get the file source."""
if self.source is None:
self.source = self.create_source()
if self.watch:
self.timer = Timer(1000, self.poll_file)
return self.source
def reset(self):
"""Reset."""
self.mat_id_name = None
self.source = None
self.notify_obj = None
self.steps = []
self.times = []
self.step = 0
self.time = 0.0
if self.watch:
self.last_stat = os.stat(self.filename)
def setup_mat_id(self, mat_id_name='mat_id', single_color=False):
self.mat_id_name = mat_id_name
self.single_color = single_color
def get_step_time(self, step=None, time=None):
"""
Set current step and time to the values closest greater or equal to
either step or time. Return the found values.
"""
if (step is not None) and len(self.steps):
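            # Added note: a negative step counts from the last step,
            # mirroring Python's negative indexing.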
step = step if step >= 0 else self.steps[-1] + step + 1
ii = nm.searchsorted(self.steps, step)
ii = nm.clip(ii, 0, len(self.steps) - 1)
self.step = self.steps[ii]
if len(self.times):
self.time = self.times[ii]
elif (time is not None) and len(self.times):
ii = nm.searchsorted(self.times, time)
ii = nm.clip(ii, 0, len(self.steps) - 1)
self.step = self.steps[ii]
self.time = self.times[ii]
return self.step, self.time
def get_ts_info(self):
return self.steps, self.times
def get_mat_id(self, mat_id_name='mat_id'):
"""
Get material ID numbers of the underlying mesh elements.
"""
if self.source is not None:
dm = DatasetManager(dataset=self.source.outputs[0])
mat_id = dm.cell_scalars[mat_id_name]
return mat_id
def file_changed(self):
pass
def setup_notification(self, obj, attr):
"""The attribute 'attr' of the object 'obj' will be set to True
when the source file is watched and changes."""
self.notify_obj = obj
self.notify_attr = attr
def poll_file(self):
"""Check the source file's time stamp and notify the
self.notify_obj in case it changed. Subclasses should implement
the file_changed() method."""
if not self.notify_obj:
return
s = os.stat(self.filename)
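        # Added note: index -2 of the legacy stat tuple is st_mtime, the
        # last modification time.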
if s[-2] == self.last_stat[-2]:
setattr(self.notify_obj, self.notify_attr, False)
else:
self.file_changed()
setattr(self.notify_obj, self.notify_attr, True)
self.last_stat = s
class VTKFileSource(FileSource):
"""A thin wrapper around mlab.pipeline.open()."""
def create_source(self):
"""Create a VTK file source """
return mlab.pipeline.open(self.filename)
def get_bounding_box(self):
bbox = nm.array(self.source.reader.unstructured_grid_output.bounds)
return bbox.reshape((3,2)).T
def set_filename(self, filename, vis_source):
self.filename = filename
vis_source.base_file_name = filename
# Force re-read.
vis_source.reader.modified()
vis_source.update()
# Propagate changes in the pipeline.
vis_source.data_changed = True
class VTKSequenceFileSource(VTKFileSource):
"""A thin wrapper around mlab.pipeline.open() for VTK file sequences."""
def __init__(self, *args, **kwargs):
FileSource.__init__(self, *args, **kwargs)
self.steps = nm.arange(len(self.filename), dtype=nm.int32)
def create_source(self):
"""Create a VTK file source """
return mlab.pipeline.open(self.filename[0])
def set_filename(self, filename, vis_source):
self.filename = filename
vis_source.base_file_name = filename[self.step]
class GenericFileSource(FileSource):
"""File source usable with any format supported by MeshIO classes."""
def __init__(self, *args, **kwargs):
FileSource.__init__(self, *args, **kwargs)
self.read_common(self.filename)
def read_common(self, filename):
self.io = MeshIO.any_from_filename(filename)
self.steps, self.times, _ = self.io.read_times()
self.mesh = Mesh.from_file(filename)
self.n_nod, self.dim = self.mesh.coors.shape
def create_source(self):
"""
Create a VTK source from data in a SfePy-supported file.
Notes
-----
All data need to be set here, otherwise time stepping will not
work properly - data added by user later will be thrown away on
time step change.
"""
if self.io is None:
self.read_common(self.filename)
dataset = self.create_dataset()
try:
out = self.io.read_data(self.step)
except ValueError:
out = None
if out is not None:
self.add_data_to_dataset(dataset, out)
if self.mat_id_name is not None:
mat_id = nm.concatenate(self.mesh.mat_ids)
if self.single_color:
rm = mat_id.min(), mat_id.max()
mat_id[mat_id > rm[0]] = rm[1]
dm = DatasetManager(dataset=dataset)
dm.add_array(mat_id, self.mat_id_name, 'cell')
src = VTKDataSource(data=dataset)
# src.print_traits()
# debug()
return src
def get_bounding_box(self):
bbox = self.mesh.get_bounding_box()
if self.dim == 2:
bbox = nm.c_[bbox, [0.0, 0.0]]
return bbox
def set_filename(self, filename, vis_source):
self.filename = filename
self.source = self.create_source()
vis_source.data = self.source.data
def get_mat_id(self, mat_id_name='mat_id'):
"""
Get material ID numbers of the underlying mesh elements.
"""
if self.source is not None:
mat_id = nm.concatenate(self.mesh.mat_ids)
return mat_id
def file_changed(self):
self.steps, self.times, _ = self.io.read_times()
def create_dataset(self):
"""Create a tvtk.UnstructuredGrid dataset from the Mesh instance of the
file source."""
mesh = self.mesh
n_nod, dim = self.n_nod, self.dim
n_el, n_els, n_e_ps = mesh.n_el, mesh.n_els, mesh.n_e_ps
if dim == 2:
nod_zz = nm.zeros((n_nod, 1), dtype=mesh.coors.dtype)
points = nm.c_[mesh.coors, nod_zz]
else:
points = mesh.coors
dataset = tvtk.UnstructuredGrid(points=points)
cell_types = []
cells = []
offset = [0]
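        # Added note: VTK's flat cell array stores each cell as
        # [n_points, p0, p1, ...]; the per-cell record lengths collected in
        # 'offset' are turned into start indices by the cumsum below.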
for ig, conn in enumerate(mesh.conns):
cell_types += [vtk_cell_types[mesh.descs[ig]]] * n_els[ig]
nn = nm.array([conn.shape[1]] * n_els[ig])
aux = nm.c_[nn[:,None], conn]
cells.extend(aux.ravel())
offset.extend([aux.shape[1]] * n_els[ig])
cells = nm.array(cells)
cell_types = nm.array(cell_types)
offset = nm.cumsum(offset)[:-1]
cell_array = tvtk.CellArray()
cell_array.set_cells(n_el, cells)
dataset.set_cells(cell_types, offset, cell_array)
return dataset
def add_data_to_dataset(self, dataset, data):
"""Add point and cell data to the dataset."""
dim = self.dim
        sym = (dim + 1) * dim // 2
        dm = DatasetManager(dataset=dataset)
        for key, val in data.items():
vd = val.data
## print vd.shape
if val.mode == 'vertex':
if vd.shape[1] == 1:
aux = vd.reshape((vd.shape[0],))
elif vd.shape[1] == 2:
zz = nm.zeros((vd.shape[0], 1), dtype=vd.dtype)
aux = nm.c_[vd, zz]
elif vd.shape[1] == 3:
aux = vd
else:
raise ValueError('unknown vertex data format! (%s)'\
% vd.shape)
dm.add_array(aux, key, 'point')
elif val.mode == 'cell':
ne, aux, nr, nc = vd.shape
if (nr == 1) and (nc == 1):
aux = vd.reshape((ne,))
elif (nr == dim) and (nc == 1):
if dim == 3:
aux = vd.reshape((ne, dim))
else:
                        zz = nm.zeros((vd.shape[0], 1), dtype=vd.dtype)
aux = nm.c_[vd.squeeze(), zz]
elif (((nr == sym) or (nr == (dim * dim))) and (nc == 1)) \
or ((nr == dim) and (nc == dim)):
vd = vd.squeeze()
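                    # Added note: VTK expects full row-major 3x3 tensors,
                    # so symmetric storage [11, 22, 33, 12, 13, 23] (and the
                    # dim * dim variant) is expanded to nine components.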
if dim == 3:
if nr == sym:
aux = vd[:,[0,3,4,3,1,5,4,5,2]]
elif nr == (dim * dim):
aux = vd[:,[0,3,4,6,1,5,7,8,2]]
else:
aux = vd.reshape((vd.shape[0], dim*dim))
else:
                        zz = nm.zeros((vd.shape[0], 1), dtype=vd.dtype)
if nr == sym:
aux = nm.c_[vd[:,[0,2]], zz, vd[:,[2,1]],
zz, zz, zz, zz]
elif nr == (dim * dim):
aux = nm.c_[vd[:,[0,2]], zz, vd[:,[3,1]],
zz, zz, zz, zz]
else:
aux = nm.c_[vd[:,0,[0,1]], zz, vd[:,1,[0,1]],
zz, zz, zz, zz]
dm.add_array(aux, key, 'cell')
class GenericSequenceFileSource(GenericFileSource):
"""File source usable with any format supported by MeshIO classes, with
exception of HDF5 (.h5), for file sequences."""
def read_common(self, filename):
self.steps = nm.arange(len(self.filename), dtype=nm.int32)
def create_source(self):
"""Create a VTK source from data in a SfePy-supported file."""
if self.io is None:
self.read_common(self.filename[self.step])
dataset = self.create_dataset()
src = VTKDataSource(data=dataset)
return src
def set_filename(self, filename, vis_source):
self.filename = filename
self.io = None
self.source = self.create_source()
vis_source.data = self.source.data
|
[
"sfepy.postprocess.utils.mlab.pipeline.open",
"sfepy.discrete.fem.Mesh.from_file",
"sfepy.discrete.fem.meshio.MeshIO.any_from_filename",
"sfepy.discrete.fem.meshio.supported_formats.keys"
] |
[((4437, 4459), 'os.stat', 'os.stat', (['self.filename'], {}), '(self.filename)\n', (4444, 4459), False, 'import os\n'), ((4873, 4906), 'sfepy.postprocess.utils.mlab.pipeline.open', 'mlab.pipeline.open', (['self.filename'], {}), '(self.filename)\n', (4891, 4906), False, 'from sfepy.postprocess.utils import mlab\n'), ((4955, 5015), 'numpy.array', 'nm.array', (['self.source.reader.unstructured_grid_output.bounds'], {}), '(self.source.reader.unstructured_grid_output.bounds)\n', (4963, 5015), True, 'import numpy as nm\n'), ((5725, 5761), 'sfepy.postprocess.utils.mlab.pipeline.open', 'mlab.pipeline.open', (['self.filename[0]'], {}), '(self.filename[0])\n', (5743, 5761), False, 'from sfepy.postprocess.utils import mlab\n'), ((6204, 6238), 'sfepy.discrete.fem.meshio.MeshIO.any_from_filename', 'MeshIO.any_from_filename', (['filename'], {}), '(filename)\n', (6228, 6238), False, 'from sfepy.discrete.fem.meshio import MeshIO, vtk_cell_types, supported_formats\n'), ((6317, 6341), 'sfepy.discrete.fem.Mesh.from_file', 'Mesh.from_file', (['filename'], {}), '(filename)\n', (6331, 6341), False, 'from sfepy.discrete.fem import Mesh\n'), ((7366, 7393), 'mayavi.sources.vtk_data_source.VTKDataSource', 'VTKDataSource', ([], {'data': 'dataset'}), '(data=dataset)\n', (7379, 7393), False, 'from mayavi.sources.vtk_data_source import VTKDataSource\n'), ((8602, 8638), 'tvtk.api.tvtk.UnstructuredGrid', 'tvtk.UnstructuredGrid', ([], {'points': 'points'}), '(points=points)\n', (8623, 8638), False, 'from tvtk.api import tvtk\n'), ((9030, 9045), 'numpy.array', 'nm.array', (['cells'], {}), '(cells)\n', (9038, 9045), True, 'import numpy as nm\n'), ((9067, 9087), 'numpy.array', 'nm.array', (['cell_types'], {}), '(cell_types)\n', (9075, 9087), True, 'import numpy as nm\n'), ((9158, 9174), 'tvtk.api.tvtk.CellArray', 'tvtk.CellArray', ([], {}), '()\n', (9172, 9174), False, 'from tvtk.api import tvtk\n'), ((9476, 9507), 'dataset_manager.DatasetManager', 'DatasetManager', ([], {'dataset': 'dataset'}), '(dataset=dataset)\n', (9490, 9507), False, 'from dataset_manager import DatasetManager\n'), ((12284, 12311), 'mayavi.sources.vtk_data_source.VTKDataSource', 'VTKDataSource', ([], {'data': 'dataset'}), '(data=dataset)\n', (12297, 12311), False, 'from mayavi.sources.vtk_data_source import VTKDataSource\n'), ((849, 875), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (865, 875), False, 'import os\n'), ((946, 975), 'os.path.splitext', 'os.path.splitext', (['filename[0]'], {}), '(filename[0])\n', (962, 975), False, 'import os\n'), ((1291, 1315), 'sfepy.discrete.fem.meshio.supported_formats.keys', 'supported_formats.keys', ([], {}), '()\n', (1313, 1315), False, 'from sfepy.discrete.fem.meshio import MeshIO, vtk_cell_types, supported_formats\n'), ((2525, 2547), 'os.stat', 'os.stat', (['self.filename'], {}), '(self.filename)\n', (2532, 2547), False, 'import os\n'), ((3041, 3074), 'numpy.searchsorted', 'nm.searchsorted', (['self.steps', 'step'], {}), '(self.steps, step)\n', (3056, 3074), True, 'import numpy as nm\n'), ((3774, 3820), 'dataset_manager.DatasetManager', 'DatasetManager', ([], {'dataset': 'self.source.outputs[0]'}), '(dataset=self.source.outputs[0])\n', (3788, 3820), False, 'from dataset_manager import DatasetManager\n'), ((7079, 7112), 'numpy.concatenate', 'nm.concatenate', (['self.mesh.mat_ids'], {}), '(self.mesh.mat_ids)\n', (7093, 7112), True, 'import numpy as nm\n'), ((7260, 7291), 'dataset_manager.DatasetManager', 'DatasetManager', ([], {'dataset': 'dataset'}), '(dataset=dataset)\n', (7274, 7291), False, 'from dataset_manager import DatasetManager\n'), ((7989, 8022), 'numpy.concatenate', 'nm.concatenate', (['self.mesh.mat_ids'], {}), '(self.mesh.mat_ids)\n', (8003, 8022), True, 'import numpy as nm\n'), ((8445, 8489), 'numpy.zeros', 'nm.zeros', (['(n_nod, 1)'], {'dtype': 'mesh.coors.dtype'}), '((n_nod, 1), dtype=mesh.coors.dtype)\n', (8453, 8489), True, 'import numpy as nm\n'), ((8840, 8877), 'numpy.array', 'nm.array', (['([conn.shape[1]] * n_els[ig])'], {}), '([conn.shape[1]] * n_els[ig])\n', (8848, 8877), True, 'import numpy as nm\n'), ((9105, 9122), 'numpy.cumsum', 'nm.cumsum', (['offset'], {}), '(offset)\n', (9114, 9122), True, 'import numpy as nm\n'), ((2190, 2217), 'pyface.timer.api.Timer', 'Timer', (['(1000)', 'self.poll_file'], {}), '(1000, self.poll_file)\n', (2195, 2217), False, 'from pyface.timer.api import Timer\n'), ((3314, 3347), 'numpy.searchsorted', 'nm.searchsorted', (['self.times', 'time'], {}), '(self.times, time)\n', (3329, 3347), True, 'import numpy as nm\n'), ((9798, 9840), 'numpy.zeros', 'nm.zeros', (['(vd.shape[0], 1)'], {'dtype': 'vd.dtype'}), '((vd.shape[0], 1), dtype=vd.dtype)\n', (9806, 9840), True, 'import numpy as nm\n'), ((10502, 10544), 'numpy.zeros', 'nm.zeros', (['(vd.shape[0], 1)'], {'dtype': 'vd.dtype'}), '((vd.shape[0], 1), dtype=vd.dtype)\n', (10510, 10544), True, 'import numpy as nm\n'), ((11168, 11210), 'numpy.zeros', 'nm.zeros', (['(vd.shape[0], 1)'], {'dtype': 'vd.dtype'}), '((vd.shape[0], 1), dtype=vd.dtype)\n', (11176, 11210), True, 'import numpy as nm\n')]
|
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
import argparse
import importlib
import json
import multiprocessing as mp
import os
import pathlib
import sys
import megengine as mge
import megengine.distributed as dist
from basecore.config import ConfigDict
from loguru import logger
from basecls.engine import ClsTester
from basecls.models import build_model, load_model
from basecls.utils import default_logging, registers, set_nccl_env, set_num_threads, setup_logger
def make_parser() -> argparse.ArgumentParser:
"""Build args parser for testing script.
Returns:
The args parser.
"""
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--dir", type=str, help="testing directory")
return parser
@logger.catch
def worker(args: argparse.Namespace):
"""Worker function for testing script.
Args:
args: args for testing script.
"""
logger.info(f"Init process group for gpu{dist.get_rank()} done")
args.dir = os.path.abspath(args.dir)
setup_logger(args.dir, "test_all_log.txt", to_loguru=True)
logger.info(f"args: {args}")
result = dict()
for f in pathlib.Path(args.dir).glob("**/*.py"):
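        # Added note: the config's directory is put on sys.path so the file
        # can be imported as a module; it is popped again after each test.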
sys.path.append(os.path.dirname(f))
module_name = os.path.splitext(os.path.basename(f))[0]
current_network = importlib.import_module(module_name)
cfg = current_network.Cfg()
weight_path = f"{os.path.splitext(f)[0]}.pkl"
if os.path.isfile(weight_path):
cfg.weights = weight_path
else:
sys.path.pop(-1)
continue
cfg.set_mode("freeze")
if cfg.fastrun:
logger.info("Using fastrun mode...")
mge.functional.debug_param.set_execution_strategy("PROFILE")
tester = build(cfg)
acc1, acc5 = tester.test()
result[module_name] = dict(acc1=acc1, acc5=acc5)
sys.path.pop(-1)
logger.info(json.dumps(result, indent=4))
with open("result.json", "w") as f:
json.dump(result, f)
def build(cfg: ConfigDict):
"""Build function for testing script.
Args:
cfg: config for testing.
Returns:
A tester.
"""
model = build_model(cfg)
load_model(model, cfg.weights)
model.eval()
default_logging(cfg, model)
dataloader = registers.dataloaders.get(cfg.data.name).build(cfg, False)
# FIXME: need atomic user_pop, maybe in MegEngine 1.5?
# tester = BaseTester(model, dataloader, AccEvaluator())
return ClsTester(cfg, model, dataloader)
def main():
"""Main function for testing script."""
parser = make_parser()
args = parser.parse_args()
mp.set_start_method("spawn")
set_nccl_env()
set_num_threads()
if not os.path.exists(args.dir):
raise ValueError("Directory does not exist")
device_count = mge.device.get_device_count("gpu")
if device_count == 0:
logger.warning("No GPU was found, testing on CPU")
worker(args)
elif device_count > 1:
mp_worker = dist.launcher(worker)
mp_worker(args)
else:
worker(args)
if __name__ == "__main__":
main()
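# Added usage sketch, derived from the argument parser above (the script
# name is assumed from the log file name):
#   python test_all.py -d /path/to/configs
# Every <name>.py with a matching <name>.pkl next to it is tested, and the
# accumulated top-1/top-5 accuracies are written to result.json.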
|
[
"megengine.distributed.get_rank",
"megengine.device.get_device_count",
"megengine.functional.debug_param.set_execution_strategy",
"megengine.distributed.launcher"
] |
[((659, 684), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (682, 684), False, 'import argparse\n'), ((1017, 1042), 'os.path.abspath', 'os.path.abspath', (['args.dir'], {}), '(args.dir)\n', (1032, 1042), False, 'import os\n'), ((1047, 1105), 'basecls.utils.setup_logger', 'setup_logger', (['args.dir', '"""test_all_log.txt"""'], {'to_loguru': '(True)'}), "(args.dir, 'test_all_log.txt', to_loguru=True)\n", (1059, 1105), False, 'from basecls.utils import default_logging, registers, set_nccl_env, set_num_threads, setup_logger\n'), ((1110, 1138), 'loguru.logger.info', 'logger.info', (['f"""args: {args}"""'], {}), "(f'args: {args}')\n", (1121, 1138), False, 'from loguru import logger\n'), ((2228, 2244), 'basecls.models.build_model', 'build_model', (['cfg'], {}), '(cfg)\n', (2239, 2244), False, 'from basecls.models import build_model, load_model\n'), ((2249, 2279), 'basecls.models.load_model', 'load_model', (['model', 'cfg.weights'], {}), '(model, cfg.weights)\n', (2259, 2279), False, 'from basecls.models import build_model, load_model\n'), ((2302, 2329), 'basecls.utils.default_logging', 'default_logging', (['cfg', 'model'], {}), '(cfg, model)\n', (2317, 2329), False, 'from basecls.utils import default_logging, registers, set_nccl_env, set_num_threads, setup_logger\n'), ((2538, 2571), 'basecls.engine.ClsTester', 'ClsTester', (['cfg', 'model', 'dataloader'], {}), '(cfg, model, dataloader)\n', (2547, 2571), False, 'from basecls.engine import ClsTester\n'), ((2693, 2721), 'multiprocessing.set_start_method', 'mp.set_start_method', (['"""spawn"""'], {}), "('spawn')\n", (2712, 2721), True, 'import multiprocessing as mp\n'), ((2727, 2741), 'basecls.utils.set_nccl_env', 'set_nccl_env', ([], {}), '()\n', (2739, 2741), False, 'from basecls.utils import default_logging, registers, set_nccl_env, set_num_threads, setup_logger\n'), ((2746, 2763), 'basecls.utils.set_num_threads', 'set_num_threads', ([], {}), '()\n', (2761, 2763), False, 'from basecls.utils import default_logging, registers, set_nccl_env, set_num_threads, setup_logger\n'), ((2875, 2909), 'megengine.device.get_device_count', 'mge.device.get_device_count', (['"""gpu"""'], {}), "('gpu')\n", (2902, 2909), True, 'import megengine as mge\n'), ((1347, 1383), 'importlib.import_module', 'importlib.import_module', (['module_name'], {}), '(module_name)\n', (1370, 1383), False, 'import importlib\n'), ((1486, 1513), 'os.path.isfile', 'os.path.isfile', (['weight_path'], {}), '(weight_path)\n', (1500, 1513), False, 'import os\n'), ((1927, 1943), 'sys.path.pop', 'sys.path.pop', (['(-1)'], {}), '(-1)\n', (1939, 1943), False, 'import sys\n'), ((1961, 1989), 'json.dumps', 'json.dumps', (['result'], {'indent': '(4)'}), '(result, indent=4)\n', (1971, 1989), False, 'import json\n'), ((2039, 2059), 'json.dump', 'json.dump', (['result', 'f'], {}), '(result, f)\n', (2048, 2059), False, 'import json\n'), ((2776, 2800), 'os.path.exists', 'os.path.exists', (['args.dir'], {}), '(args.dir)\n', (2790, 2800), False, 'import os\n'), ((2945, 2995), 'loguru.logger.warning', 'logger.warning', (['"""No GPU was found, testing on CPU"""'], {}), "('No GPU was found, testing on CPU')\n", (2959, 2995), False, 'from loguru import logger\n'), ((1173, 1195), 'pathlib.Path', 'pathlib.Path', (['args.dir'], {}), '(args.dir)\n', (1185, 1195), False, 'import pathlib\n'), ((1237, 1255), 'os.path.dirname', 'os.path.dirname', (['f'], {}), '(f)\n', (1252, 1255), False, 'import os\n'), ((1579, 1595), 'sys.path.pop', 'sys.path.pop', (['(-1)'], {}), '(-1)\n', (1591, 1595), False, 'import sys\n'), ((1686, 1722), 'loguru.logger.info', 'logger.info', (['"""Using fastrun mode..."""'], {}), "('Using fastrun mode...')\n", (1697, 1722), False, 'from loguru import logger\n'), ((1735, 1795), 'megengine.functional.debug_param.set_execution_strategy', 'mge.functional.debug_param.set_execution_strategy', (['"""PROFILE"""'], {}), "('PROFILE')\n", (1784, 1795), True, 'import megengine as mge\n'), ((2348, 2388), 'basecls.utils.registers.dataloaders.get', 'registers.dataloaders.get', (['cfg.data.name'], {}), '(cfg.data.name)\n', (2373, 2388), False, 'from basecls.utils import default_logging, registers, set_nccl_env, set_num_threads, setup_logger\n'), ((3064, 3085), 'megengine.distributed.launcher', 'dist.launcher', (['worker'], {}), '(worker)\n', (3077, 3085), True, 'import megengine.distributed as dist\n'), ((978, 993), 'megengine.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (991, 993), True, 'import megengine.distributed as dist\n'), ((1297, 1316), 'os.path.basename', 'os.path.basename', (['f'], {}), '(f)\n', (1313, 1316), False, 'import os\n'), ((1446, 1465), 'os.path.splitext', 'os.path.splitext', (['f'], {}), '(f)\n', (1462, 1465), False, 'import os\n')]
|