code (string, lengths 110 to 64.5k) | apis (list) | extract_api (string, lengths 123 to 69.9k) |
---|---|---|
from unittest.mock import patch
from sqlmodel import create_engine
from ...conftest import get_testing_print_function
def test_tutorial(clear_sqlmodel):
from docs_src.tutorial.where import tutorial002 as mod
mod.sqlite_url = "sqlite://"
mod.engine = create_engine(mod.sqlite_url)
calls = []
new_print = get_testing_print_function(calls)
with patch("builtins.print", new=new_print):
mod.main()
assert calls == [
[
{
"name": "Spider-Boy",
"secret_name": "<NAME>",
"age": None,
"id": 2,
}
],
[{"name": "Rusty-Man", "secret_name": "<NAME>", "age": 48, "id": 3}],
]
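# Hedged sketch of what a helper like get_testing_print_function is assumed to
# do (a hypothetical stand-in, not the project's actual conftest implementation):
# it builds a replacement for print() that records each call's positional
# arguments into `calls`, so the assertion above can compare the captured rows.
def _example_recording_print(calls):
    def new_print(*args):
        # store the arguments of one print() call as a list, matching the
        # nested-list shape expected by the assertion above
        calls.append(list(args))
    return new_print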
|
[
"sqlmodel.create_engine"
] |
[((267, 296), 'sqlmodel.create_engine', 'create_engine', (['mod.sqlite_url'], {}), '(mod.sqlite_url)\n', (280, 296), False, 'from sqlmodel import create_engine\n'), ((373, 411), 'unittest.mock.patch', 'patch', (['"""builtins.print"""'], {'new': 'new_print'}), "('builtins.print', new=new_print)\n", (378, 411), False, 'from unittest.mock import patch\n'), ((421, 431), 'docs_src.tutorial.where.tutorial002.main', 'mod.main', ([], {}), '()\n', (429, 431), True, 'from docs_src.tutorial.where import tutorial002 as mod\n')]
|
# Reading data with SQLModel: all, first, one; plus update and delete
# https://sqlmodel.tiangolo.com/tutorial/select/ and the following chapters
from typing import Optional
from sqlmodel import Field, SQLModel, Session, create_engine, select
class Tag(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str
class ProductType(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str
sqlite_file_name = "database.db"
sqlite_url = f"sqlite:///{sqlite_file_name}"
engine = create_engine(sqlite_url, echo=True)
def create_db_and_tables():
SQLModel.metadata.create_all(engine)
def create_entities():
tag_offerta = Tag(name="Offerta")
tag_maionese = Tag(name="Con Maionese")
tipo_panino = ProductType(name="panino")
tipo_bibita = ProductType(name="bibita")
with Session(engine) as session:
session.add(tag_offerta)
session.add(tag_maionese)
session.add(tipo_panino)
session.add(tipo_bibita)
session.commit()
print("After committing the session")
print("Tag 1:", tag_offerta)
# No refresh, no print
print("Product Type 1:", tipo_panino)
# Automatic refresh
print("Product Type 1:", tipo_panino.name)
# Explicit refresh
session.refresh(tipo_bibita)
session.refresh(tag_maionese)
print("Product Type 2:", tipo_bibita)
print("After the session closes")
print("Tag 2:", tag_maionese)
def select_product_types():
with Session(engine) as session:
statement = select(ProductType)
results = session.exec(statement)
for product_type in results:
print("product_type:", product_type)
def select_product_type_panino():
with Session(engine) as session:
statement = select(ProductType).where(ProductType.name == 'panino')
results = session.exec(statement)
for product_type in results:
print("panino:", product_type)
def select_first_row_tag():
with Session(engine) as session:
statement = select(Tag).where(Tag.name == 'Offerta')
results = session.exec(statement)
tag = results.first()
print("first:", tag)
def select_all_tags():
with Session(engine) as session:
statement = select(Tag)
results = session.exec(statement)
tags = results.all()
print(tags)
def select_four_tags():
with Session(engine) as session:
statement = select(Tag).limit(4)
results = session.exec(statement)
tags = results.all()
print(tags)
def select_next_four_tags():
with Session(engine) as session:
statement = select(Tag).offset(4).limit(4)
results = session.exec(statement)
tags = results.all()
print(tags)
def update_tag():
with Session(engine) as session:
statement = select(Tag).where(Tag.name == "Con Maionese")
results = session.exec(statement)
# mayo = results.one()
# (.one() vs .first(): see the sketch after this function)
mayo = results.first()
print("Tag:", mayo)
mayo.name = "<NAME>"
session.add(mayo)
session.commit()
session.refresh(mayo)
print(mayo)
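# Hedged note on the alternative commented out in update_tag(): with standard
# SQLModel/SQLAlchemy semantics, .first() returns the first matching row or None,
# while .one() raises an error unless exactly one row matches.
# A minimal sketch, assuming the tags created in create_entities() exist:
def select_one_tag_strict():
    with Session(engine) as session:
        statement = select(Tag).where(Tag.name == "Offerta")
        # raises if zero or more than one matching row is found
        tag = session.exec(statement).one()
        print("one:", tag)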
def delete_tag():
with Session(engine) as session:
statement = select(Tag).where(Tag.name == "<NAME>")
results = session.exec(statement)
no_mayo = results.first()
print("no_mayo: ", no_mayo)
session.delete(no_mayo)
session.commit()
print("Deleted:", no_mayo)
statement = select(Tag).where(Tag.name == "<NAME>")
results = session.exec(statement)
no_mayo = results.first()
if no_mayo is None:
print("There's no no_mayo")
def main():
create_db_and_tables()
create_entities()
# select_product_types()
# select_product_type_panino()
# select_first_row_tag()
# select_all_tags()
# select_four_tags()
# select_next_four_tags()
# update_tag()
delete_tag()
if __name__ == "__main__":
main()
|
[
"sqlmodel.create_engine",
"sqlmodel.Session",
"sqlmodel.SQLModel.metadata.create_all",
"sqlmodel.select",
"sqlmodel.Field"
] |
[((536, 572), 'sqlmodel.create_engine', 'create_engine', (['sqlite_url'], {'echo': '(True)'}), '(sqlite_url, echo=True)\n', (549, 572), False, 'from sqlmodel import Field, SQLModel, Session, create_engine, select\n'), ((276, 313), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (281, 313), False, 'from sqlmodel import Field, SQLModel, Session, create_engine, select\n'), ((395, 432), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (400, 432), False, 'from sqlmodel import Field, SQLModel, Session, create_engine, select\n'), ((607, 643), 'sqlmodel.SQLModel.metadata.create_all', 'SQLModel.metadata.create_all', (['engine'], {}), '(engine)\n', (635, 643), False, 'from sqlmodel import Field, SQLModel, Session, create_engine, select\n'), ((850, 865), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (857, 865), False, 'from sqlmodel import Field, SQLModel, Session, create_engine, select\n'), ((1536, 1551), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (1543, 1551), False, 'from sqlmodel import Field, SQLModel, Session, create_engine, select\n'), ((1584, 1603), 'sqlmodel.select', 'select', (['ProductType'], {}), '(ProductType)\n', (1590, 1603), False, 'from sqlmodel import Field, SQLModel, Session, create_engine, select\n'), ((1777, 1792), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (1784, 1792), False, 'from sqlmodel import Field, SQLModel, Session, create_engine, select\n'), ((2042, 2057), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (2049, 2057), False, 'from sqlmodel import Field, SQLModel, Session, create_engine, select\n'), ((2266, 2281), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (2273, 2281), False, 'from sqlmodel import Field, SQLModel, Session, create_engine, select\n'), ((2314, 2325), 'sqlmodel.select', 'select', (['Tag'], {}), '(Tag)\n', (2320, 2325), False, 'from sqlmodel import Field, SQLModel, Session, create_engine, select\n'), ((2452, 2467), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (2459, 2467), False, 'from sqlmodel import Field, SQLModel, Session, create_engine, select\n'), ((2652, 2667), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (2659, 2667), False, 'from sqlmodel import Field, SQLModel, Session, create_engine, select\n'), ((2851, 2866), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (2858, 2866), False, 'from sqlmodel import Field, SQLModel, Session, create_engine, select\n'), ((3236, 3251), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (3243, 3251), False, 'from sqlmodel import Field, SQLModel, Session, create_engine, select\n'), ((1825, 1844), 'sqlmodel.select', 'select', (['ProductType'], {}), '(ProductType)\n', (1831, 1844), False, 'from sqlmodel import Field, SQLModel, Session, create_engine, select\n'), ((2090, 2101), 'sqlmodel.select', 'select', (['Tag'], {}), '(Tag)\n', (2096, 2101), False, 'from sqlmodel import Field, SQLModel, Session, create_engine, select\n'), ((2500, 2511), 'sqlmodel.select', 'select', (['Tag'], {}), '(Tag)\n', (2506, 2511), False, 'from sqlmodel import Field, SQLModel, Session, create_engine, select\n'), ((2899, 2910), 'sqlmodel.select', 'select', (['Tag'], {}), '(Tag)\n', (2905, 2910), False, 'from sqlmodel import Field, SQLModel, Session, create_engine, select\n'), ((3284, 3295), 'sqlmodel.select', 'select', (['Tag'], {}), '(Tag)\n', (3290, 3295), 
False, 'from sqlmodel import Field, SQLModel, Session, create_engine, select\n'), ((3548, 3559), 'sqlmodel.select', 'select', (['Tag'], {}), '(Tag)\n', (3554, 3559), False, 'from sqlmodel import Field, SQLModel, Session, create_engine, select\n'), ((2700, 2711), 'sqlmodel.select', 'select', (['Tag'], {}), '(Tag)\n', (2706, 2711), False, 'from sqlmodel import Field, SQLModel, Session, create_engine, select\n')]
|
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# pylint: disable=import-error,no-name-in-module,no-member
from test.utils import LinearOpr
import megengine as mge
import megengine.module as M
import numpy as np
from megengine.core.tensor import dtype
from megengine.core.tensor.dtype import _builtin_quant_dtypes
from megengine.module.quant_dequant import QuantStub
from megengine.quantization.quantize import quantize_qat
from megengine.quantization.utils import create_qparams
from megengine.traced_module.fake_quant import FakeQuantize
from .test_caffe import _test_convert_result
from .tm_utils import get_traced_module
max_err = 1e-6
def get_qat_net(inp_dtype, net, num_inp=1, shape=(1, 16, 32, 32)):
qat_net = quantize_qat(net)
inps = []
for _ in range(num_inp):
data1 = mge.tensor(np.random.random(shape)) * 16
data1 = data1.astype(inp_dtype)
inp1 = mge.tensor(dtype.convert_from_qint8(data1.numpy()))
inp1.qparams.scale = mge.tensor(dtype.get_scale(inp_dtype))
inp1.qparams.dtype_meta = dtype._builtin_quant_dtypes["qint8"]
inps.append(inp1)
return qat_net, inps
def get_qat_inputs_quint8(inp_dtype, num_inp=1, shape=(1, 16, 384, 512)):
inps = []
for _ in range(num_inp):
data1 = mge.tensor(np.random.random(shape)) * 16
data1 = data1.astype(inp_dtype)
inp1 = mge.tensor(dtype.convert_from_quint8(data1.numpy()))
inp1.qparams.scale = mge.tensor(dtype.get_scale(inp_dtype))
inp1.qparams.zero_point = mge.tensor(dtype.get_zero_point(inp_dtype))
inp1.qparams.dtype_meta = dtype._builtin_quant_dtypes["quint8"]
inps.append(inp1)
return inps
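# Hedged arithmetic note on the scale used by these helpers and tests:
# scale = 16.0 / 128.0 = 0.125, so qint8 values in [-128, 127] map to reals in
# roughly [-16.0, 15.875] (and quint8 with zero_point=128 covers the same range),
# which matches the `* 16` scaling applied to the random inputs above.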
def test_linear():
net = LinearOpr()
inp_dtype = dtype.qint8(16.0 / 128.0)
qat_net, inps = get_qat_net(inp_dtype, net, shape=(10, 100))
traced_module, tm_result = get_traced_module(qat_net, inps[0])
inp = inps[0].astype(inp_dtype)
_test_convert_result(inp, traced_module, tm_result, max_err, require_quantize=False)
def test_add():
class ElemwiseOpr(M.Module):
def __init__(self,):
super().__init__()
self.data = np.ones((2, 3, 224, 224)).astype(np.float32)
self.data1 = np.random.random((1, 3, 1, 1)).astype(np.float32)
self.add1 = M.Elemwise("add")
self.add2 = M.Elemwise("add")
self.add3 = M.Elemwise("add")
scale = mge.tensor((16.0 / 128.0))
self.quant_stub = QuantStub()
self.quant_stub.act_fake_quant = FakeQuantize(
_builtin_quant_dtypes["qint8"]
)
self.quant_stub.act_fake_quant.set_qparams(
create_qparams(
dtype_meta=_builtin_quant_dtypes["qint8"],
scale=scale,
zero_point=None,
)
)
self.quant_stub1 = QuantStub()
self.quant_stub1.act_fake_quant = FakeQuantize(
_builtin_quant_dtypes["qint8"]
)
self.quant_stub1.act_fake_quant.set_qparams(
create_qparams(
dtype_meta=_builtin_quant_dtypes["qint8"],
scale=scale,
zero_point=None,
)
)
def forward(self, a):
n = self.quant_stub(mge.tensor(np.float32(10)))
data1 = self.quant_stub1(mge.tensor(self.data1))
x = self.add1(a, n)
y = self.add2(a, data1)
z = self.add3(x, y)
return z
net = ElemwiseOpr()
inp_dtype = dtype.qint8(16.0 / 128.0)
qat_net, inps = get_qat_net(inp_dtype, net, shape=(1, 3, 1, 1))
traced_module, tm_result = get_traced_module(qat_net, inps[0])
print(traced_module.flatten().graph)
inp = inps[0].astype(inp_dtype)
_test_convert_result(
inp,
traced_module,
tm_result,
max_err,
require_quantize=False,
split_conv_relu=True,
)
def test_det_model():
net = mge.load("models_fire_det.fix_batch.fuse_scale_cpu.pkl")
inp_dtype = dtype.qint8(16.0 / 128.0)
qat_net, inps = get_qat_net(inp_dtype, net, shape=(1, 3, 512, 512))
traced_module, tm_result = get_traced_module(qat_net, inps[0])
inp = inps[0].astype(inp_dtype)
_test_convert_result(inp, traced_module, tm_result, max_err, require_quantize=False)
def test_snpe_model_8f():
model = "8w16f_backbone.tm"
net = mge.load(model)
print(net.flatten().graph)
inp_dtype = dtype.quint8(16.0 / 128.0, 128)
inps = get_qat_inputs_quint8(inp_dtype, num_inp=2, shape=(1, 16, 384, 512))
tm_result = dict(zip(net.graph.outputs, net(*inps)))
_test_convert_result(
inps,
net,
tm_result,
max_err,
input_data_type="quint8",
input_scales=inps[0].qparams.scale,
input_zero_points=inps[0].qparams.zero_point,
require_quantize=False,
param_fake_quant=True,
split_conv_relu=True,
input_name=["inp", "prev"],
)
|
[
"megengine.core.tensor.dtype.get_scale",
"megengine.quantization.quantize.quantize_qat",
"megengine.tensor",
"megengine.core.tensor.dtype.get_zero_point",
"megengine.core.tensor.dtype.qint8",
"megengine.core.tensor.dtype.quint8",
"megengine.module.quant_dequant.QuantStub",
"megengine.quantization.utils.create_qparams",
"megengine.module.Elemwise",
"megengine.traced_module.fake_quant.FakeQuantize",
"megengine.load"
] |
[((1033, 1050), 'megengine.quantization.quantize.quantize_qat', 'quantize_qat', (['net'], {}), '(net)\n', (1045, 1050), False, 'from megengine.quantization.quantize import quantize_qat\n'), ((2023, 2034), 'test.utils.LinearOpr', 'LinearOpr', ([], {}), '()\n', (2032, 2034), False, 'from test.utils import LinearOpr\n'), ((2051, 2076), 'megengine.core.tensor.dtype.qint8', 'dtype.qint8', (['(16.0 / 128.0)'], {}), '(16.0 / 128.0)\n', (2062, 2076), False, 'from megengine.core.tensor import dtype\n'), ((3910, 3935), 'megengine.core.tensor.dtype.qint8', 'dtype.qint8', (['(16.0 / 128.0)'], {}), '(16.0 / 128.0)\n', (3921, 3935), False, 'from megengine.core.tensor import dtype\n'), ((4348, 4404), 'megengine.load', 'mge.load', (['"""models_fire_det.fix_batch.fuse_scale_cpu.pkl"""'], {}), "('models_fire_det.fix_batch.fuse_scale_cpu.pkl')\n", (4356, 4404), True, 'import megengine as mge\n'), ((4421, 4446), 'megengine.core.tensor.dtype.qint8', 'dtype.qint8', (['(16.0 / 128.0)'], {}), '(16.0 / 128.0)\n', (4432, 4446), False, 'from megengine.core.tensor import dtype\n'), ((4781, 4796), 'megengine.load', 'mge.load', (['model'], {}), '(model)\n', (4789, 4796), True, 'import megengine as mge\n'), ((4844, 4875), 'megengine.core.tensor.dtype.quint8', 'dtype.quint8', (['(16.0 / 128.0)', '(128)'], {}), '(16.0 / 128.0, 128)\n', (4856, 4875), False, 'from megengine.core.tensor import dtype\n'), ((1298, 1324), 'megengine.core.tensor.dtype.get_scale', 'dtype.get_scale', (['inp_dtype'], {}), '(inp_dtype)\n', (1313, 1324), False, 'from megengine.core.tensor import dtype\n'), ((1772, 1798), 'megengine.core.tensor.dtype.get_scale', 'dtype.get_scale', (['inp_dtype'], {}), '(inp_dtype)\n', (1787, 1798), False, 'from megengine.core.tensor import dtype\n'), ((1845, 1876), 'megengine.core.tensor.dtype.get_zero_point', 'dtype.get_zero_point', (['inp_dtype'], {}), '(inp_dtype)\n', (1865, 1876), False, 'from megengine.core.tensor import dtype\n'), ((2613, 2630), 'megengine.module.Elemwise', 'M.Elemwise', (['"""add"""'], {}), "('add')\n", (2623, 2630), True, 'import megengine.module as M\n'), ((2655, 2672), 'megengine.module.Elemwise', 'M.Elemwise', (['"""add"""'], {}), "('add')\n", (2665, 2672), True, 'import megengine.module as M\n'), ((2697, 2714), 'megengine.module.Elemwise', 'M.Elemwise', (['"""add"""'], {}), "('add')\n", (2707, 2714), True, 'import megengine.module as M\n'), ((2736, 2760), 'megengine.tensor', 'mge.tensor', (['(16.0 / 128.0)'], {}), '(16.0 / 128.0)\n', (2746, 2760), True, 'import megengine as mge\n'), ((2793, 2804), 'megengine.module.quant_dequant.QuantStub', 'QuantStub', ([], {}), '()\n', (2802, 2804), False, 'from megengine.module.quant_dequant import QuantStub\n'), ((2850, 2894), 'megengine.traced_module.fake_quant.FakeQuantize', 'FakeQuantize', (["_builtin_quant_dtypes['qint8']"], {}), "(_builtin_quant_dtypes['qint8'])\n", (2862, 2894), False, 'from megengine.traced_module.fake_quant import FakeQuantize\n'), ((3209, 3220), 'megengine.module.quant_dequant.QuantStub', 'QuantStub', ([], {}), '()\n', (3218, 3220), False, 'from megengine.module.quant_dequant import QuantStub\n'), ((3267, 3311), 'megengine.traced_module.fake_quant.FakeQuantize', 'FakeQuantize', (["_builtin_quant_dtypes['qint8']"], {}), "(_builtin_quant_dtypes['qint8'])\n", (3279, 3311), False, 'from megengine.traced_module.fake_quant import FakeQuantize\n'), ((1121, 1144), 'numpy.random.random', 'np.random.random', (['shape'], {}), '(shape)\n', (1137, 1144), True, 'import numpy as np\n'), ((1594, 1617), 'numpy.random.random', 
'np.random.random', (['shape'], {}), '(shape)\n', (1610, 1617), True, 'import numpy as np\n'), ((2997, 3088), 'megengine.quantization.utils.create_qparams', 'create_qparams', ([], {'dtype_meta': "_builtin_quant_dtypes['qint8']", 'scale': 'scale', 'zero_point': 'None'}), "(dtype_meta=_builtin_quant_dtypes['qint8'], scale=scale,\n zero_point=None)\n", (3011, 3088), False, 'from megengine.quantization.utils import create_qparams\n'), ((3415, 3506), 'megengine.quantization.utils.create_qparams', 'create_qparams', ([], {'dtype_meta': "_builtin_quant_dtypes['qint8']", 'scale': 'scale', 'zero_point': 'None'}), "(dtype_meta=_builtin_quant_dtypes['qint8'], scale=scale,\n zero_point=None)\n", (3429, 3506), False, 'from megengine.quantization.utils import create_qparams\n'), ((3724, 3746), 'megengine.tensor', 'mge.tensor', (['self.data1'], {}), '(self.data1)\n', (3734, 3746), True, 'import megengine as mge\n'), ((2469, 2494), 'numpy.ones', 'np.ones', (['(2, 3, 224, 224)'], {}), '((2, 3, 224, 224))\n', (2476, 2494), True, 'import numpy as np\n'), ((2539, 2569), 'numpy.random.random', 'np.random.random', (['(1, 3, 1, 1)'], {}), '((1, 3, 1, 1))\n', (2555, 2569), True, 'import numpy as np\n'), ((3670, 3684), 'numpy.float32', 'np.float32', (['(10)'], {}), '(10)\n', (3680, 3684), True, 'import numpy as np\n')]
|
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import pickle
import numpy as np
import pytest
from megengine.core.tensor.dtype import intb1, intb2, intb4
from megengine.tensor import Tensor
def bit_define_test(bit, low_bit_type):
max_value = (1 << bit) - 1
min_value = 1 - (1 << bit)
a = np.array([i for i in range(min_value, max_value + 2, 2)], dtype=low_bit_type)
for i in range(max_value + 1):
np.testing.assert_equal(a[i], i * 2 - max_value)
np.testing.assert_equal(str(a[i]), str(i * 2 - max_value))
with pytest.raises(ValueError):
np.arange(min_value, max_value, dtype=low_bit_type)
with pytest.raises(ValueError):
np.arange(min_value - 2, max_value + 4, 2, dtype=low_bit_type)
np.testing.assert_allclose(
np.arange(min_value, 12, 2, dtype=low_bit_type),
(np.arange((13 - min_value) // 2, dtype=np.int8) % (max_value + 1)) * 2
- max_value,
)
np.testing.assert_allclose(
np.arange(max_value, max_value - 20, -2, dtype=low_bit_type),
(np.arange(max_value, max_value - 10, -1, dtype=np.int8) % (max_value + 1)) * 2
- max_value,
)
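# Worked example of the ranges exercised above (hedged arithmetic note, using
# intb2, i.e. bit=2): max_value = (1 << 2) - 1 = 3 and min_value = 1 - (1 << 2) = -3,
# so the representable values, stepped by 2, are [-3, -1, 1, 3].
assert list(range(1 - (1 << 2), (1 << 2) - 1 + 2, 2)) == [-3, -1, 1, 3]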
def test_define():
bit_define_test(1, intb1)
bit_define_test(2, intb2)
bit_define_test(4, intb4)
def _bit_cast_test(bit, low_bit_type):
dtypes = [np.int8, np.int16, np.int32, np.float32, np.float64]
max_value = (1 << bit) - 1
min_value = 1 - (1 << bit)
for dtype in dtypes:
np.testing.assert_allclose(
np.arange(min_value, max_value + 2, 2, dtype=low_bit_type).astype(dtype),
np.arange(min_value, max_value + 2, 2, dtype=dtype),
)
with pytest.raises(ValueError):
np.array([2, 1, -1], dtype=int).astype(low_bit_type)
with pytest.raises(ValueError):
np.array([min_value - 2, 1, max_value + 2], dtype=int).astype(low_bit_type)
def test_cast():
_bit_cast_test(1, intb1)
_bit_cast_test(2, intb2)
_bit_cast_test(4, intb4)
def _shared_nd_test(bit, low_bit_type):
max_value = (1 << bit) - 1
min_value = 1 - (1 << bit)
data = np.arange(min_value, max_value + 2, 2, dtype=low_bit_type)
snd = Tensor(data, dtype=low_bit_type, device="xpux")
np.testing.assert_allclose(snd.numpy(), range(min_value, max_value + 2, 2))
data = np.arange(min_value, max_value + 2, 4, dtype=low_bit_type)
snd = Tensor(data, dtype=low_bit_type, device="xpux")
np.testing.assert_allclose(snd.numpy(), range(min_value, max_value + 2, 4))
def test_shared_nd():
_shared_nd_test(1, intb1)
_shared_nd_test(2, intb2)
_shared_nd_test(4, intb4)
def test_pickle():
x = np.ascontiguousarray(np.random.randint(2, size=8192) * 2 - 1, dtype=intb1)
pkl = pickle.dumps(x, pickle.HIGHEST_PROTOCOL)
y = pickle.loads(pkl)
assert x.dtype is y.dtype
np.testing.assert_allclose(x.astype(np.float32), y.astype(np.float32))
|
[
"megengine.tensor.Tensor"
] |
[((2414, 2472), 'numpy.arange', 'np.arange', (['min_value', '(max_value + 2)', '(2)'], {'dtype': 'low_bit_type'}), '(min_value, max_value + 2, 2, dtype=low_bit_type)\n', (2423, 2472), True, 'import numpy as np\n'), ((2483, 2530), 'megengine.tensor.Tensor', 'Tensor', (['data'], {'dtype': 'low_bit_type', 'device': '"""xpux"""'}), "(data, dtype=low_bit_type, device='xpux')\n", (2489, 2530), False, 'from megengine.tensor import Tensor\n'), ((2623, 2681), 'numpy.arange', 'np.arange', (['min_value', '(max_value + 2)', '(4)'], {'dtype': 'low_bit_type'}), '(min_value, max_value + 2, 4, dtype=low_bit_type)\n', (2632, 2681), True, 'import numpy as np\n'), ((2692, 2739), 'megengine.tensor.Tensor', 'Tensor', (['data'], {'dtype': 'low_bit_type', 'device': '"""xpux"""'}), "(data, dtype=low_bit_type, device='xpux')\n", (2698, 2739), False, 'from megengine.tensor import Tensor\n'), ((3048, 3088), 'pickle.dumps', 'pickle.dumps', (['x', 'pickle.HIGHEST_PROTOCOL'], {}), '(x, pickle.HIGHEST_PROTOCOL)\n', (3060, 3088), False, 'import pickle\n'), ((3097, 3114), 'pickle.loads', 'pickle.loads', (['pkl'], {}), '(pkl)\n', (3109, 3114), False, 'import pickle\n'), ((734, 782), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['a[i]', '(i * 2 - max_value)'], {}), '(a[i], i * 2 - max_value)\n', (757, 782), True, 'import numpy as np\n'), ((860, 885), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (873, 885), False, 'import pytest\n'), ((895, 946), 'numpy.arange', 'np.arange', (['min_value', 'max_value'], {'dtype': 'low_bit_type'}), '(min_value, max_value, dtype=low_bit_type)\n', (904, 946), True, 'import numpy as np\n'), ((957, 982), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (970, 982), False, 'import pytest\n'), ((992, 1054), 'numpy.arange', 'np.arange', (['(min_value - 2)', '(max_value + 4)', '(2)'], {'dtype': 'low_bit_type'}), '(min_value - 2, max_value + 4, 2, dtype=low_bit_type)\n', (1001, 1054), True, 'import numpy as np\n'), ((1096, 1143), 'numpy.arange', 'np.arange', (['min_value', '(12)', '(2)'], {'dtype': 'low_bit_type'}), '(min_value, 12, 2, dtype=low_bit_type)\n', (1105, 1143), True, 'import numpy as np\n'), ((1293, 1353), 'numpy.arange', 'np.arange', (['max_value', '(max_value - 20)', '(-2)'], {'dtype': 'low_bit_type'}), '(max_value, max_value - 20, -2, dtype=low_bit_type)\n', (1302, 1353), True, 'import numpy as np\n'), ((1984, 2009), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1997, 2009), False, 'import pytest\n'), ((2081, 2106), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2094, 2106), False, 'import pytest\n'), ((1911, 1962), 'numpy.arange', 'np.arange', (['min_value', '(max_value + 2)', '(2)'], {'dtype': 'dtype'}), '(min_value, max_value + 2, 2, dtype=dtype)\n', (1920, 1962), True, 'import numpy as np\n'), ((2019, 2050), 'numpy.array', 'np.array', (['[2, 1, -1]'], {'dtype': 'int'}), '([2, 1, -1], dtype=int)\n', (2027, 2050), True, 'import numpy as np\n'), ((2116, 2170), 'numpy.array', 'np.array', (['[min_value - 2, 1, max_value + 2]'], {'dtype': 'int'}), '([min_value - 2, 1, max_value + 2], dtype=int)\n', (2124, 2170), True, 'import numpy as np\n'), ((2984, 3015), 'numpy.random.randint', 'np.random.randint', (['(2)'], {'size': '(8192)'}), '(2, size=8192)\n', (3001, 3015), True, 'import numpy as np\n'), ((1154, 1201), 'numpy.arange', 'np.arange', (['((13 - min_value) // 2)'], {'dtype': 'np.int8'}), '((13 - min_value) // 2, dtype=np.int8)\n', (1163, 1201), True, 'import numpy 
as np\n'), ((1364, 1419), 'numpy.arange', 'np.arange', (['max_value', '(max_value - 10)', '(-1)'], {'dtype': 'np.int8'}), '(max_value, max_value - 10, -1, dtype=np.int8)\n', (1373, 1419), True, 'import numpy as np\n'), ((1825, 1883), 'numpy.arange', 'np.arange', (['min_value', '(max_value + 2)', '(2)'], {'dtype': 'low_bit_type'}), '(min_value, max_value + 2, 2, dtype=low_bit_type)\n', (1834, 1883), True, 'import numpy as np\n')]
|
from sqlmodel import SQLModel, create_engine
from sqlalchemy.orm import sessionmaker
from sqlmodel.ext.asyncio.session import AsyncSession, AsyncEngine
from app.settings import Settings
settings = Settings()
engine = AsyncEngine(create_engine(settings.ASYNC_DATABASE_URI, echo=True, future=True))
async def init_db():
async with engine.begin() as conn:
# await conn.run_sync(SQLModel.metadata.drop_all)
await conn.run_sync(SQLModel.metadata.create_all)
async def get_session() -> AsyncSession:
async_session = sessionmaker(
engine, class_=AsyncSession, expire_on_commit=False
)
async with async_session() as session:
yield session
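# A minimal usage sketch for get_session (assumption: FastAPI is the intended
# consumer; the app and the /ping route below are hypothetical, not part of this module).
from fastapi import Depends, FastAPI

app = FastAPI()

@app.get("/ping")
async def ping(session: AsyncSession = Depends(get_session)) -> dict:
    # get_session() yields one AsyncSession per request and releases it when the
    # request finishes, because the dependency is an async generator.
    return {"ok": True}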
|
[
"sqlmodel.create_engine"
] |
[((198, 208), 'app.settings.Settings', 'Settings', ([], {}), '()\n', (206, 208), False, 'from app.settings import Settings\n'), ((231, 297), 'sqlmodel.create_engine', 'create_engine', (['settings.ASYNC_DATABASE_URI'], {'echo': '(True)', 'future': '(True)'}), '(settings.ASYNC_DATABASE_URI, echo=True, future=True)\n', (244, 297), False, 'from sqlmodel import SQLModel, create_engine\n'), ((539, 604), 'sqlalchemy.orm.sessionmaker', 'sessionmaker', (['engine'], {'class_': 'AsyncSession', 'expire_on_commit': '(False)'}), '(engine, class_=AsyncSession, expire_on_commit=False)\n', (551, 604), False, 'from sqlalchemy.orm import sessionmaker\n')]
|
"""
Biot problem with the non-penetration BC on the Walls boundary region.
The non-penetration condition is enforced weakly using the Lagrange
multiplier approach. There is also a rigid body movement constraint
imposed on the Outlet region using the linear combination boundary
conditions.
"""
from biot_npbc import cinc_simple, define_regions, get_pars
def define():
from sfepy import data_dir
filename = data_dir + '/meshes/3d/cylinder.mesh'
output_dir = 'output'
return define_input(filename, output_dir)
def post_process(out, pb, state, extend=False):
from sfepy.base.base import Struct
dvel = pb.evaluate('de_diffusion_velocity.2.Omega( m.K, p )')
out['dvel'] = Struct(name='output_data', var_name='p',
mode='cell', data=dvel, dofs=None)
stress = pb.evaluate('de_cauchy_stress.2.Omega( m.D, u )')
out['cauchy_stress'] = Struct(name='output_data', var_name='u',
mode='cell', data=stress, dofs=None)
return out
def define_input(filename, output_dir):
filename_mesh = filename
options = {
'output_dir' : output_dir,
'output_format' : 'vtk',
'post_process_hook' : 'post_process',
## 'file_per_var' : True,
'ls' : 'ls',
'nls' : 'newton',
}
functions = {
'cinc_simple0' : (lambda coors, domain:
cinc_simple(coors, 0),),
'cinc_simple1' : (lambda coors, domain:
cinc_simple(coors, 1),),
'cinc_simple2' : (lambda coors, domain:
cinc_simple(coors, 2),),
'get_pars' : (lambda ts, coors, mode=None, region=None, ig=None:
get_pars(ts, coors, mode, region, ig,
output_dir=output_dir),),
}
regions, dim = define_regions(filename_mesh)
fields = {
'displacement': ('real', 'vector', 'Omega', 1),
'pressure': ('real', 'scalar', 'Omega', 1),
'multiplier': ('real', 'scalar', ('Walls', 'surface'), 1),
}
variables = {
'u' : ('unknown field', 'displacement', 0),
'v' : ('test field', 'displacement', 'u'),
'p' : ('unknown field', 'pressure', 1),
'q' : ('test field', 'pressure', 'p'),
'ul' : ('unknown field', 'multiplier', 2),
'vl' : ('test field', 'multiplier', 'ul'),
}
ebcs = {
'inlet' : ('Inlet', {'p.0' : 1.0, 'u.all' : 0.0}),
'outlet' : ('Outlet', {'p.0' : -1.0}),
}
lcbcs = {
'rigid' : ('Outlet', {'u.all' : 'rigid'}),
}
materials = {
'm' : 'get_pars',
}
equations = {
'eq_1' :
"""dw_lin_elastic.2.Omega( m.D, v, u )
- dw_biot.2.Omega( m.alpha, v, p )
+ dw_non_penetration.2.Walls( v, ul )
= 0""",
'eq_2' :
"""dw_biot.2.Omega( m.alpha, u, q )
+ dw_diffusion.2.Omega( m.K, q, p )
= 0""",
'eq_3' :
"""dw_non_penetration.2.Walls( u, vl )
= 0""",
}
solvers = {
'ls' : ('ls.scipy_direct', {}),
'newton' : ('nls.newton', {}),
}
return locals()
|
[
"sfepy.base.base.Struct"
] |
[((699, 774), 'sfepy.base.base.Struct', 'Struct', ([], {'name': '"""output_data"""', 'var_name': '"""p"""', 'mode': '"""cell"""', 'data': 'dvel', 'dofs': 'None'}), "(name='output_data', var_name='p', mode='cell', data=dvel, dofs=None)\n", (705, 774), False, 'from sfepy.base.base import Struct\n'), ((891, 968), 'sfepy.base.base.Struct', 'Struct', ([], {'name': '"""output_data"""', 'var_name': '"""u"""', 'mode': '"""cell"""', 'data': 'stress', 'dofs': 'None'}), "(name='output_data', var_name='u', mode='cell', data=stress, dofs=None)\n", (897, 968), False, 'from sfepy.base.base import Struct\n'), ((1838, 1867), 'biot_npbc.define_regions', 'define_regions', (['filename_mesh'], {}), '(filename_mesh)\n', (1852, 1867), False, 'from biot_npbc import cinc_simple, define_regions, get_pars\n'), ((1400, 1421), 'biot_npbc.cinc_simple', 'cinc_simple', (['coors', '(0)'], {}), '(coors, 0)\n', (1411, 1421), False, 'from biot_npbc import cinc_simple, define_regions, get_pars\n'), ((1499, 1520), 'biot_npbc.cinc_simple', 'cinc_simple', (['coors', '(1)'], {}), '(coors, 1)\n', (1510, 1520), False, 'from biot_npbc import cinc_simple, define_regions, get_pars\n'), ((1598, 1619), 'biot_npbc.cinc_simple', 'cinc_simple', (['coors', '(2)'], {}), '(coors, 2)\n', (1609, 1619), False, 'from biot_npbc import cinc_simple, define_regions, get_pars\n'), ((1718, 1778), 'biot_npbc.get_pars', 'get_pars', (['ts', 'coors', 'mode', 'region', 'ig'], {'output_dir': 'output_dir'}), '(ts, coors, mode, region, ig, output_dir=output_dir)\n', (1726, 1778), False, 'from biot_npbc import cinc_simple, define_regions, get_pars\n')]
|
from typing import Optional
from sqlalchemy import create_engine, select
from sqlalchemy.orm import Session
from sqlmodel import Field, SQLModel
def test_allow_instantiation_without_arguments(clear_sqlmodel):
class Item(SQLModel):
id: Optional[int] = Field(default=None, primary_key=True)
name: str
description: Optional[str] = None
class Config:
table = True
engine = create_engine("sqlite:///:memory:")
SQLModel.metadata.create_all(engine)
with Session(engine) as db:
item = Item()
item.name = "Rick"
db.add(item)
db.commit()
result = db.execute(select(Item)).scalars().all()
assert len(result) == 1
assert isinstance(item.id, int)
SQLModel.metadata.clear()
|
[
"sqlmodel.SQLModel.metadata.create_all",
"sqlmodel.Field",
"sqlmodel.SQLModel.metadata.clear"
] |
[((426, 461), 'sqlalchemy.create_engine', 'create_engine', (['"""sqlite:///:memory:"""'], {}), "('sqlite:///:memory:')\n", (439, 461), False, 'from sqlalchemy import create_engine, select\n'), ((466, 502), 'sqlmodel.SQLModel.metadata.create_all', 'SQLModel.metadata.create_all', (['engine'], {}), '(engine)\n', (494, 502), False, 'from sqlmodel import Field, SQLModel\n'), ((751, 776), 'sqlmodel.SQLModel.metadata.clear', 'SQLModel.metadata.clear', ([], {}), '()\n', (774, 776), False, 'from sqlmodel import Field, SQLModel\n'), ((266, 303), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (271, 303), False, 'from sqlmodel import Field, SQLModel\n'), ((512, 527), 'sqlalchemy.orm.Session', 'Session', (['engine'], {}), '(engine)\n', (519, 527), False, 'from sqlalchemy.orm import Session\n'), ((653, 665), 'sqlalchemy.select', 'select', (['Item'], {}), '(Item)\n', (659, 665), False, 'from sqlalchemy import create_engine, select\n')]
|
import math
import megengine.module as M
import megengine.functional as F
class PositionEncodingSine(M.Module):
"""
This is a sinusoidal position encoding generalized to 2-dimensional images.
(A usage sketch follows the class definition.)
"""
def __init__(self, d_model, max_shape=(256, 256)):
"""
Args:
max_shape (tuple): for 1/8 featmap, the max length of 256 corresponds to 2048 pixels
"""
super().__init__()
pe = F.zeros((d_model, *max_shape))
y_position = F.expand_dims(F.cumsum(F.ones(max_shape), 0), 0)
x_position = F.expand_dims(F.cumsum(F.ones(max_shape), 1), 0)
div_term = F.exp(
F.arange(0, d_model // 2, 2) * (-math.log(10000.0) / d_model // 2)
)
div_term = F.expand_dims(div_term, (1, 2)) # [C//4, 1, 1]
pe[0::4, :, :] = F.sin(x_position * div_term)
pe[1::4, :, :] = F.cos(x_position * div_term)
pe[2::4, :, :] = F.sin(y_position * div_term)
pe[3::4, :, :] = F.cos(y_position * div_term)
self.pe = F.expand_dims(pe, 0)
def forward(self, x):
"""
Args:
x: [N, C, H, W]
"""
return x + self.pe[:, :, : x.shape[2], : x.shape[3]].to(x.device)
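# A minimal usage sketch (hedged: d_model=256 and the 1/8-resolution feature-map
# shape below are illustrative values, not taken from the original code).
if __name__ == "__main__":
    import numpy as np
    import megengine as mge

    pos_encoding = PositionEncodingSine(d_model=256)
    feat = mge.tensor(np.random.random((1, 256, 60, 80)).astype("float32"))
    out = pos_encoding(feat)  # adds the precomputed encoding element-wise
    print(out.shape)  # expected: (1, 256, 60, 80)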
|
[
"megengine.functional.arange",
"megengine.functional.cos",
"megengine.functional.zeros",
"megengine.functional.ones",
"megengine.functional.expand_dims",
"megengine.functional.sin"
] |
[((446, 476), 'megengine.functional.zeros', 'F.zeros', (['(d_model, *max_shape)'], {}), '((d_model, *max_shape))\n', (453, 476), True, 'import megengine.functional as F\n'), ((751, 782), 'megengine.functional.expand_dims', 'F.expand_dims', (['div_term', '(1, 2)'], {}), '(div_term, (1, 2))\n', (764, 782), True, 'import megengine.functional as F\n'), ((824, 852), 'megengine.functional.sin', 'F.sin', (['(x_position * div_term)'], {}), '(x_position * div_term)\n', (829, 852), True, 'import megengine.functional as F\n'), ((878, 906), 'megengine.functional.cos', 'F.cos', (['(x_position * div_term)'], {}), '(x_position * div_term)\n', (883, 906), True, 'import megengine.functional as F\n'), ((932, 960), 'megengine.functional.sin', 'F.sin', (['(y_position * div_term)'], {}), '(y_position * div_term)\n', (937, 960), True, 'import megengine.functional as F\n'), ((986, 1014), 'megengine.functional.cos', 'F.cos', (['(y_position * div_term)'], {}), '(y_position * div_term)\n', (991, 1014), True, 'import megengine.functional as F\n'), ((1034, 1054), 'megengine.functional.expand_dims', 'F.expand_dims', (['pe', '(0)'], {}), '(pe, 0)\n', (1047, 1054), True, 'import megengine.functional as F\n'), ((521, 538), 'megengine.functional.ones', 'F.ones', (['max_shape'], {}), '(max_shape)\n', (527, 538), True, 'import megengine.functional as F\n'), ((591, 608), 'megengine.functional.ones', 'F.ones', (['max_shape'], {}), '(max_shape)\n', (597, 608), True, 'import megengine.functional as F\n'), ((655, 683), 'megengine.functional.arange', 'F.arange', (['(0)', '(d_model // 2)', '(2)'], {}), '(0, d_model // 2, 2)\n', (663, 683), True, 'import megengine.functional as F\n'), ((688, 705), 'math.log', 'math.log', (['(10000.0)'], {}), '(10000.0)\n', (696, 705), False, 'import math\n')]
|
from typing import Optional
from sqlmodel import Field, SQLModel
class Example(SQLModel, table=True):
"""测试一下"""
id: Optional[int] = Field(default=None, primary_key=True)
message: str
|
[
"sqlmodel.Field"
] |
[((145, 182), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (150, 182), False, 'from sqlmodel import Field, SQLModel\n')]
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from io import BytesIO
import numpy as np
from helpers import MLP, graph_mode
import megengine.functional as F
from megengine import load, optimizer, save
from megengine.core import TensorDict, tensor
from megengine.jit import trace
from megengine.test import assertTensorClose
def get_input():
batch_size, input_dim = 2, 28
data_shape, label_shape = (batch_size, input_dim), (batch_size,)
data, label = tensor(dtype=np.float32), tensor(dtype=np.int32)
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
return data, data_shape, label, label_shape
@graph_mode("eager", "static")
def test_optimizer_serialization():
data, data_shape, label, label_shape = get_input()
mlp = MLP()
opt = optimizer.SGD(mlp.parameters(), lr=0.01, momentum=0.9)
slots = TensorDict()
for param in mlp.parameters():
slots[param] = np.zeros(param.shape).astype(np.float32)
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.zero_grad()
opt.backward(loss)
opt.step()
for param in mlp.parameters():
slots[param] = slots[param] * 0.9 + param.grad.numpy()
with BytesIO() as fout:
save(opt.state_dict(), fout)
fout.seek(0)
state_dict = load(fout)
opt1 = optimizer.SGD(mlp.parameters(), lr=0.02, momentum=0.8)
opt1.load_state_dict(state_dict)
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt1.zero_grad()
opt1.backward(loss)
orig_params = TensorDict()
for param in mlp.parameters():
orig_params[param] = np.copy(param.numpy())
opt1.step()
for param in mlp.parameters():
orig_param = orig_params[param]
slots[param] = slots[param] * 0.9 + param.grad.numpy()
assertTensorClose(param.numpy(), orig_param - 0.01 * slots[param])
def _test_optimizer(opt_str, test_case, check_class, update_lr=False):
iter_num = 3
data, data_shape, label, label_shape = get_input()
net = MLP()
opt = getattr(optimizer, opt_str)(net.parameters(), **test_case)
check_func = check_class(net, **test_case)
step = 0
# eager graph
for i in range(iter_num):
if update_lr and i == 1: # change learning rate
for group in opt.param_groups:
group["lr"] += 0.01
check_func.lr += 0.01
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
pred = net(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.zero_grad()
opt.backward(loss)
ori_params = TensorDict()
for param in net.parameters():
ori_params[param] = np.copy(param.numpy())
opt.step()
step += 1
check_func(ori_params, net.parameters(), step)
# static graph
@trace
def train_func(data, label):
pred = net(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.backward(loss)
for i in range(iter_num):
if update_lr and i == 1: # change learning rate
for group in opt.param_groups:
group["lr"] += 0.01
check_func.lr += 0.01
opt.zero_grad()
ori_params = TensorDict()
for param in net.parameters():
ori_params[param] = np.copy(param.numpy())
train_func(
np.random.random(data_shape).astype(np.float32),
np.random.randint(0, 10, label_shape).astype(np.int32),
)
opt.step()
step += 1
check_func(ori_params, net.parameters(), step)
def test_sgd():
class CheckValue:
def __init__(self, net, **kwarg):
self.slots = TensorDict()
for param in net.parameters():
self.slots[param] = np.zeros(param.shape).astype(np.float32)
for k, v in kwarg.items():
setattr(self, k, v)
def __call__(self, ori_params, new_params, step):
for param in new_params:
grad = param.grad.numpy()
if hasattr(self, "momentum"):
self.slots[param] = grad + self.slots[param] * self.momentum
delta = -self.lr * self.slots[param]
else:
delta = -self.lr * grad
assertTensorClose(param.numpy(), ori_params[param] + delta)
cases = [
{"momentum": 0.9, "lr": 0.01}, # SGD with momentum
{"lr": 0.01}, # simple SGD
{"weight_decay": 0.1, "lr": 0.01}, # with weight_decay
]
for case in cases:
_test_optimizer("SGD", case, CheckValue)
_test_optimizer("SGD", case, CheckValue, update_lr=True)
def test_adam():
class CheckValue:
def __init__(self, net, **kwarg):
self.m_slots = TensorDict()
self.v_slots = TensorDict()
for param in net.parameters():
self.m_slots[param] = np.zeros(param.shape).astype(np.float32)
self.v_slots[param] = np.zeros(param.shape).astype(np.float32)
for k, v in kwarg.items():
setattr(self, k, v)
def __call__(self, ori_params, new_params, step):
for param in new_params:
grad = param.grad.numpy()
m = self.m_slots[param]
v = self.v_slots[param]
m *= self.betas[0]
m += (1 - self.betas[0]) * grad
v *= self.betas[1]
v += (1 - self.betas[1]) * grad * grad
delta = (m / (1 - self.betas[0] ** step)) / (
np.sqrt(v / (1 - self.betas[1] ** step)) + self.eps
)
assertTensorClose(param.numpy(), ori_params[param] - self.lr * delta)
cases = [
{"betas": (0.8, 0.9), "eps": 1e-04, "lr": 0.01},
{
"betas": (0.8, 0.9),
"eps": 1e-04,
"lr": 0.01,
"weight_decay": 0.1,
}, # with weight_decay
]
for case in cases:
_test_optimizer("Adam", case, CheckValue)
_test_optimizer("Adam", case, CheckValue, update_lr=True)
def test_adagrad():
class CheckValue:
def __init__(self, net, **kwarg):
self.s_slots = TensorDict()
for param in net.parameters():
self.s_slots[param] = np.zeros(param.shape).astype(np.float32)
for k, v in kwarg.items():
setattr(self, k, v)
def __call__(self, ori_params, new_params, step):
for param in new_params:
grad = param.grad.numpy()
self.s_slots[param] += grad ** 2
delta = grad / (self.s_slots[param] + self.eps) ** 0.5
delta *= -(self.lr / (1 + (step - 1) * self.lr_decay))
assertTensorClose(param.numpy(), ori_params[param] + delta)
cases = [
{"lr": 0.01, "eps": 1e-06, "lr_decay": 0.01},
{"lr": 0.01, "eps": 1e-06, "lr_decay": 0.0}, # without lr_decay
{
"lr": 0.01,
"eps": 1e-06,
"lr_decay": 0.01,
"weight_decay": 0.1,
}, # with weight_decay
]
for case in cases:
_test_optimizer("Adagrad", case, CheckValue)
_test_optimizer("Adagrad", case, CheckValue, update_lr=True)
def test_adadelta():
class CheckValue:
def __init__(self, net, **kwarg):
self.s_slots = TensorDict()
self.a_slots = TensorDict()
for param in net.parameters():
self.s_slots[param] = np.zeros(param.shape).astype(np.float32)
self.a_slots[param] = np.zeros(param.shape).astype(np.float32)
for k, v in kwarg.items():
setattr(self, k, v)
def __call__(self, ori_params, new_params, step):
for param in new_params:
grad = param.grad.numpy()
self.s_slots[param] = self.s_slots[param] * self.rho + grad ** 2 * (
1 - self.rho
)
delta = (
grad
* ((self.a_slots[param] + self.eps) ** 0.5)
/ (self.s_slots[param] + self.eps) ** 0.5
)
self.a_slots[param] = self.a_slots[param] * self.rho + delta ** 2 * (
1 - self.rho
)
delta *= -self.lr
assertTensorClose(param.numpy(), ori_params[param] + delta)
cases = [
{"lr": 1.0, "eps": 1e-06, "rho": 0.9},
{"lr": 1.0, "eps": 1e-06, "rho": 0.9, "weight_decay": 0.9}, # with weight_decay
]
for case in cases:
_test_optimizer("Adadelta", case, CheckValue)
_test_optimizer("Adadelta", case, CheckValue, update_lr=True)
|
[
"megengine.core.TensorDict",
"megengine.load",
"megengine.core.tensor"
] |
[((1024, 1053), 'helpers.graph_mode', 'graph_mode', (['"""eager"""', '"""static"""'], {}), "('eager', 'static')\n", (1034, 1053), False, 'from helpers import MLP, graph_mode\n'), ((1155, 1160), 'helpers.MLP', 'MLP', ([], {}), '()\n', (1158, 1160), False, 'from helpers import MLP, graph_mode\n'), ((1238, 1250), 'megengine.core.TensorDict', 'TensorDict', ([], {}), '()\n', (1248, 1250), False, 'from megengine.core import TensorDict, tensor\n'), ((2617, 2622), 'helpers.MLP', 'MLP', ([], {}), '()\n', (2620, 2622), False, 'from helpers import MLP, graph_mode\n'), ((797, 821), 'megengine.core.tensor', 'tensor', ([], {'dtype': 'np.float32'}), '(dtype=np.float32)\n', (803, 821), False, 'from megengine.core import TensorDict, tensor\n'), ((823, 845), 'megengine.core.tensor', 'tensor', ([], {'dtype': 'np.int32'}), '(dtype=np.int32)\n', (829, 845), False, 'from megengine.core import TensorDict, tensor\n'), ((934, 971), 'numpy.random.randint', 'np.random.randint', (['(0)', '(10)', 'label_shape'], {}), '(0, 10, label_shape)\n', (951, 971), True, 'import numpy as np\n'), ((1591, 1600), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (1598, 1600), False, 'from io import BytesIO\n'), ((1689, 1699), 'megengine.load', 'load', (['fout'], {}), '(fout)\n', (1693, 1699), False, 'from megengine import load, optimizer, save\n'), ((2104, 2116), 'megengine.core.TensorDict', 'TensorDict', ([], {}), '()\n', (2114, 2116), False, 'from megengine.core import TensorDict, tensor\n'), ((3261, 3273), 'megengine.core.TensorDict', 'TensorDict', ([], {}), '()\n', (3271, 3273), False, 'from megengine.core import TensorDict, tensor\n'), ((3879, 3891), 'megengine.core.TensorDict', 'TensorDict', ([], {}), '()\n', (3889, 3891), False, 'from megengine.core import TensorDict, tensor\n'), ((1908, 1945), 'numpy.random.randint', 'np.random.randint', (['(0)', '(10)', 'label_shape'], {}), '(0, 10, label_shape)\n', (1925, 1945), True, 'import numpy as np\n'), ((3068, 3105), 'numpy.random.randint', 'np.random.randint', (['(0)', '(10)', 'label_shape'], {}), '(0, 10, label_shape)\n', (3085, 3105), True, 'import numpy as np\n'), ((4344, 4356), 'megengine.core.TensorDict', 'TensorDict', ([], {}), '()\n', (4354, 4356), False, 'from megengine.core import TensorDict, tensor\n'), ((5444, 5456), 'megengine.core.TensorDict', 'TensorDict', ([], {}), '()\n', (5454, 5456), False, 'from megengine.core import TensorDict, tensor\n'), ((5484, 5496), 'megengine.core.TensorDict', 'TensorDict', ([], {}), '()\n', (5494, 5496), False, 'from megengine.core import TensorDict, tensor\n'), ((6890, 6902), 'megengine.core.TensorDict', 'TensorDict', ([], {}), '()\n', (6900, 6902), False, 'from megengine.core import TensorDict, tensor\n'), ((8067, 8079), 'megengine.core.TensorDict', 'TensorDict', ([], {}), '()\n', (8077, 8079), False, 'from megengine.core import TensorDict, tensor\n'), ((8107, 8119), 'megengine.core.TensorDict', 'TensorDict', ([], {}), '()\n', (8117, 8119), False, 'from megengine.core import TensorDict, tensor\n'), ((865, 893), 'numpy.random.random', 'np.random.random', (['data_shape'], {}), '(data_shape)\n', (881, 893), True, 'import numpy as np\n'), ((1309, 1330), 'numpy.zeros', 'np.zeros', (['param.shape'], {}), '(param.shape)\n', (1317, 1330), True, 'import numpy as np\n'), ((1835, 1863), 'numpy.random.random', 'np.random.random', (['data_shape'], {}), '(data_shape)\n', (1851, 1863), True, 'import numpy as np\n'), ((2995, 3023), 'numpy.random.random', 'np.random.random', (['data_shape'], {}), '(data_shape)\n', (3011, 3023), True, 'import numpy as np\n'), 
((4018, 4046), 'numpy.random.random', 'np.random.random', (['data_shape'], {}), '(data_shape)\n', (4034, 4046), True, 'import numpy as np\n'), ((4079, 4116), 'numpy.random.randint', 'np.random.randint', (['(0)', '(10)', 'label_shape'], {}), '(0, 10, label_shape)\n', (4096, 4116), True, 'import numpy as np\n'), ((4436, 4457), 'numpy.zeros', 'np.zeros', (['param.shape'], {}), '(param.shape)\n', (4444, 4457), True, 'import numpy as np\n'), ((5578, 5599), 'numpy.zeros', 'np.zeros', (['param.shape'], {}), '(param.shape)\n', (5586, 5599), True, 'import numpy as np\n'), ((5657, 5678), 'numpy.zeros', 'np.zeros', (['param.shape'], {}), '(param.shape)\n', (5665, 5678), True, 'import numpy as np\n'), ((6246, 6286), 'numpy.sqrt', 'np.sqrt', (['(v / (1 - self.betas[1] ** step))'], {}), '(v / (1 - self.betas[1] ** step))\n', (6253, 6286), True, 'import numpy as np\n'), ((6984, 7005), 'numpy.zeros', 'np.zeros', (['param.shape'], {}), '(param.shape)\n', (6992, 7005), True, 'import numpy as np\n'), ((8201, 8222), 'numpy.zeros', 'np.zeros', (['param.shape'], {}), '(param.shape)\n', (8209, 8222), True, 'import numpy as np\n'), ((8280, 8301), 'numpy.zeros', 'np.zeros', (['param.shape'], {}), '(param.shape)\n', (8288, 8301), True, 'import numpy as np\n')]
|
from sfepy.base.base import Struct, output, insert_as_static_method, \
pause, debug
class Application( Struct ):
"""Base class for applications.
Subclasses should implement: __init__(), call().
Automates parametric studies, see parametrize().
"""
def __init__( self, conf, options, output_prefix, **kwargs ):
Struct.__init__( self,
conf = conf,
options = options,
output_prefix = output_prefix )
output.prefix = self.output_prefix
self.restore()
def setup_options( self ):
pass
def __call__( self, **kwargs ):
"""
This is either call_basic() or call_parametrized().
"""
pass
def call_basic( self, **kwargs ):
return self.call( **kwargs )
def call_parametrized( self, **kwargs ):
generator = self.parametric_hook( self.problem )
for aux in generator:
if isinstance( aux, tuple ) and (len( aux ) == 2):
problem, container = aux
mode = 'coroutine'
else:
problem = aux
mode = 'simple'
self.problem = problem
generator_prefix = output.prefix
output.prefix = self.output_prefix # Restore default.
"""Application options have to be re-processed here as they can
change in the parametric hook."""
self.setup_options()
out = self.call( **kwargs )
output.prefix = generator_prefix
if mode == 'coroutine':
# Pass application output to the generator.
container.append( out )
generator.next()
def restore( self ):
"""Removes parametric_hook, restores __call__ to call_basic."""
self.parametric_hook = None
insert_as_static_method( self.__class__, '__call__',
self.call_basic )
def parametrize( self, parametric_hook ):
"""Adds parametric_hook, sets __call__ to call_parametrized."""
if parametric_hook is None: return
self.parametric_hook = parametric_hook
insert_as_static_method( self.__class__, '__call__',
self.call_parametrized )
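# A minimal sketch of a parametric hook (hedged: the attribute mutated on
# `problem` below is illustrative only). The hook is a generator; in 'simple'
# mode it yields modified problems, while in 'coroutine' mode it yields
# (problem, container) pairs and receives each application output via
# container.append(out) followed by generator.next().
def example_parametric_hook(problem):
    for value in [0.1, 0.2, 0.4]:
        # adjust the problem for this parameter value (illustrative attribute)
        problem.conf.example_parameter = value
        yield problem

# app.parametrize(example_parametric_hook)  # subsequent app() calls run call_parametrized()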
|
[
"sfepy.base.base.insert_as_static_method",
"sfepy.base.base.Struct.__init__"
] |
[((345, 423), 'sfepy.base.base.Struct.__init__', 'Struct.__init__', (['self'], {'conf': 'conf', 'options': 'options', 'output_prefix': 'output_prefix'}), '(self, conf=conf, options=options, output_prefix=output_prefix)\n', (360, 423), False, 'from sfepy.base.base import Struct, output, insert_as_static_method, pause, debug\n'), ((1897, 1965), 'sfepy.base.base.insert_as_static_method', 'insert_as_static_method', (['self.__class__', '"""__call__"""', 'self.call_basic'], {}), "(self.__class__, '__call__', self.call_basic)\n", (1920, 1965), False, 'from sfepy.base.base import Struct, output, insert_as_static_method, pause, debug\n'), ((2227, 2302), 'sfepy.base.base.insert_as_static_method', 'insert_as_static_method', (['self.__class__', '"""__call__"""', 'self.call_parametrized'], {}), "(self.__class__, '__call__', self.call_parametrized)\n", (2250, 2302), False, 'from sfepy.base.base import Struct, output, insert_as_static_method, pause, debug\n')]
|
import time
import numpy as nm
import warnings
import scipy.sparse as sps
warnings.simplefilter('ignore', sps.SparseEfficiencyWarning)
from sfepy.base.base import output, get_default, assert_, try_imports, Struct
from sfepy.solvers.solvers import make_get_conf, LinearSolver
def standard_call(call):
"""
Decorator handling argument preparation and timing for linear solvers.
"""
def _standard_call(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
tt = time.clock()
conf = get_default(conf, self.conf)
mtx = get_default(mtx, self.mtx)
status = get_default(status, self.status)
assert_(mtx.shape[0] == mtx.shape[1] == rhs.shape[0])
if x0 is not None:
assert_(x0.shape[0] == rhs.shape[0])
result = call(self, rhs, x0, conf, eps_a, eps_r, i_max, mtx, status,
**kwargs)
ttt = time.clock() - tt
if status is not None:
status['time'] = ttt
return result
return _standard_call
class ScipyDirect(LinearSolver):
name = 'ls.scipy_direct'
@staticmethod
def process_conf(conf, kwargs):
"""
Missing items are set to default values.
Example configuration, all items::
solver_1100 = {
'name' : 'dls1100',
'kind' : 'ls.scipy_direct',
'method' : 'superlu',
'presolve' : False,
'warn' : True,
}
"""
get = make_get_conf(conf, kwargs)
common = LinearSolver.process_conf(conf)
return Struct(method=get('method', 'auto'),
presolve=get('presolve', False),
warn=get('warn', True),
i_max=None, eps_a=None, eps_r=None) + common
def __init__(self, conf, **kwargs):
LinearSolver.__init__(self, conf, **kwargs)
um = self.sls = None
aux = try_imports(['import scipy.linsolve as sls',
'import scipy.splinalg.dsolve as sls',
'import scipy.sparse.linalg.dsolve as sls'],
'cannot import scipy sparse direct solvers!')
self.sls = aux['sls']
aux = try_imports(['import scipy.linsolve.umfpack as um',
'import scipy.splinalg.dsolve.umfpack as um',
'import scipy.sparse.linalg.dsolve.umfpack as um',
'import scikits.umfpack as um'])
if 'um' in aux:
um = aux['um']
if um is not None:
is_umfpack = hasattr(um, 'UMFPACK_OK')
else:
is_umfpack = False
method = self.conf.method
if method == 'superlu':
self.sls.use_solver(useUmfpack=False)
elif method == 'umfpack':
if not is_umfpack and self.conf.warn:
output('umfpack not available, using superlu!')
elif method != 'auto':
raise ValueError('unknown solution method! (%s)' % method)
if method != 'superlu' and is_umfpack:
self.sls.use_solver(useUmfpack=True,
assumeSortedIndices=True)
self.solve = None
if self._presolve() and hasattr(self, 'mtx'):
if self.mtx is not None:
self.solve = self.sls.factorized(self.mtx)
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
if self.solve is not None:
# Matrix is already prefactorized.
return self.solve(rhs)
else:
return self.sls.spsolve(mtx, rhs)
def _presolve(self):
if hasattr(self, 'presolve'):
return self.presolve
else:
return self.conf.presolve
class Umfpack(ScipyDirect):
"""This class stays for compatability with old input files. Use ScipyDirect
isntead."""
name = 'ls.umfpack'
def __init__(self, conf, **kwargs):
conf.method = 'umfpack'
ScipyDirect.__init__(self, conf, **kwargs)
##
# c: 22.02.2008
class ScipyIterative( LinearSolver ):
"""
Interface to SciPy iterative solvers.
Notes
-----
The `eps_r` tolerance is both absolute and relative - the solvers
stop when either the relative or the absolute residual is below it.
A preconditioner can be anything that the SciPy solvers accept (sparse
matrix, dense matrix, LinearOperator).
"""
name = 'ls.scipy_iterative'
@staticmethod
def process_conf(conf, kwargs):
"""
Missing items are set to default values.
Example configuration, all items::
solver_110 = {
'name' : 'ls110',
'kind' : 'ls.scipy_iterative',
'method' : 'cg',
'precond' : None,
'callback' : None,
'i_max' : 1000,
'eps_r' : 1e-12,
}
"""
get = make_get_conf(conf, kwargs)
common = LinearSolver.process_conf(conf)
return Struct(method=get('method', 'cg'),
precond=get('precond', None),
callback=get('callback', None),
i_max=get('i_max', 100),
eps_a=None,
eps_r=get('eps_r', 1e-8)) + common
def __init__(self, conf, **kwargs):
import scipy.sparse.linalg.isolve as la
LinearSolver.__init__(self, conf, **kwargs)
try:
solver = getattr( la, self.conf.method )
except AttributeError:
output( 'scipy solver %s does not exist!' % self.conf.method )
output( 'using cg instead' )
solver = la.cg
self.solver = solver
self.converged_reasons = {
0 : 'successful exit',
1 : 'number of iterations',
-1 : 'illegal input or breakdown',
}
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
eps_r = get_default(eps_r, self.conf.eps_r)
i_max = get_default(i_max, self.conf.i_max)
precond = get_default(kwargs.get('precond', None), self.conf.precond)
callback = get_default(kwargs.get('callback', None), self.conf.callback)
if conf.method == 'qmr':
prec_args = {'M1' : precond, 'M2' : precond}
else:
prec_args = {'M' : precond}
sol, info = self.solver(mtx, rhs, x0=x0, tol=eps_r, maxiter=i_max,
callback=callback, **prec_args)
output('%s convergence: %s (%s)'
% (self.conf.method,
info, self.converged_reasons[nm.sign(info)]))
return sol
##
# c: 02.05.2008, r: 02.05.2008
class PyAMGSolver( LinearSolver ):
"""
Interface to PyAMG solvers.
Notes
-----
Uses relative convergence tolerance, i.e. eps_r is scaled by `||b||`.
"""
name = 'ls.pyamg'
@staticmethod
def process_conf(conf, kwargs):
"""
Missing items are set to default values.
Example configuration, all items::
solver_102 = {
'name' : 'ls102',
'kind' : 'ls.pyamg',
'method' : 'smoothed_aggregation_solver',
'accel' : 'cg',
'eps_r' : 1e-12,
}
"""
get = make_get_conf(conf, kwargs)
common = LinearSolver.process_conf(conf)
return Struct(method=get('method', 'smoothed_aggregation_solver'),
accel = get('accel', None),
i_max=None, eps_a=None,
eps_r=get('eps_r', 1e-8)) + common
##
# c: 02.05.2008, r: 02.05.2008
def __init__( self, conf, **kwargs ):
try:
import pyamg
except ImportError:
msg = 'cannot import pyamg!'
raise ImportError( msg )
LinearSolver.__init__(self, conf, mg=None, **kwargs)
try:
solver = getattr( pyamg, self.conf.method )
except AttributeError:
output( 'pyamg.%s does not exist!' % self.conf.method )
output( 'using pyamg.smoothed_aggregation_solver instead' )
solver = pyamg.smoothed_aggregation_solver
self.solver = solver
if hasattr( self, 'mtx' ):
if self.mtx is not None:
self.mg = self.solver( self.mtx )
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
eps_r = get_default(eps_r, self.conf.eps_r)
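        # Rebuild the AMG hierarchy only when the system matrix changes.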
if (self.mg is None) or (mtx is not self.mtx):
self.mg = self.solver(mtx)
self.mtx = mtx
sol = self.mg.solve(rhs, x0=x0, accel=conf.accel, tol=eps_r)
return sol
class PETScKrylovSolver( LinearSolver ):
"""
PETSc Krylov subspace solver.
The solver and preconditioner types are set upon the solver object
    creation. Tolerances can be overridden when called by passing a `conf`
object.
Notes
-----
Convergence is reached when `rnorm < max(eps_r * rnorm_0, eps_a)`,
where, in PETSc, `rnorm` is by default the norm of *preconditioned*
residual.
"""
name = 'ls.petsc'
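    # Map preconditioning side names to the PETSc PC side codes used by setPCSide().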
_precond_sides = {None : None, 'left' : 0, 'right' : 1, 'symmetric' : 2}
@staticmethod
def process_conf(conf, kwargs):
"""
Missing items are set to default values.
Example configuration, all items::
solver_120 = {
'name' : 'ls120',
'kind' : 'ls.petsc',
'method' : 'cg', # ksp_type
'precond' : 'icc', # pc_type
'precond_side' : 'left', # ksp_pc_side
'eps_a' : 1e-12, # abstol
'eps_r' : 1e-12, # rtol
'eps_d' : 1e5, # divtol
'i_max' : 1000, # maxits
}
"""
get = make_get_conf(conf, kwargs)
common = LinearSolver.process_conf(conf)
return Struct(method=get('method', 'cg'),
precond=get('precond', 'icc'),
precond_side=get('precond_side', None),
i_max=get('i_max', 100),
eps_a=get('eps_a', 1e-8),
eps_r=get('eps_r', 1e-8),
eps_d=get('eps_d', 1e5)) + common
def __init__( self, conf, **kwargs ):
try:
import petsc4py
petsc4py.init([])
from petsc4py import PETSc
except ImportError:
msg = 'cannot import petsc4py!'
raise ImportError( msg )
LinearSolver.__init__(self, conf, petsc=PETSc, pmtx=None, **kwargs)
ksp = PETSc.KSP().create()
ksp.setType( self.conf.method )
ksp.getPC().setType( self.conf.precond )
side = self._precond_sides[self.conf.precond_side]
if side is not None:
ksp.setPCSide(side)
self.ksp = ksp
self.converged_reasons = {}
for key, val in ksp.ConvergedReason.__dict__.iteritems():
if isinstance(val, int):
self.converged_reasons[val] = key
def set_matrix( self, mtx ):
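        # Convert the matrix to CSR and wrap its data in a PETSc AIJ matrix;
        # also create matching solution and rhs vectors.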
mtx = sps.csr_matrix(mtx)
pmtx = self.petsc.Mat().createAIJ( mtx.shape,
csr = (mtx.indptr,
mtx.indices,
mtx.data) )
sol, rhs = pmtx.getVecs()
return pmtx, sol, rhs
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
eps_a = get_default(eps_a, self.conf.eps_a)
eps_r = get_default(eps_r, self.conf.eps_r)
i_max = get_default(i_max, self.conf.i_max)
eps_d = self.conf.eps_d
        # There is no use in caching the matrix in the solver - always set it as new.
pmtx, psol, prhs = self.set_matrix(mtx)
ksp = self.ksp
ksp.setOperators(pmtx)
ksp.setFromOptions() # PETSc.Options() not used yet...
ksp.setTolerances(atol=eps_a, rtol=eps_r, divtol=eps_d, max_it=i_max)
# Set PETSc rhs, solve, get solution from PETSc solution.
if x0 is not None:
psol[...] = x0
ksp.setInitialGuessNonzero(True)
prhs[...] = rhs
ksp.solve(prhs, psol)
sol = psol[...].copy()
output('%s(%s) convergence: %s (%s)'
% (self.conf.method, self.conf.precond,
ksp.reason, self.converged_reasons[ksp.reason]))
return sol
class PETScParallelKrylovSolver(PETScKrylovSolver):
"""
PETSc Krylov subspace solver able to run in parallel by storing the
system to disk and running a separate script via `mpiexec`.
The solver and preconditioner types are set upon the solver object
    creation. Tolerances can be overridden when called by passing a `conf`
object.
Notes
-----
Convergence is reached when `rnorm < max(eps_r * rnorm_0, eps_a)`,
where, in PETSc, `rnorm` is by default the norm of *preconditioned*
residual.
"""
name = 'ls.petsc_parallel'
@staticmethod
def process_conf(conf, kwargs):
"""
Missing items are set to default values.
Example configuration, all items::
solver_1 = {
'name' : 'ls',
'kind' : 'ls.petsc_parallel',
'log_dir' : '.', # Store logs here.
'n_proc' : 5, # Number of processes to run.
'method' : 'cg', # ksp_type
'precond' : 'bjacobi', # pc_type
'sub_precond' : 'icc', # sub_pc_type
'eps_a' : 1e-12, # abstol
'eps_r' : 1e-12, # rtol
'eps_d' : 1e5, # divtol
'i_max' : 1000, # maxits
}
"""
get = make_get_conf(conf, kwargs)
common = PETScKrylovSolver.process_conf(conf, kwargs)
return Struct(log_dir=get('log_dir', '.'),
n_proc=get('n_proc', 1),
sub_precond=get('sub_precond', 'icc')) + common
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
import os, sys, shutil, tempfile
from sfepy import base_dir
from sfepy.base.ioutils import ensure_path
eps_a = get_default(eps_a, self.conf.eps_a)
eps_r = get_default(eps_r, self.conf.eps_r)
i_max = get_default(i_max, self.conf.i_max)
eps_d = self.conf.eps_d
petsc = self.petsc
        # There is no use in caching the matrix in the solver - always set it as new.
pmtx, psol, prhs = self.set_matrix(mtx)
ksp = self.ksp
ksp.setOperators(pmtx)
ksp.setFromOptions() # PETSc.Options() not used yet...
ksp.setTolerances(atol=eps_a, rtol=eps_r, divtol=eps_d, max_it=i_max)
output_dir = tempfile.mkdtemp()
# Set PETSc rhs, solve, get solution from PETSc solution.
if x0 is not None:
psol[...] = x0
sol0_filename = os.path.join(output_dir, 'sol0.dat')
else:
sol0_filename = ''
prhs[...] = rhs
script_filename = os.path.join(base_dir, 'solvers/petsc_worker.py')
mtx_filename = os.path.join(output_dir, 'mtx.dat')
rhs_filename = os.path.join(output_dir, 'rhs.dat')
sol_filename = os.path.join(output_dir, 'sol.dat')
status_filename = os.path.join(output_dir, 'status.txt')
log_filename = os.path.join(self.conf.log_dir, 'sol.log')
ensure_path(log_filename)
output('storing system to %s...' % output_dir)
tt = time.clock()
view_mtx = petsc.Viewer().createBinary(mtx_filename, mode='w')
view_rhs = petsc.Viewer().createBinary(rhs_filename, mode='w')
pmtx.view(view_mtx)
prhs.view(view_rhs)
if sol0_filename:
view_sol0 = petsc.Viewer().createBinary(sol0_filename, mode='w')
psol.view(view_sol0)
output('...done in %.2f s' % (time.clock() - tt))
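        # Build the mpiexec command line that runs the standalone PETSc worker
        # script on the system stored above.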
command = [
'mpiexec -n %d' % self.conf.n_proc,
sys.executable, script_filename,
'-mtx %s' % mtx_filename, '-rhs %s' % rhs_filename,
'-sol0 %s' % sol0_filename, '-sol %s' % sol_filename,
'-status %s' % status_filename,
'-ksp_type %s' % self.conf.method,
'-pc_type %s' % self.conf.precond,
'-sub_pc_type %s' % self.conf.sub_precond,
'-ksp_atol %.3e' % self.conf.eps_a,
'-ksp_rtol %.3e' % self.conf.eps_r,
'-ksp_max_it %d' % self.conf.i_max,
'-ksp_monitor %s' % log_filename,
'-ksp_view %s' % log_filename,
]
if self.conf.precond_side is not None:
command.append('-ksp_pc_side %s' % self.conf.precond_side)
out = os.system(" ".join(command))
assert_(out == 0)
output('reading solution...')
tt = time.clock()
view_sol = self.petsc.Viewer().createBinary(sol_filename, mode='r')
psol = petsc.Vec().load(view_sol)
fd = open(status_filename, 'r')
line = fd.readline().split()
reason = int(line[0])
elapsed = float(line[1])
fd.close()
output('...done in %.2f s' % (time.clock() - tt))
sol = psol[...].copy()
output('%s(%s, %s/proc) convergence: %s (%s)'
% (self.conf.method, self.conf.precond, self.conf.sub_precond,
reason, self.converged_reasons[reason]))
output('elapsed: %.2f [s]' % elapsed)
shutil.rmtree(output_dir)
return sol
class SchurGeneralized(ScipyDirect):
r"""
Generalized Schur complement.
    Defines the matrix blocks and calls a user-defined function.
"""
name = 'ls.schur_generalized'
@staticmethod
def process_conf(conf, kwargs):
"""
Setup solver configuration options.
Example configuration::
solvers = {
'ls': ('ls.schur_generalized',
{'blocks':
{'u': ['displacement1', 'displacement2'],
'v': ['velocity1', 'velocity2'],
'w': ['pressure1', 'pressure2'],
},
'function': my_schur,
'needs_problem_instance': True,
})
}
"""
get = make_get_conf(conf, kwargs)
common = ScipyDirect.process_conf(conf, kwargs)
return Struct(blocks=get('blocks', None,
'missing "blocks" in options!'),
function=get('function', None,
'missing "function" in options!'),
needs_problem_instance=True) + common
def __init__(self, conf, **kwargs):
from sfepy.discrete.state import State
ScipyDirect.__init__(self, conf, **kwargs)
equations = self.problem.equations
aux_state = State(equations.variables)
conf.idxs = {}
for bk, bv in conf.blocks.iteritems():
aux_state.fill(0.0)
for jj in bv:
idx = equations.variables.di.indx[jj]
aux_state.vec[idx] = nm.nan
aux_state.apply_ebc()
vec0 = aux_state.get_reduced()
conf.idxs[bk] = nm.where(nm.isnan(vec0))[0]
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
mtxi= self.orig_conf.idxs
mtxslc_s = {}
mtxslc_f = {}
nn = {}
for ik, iv in mtxi.iteritems():
ptr = 0
nn[ik] = len(iv)
mtxslc_s[ik] = []
mtxslc_f[ik] = []
while ptr < nn[ik]:
idx0 = iv[ptr:]
idxrange = nm.arange(idx0[0], idx0[0] + len(idx0))
aux = nm.where(idx0 == idxrange)[0]
mtxslc_s[ik].append(slice(ptr + aux[0], ptr + aux[-1] + 1))
mtxslc_f[ik].append(slice(idx0[aux][0], idx0[aux][-1] + 1))
ptr += aux[-1] + 1
mtxs = {}
rhss = {}
ress = {}
for ir in mtxi.iterkeys():
rhss[ir] = nm.zeros((nn[ir],), dtype=nm.float64)
ress[ir] = nm.zeros((nn[ir],), dtype=nm.float64)
for jr, idxr in enumerate(mtxslc_f[ir]):
rhss[ir][mtxslc_s[ir][jr]] = rhs[idxr]
for ic in mtxi.iterkeys():
mtxid = '%s%s' % (ir, ic)
mtxs[mtxid] = nm.zeros((nn[ir], nn[ic]), dtype=nm.float64)
for jr, idxr in enumerate(mtxslc_f[ir]):
for jc, idxc in enumerate(mtxslc_f[ic]):
iir = mtxslc_s[ir][jr]
iic = mtxslc_s[ic][jc]
mtxs[mtxid][iir, iic] = mtx._get_submatrix(idxr, idxc).todense()
self.orig_conf.function(ress, mtxs, rhss, nn)
res = nm.zeros_like(rhs)
for ir in mtxi.iterkeys():
for jr, idxr in enumerate(mtxslc_f[ir]):
res[idxr] = ress[ir][mtxslc_s[ir][jr]]
return res
def _presolve(self):
if hasattr(self, 'presolve'):
return self.presolve
else:
return self.conf.presolve
class SchurComplement(SchurGeneralized):
r"""
Schur complement.
Solution of the linear system
.. math::
\left[ \begin{array}{cc}
A & B \\
C & D \end{array} \right]
\cdot
\left[ \begin{array}{c}
u \\
v \end{array} \right]
=
\left[ \begin{array}{c}
f \\
g \end{array} \right]
is obtained by solving the following equation:
.. math::
(D - C A^{-1} B) \cdot v = g - C A^{-1} f
    The variable(s) :math:`u` are specified in the "eliminate" list,
    the variable(s) :math:`v` in the "keep" list.
See: http://en.wikipedia.org/wiki/Schur_complement
"""
name = 'ls.schur_complement'
@staticmethod
def process_conf(conf, kwargs):
"""
Setup solver configuration options.
Example configuration::
solvers = {
'ls': ('ls.schur_complement',
{'eliminate': ['displacement'],
'keep': ['pressure'],
'needs_problem_instance': True,
})
}
"""
get = make_get_conf(conf, kwargs)
conf.blocks = {'1': get('eliminate', None,
'missing "eliminate" in options!'),
'2': get('keep', None,
'missing "keep" in options!'),}
conf.function = SchurComplement.schur_fun
common = SchurGeneralized.process_conf(conf, kwargs)
return common
@staticmethod
def schur_fun(res, mtx, rhs, nn):
import scipy.sparse as scs
import scipy.sparse.linalg as sls
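        # Factorize the A ('11') block so that A^{-1} can be applied repeatedly.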
invA = sls.splu(scs.csc_matrix(mtx['11']))
invAB = nm.zeros_like(mtx['12'])
for j, b in enumerate(mtx['12'].T):
invAB[:,j] = invA.solve(b)
invAf = invA.solve(rhs['1'])
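        # Form and solve the Schur complement system (D - C A^{-1} B) v = g - C A^{-1} f.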
spC = scs.csc_matrix(mtx['21'])
k_rhs = rhs['2'] - spC * invAf
res['2'] = sls.spsolve(scs.csc_matrix(mtx['22'] - spC * invAB), k_rhs)
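        # Back-substitute for the eliminated variables: u = A^{-1} f - A^{-1} B v.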
res['1'] = invAf - nm.dot(invAB, res['2'])
class MultiProblem(ScipyDirect):
r"""
Conjugate multiple problems.
    Allows one to define and couple multiple conjugate problems.
"""
name = 'ls.cm_pb'
@staticmethod
def process_conf(conf, kwargs):
"""
Setup solver configuration options.
Example configuration::
solvers = {
'ls': ('ls.cm_pb',
{'others': ['acoustic_subproblem.py'],
'coupling_variables': ['g'],
'needs_problem_instance': True,
})
}
"""
get = make_get_conf(conf, kwargs)
common = ScipyDirect.process_conf(conf, kwargs)
return Struct(others=get('others', None,
'missing "others" in options!'),
coupling_variables=get('coupling_variables', None,
'missing "coupling_variables"!'),
needs_problem_instance=True) + common
def __init__(self, conf, problem, **kwargs):
from sfepy.discrete.state import State
from sfepy.discrete import Problem
from sfepy.base.conf import ProblemConf, get_standard_keywords
from scipy.spatial import cKDTree as KDTree
ScipyDirect.__init__(self, conf, **kwargs)
# init subproblems
pb_vars = problem.get_variables()
# get "master" DofInfo and last index
pb_adi_indx = problem.equations.variables.adi.indx
self.adi_indx = pb_adi_indx.copy()
last_indx = -1
for ii in self.adi_indx.itervalues():
last_indx = nm.max([last_indx, ii.stop])
# coupling variables
self.cvars_to_pb = {}
for jj in conf.coupling_variables:
self.cvars_to_pb[jj] = [None, None]
if jj in pb_vars.names:
if pb_vars[jj].dual_var_name is not None:
self.cvars_to_pb[jj][0] = -1
else:
self.cvars_to_pb[jj][1] = -1
# init subproblems
self.subpb = []
required, other = get_standard_keywords()
master_prefix = output.get_output_prefix()
for ii, ifname in enumerate(conf.others):
sub_prefix = master_prefix[:-1] + '-sub%d:' % (ii + 1)
output.set_output_prefix(sub_prefix)
kwargs['master_problem'] = problem
confi = ProblemConf.from_file(ifname, required, other,
define_args=kwargs)
pbi = Problem.from_conf(confi, init_equations=True)
sti = State(pbi.equations.variables)
pbi.equations.set_data(None, ignore_unknown=True)
pbi.time_update()
pbi.update_materials()
sti.apply_ebc()
pbi_vars = pbi.get_variables()
output.set_output_prefix(master_prefix)
self.subpb.append([pbi, sti, None])
# append "slave" DofInfo
for jj in pbi_vars.names:
if not(pbi_vars[jj].is_state()):
continue
didx = pbi.equations.variables.adi.indx[jj]
ndof = didx.stop - didx.start
if jj in self.adi_indx:
if ndof != \
(self.adi_indx[jj].stop - self.adi_indx[jj].start):
raise ValueError('DOFs do not match!')
else:
self.adi_indx.update({
jj: slice(last_indx, last_indx + ndof, None)})
last_indx += ndof
for jj in conf.coupling_variables:
if jj in pbi_vars.names:
if pbi_vars[jj].dual_var_name is not None:
self.cvars_to_pb[jj][0] = ii
else:
self.cvars_to_pb[jj][1] = ii
self.subpb.append([problem, None, None])
self.cvars_to_pb_map = {}
for varname, pbs in self.cvars_to_pb.iteritems():
# match field nodes
coors = []
for ii in pbs:
pbi = self.subpb[ii][0]
pbi_vars = pbi.get_variables()
fcoors = pbi_vars[varname].field.coors
dc = nm.abs(nm.max(fcoors, axis=0)\
- nm.min(fcoors, axis=0))
ax = nm.where(dc > 1e-9)[0]
coors.append(fcoors[:,ax])
if len(coors[0]) != len(coors[1]):
raise ValueError('number of nodes does not match!')
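            # Match the coupling nodes of the two subproblems by
            # nearest-neighbour search on their coordinates.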
kdtree = KDTree(coors[0])
map_12 = kdtree.query(coors[1])[1]
pbi1 = self.subpb[pbs[0]][0]
pbi1_vars = pbi1.get_variables()
eq_map_1 = pbi1_vars[varname].eq_map
pbi2 = self.subpb[pbs[1]][0]
pbi2_vars = pbi2.get_variables()
eq_map_2 = pbi2_vars[varname].eq_map
dpn = eq_map_2.dpn
nnd = map_12.shape[0]
map_12_nd = nm.zeros((nnd * dpn,), dtype=nm.int32)
if dpn > 1:
for ii in range(dpn):
map_12_nd[ii::dpn] = map_12 * dpn + ii
else:
map_12_nd = map_12
idx = nm.where(eq_map_2.eq >= 0)[0]
self.cvars_to_pb_map[varname] = eq_map_1.eq[map_12[idx]]
def sparse_submat(self, Ad, Ar, Ac, gr, gc, S):
"""
A[gr,gc] = S
"""
if type(gr) is slice:
gr = nm.arange(gr.start, gr.stop)
if type(gc) is slice:
gc = nm.arange(gc.start, gc.stop)
for ii, lrow in enumerate(S):
m = lrow.indices.shape[0]
idxrow = nm.ones((m, ), dtype=nm.int32) * gr[ii]
Ar = nm.hstack([Ar, idxrow])
Ac = nm.hstack([Ac, gc[lrow.indices]])
Ad = nm.hstack([Ad, lrow.data])
return Ad, Ar, Ac
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
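        # Assemble one monolithic system coupling the master problem with all
        # subproblems, solve it directly, and scatter the solution back per subproblem.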
max_indx = 0
hst = nm.hstack
for ii in self.adi_indx.itervalues():
max_indx = nm.max([max_indx, ii.stop])
new_rhs = nm.zeros((max_indx,), dtype=rhs.dtype)
new_rhs[:rhs.shape[0]] = rhs
# copy "master" matrices
pbi = self.subpb[-1][0]
adi_indxi = pbi.equations.variables.adi.indx
mtxc = mtx.tocsc()
aux_data = nm.array([], dtype=mtxc.dtype)
aux_rows = nm.array([], dtype=nm.int32)
aux_cols = nm.array([], dtype=nm.int32)
for jk, jv in adi_indxi.iteritems():
if jk in self.cvars_to_pb:
if not(self.cvars_to_pb[jk][0] == -1):
continue
gjv = self.adi_indx[jk]
ii = gjv.start
for jj in nm.arange(jv.start, jv.stop):
ptr = mtxc.indptr[jj]
nn = mtxc.indptr[jj + 1] - ptr
sl = slice(ptr, ptr + nn, None)
aux_data = hst([aux_data, mtxc.data[sl]])
aux_rows = hst([aux_rows, mtxc.indices[sl]])
aux_cols = hst([aux_cols, nm.ones((nn,), dtype=nm.int32) * ii])
ii += 1
# copy "slave" (sub)matricies
mtxs = []
for kk, (pbi, sti0, _) in enumerate(self.subpb[:-1]):
x0i = sti0.get_reduced()
evi = pbi.get_evaluator()
mtxi = evi.eval_tangent_matrix(x0i, mtx=pbi.mtx_a)
rhsi = evi.eval_residual(x0i)
mtxs.append(mtxi)
adi_indxi = pbi.equations.variables.adi.indx
for ik, iv in adi_indxi.iteritems():
if ik in self.cvars_to_pb:
if not(self.cvars_to_pb[ik][0] == kk):
continue
giv = self.adi_indx[ik]
for jk, jv in adi_indxi.iteritems():
gjv = self.adi_indx[jk]
if jk in self.cvars_to_pb:
if not(self.cvars_to_pb[jk][0] == kk):
continue
aux_data, aux_rows, aux_cols =\
self.sparse_submat(aux_data, aux_rows, aux_cols,
giv, gjv, mtxi[iv, jv])
new_rhs[giv] = rhsi[iv]
mtxs.append(mtx)
# copy "coupling" (sub)matricies
for varname, pbs in self.cvars_to_pb.iteritems():
idx = pbs[1]
pbi = self.subpb[idx][0]
mtxi = mtxs[idx]
gjv = self.adi_indx[varname]
jv = pbi.equations.variables.adi.indx[varname]
adi_indxi = pbi.equations.variables.adi.indx
for ik, iv in adi_indxi.iteritems():
if ik == varname:
continue
giv = self.adi_indx[ik]
aux_mtx = mtxi[iv,:].tocsc()
for ll, jj in enumerate(nm.arange(jv.start, jv.stop)):
ptr = aux_mtx.indptr[jj]
nn = aux_mtx.indptr[jj + 1] - ptr
if nn < 1:
continue
sl = slice(ptr, ptr + nn, None)
aux_data = hst([aux_data, aux_mtx.data[sl]])
aux_rows = hst([aux_rows, aux_mtx.indices[sl] + giv.start])
jjr = gjv.start + self.cvars_to_pb_map[varname][ll]
aux_cols = hst([aux_cols,
nm.ones((nn,), dtype=nm.int32) * jjr])
# create new matrix
new_mtx = sps.coo_matrix((aux_data, (aux_rows, aux_cols))).tocsr()
res0 = ScipyDirect.__call__(self, new_rhs, mtx=new_mtx)
res = []
for kk, (pbi, sti0, _) in enumerate(self.subpb):
adi_indxi = pbi.equations.variables.adi.indx
max_indx = 0
for ii in adi_indxi.itervalues():
max_indx = nm.max([max_indx, ii.stop])
resi = nm.zeros((max_indx,), dtype=res0.dtype)
for ik, iv in adi_indxi.iteritems():
giv = self.adi_indx[ik]
if ik in self.cvars_to_pb:
if pbi is self.subpb[self.cvars_to_pb[ik][1]][0]:
giv = self.cvars_to_pb_map[ik] + giv.start
resi[iv] = res0[giv]
if sti0 is not None:
sti = sti0.copy()
sti.set_reduced(-resi)
pbi.setup_default_output()
pbi.save_state(pbi.get_output_name(), sti)
self.subpb[kk][-1] = sti
res.append(resi)
return res[-1]
def _presolve(self):
if hasattr(self, 'presolve'):
return self.presolve
else:
return self.conf.presolve
|
[
"sfepy.base.conf.ProblemConf.from_file",
"sfepy.solvers.solvers.LinearSolver.__init__",
"sfepy.base.base.assert_",
"sfepy.base.base.output.set_output_prefix",
"sfepy.discrete.Problem.from_conf",
"sfepy.discrete.state.State",
"sfepy.base.conf.get_standard_keywords",
"sfepy.base.base.get_default",
"sfepy.base.base.try_imports",
"sfepy.base.base.output.get_output_prefix",
"sfepy.solvers.solvers.LinearSolver.process_conf",
"sfepy.base.ioutils.ensure_path",
"sfepy.solvers.solvers.make_get_conf",
"sfepy.base.base.output"
] |
[((77, 137), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""', 'sps.SparseEfficiencyWarning'], {}), "('ignore', sps.SparseEfficiencyWarning)\n", (98, 137), False, 'import warnings\n'), ((556, 568), 'time.clock', 'time.clock', ([], {}), '()\n', (566, 568), False, 'import time\n'), ((585, 613), 'sfepy.base.base.get_default', 'get_default', (['conf', 'self.conf'], {}), '(conf, self.conf)\n', (596, 613), False, 'from sfepy.base.base import output, get_default, assert_, try_imports, Struct\n'), ((628, 654), 'sfepy.base.base.get_default', 'get_default', (['mtx', 'self.mtx'], {}), '(mtx, self.mtx)\n', (639, 654), False, 'from sfepy.base.base import output, get_default, assert_, try_imports, Struct\n'), ((672, 704), 'sfepy.base.base.get_default', 'get_default', (['status', 'self.status'], {}), '(status, self.status)\n', (683, 704), False, 'from sfepy.base.base import output, get_default, assert_, try_imports, Struct\n'), ((714, 767), 'sfepy.base.base.assert_', 'assert_', (['(mtx.shape[0] == mtx.shape[1] == rhs.shape[0])'], {}), '(mtx.shape[0] == mtx.shape[1] == rhs.shape[0])\n', (721, 767), False, 'from sfepy.base.base import output, get_default, assert_, try_imports, Struct\n'), ((1579, 1606), 'sfepy.solvers.solvers.make_get_conf', 'make_get_conf', (['conf', 'kwargs'], {}), '(conf, kwargs)\n', (1592, 1606), False, 'from sfepy.solvers.solvers import make_get_conf, LinearSolver\n'), ((1624, 1655), 'sfepy.solvers.solvers.LinearSolver.process_conf', 'LinearSolver.process_conf', (['conf'], {}), '(conf)\n', (1649, 1655), False, 'from sfepy.solvers.solvers import make_get_conf, LinearSolver\n'), ((1926, 1969), 'sfepy.solvers.solvers.LinearSolver.__init__', 'LinearSolver.__init__', (['self', 'conf'], {}), '(self, conf, **kwargs)\n', (1947, 1969), False, 'from sfepy.solvers.solvers import make_get_conf, LinearSolver\n'), ((2014, 2200), 'sfepy.base.base.try_imports', 'try_imports', (["['import scipy.linsolve as sls', 'import scipy.splinalg.dsolve as sls',\n 'import scipy.sparse.linalg.dsolve as sls']", '"""cannot import scipy sparse direct solvers!"""'], {}), "(['import scipy.linsolve as sls',\n 'import scipy.splinalg.dsolve as sls',\n 'import scipy.sparse.linalg.dsolve as sls'],\n 'cannot import scipy sparse direct solvers!')\n", (2025, 2200), False, 'from sfepy.base.base import output, get_default, assert_, try_imports, Struct\n'), ((2313, 2506), 'sfepy.base.base.try_imports', 'try_imports', (["['import scipy.linsolve.umfpack as um',\n 'import scipy.splinalg.dsolve.umfpack as um',\n 'import scipy.sparse.linalg.dsolve.umfpack as um',\n 'import scikits.umfpack as um']"], {}), "(['import scipy.linsolve.umfpack as um',\n 'import scipy.splinalg.dsolve.umfpack as um',\n 'import scipy.sparse.linalg.dsolve.umfpack as um',\n 'import scikits.umfpack as um'])\n", (2324, 2506), False, 'from sfepy.base.base import output, get_default, assert_, try_imports, Struct\n'), ((5111, 5138), 'sfepy.solvers.solvers.make_get_conf', 'make_get_conf', (['conf', 'kwargs'], {}), '(conf, kwargs)\n', (5124, 5138), False, 'from sfepy.solvers.solvers import make_get_conf, LinearSolver\n'), ((5156, 5187), 'sfepy.solvers.solvers.LinearSolver.process_conf', 'LinearSolver.process_conf', (['conf'], {}), '(conf)\n', (5181, 5187), False, 'from sfepy.solvers.solvers import make_get_conf, LinearSolver\n'), ((5581, 5624), 'sfepy.solvers.solvers.LinearSolver.__init__', 'LinearSolver.__init__', (['self', 'conf'], {}), '(self, conf, **kwargs)\n', (5602, 5624), False, 'from sfepy.solvers.solvers import make_get_conf, 
LinearSolver\n'), ((6234, 6269), 'sfepy.base.base.get_default', 'get_default', (['eps_r', 'self.conf.eps_r'], {}), '(eps_r, self.conf.eps_r)\n', (6245, 6269), False, 'from sfepy.base.base import output, get_default, assert_, try_imports, Struct\n'), ((6286, 6321), 'sfepy.base.base.get_default', 'get_default', (['i_max', 'self.conf.i_max'], {}), '(i_max, self.conf.i_max)\n', (6297, 6321), False, 'from sfepy.base.base import output, get_default, assert_, try_imports, Struct\n'), ((7586, 7613), 'sfepy.solvers.solvers.make_get_conf', 'make_get_conf', (['conf', 'kwargs'], {}), '(conf, kwargs)\n', (7599, 7613), False, 'from sfepy.solvers.solvers import make_get_conf, LinearSolver\n'), ((7631, 7662), 'sfepy.solvers.solvers.LinearSolver.process_conf', 'LinearSolver.process_conf', (['conf'], {}), '(conf)\n', (7656, 7662), False, 'from sfepy.solvers.solvers import make_get_conf, LinearSolver\n'), ((8131, 8183), 'sfepy.solvers.solvers.LinearSolver.__init__', 'LinearSolver.__init__', (['self', 'conf'], {'mg': 'None'}), '(self, conf, mg=None, **kwargs)\n', (8152, 8183), False, 'from sfepy.solvers.solvers import make_get_conf, LinearSolver\n'), ((8804, 8839), 'sfepy.base.base.get_default', 'get_default', (['eps_r', 'self.conf.eps_r'], {}), '(eps_r, self.conf.eps_r)\n', (8815, 8839), False, 'from sfepy.base.base import output, get_default, assert_, try_imports, Struct\n'), ((10187, 10214), 'sfepy.solvers.solvers.make_get_conf', 'make_get_conf', (['conf', 'kwargs'], {}), '(conf, kwargs)\n', (10200, 10214), False, 'from sfepy.solvers.solvers import make_get_conf, LinearSolver\n'), ((10232, 10263), 'sfepy.solvers.solvers.LinearSolver.process_conf', 'LinearSolver.process_conf', (['conf'], {}), '(conf)\n', (10257, 10263), False, 'from sfepy.solvers.solvers import make_get_conf, LinearSolver\n'), ((10900, 10967), 'sfepy.solvers.solvers.LinearSolver.__init__', 'LinearSolver.__init__', (['self', 'conf'], {'petsc': 'PETSc', 'pmtx': 'None'}), '(self, conf, petsc=PETSc, pmtx=None, **kwargs)\n', (10921, 10967), False, 'from sfepy.solvers.solvers import make_get_conf, LinearSolver\n'), ((11475, 11494), 'scipy.sparse.csr_matrix', 'sps.csr_matrix', (['mtx'], {}), '(mtx)\n', (11489, 11494), True, 'import scipy.sparse as sps\n'), ((11973, 12008), 'sfepy.base.base.get_default', 'get_default', (['eps_a', 'self.conf.eps_a'], {}), '(eps_a, self.conf.eps_a)\n', (11984, 12008), False, 'from sfepy.base.base import output, get_default, assert_, try_imports, Struct\n'), ((12025, 12060), 'sfepy.base.base.get_default', 'get_default', (['eps_r', 'self.conf.eps_r'], {}), '(eps_r, self.conf.eps_r)\n', (12036, 12060), False, 'from sfepy.base.base import output, get_default, assert_, try_imports, Struct\n'), ((12077, 12112), 'sfepy.base.base.get_default', 'get_default', (['i_max', 'self.conf.i_max'], {}), '(i_max, self.conf.i_max)\n', (12088, 12112), False, 'from sfepy.base.base import output, get_default, assert_, try_imports, Struct\n'), ((12728, 12857), 'sfepy.base.base.output', 'output', (["('%s(%s) convergence: %s (%s)' % (self.conf.method, self.conf.precond, ksp.\n reason, self.converged_reasons[ksp.reason]))"], {}), "('%s(%s) convergence: %s (%s)' % (self.conf.method, self.conf.precond,\n ksp.reason, self.converged_reasons[ksp.reason]))\n", (12734, 12857), False, 'from sfepy.base.base import output, get_default, assert_, try_imports, Struct\n'), ((14205, 14232), 'sfepy.solvers.solvers.make_get_conf', 'make_get_conf', (['conf', 'kwargs'], {}), '(conf, kwargs)\n', (14218, 14232), False, 'from sfepy.solvers.solvers import 
make_get_conf, LinearSolver\n'), ((14763, 14798), 'sfepy.base.base.get_default', 'get_default', (['eps_a', 'self.conf.eps_a'], {}), '(eps_a, self.conf.eps_a)\n', (14774, 14798), False, 'from sfepy.base.base import output, get_default, assert_, try_imports, Struct\n'), ((14815, 14850), 'sfepy.base.base.get_default', 'get_default', (['eps_r', 'self.conf.eps_r'], {}), '(eps_r, self.conf.eps_r)\n', (14826, 14850), False, 'from sfepy.base.base import output, get_default, assert_, try_imports, Struct\n'), ((14867, 14902), 'sfepy.base.base.get_default', 'get_default', (['i_max', 'self.conf.i_max'], {}), '(i_max, self.conf.i_max)\n', (14878, 14902), False, 'from sfepy.base.base import output, get_default, assert_, try_imports, Struct\n'), ((15309, 15327), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (15325, 15327), False, 'import os, sys, shutil, tempfile\n'), ((15612, 15661), 'os.path.join', 'os.path.join', (['base_dir', '"""solvers/petsc_worker.py"""'], {}), "(base_dir, 'solvers/petsc_worker.py')\n", (15624, 15661), False, 'import os, sys, shutil, tempfile\n'), ((15686, 15721), 'os.path.join', 'os.path.join', (['output_dir', '"""mtx.dat"""'], {}), "(output_dir, 'mtx.dat')\n", (15698, 15721), False, 'import os, sys, shutil, tempfile\n'), ((15745, 15780), 'os.path.join', 'os.path.join', (['output_dir', '"""rhs.dat"""'], {}), "(output_dir, 'rhs.dat')\n", (15757, 15780), False, 'import os, sys, shutil, tempfile\n'), ((15804, 15839), 'os.path.join', 'os.path.join', (['output_dir', '"""sol.dat"""'], {}), "(output_dir, 'sol.dat')\n", (15816, 15839), False, 'import os, sys, shutil, tempfile\n'), ((15866, 15904), 'os.path.join', 'os.path.join', (['output_dir', '"""status.txt"""'], {}), "(output_dir, 'status.txt')\n", (15878, 15904), False, 'import os, sys, shutil, tempfile\n'), ((15929, 15971), 'os.path.join', 'os.path.join', (['self.conf.log_dir', '"""sol.log"""'], {}), "(self.conf.log_dir, 'sol.log')\n", (15941, 15971), False, 'import os, sys, shutil, tempfile\n'), ((15980, 16005), 'sfepy.base.ioutils.ensure_path', 'ensure_path', (['log_filename'], {}), '(log_filename)\n', (15991, 16005), False, 'from sfepy.base.ioutils import ensure_path\n'), ((16015, 16061), 'sfepy.base.base.output', 'output', (["('storing system to %s...' % output_dir)"], {}), "('storing system to %s...' 
% output_dir)\n", (16021, 16061), False, 'from sfepy.base.base import output, get_default, assert_, try_imports, Struct\n'), ((16075, 16087), 'time.clock', 'time.clock', ([], {}), '()\n', (16085, 16087), False, 'import time\n'), ((17330, 17347), 'sfepy.base.base.assert_', 'assert_', (['(out == 0)'], {}), '(out == 0)\n', (17337, 17347), False, 'from sfepy.base.base import output, get_default, assert_, try_imports, Struct\n'), ((17357, 17386), 'sfepy.base.base.output', 'output', (['"""reading solution..."""'], {}), "('reading solution...')\n", (17363, 17386), False, 'from sfepy.base.base import output, get_default, assert_, try_imports, Struct\n'), ((17400, 17412), 'time.clock', 'time.clock', ([], {}), '()\n', (17410, 17412), False, 'import time\n'), ((17789, 17948), 'sfepy.base.base.output', 'output', (["('%s(%s, %s/proc) convergence: %s (%s)' % (self.conf.method, self.conf.\n precond, self.conf.sub_precond, reason, self.converged_reasons[reason]))"], {}), "('%s(%s, %s/proc) convergence: %s (%s)' % (self.conf.method, self.\n conf.precond, self.conf.sub_precond, reason, self.converged_reasons[\n reason]))\n", (17795, 17948), False, 'from sfepy.base.base import output, get_default, assert_, try_imports, Struct\n'), ((17980, 18017), 'sfepy.base.base.output', 'output', (["('elapsed: %.2f [s]' % elapsed)"], {}), "('elapsed: %.2f [s]' % elapsed)\n", (17986, 18017), False, 'from sfepy.base.base import output, get_default, assert_, try_imports, Struct\n'), ((18027, 18052), 'shutil.rmtree', 'shutil.rmtree', (['output_dir'], {}), '(output_dir)\n', (18040, 18052), False, 'import os, sys, shutil, tempfile\n'), ((18889, 18916), 'sfepy.solvers.solvers.make_get_conf', 'make_get_conf', (['conf', 'kwargs'], {}), '(conf, kwargs)\n', (18902, 18916), False, 'from sfepy.solvers.solvers import make_get_conf, LinearSolver\n'), ((19476, 19502), 'sfepy.discrete.state.State', 'State', (['equations.variables'], {}), '(equations.variables)\n', (19481, 19502), False, 'from sfepy.discrete.state import State\n'), ((21482, 21500), 'numpy.zeros_like', 'nm.zeros_like', (['rhs'], {}), '(rhs)\n', (21495, 21500), True, 'import numpy as nm\n'), ((22953, 22980), 'sfepy.solvers.solvers.make_get_conf', 'make_get_conf', (['conf', 'kwargs'], {}), '(conf, kwargs)\n', (22966, 22980), False, 'from sfepy.solvers.solvers import make_get_conf, LinearSolver\n'), ((23546, 23570), 'numpy.zeros_like', 'nm.zeros_like', (["mtx['12']"], {}), "(mtx['12'])\n", (23559, 23570), True, 'import numpy as nm\n'), ((23707, 23732), 'scipy.sparse.csc_matrix', 'scs.csc_matrix', (["mtx['21']"], {}), "(mtx['21'])\n", (23721, 23732), True, 'import scipy.sparse as scs\n'), ((24501, 24528), 'sfepy.solvers.solvers.make_get_conf', 'make_get_conf', (['conf', 'kwargs'], {}), '(conf, kwargs)\n', (24514, 24528), False, 'from sfepy.solvers.solvers import make_get_conf, LinearSolver\n'), ((26012, 26035), 'sfepy.base.conf.get_standard_keywords', 'get_standard_keywords', ([], {}), '()\n', (26033, 26035), False, 'from sfepy.base.conf import ProblemConf, get_standard_keywords\n'), ((26060, 26086), 'sfepy.base.base.output.get_output_prefix', 'output.get_output_prefix', ([], {}), '()\n', (26084, 26086), False, 'from sfepy.base.base import output, get_default, assert_, try_imports, Struct\n'), ((30097, 30135), 'numpy.zeros', 'nm.zeros', (['(max_indx,)'], {'dtype': 'rhs.dtype'}), '((max_indx,), dtype=rhs.dtype)\n', (30105, 30135), True, 'import numpy as nm\n'), ((30338, 30368), 'numpy.array', 'nm.array', (['[]'], {'dtype': 'mtxc.dtype'}), '([], dtype=mtxc.dtype)\n', (30346, 
30368), True, 'import numpy as nm\n'), ((30388, 30416), 'numpy.array', 'nm.array', (['[]'], {'dtype': 'nm.int32'}), '([], dtype=nm.int32)\n', (30396, 30416), True, 'import numpy as nm\n'), ((30436, 30464), 'numpy.array', 'nm.array', (['[]'], {'dtype': 'nm.int32'}), '([], dtype=nm.int32)\n', (30444, 30464), True, 'import numpy as nm\n'), ((807, 843), 'sfepy.base.base.assert_', 'assert_', (['(x0.shape[0] == rhs.shape[0])'], {}), '(x0.shape[0] == rhs.shape[0])\n', (814, 843), False, 'from sfepy.base.base import output, get_default, assert_, try_imports, Struct\n'), ((969, 981), 'time.clock', 'time.clock', ([], {}), '()\n', (979, 981), False, 'import time\n'), ((10725, 10742), 'petsc4py.init', 'petsc4py.init', (['[]'], {}), '([])\n', (10738, 10742), False, 'import petsc4py\n'), ((15477, 15513), 'os.path.join', 'os.path.join', (['output_dir', '"""sol0.dat"""'], {}), "(output_dir, 'sol0.dat')\n", (15489, 15513), False, 'import os, sys, shutil, tempfile\n'), ((20747, 20784), 'numpy.zeros', 'nm.zeros', (['(nn[ir],)'], {'dtype': 'nm.float64'}), '((nn[ir],), dtype=nm.float64)\n', (20755, 20784), True, 'import numpy as nm\n'), ((20808, 20845), 'numpy.zeros', 'nm.zeros', (['(nn[ir],)'], {'dtype': 'nm.float64'}), '((nn[ir],), dtype=nm.float64)\n', (20816, 20845), True, 'import numpy as nm\n'), ((23503, 23528), 'scipy.sparse.csc_matrix', 'scs.csc_matrix', (["mtx['11']"], {}), "(mtx['11'])\n", (23517, 23528), True, 'import scipy.sparse as scs\n'), ((23803, 23842), 'scipy.sparse.csc_matrix', 'scs.csc_matrix', (["(mtx['22'] - spC * invAB)"], {}), "(mtx['22'] - spC * invAB)\n", (23817, 23842), True, 'import scipy.sparse as scs\n'), ((23878, 23901), 'numpy.dot', 'nm.dot', (['invAB', "res['2']"], {}), "(invAB, res['2'])\n", (23884, 23901), True, 'import numpy as nm\n'), ((25539, 25567), 'numpy.max', 'nm.max', (['[last_indx, ii.stop]'], {}), '([last_indx, ii.stop])\n', (25545, 25567), True, 'import numpy as nm\n'), ((26216, 26252), 'sfepy.base.base.output.set_output_prefix', 'output.set_output_prefix', (['sub_prefix'], {}), '(sub_prefix)\n', (26240, 26252), False, 'from sfepy.base.base import output, get_default, assert_, try_imports, Struct\n'), ((26320, 26386), 'sfepy.base.conf.ProblemConf.from_file', 'ProblemConf.from_file', (['ifname', 'required', 'other'], {'define_args': 'kwargs'}), '(ifname, required, other, define_args=kwargs)\n', (26341, 26386), False, 'from sfepy.base.conf import ProblemConf, get_standard_keywords\n'), ((26447, 26492), 'sfepy.discrete.Problem.from_conf', 'Problem.from_conf', (['confi'], {'init_equations': '(True)'}), '(confi, init_equations=True)\n', (26464, 26492), False, 'from sfepy.discrete import Problem\n'), ((26511, 26541), 'sfepy.discrete.state.State', 'State', (['pbi.equations.variables'], {}), '(pbi.equations.variables)\n', (26516, 26541), False, 'from sfepy.discrete.state import State\n'), ((26752, 26791), 'sfepy.base.base.output.set_output_prefix', 'output.set_output_prefix', (['master_prefix'], {}), '(master_prefix)\n', (26776, 26791), False, 'from sfepy.base.base import output, get_default, assert_, try_imports, Struct\n'), ((28469, 28485), 'scipy.spatial.cKDTree', 'KDTree', (['coors[0]'], {}), '(coors[0])\n', (28475, 28485), True, 'from scipy.spatial import cKDTree as KDTree\n'), ((28896, 28934), 'numpy.zeros', 'nm.zeros', (['(nnd * dpn,)'], {'dtype': 'nm.int32'}), '((nnd * dpn,), dtype=nm.int32)\n', (28904, 28934), True, 'import numpy as nm\n'), ((29373, 29401), 'numpy.arange', 'nm.arange', (['gr.start', 'gr.stop'], {}), '(gr.start, gr.stop)\n', (29382, 29401), True, 
'import numpy as nm\n'), ((29450, 29478), 'numpy.arange', 'nm.arange', (['gc.start', 'gc.stop'], {}), '(gc.start, gc.stop)\n', (29459, 29478), True, 'import numpy as nm\n'), ((29634, 29657), 'numpy.hstack', 'nm.hstack', (['[Ar, idxrow]'], {}), '([Ar, idxrow])\n', (29643, 29657), True, 'import numpy as nm\n'), ((29675, 29708), 'numpy.hstack', 'nm.hstack', (['[Ac, gc[lrow.indices]]'], {}), '([Ac, gc[lrow.indices]])\n', (29684, 29708), True, 'import numpy as nm\n'), ((29726, 29752), 'numpy.hstack', 'nm.hstack', (['[Ad, lrow.data]'], {}), '([Ad, lrow.data])\n', (29735, 29752), True, 'import numpy as nm\n'), ((30050, 30077), 'numpy.max', 'nm.max', (['[max_indx, ii.stop]'], {}), '([max_indx, ii.stop])\n', (30056, 30077), True, 'import numpy as nm\n'), ((30720, 30748), 'numpy.arange', 'nm.arange', (['jv.start', 'jv.stop'], {}), '(jv.start, jv.stop)\n', (30729, 30748), True, 'import numpy as nm\n'), ((33838, 33877), 'numpy.zeros', 'nm.zeros', (['(max_indx,)'], {'dtype': 'res0.dtype'}), '((max_indx,), dtype=res0.dtype)\n', (33846, 33877), True, 'import numpy as nm\n'), ((5735, 5795), 'sfepy.base.base.output', 'output', (["('scipy solver %s does not exist!' % self.conf.method)"], {}), "('scipy solver %s does not exist!' % self.conf.method)\n", (5741, 5795), False, 'from sfepy.base.base import output, get_default, assert_, try_imports, Struct\n'), ((5810, 5836), 'sfepy.base.base.output', 'output', (['"""using cg instead"""'], {}), "('using cg instead')\n", (5816, 5836), False, 'from sfepy.base.base import output, get_default, assert_, try_imports, Struct\n'), ((8297, 8350), 'sfepy.base.base.output', 'output', (["('pyamg.%s does not exist!' % self.conf.method)"], {}), "('pyamg.%s does not exist!' % self.conf.method)\n", (8303, 8350), False, 'from sfepy.base.base import output, get_default, assert_, try_imports, Struct\n'), ((8365, 8422), 'sfepy.base.base.output', 'output', (['"""using pyamg.smoothed_aggregation_solver instead"""'], {}), "('using pyamg.smoothed_aggregation_solver instead')\n", (8371, 8422), False, 'from sfepy.base.base import output, get_default, assert_, try_imports, Struct\n'), ((10983, 10994), 'petsc4py.PETSc.KSP', 'PETSc.KSP', ([], {}), '()\n', (10992, 10994), False, 'from petsc4py import PETSc\n'), ((21066, 21110), 'numpy.zeros', 'nm.zeros', (['(nn[ir], nn[ic])'], {'dtype': 'nm.float64'}), '((nn[ir], nn[ic]), dtype=nm.float64)\n', (21074, 21110), True, 'import numpy as nm\n'), ((29128, 29154), 'numpy.where', 'nm.where', (['(eq_map_2.eq >= 0)'], {}), '(eq_map_2.eq >= 0)\n', (29136, 29154), True, 'import numpy as nm\n'), ((29577, 29606), 'numpy.ones', 'nm.ones', (['(m,)'], {'dtype': 'nm.int32'}), '((m,), dtype=nm.int32)\n', (29584, 29606), True, 'import numpy as nm\n'), ((33438, 33486), 'scipy.sparse.coo_matrix', 'sps.coo_matrix', (['(aux_data, (aux_rows, aux_cols))'], {}), '((aux_data, (aux_rows, aux_cols)))\n', (33452, 33486), True, 'import scipy.sparse as sps\n'), ((33790, 33817), 'numpy.max', 'nm.max', (['[max_indx, ii.stop]'], {}), '([max_indx, ii.stop])\n', (33796, 33817), True, 'import numpy as nm\n'), ((2968, 3015), 'sfepy.base.base.output', 'output', (['"""umfpack not available, using superlu!"""'], {}), "('umfpack not available, using superlu!')\n", (2974, 3015), False, 'from sfepy.base.base import output, get_default, assert_, try_imports, Struct\n'), ((16460, 16472), 'time.clock', 'time.clock', ([], {}), '()\n', (16470, 16472), False, 'import time\n'), ((17729, 17741), 'time.clock', 'time.clock', ([], {}), '()\n', (17739, 17741), False, 'import time\n'), ((19845, 19859), 
'numpy.isnan', 'nm.isnan', (['vec0'], {}), '(vec0)\n', (19853, 19859), True, 'import numpy as nm\n'), ((20417, 20443), 'numpy.where', 'nm.where', (['(idx0 == idxrange)'], {}), '(idx0 == idxrange)\n', (20425, 20443), True, 'import numpy as nm\n'), ((28265, 28285), 'numpy.where', 'nm.where', (['(dc > 1e-09)'], {}), '(dc > 1e-09)\n', (28273, 28285), True, 'import numpy as nm\n'), ((32807, 32835), 'numpy.arange', 'nm.arange', (['jv.start', 'jv.stop'], {}), '(jv.start, jv.stop)\n', (32816, 32835), True, 'import numpy as nm\n'), ((6892, 6905), 'numpy.sign', 'nm.sign', (['info'], {}), '(info)\n', (6899, 6905), True, 'import numpy as nm\n'), ((28166, 28188), 'numpy.max', 'nm.max', (['fcoors'], {'axis': '(0)'}), '(fcoors, axis=0)\n', (28172, 28188), True, 'import numpy as nm\n'), ((28220, 28242), 'numpy.min', 'nm.min', (['fcoors'], {'axis': '(0)'}), '(fcoors, axis=0)\n', (28226, 28242), True, 'import numpy as nm\n'), ((31044, 31074), 'numpy.ones', 'nm.ones', (['(nn,)'], {'dtype': 'nm.int32'}), '((nn,), dtype=nm.int32)\n', (31051, 31074), True, 'import numpy as nm\n'), ((33352, 33382), 'numpy.ones', 'nm.ones', (['(nn,)'], {'dtype': 'nm.int32'}), '((nn,), dtype=nm.int32)\n', (33359, 33382), True, 'import numpy as nm\n')]
|
"""add verified result to application
Revision ID: d8a156ffaeae
Revises: <KEY>
Create Date: 2022-03-30 16:00:13.195216
"""
from alembic import op
import sqlalchemy as sa
import sqlmodel
# revision identifiers, used by Alembic.
revision = "d8a156ffaeae"
down_revision = "<KEY>"
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column(
"job_applicant",
sa.Column("verified", sqlmodel.sql.sqltypes.AutoString(), nullable=True),
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column("job_applicant", "verified")
# ### end Alembic commands ###
|
[
"sqlmodel.sql.sqltypes.AutoString"
] |
[((659, 702), 'alembic.op.drop_column', 'op.drop_column', (['"""job_applicant"""', '"""verified"""'], {}), "('job_applicant', 'verified')\n", (673, 702), False, 'from alembic import op\n'), ((477, 511), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (509, 511), False, 'import sqlmodel\n')]
|
"""All database connection information is defined here"""
from sqlmodel import SQLModel, create_engine
from sqlalchemy.engine import Engine
from sqlalchemy import event
DB_FILE = "devices.db"
sqlite_url = f"sqlite:///{DB_FILE}"
connect_args = {"check_same_thread": False}
engine = create_engine(
sqlite_url, connect_args=connect_args, echo=True
) # set echo=True to view output
# From SQLAlchemy docs to allow foreign Key support
# https://docs.sqlalchemy.org/en/14/dialects/sqlite.html#foreign-key-support
@event.listens_for(Engine, "connect")
def set_sqlite_pragma(dbapi_connection, connection_record):
"""Used to enable foreign keys in sqlite"""
cursor = dbapi_connection.cursor()
cursor.execute("PRAGMA foreign_keys=ON")
cursor.close()
def create_db_and_tables():
"""Used to create and initialize DB"""
SQLModel.metadata.create_all(engine)
|
[
"sqlmodel.SQLModel.metadata.create_all",
"sqlmodel.create_engine"
] |
[((284, 347), 'sqlmodel.create_engine', 'create_engine', (['sqlite_url'], {'connect_args': 'connect_args', 'echo': '(True)'}), '(sqlite_url, connect_args=connect_args, echo=True)\n', (297, 347), False, 'from sqlmodel import SQLModel, create_engine\n'), ((518, 554), 'sqlalchemy.event.listens_for', 'event.listens_for', (['Engine', '"""connect"""'], {}), "(Engine, 'connect')\n", (535, 554), False, 'from sqlalchemy import event\n'), ((843, 879), 'sqlmodel.SQLModel.metadata.create_all', 'SQLModel.metadata.create_all', (['engine'], {}), '(engine)\n', (871, 879), False, 'from sqlmodel import SQLModel, create_engine\n')]
|
from datetime import datetime
from typing import List, Optional
from sqlmodel import Column, DateTime, Field, Relationship, SQLModel
class ObserverBase(SQLModel):
phone: str
email: str
class Config:
anystr_strip_whitespace = True
anystr_lower = True
class Observer(ObserverBase, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
measurements: List["Measurement"] = Relationship(
back_populates="observer", sa_relationship_kwargs={"cascade": "all,delete"}
)
class ObserverCreate(ObserverBase):
pass
class ObserverRead(ObserverBase):
id: int
class MeasurementBase(SQLModel):
temperaturescale: str
temperature: int
organizationid: int
siteid: int
date_time: Optional[datetime] = Field(
sa_column=Column(DateTime, default=datetime.utcnow)
)
observer_id: Optional[int] = Field(default=None, foreign_key="observer.id")
class Config:
anystr_strip_whitespace = True
anystr_lower = True
class Measurement(MeasurementBase, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
observer: Optional[Observer] = Relationship(back_populates="measurements")
class MeasurementCreate(MeasurementBase):
pass
class MeasurementRead(MeasurementBase):
id: int
|
[
"sqlmodel.Field",
"sqlmodel.Column",
"sqlmodel.Relationship"
] |
[((350, 387), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (355, 387), False, 'from sqlmodel import Column, DateTime, Field, Relationship, SQLModel\n'), ((428, 521), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""observer"""', 'sa_relationship_kwargs': "{'cascade': 'all,delete'}"}), "(back_populates='observer', sa_relationship_kwargs={'cascade':\n 'all,delete'})\n", (440, 521), False, 'from sqlmodel import Column, DateTime, Field, Relationship, SQLModel\n'), ((891, 937), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'foreign_key': '"""observer.id"""'}), "(default=None, foreign_key='observer.id')\n", (896, 937), False, 'from sqlmodel import Column, DateTime, Field, Relationship, SQLModel\n'), ((1098, 1135), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (1103, 1135), False, 'from sqlmodel import Column, DateTime, Field, Relationship, SQLModel\n'), ((1171, 1214), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""measurements"""'}), "(back_populates='measurements')\n", (1183, 1214), False, 'from sqlmodel import Column, DateTime, Field, Relationship, SQLModel\n'), ((810, 851), 'sqlmodel.Column', 'Column', (['DateTime'], {'default': 'datetime.utcnow'}), '(DateTime, default=datetime.utcnow)\n', (816, 851), False, 'from sqlmodel import Column, DateTime, Field, Relationship, SQLModel\n')]
|
from sqlmodel import SQLModel, Field
from typing import Optional, List
from pydantic import validator
# --- model ---
class User(SQLModel, table=True):
id: Optional[int] = Field(None, primary_key=True)
username: str
password: str
# --- serializers ---
class UserOut(SQLModel):
username: str
class UserIn(SQLModel):
username: str
password: str
confirm_password: str
# @validator(confirm_password)
# def validate_password(cls, v, values):
# if v and v != values['password']:
# raise ValueError("aaaa")
# return v
UserList = List[UserOut]
|
[
"sqlmodel.Field"
] |
[((178, 207), 'sqlmodel.Field', 'Field', (['None'], {'primary_key': '(True)'}), '(None, primary_key=True)\n', (183, 207), False, 'from sqlmodel import SQLModel, Field\n')]
|
#%%
import numpy as np
from sfepy.discrete.fem import Mesh, FEDomain, Field
mesh_path = 'C:/Users/lzy71/miniconda3/envs/lego/lib/site-packages/sfepy/meshes/2d/rectangle_tri.mesh'
#%%
mesh = Mesh.from_file(mesh_path)
domain = FEDomain('domain', mesh)
min_x, max_x = domain.get_mesh_bounding_box()[:, 0]
eps = 1e-8 * (max_x - min_x)
omega = domain.create_region('Omega', 'all')
# %%
from sfepy.discrete import (
FieldVariable, Material, Integral, Function,
Equation, Equations, Problem
)
field = Field.from_args('fu', np.float64, 'vector', omega, approx_order=2)
u = FieldVariable('u', 'unknown', field)
v = FieldVariable('v', 'test', field, primary_var_name='u')
# %%
from sfepy.mechanics.matcoefs import stiffness_from_lame
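# Isotropic elastic stiffness built from the Lamé parameters; 'f' defines a constant volume force.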
m = Material('m', D=stiffness_from_lame(dim=2, lam=1.0, mu=1.0))
f = Material('f', val=[[0.02], [0.01]])
integral = Integral('i', order=3)
# %%
from sfepy.terms import Term
t1 = Term.new('dw_lin_elastic(m.D, v, u)',
integral, omega, m=m, v=v, u=u)
t2 = Term.new('dw_volume_lvf(f.val, v)',
integral, omega, f=f, v=v)
eq = Equation('balance', t1 + t2)
eqs = Equations([eq])
#%%
pb = Problem('elasticity', equations=eqs)
|
[
"sfepy.discrete.Material",
"sfepy.discrete.FieldVariable",
"sfepy.discrete.fem.Mesh.from_file",
"sfepy.terms.Term.new",
"sfepy.discrete.Equations",
"sfepy.discrete.Integral",
"sfepy.mechanics.matcoefs.stiffness_from_lame",
"sfepy.discrete.Equation",
"sfepy.discrete.fem.Field.from_args",
"sfepy.discrete.fem.FEDomain",
"sfepy.discrete.Problem"
] |
[((192, 217), 'sfepy.discrete.fem.Mesh.from_file', 'Mesh.from_file', (['mesh_path'], {}), '(mesh_path)\n', (206, 217), False, 'from sfepy.discrete.fem import Mesh, FEDomain, Field\n'), ((227, 251), 'sfepy.discrete.fem.FEDomain', 'FEDomain', (['"""domain"""', 'mesh'], {}), "('domain', mesh)\n", (235, 251), False, 'from sfepy.discrete.fem import Mesh, FEDomain, Field\n'), ((511, 577), 'sfepy.discrete.fem.Field.from_args', 'Field.from_args', (['"""fu"""', 'np.float64', '"""vector"""', 'omega'], {'approx_order': '(2)'}), "('fu', np.float64, 'vector', omega, approx_order=2)\n", (526, 577), False, 'from sfepy.discrete.fem import Mesh, FEDomain, Field\n'), ((582, 618), 'sfepy.discrete.FieldVariable', 'FieldVariable', (['"""u"""', '"""unknown"""', 'field'], {}), "('u', 'unknown', field)\n", (595, 618), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem\n'), ((623, 678), 'sfepy.discrete.FieldVariable', 'FieldVariable', (['"""v"""', '"""test"""', 'field'], {'primary_var_name': '"""u"""'}), "('v', 'test', field, primary_var_name='u')\n", (636, 678), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem\n'), ((812, 847), 'sfepy.discrete.Material', 'Material', (['"""f"""'], {'val': '[[0.02], [0.01]]'}), "('f', val=[[0.02], [0.01]])\n", (820, 847), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem\n'), ((860, 882), 'sfepy.discrete.Integral', 'Integral', (['"""i"""'], {'order': '(3)'}), "('i', order=3)\n", (868, 882), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem\n'), ((924, 993), 'sfepy.terms.Term.new', 'Term.new', (['"""dw_lin_elastic(m.D, v, u)"""', 'integral', 'omega'], {'m': 'm', 'v': 'v', 'u': 'u'}), "('dw_lin_elastic(m.D, v, u)', integral, omega, m=m, v=v, u=u)\n", (932, 993), False, 'from sfepy.terms import Term\n'), ((1005, 1067), 'sfepy.terms.Term.new', 'Term.new', (['"""dw_volume_lvf(f.val, v)"""', 'integral', 'omega'], {'f': 'f', 'v': 'v'}), "('dw_volume_lvf(f.val, v)', integral, omega, f=f, v=v)\n", (1013, 1067), False, 'from sfepy.terms import Term\n'), ((1079, 1107), 'sfepy.discrete.Equation', 'Equation', (['"""balance"""', '(t1 + t2)'], {}), "('balance', t1 + t2)\n", (1087, 1107), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem\n'), ((1114, 1129), 'sfepy.discrete.Equations', 'Equations', (['[eq]'], {}), '([eq])\n', (1123, 1129), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem\n'), ((1140, 1176), 'sfepy.discrete.Problem', 'Problem', (['"""elasticity"""'], {'equations': 'eqs'}), "('elasticity', equations=eqs)\n", (1147, 1176), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem\n'), ((763, 806), 'sfepy.mechanics.matcoefs.stiffness_from_lame', 'stiffness_from_lame', ([], {'dim': '(2)', 'lam': '(1.0)', 'mu': '(1.0)'}), '(dim=2, lam=1.0, mu=1.0)\n', (782, 806), False, 'from sfepy.mechanics.matcoefs import stiffness_from_lame\n')]
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine.functional as F
from megengine import tensor
def test_cross_entropy_with_logits():
data = tensor([[0, 50], [0, -150]]).astype(np.float32)
label = tensor([1, 0]).astype(np.int32)
loss = F.nn.cross_entropy(data, label)
np.testing.assert_allclose(loss.numpy(), 0.0)
label = tensor([0, 1]).astype(np.int32)
loss = F.nn.cross_entropy(data, label)
np.testing.assert_allclose(loss.numpy(), 100)
label = np.array([1, 0])
loss = F.nn.cross_entropy(data, label)
np.testing.assert_allclose(loss.numpy(), 0.0)
def test_cross_entropy():
def softmax(x):
x = np.exp(x)
x /= x.sum(1, keepdims=True)
return x
def ref(x, y):
return np.mean([-np.log(x[i, y[i]]) for i in range(len(y))])
x = (np.random.rand(5, 10) - 0.5) * 4
y = np.random.randint(10, size=(5,))
for i in range(len(x)):
x[i, y[i]] += np.random.rand() * 2
x = softmax(x)
l_ref = ref(x, y)
l = F.nn.cross_entropy(tensor(x, "float32"), tensor(y, "int32"), with_logits=False)
np.testing.assert_allclose(l.numpy(), l_ref)
def test_cross_entropy_reduction():
logits = np.random.randn(16, 10)
label = np.random.randint(10, size=[16])
logits = tensor(logits, dtype="float32")
label = tensor(label, dtype="int32")
perm = np.random.permutation(16)
logits_perm = tensor(logits[perm], dtype="float32")
label_perm = tensor(label[perm], dtype="int32")
loss = F.nn.cross_entropy(logits, label, reduction="none")
loss_perm = F.nn.cross_entropy(logits_perm, label_perm, reduction="none")
np.testing.assert_allclose(loss.numpy()[perm], loss_perm.numpy())
loss_sum = F.nn.cross_entropy(logits, label, reduction="sum")
np.testing.assert_allclose(loss.numpy().sum(), loss_sum.numpy(), rtol=2e-7)
loss_mean = F.nn.cross_entropy(logits, label, reduction="mean")
np.testing.assert_allclose(loss_mean.numpy(), loss_sum.numpy() / 16)
loss_ls = F.nn.cross_entropy(logits, label, reduction="mean", label_smooth=0.1)
loss_ls_none_reduce = F.nn.cross_entropy(
logits, label, reduction="none", label_smooth=0.1
)
np.testing.assert_allclose(
loss_ls.numpy(), loss_ls_none_reduce.numpy().mean(), rtol=2e-7
)
with pytest.raises(ValueError):
F.nn.cross_entropy(logits, label, reduction="MEAN")
with pytest.raises(ValueError):
F.nn.cross_entropy(logits, label, reduction="max")
def ctc_nll_naive_npy(
pred,
pred_lengths,
label,
label_lengths,
blank=0,
reduction="mean",
time_major=False,
):
"""naive :func:`ctc_nll` using numpy arrays. Used for testing and helping
our user to understand how CTC works. Only ``LABEL_COMPACT`` mode is
supported."""
pred = np.asarray(pred, dtype=np.float32)
pred_lengths = np.asarray(pred_lengths, dtype=np.int8)
label = np.asarray(label, dtype=np.int32)
label_lengths = np.asarray(label_lengths, dtype=np.int32)
if time_major:
pred = np.transpose(pred, (1, 0, 2))
# pred in (N, T, P) format
batch_size, time_len, nr_class = pred.shape
assert pred_lengths.shape == (batch_size,) and pred_lengths.max() <= pred.shape[1]
assert label_lengths.shape == (batch_size,)
assert label.shape == (label_lengths.sum(),) and label.max() < nr_class
ret = np.empty((batch_size,), dtype=np.float32)
label_start = 0
for i in range(batch_size):
label_end = label_start + label_lengths[i]
ret[i] = _ctc_npy_single_seq(
pred[i][: pred_lengths[i]], label[label_start:label_end], blank
)
label_start = label_end
if reduction == "mean":
return (ret / label_lengths).mean()
elif reduction == "sum":
return ret.sum()
elif reduction == "none":
return ret
else:
raise ValueError("{} is not a valid value for reduction".format(reduction))
def _ctc_npy_single_seq(pred, label, blank):
def safelog(x):
eps = np.finfo(x.dtype).tiny
return np.log(np.maximum(x, eps))
def log_sum_exp(x, y):
x, y = np.maximum(x, y), np.minimum(x, y)
return x + np.log1p(np.exp(y - x))
assert np.abs(pred.sum(axis=1) - 1).max() <= 1e-3
len_pred, alphabet_size = pred.shape
(len_label,) = label.shape
len_ex_label = len_label * 2 + 1
ex_label = (np.zeros(len_ex_label)).astype(np.int32) + blank
ex_label[1::2] = label
prob = np.zeros(len_ex_label, dtype=np.float32)
prob[0] = pred[0][ex_label[0]]
prob[1] = pred[0][ex_label[1]]
prob = safelog(prob) # compute on log scale
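    # Skip transitions (i-2 -> i) are allowed only where the extended label
    # differs from the one two positions back (i.e. not across blanks or repeats).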
ex_label_pmask = ex_label[2:] != ex_label[:-2]
for t in range(1, len_pred):
# enter loop: prob[i] = log(p(pred[:t+1], label[:i+1]))
new_prob = prob.copy()
new_prob[1:] = log_sum_exp(new_prob[1:], prob[:-1])
new_prob[2:] = (
new_prob[2:] * (1 - ex_label_pmask)
+ log_sum_exp(new_prob[2:], prob[:-2]) * ex_label_pmask
)
new_prob += safelog(pred[t, ex_label])
prob = new_prob
return -log_sum_exp(prob[-1], prob[-2])
def test_ctc_loss():
def test_func(T, C, N):
input = np.random.randn(T, N, C)
input = F.softmax(tensor(input), axis=-1).numpy()
input_lengths = np.ones(N, dtype=np.int32) * T
target_lengths = np.random.randint(low=1, high=T + 1, size=(N,), dtype=np.int32)
target = np.random.randint(
low=1, high=C, size=(sum(target_lengths)), dtype=np.int32
)
input_mge = tensor(input)
input_lengths_mge = tensor(input_lengths)
target_mge = tensor(target)
target_lengths_mge = tensor(target_lengths)
blank = np.random.randint(C)
for method in ["mean", "sum", "none"]:
np_out = ctc_nll_naive_npy(
input,
input_lengths,
target,
target_lengths,
blank=blank,
reduction=method,
time_major=True,
)
mge_out = F.nn.ctc_loss(
input_mge,
input_lengths_mge,
target_mge,
target_lengths_mge,
blank=blank,
reduction=method,
)
np.testing.assert_allclose(mge_out.numpy(), np_out, rtol=2e-6)
cases = [[1, 2, 1], [100, 50, 200], [100, 5, 1]]
for case in cases:
test_func(*case)
|
[
"megengine.functional.nn.cross_entropy",
"megengine.functional.nn.ctc_loss",
"megengine.tensor"
] |
[((627, 658), 'megengine.functional.nn.cross_entropy', 'F.nn.cross_entropy', (['data', 'label'], {}), '(data, label)\n', (645, 658), True, 'import megengine.functional as F\n'), ((764, 795), 'megengine.functional.nn.cross_entropy', 'F.nn.cross_entropy', (['data', 'label'], {}), '(data, label)\n', (782, 795), True, 'import megengine.functional as F\n'), ((859, 875), 'numpy.array', 'np.array', (['[1, 0]'], {}), '([1, 0])\n', (867, 875), True, 'import numpy as np\n'), ((887, 918), 'megengine.functional.nn.cross_entropy', 'F.nn.cross_entropy', (['data', 'label'], {}), '(data, label)\n', (905, 918), True, 'import megengine.functional as F\n'), ((1233, 1265), 'numpy.random.randint', 'np.random.randint', (['(10)'], {'size': '(5,)'}), '(10, size=(5,))\n', (1250, 1265), True, 'import numpy as np\n'), ((1566, 1589), 'numpy.random.randn', 'np.random.randn', (['(16)', '(10)'], {}), '(16, 10)\n', (1581, 1589), True, 'import numpy as np\n'), ((1602, 1634), 'numpy.random.randint', 'np.random.randint', (['(10)'], {'size': '[16]'}), '(10, size=[16])\n', (1619, 1634), True, 'import numpy as np\n'), ((1648, 1679), 'megengine.tensor', 'tensor', (['logits'], {'dtype': '"""float32"""'}), "(logits, dtype='float32')\n", (1654, 1679), False, 'from megengine import tensor\n'), ((1692, 1720), 'megengine.tensor', 'tensor', (['label'], {'dtype': '"""int32"""'}), "(label, dtype='int32')\n", (1698, 1720), False, 'from megengine import tensor\n'), ((1733, 1758), 'numpy.random.permutation', 'np.random.permutation', (['(16)'], {}), '(16)\n', (1754, 1758), True, 'import numpy as np\n'), ((1777, 1814), 'megengine.tensor', 'tensor', (['logits[perm]'], {'dtype': '"""float32"""'}), "(logits[perm], dtype='float32')\n", (1783, 1814), False, 'from megengine import tensor\n'), ((1832, 1866), 'megengine.tensor', 'tensor', (['label[perm]'], {'dtype': '"""int32"""'}), "(label[perm], dtype='int32')\n", (1838, 1866), False, 'from megengine import tensor\n'), ((1879, 1930), 'megengine.functional.nn.cross_entropy', 'F.nn.cross_entropy', (['logits', 'label'], {'reduction': '"""none"""'}), "(logits, label, reduction='none')\n", (1897, 1930), True, 'import megengine.functional as F\n'), ((1947, 2008), 'megengine.functional.nn.cross_entropy', 'F.nn.cross_entropy', (['logits_perm', 'label_perm'], {'reduction': '"""none"""'}), "(logits_perm, label_perm, reduction='none')\n", (1965, 2008), True, 'import megengine.functional as F\n'), ((2095, 2145), 'megengine.functional.nn.cross_entropy', 'F.nn.cross_entropy', (['logits', 'label'], {'reduction': '"""sum"""'}), "(logits, label, reduction='sum')\n", (2113, 2145), True, 'import megengine.functional as F\n'), ((2243, 2294), 'megengine.functional.nn.cross_entropy', 'F.nn.cross_entropy', (['logits', 'label'], {'reduction': '"""mean"""'}), "(logits, label, reduction='mean')\n", (2261, 2294), True, 'import megengine.functional as F\n'), ((2383, 2452), 'megengine.functional.nn.cross_entropy', 'F.nn.cross_entropy', (['logits', 'label'], {'reduction': '"""mean"""', 'label_smooth': '(0.1)'}), "(logits, label, reduction='mean', label_smooth=0.1)\n", (2401, 2452), True, 'import megengine.functional as F\n'), ((2479, 2548), 'megengine.functional.nn.cross_entropy', 'F.nn.cross_entropy', (['logits', 'label'], {'reduction': '"""none"""', 'label_smooth': '(0.1)'}), "(logits, label, reduction='none', label_smooth=0.1)\n", (2497, 2548), True, 'import megengine.functional as F\n'), ((3189, 3223), 'numpy.asarray', 'np.asarray', (['pred'], {'dtype': 'np.float32'}), '(pred, dtype=np.float32)\n', (3199, 3223), True, 
'import numpy as np\n'), ((3243, 3282), 'numpy.asarray', 'np.asarray', (['pred_lengths'], {'dtype': 'np.int8'}), '(pred_lengths, dtype=np.int8)\n', (3253, 3282), True, 'import numpy as np\n'), ((3295, 3328), 'numpy.asarray', 'np.asarray', (['label'], {'dtype': 'np.int32'}), '(label, dtype=np.int32)\n', (3305, 3328), True, 'import numpy as np\n'), ((3349, 3390), 'numpy.asarray', 'np.asarray', (['label_lengths'], {'dtype': 'np.int32'}), '(label_lengths, dtype=np.int32)\n', (3359, 3390), True, 'import numpy as np\n'), ((3758, 3799), 'numpy.empty', 'np.empty', (['(batch_size,)'], {'dtype': 'np.float32'}), '((batch_size,), dtype=np.float32)\n', (3766, 3799), True, 'import numpy as np\n'), ((4865, 4905), 'numpy.zeros', 'np.zeros', (['len_ex_label'], {'dtype': 'np.float32'}), '(len_ex_label, dtype=np.float32)\n', (4873, 4905), True, 'import numpy as np\n'), ((1029, 1038), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (1035, 1038), True, 'import numpy as np\n'), ((1405, 1425), 'megengine.tensor', 'tensor', (['x', '"""float32"""'], {}), "(x, 'float32')\n", (1411, 1425), False, 'from megengine import tensor\n'), ((1427, 1445), 'megengine.tensor', 'tensor', (['y', '"""int32"""'], {}), "(y, 'int32')\n", (1433, 1445), False, 'from megengine import tensor\n'), ((2682, 2707), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2695, 2707), False, 'import pytest\n'), ((2717, 2768), 'megengine.functional.nn.cross_entropy', 'F.nn.cross_entropy', (['logits', 'label'], {'reduction': '"""MEAN"""'}), "(logits, label, reduction='MEAN')\n", (2735, 2768), True, 'import megengine.functional as F\n'), ((2779, 2804), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2792, 2804), False, 'import pytest\n'), ((2814, 2864), 'megengine.functional.nn.cross_entropy', 'F.nn.cross_entropy', (['logits', 'label'], {'reduction': '"""max"""'}), "(logits, label, reduction='max')\n", (2832, 2864), True, 'import megengine.functional as F\n'), ((3426, 3455), 'numpy.transpose', 'np.transpose', (['pred', '(1, 0, 2)'], {}), '(pred, (1, 0, 2))\n', (3438, 3455), True, 'import numpy as np\n'), ((5599, 5623), 'numpy.random.randn', 'np.random.randn', (['T', 'N', 'C'], {}), '(T, N, C)\n', (5614, 5623), True, 'import numpy as np\n'), ((5762, 5825), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(1)', 'high': '(T + 1)', 'size': '(N,)', 'dtype': 'np.int32'}), '(low=1, high=T + 1, size=(N,), dtype=np.int32)\n', (5779, 5825), True, 'import numpy as np\n'), ((5963, 5976), 'megengine.tensor', 'tensor', (['input'], {}), '(input)\n', (5969, 5976), False, 'from megengine import tensor\n'), ((6005, 6026), 'megengine.tensor', 'tensor', (['input_lengths'], {}), '(input_lengths)\n', (6011, 6026), False, 'from megengine import tensor\n'), ((6049, 6063), 'megengine.tensor', 'tensor', (['target'], {}), '(target)\n', (6055, 6063), False, 'from megengine import tensor\n'), ((6093, 6115), 'megengine.tensor', 'tensor', (['target_lengths'], {}), '(target_lengths)\n', (6099, 6115), False, 'from megengine import tensor\n'), ((6133, 6153), 'numpy.random.randint', 'np.random.randint', (['C'], {}), '(C)\n', (6150, 6153), True, 'import numpy as np\n'), ((524, 552), 'megengine.tensor', 'tensor', (['[[0, 50], [0, -150]]'], {}), '([[0, 50], [0, -150]])\n', (530, 552), False, 'from megengine import tensor\n'), ((584, 598), 'megengine.tensor', 'tensor', (['[1, 0]'], {}), '([1, 0])\n', (590, 598), False, 'from megengine import tensor\n'), ((721, 735), 'megengine.tensor', 'tensor', (['[0, 1]'], {}), '([0, 1])\n', (727, 
735), False, 'from megengine import tensor\n'), ((1192, 1213), 'numpy.random.rand', 'np.random.rand', (['(5)', '(10)'], {}), '(5, 10)\n', (1206, 1213), True, 'import numpy as np\n'), ((1316, 1332), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (1330, 1332), True, 'import numpy as np\n'), ((4410, 4427), 'numpy.finfo', 'np.finfo', (['x.dtype'], {}), '(x.dtype)\n', (4418, 4427), True, 'import numpy as np\n'), ((4455, 4473), 'numpy.maximum', 'np.maximum', (['x', 'eps'], {}), '(x, eps)\n', (4465, 4473), True, 'import numpy as np\n'), ((4518, 4534), 'numpy.maximum', 'np.maximum', (['x', 'y'], {}), '(x, y)\n', (4528, 4534), True, 'import numpy as np\n'), ((4536, 4552), 'numpy.minimum', 'np.minimum', (['x', 'y'], {}), '(x, y)\n', (4546, 4552), True, 'import numpy as np\n'), ((5706, 5732), 'numpy.ones', 'np.ones', (['N'], {'dtype': 'np.int32'}), '(N, dtype=np.int32)\n', (5713, 5732), True, 'import numpy as np\n'), ((6483, 6593), 'megengine.functional.nn.ctc_loss', 'F.nn.ctc_loss', (['input_mge', 'input_lengths_mge', 'target_mge', 'target_lengths_mge'], {'blank': 'blank', 'reduction': 'method'}), '(input_mge, input_lengths_mge, target_mge, target_lengths_mge,\n blank=blank, reduction=method)\n', (6496, 6593), True, 'import megengine.functional as F\n'), ((4581, 4594), 'numpy.exp', 'np.exp', (['(y - x)'], {}), '(y - x)\n', (4587, 4594), True, 'import numpy as np\n'), ((4777, 4799), 'numpy.zeros', 'np.zeros', (['len_ex_label'], {}), '(len_ex_label)\n', (4785, 4799), True, 'import numpy as np\n'), ((1138, 1156), 'numpy.log', 'np.log', (['x[i, y[i]]'], {}), '(x[i, y[i]])\n', (1144, 1156), True, 'import numpy as np\n'), ((5650, 5663), 'megengine.tensor', 'tensor', (['input'], {}), '(input)\n', (5656, 5663), False, 'from megengine import tensor\n')]
|
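The sample above checks that F.nn.cross_entropy is permutation-consistent and that its "sum" and "mean" reductions agree with the per-element losses. As a point of reference, here is a minimal NumPy sketch of the same un-smoothed softmax cross-entropy and its reduction modes; the label-smoothing branch is intentionally omitted, and the function name and tolerances are illustrative, not taken from MegEngine.

import numpy as np

def np_cross_entropy(logits, label, reduction="mean"):
    # per-sample loss: logsumexp(logits_i) - logits_i[label_i]
    m = logits.max(axis=1, keepdims=True)
    lse = np.log(np.exp(logits - m).sum(axis=1)) + m.squeeze(1)
    per_sample = lse - logits[np.arange(len(label)), label]
    if reduction == "none":
        return per_sample
    if reduction == "sum":
        return per_sample.sum()
    if reduction == "mean":
        return per_sample.mean()
    raise ValueError("{} is not a valid value for reduction".format(reduction))

logits = np.random.randn(16, 10).astype("float32")
label = np.random.randint(10, size=16)
perm = np.random.permutation(16)
# per-sample losses permute with the data, mirroring the MegEngine test above
assert np.allclose(np_cross_entropy(logits, label, "none")[perm],
                   np_cross_entropy(logits[perm], label[perm], "none"))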
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
from megengine.core import tensor
def test_mge_81():
np.random.seed(0)
N, D = 3, 4
x = mge.Parameter(value=np.random.normal(size=(N, D)).astype(np.float32))
y = mge.Parameter(value=np.random.normal(size=(N, D)).astype(np.float32))
z = mge.Parameter(value=np.random.normal(size=(N, D)).astype(np.float32))
a = x * y
b = a + z
c = F.sum(b)
grad_x = F.grad(c, x, use_virtual_grad=False)
grad_y = F.grad(c, y, use_virtual_grad=False)
grad_z = F.grad(c, z, use_virtual_grad=False)
print(grad_x.numpy())
print(grad_y.numpy())
print(grad_z.numpy())
m = M.BatchNorm2d(4)
input = tensor(np.zeros((64, 4, 32, 32), dtype=np.float32))
_ = m(input)
m = M.BatchNorm2d(4, affine=False)
_ = m(input)
|
[
"megengine.functional.sum",
"megengine.functional.grad",
"megengine.module.BatchNorm2d"
] |
[((542, 559), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (556, 559), True, 'import numpy as np\n'), ((846, 854), 'megengine.functional.sum', 'F.sum', (['b'], {}), '(b)\n', (851, 854), True, 'import megengine.functional as F\n'), ((868, 904), 'megengine.functional.grad', 'F.grad', (['c', 'x'], {'use_virtual_grad': '(False)'}), '(c, x, use_virtual_grad=False)\n', (874, 904), True, 'import megengine.functional as F\n'), ((918, 954), 'megengine.functional.grad', 'F.grad', (['c', 'y'], {'use_virtual_grad': '(False)'}), '(c, y, use_virtual_grad=False)\n', (924, 954), True, 'import megengine.functional as F\n'), ((968, 1004), 'megengine.functional.grad', 'F.grad', (['c', 'z'], {'use_virtual_grad': '(False)'}), '(c, z, use_virtual_grad=False)\n', (974, 1004), True, 'import megengine.functional as F\n'), ((1091, 1107), 'megengine.module.BatchNorm2d', 'M.BatchNorm2d', (['(4)'], {}), '(4)\n', (1104, 1107), True, 'import megengine.module as M\n'), ((1197, 1227), 'megengine.module.BatchNorm2d', 'M.BatchNorm2d', (['(4)'], {'affine': '(False)'}), '(4, affine=False)\n', (1210, 1227), True, 'import megengine.module as M\n'), ((1127, 1170), 'numpy.zeros', 'np.zeros', (['(64, 4, 32, 32)'], {'dtype': 'np.float32'}), '((64, 4, 32, 32), dtype=np.float32)\n', (1135, 1170), True, 'import numpy as np\n'), ((604, 633), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(N, D)'}), '(size=(N, D))\n', (620, 633), True, 'import numpy as np\n'), ((682, 711), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(N, D)'}), '(size=(N, D))\n', (698, 711), True, 'import numpy as np\n'), ((760, 789), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(N, D)'}), '(size=(N, D))\n', (776, 789), True, 'import numpy as np\n')]
|
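test_mge_81 above exercises the legacy F.grad(..., use_virtual_grad=False) interface. For c = sum(x * y + z) the expected gradients are simply dc/dx = y, dc/dy = x and dc/dz = 1. Below is a hedged sketch of the same check written against the GradManager-based autodiff API used in the save/load sample later in this collection; the exact call pattern is assumed from that sample, not verified against every MegEngine release.

import numpy as np
import megengine as mge
import megengine.autodiff as ad
import megengine.functional as F

N, D = 3, 4
x = mge.Parameter(np.random.normal(size=(N, D)).astype(np.float32))
y = mge.Parameter(np.random.normal(size=(N, D)).astype(np.float32))
z = mge.Parameter(np.random.normal(size=(N, D)).astype(np.float32))

gm = ad.GradManager().attach([x, y, z])
with gm:
    c = F.sum(x * y + z)
    gm.backward(c)  # populates .grad on the attached parameters

np.testing.assert_allclose(x.grad.numpy(), y.numpy(), rtol=1e-6)
np.testing.assert_allclose(y.grad.numpy(), x.numpy(), rtol=1e-6)
np.testing.assert_allclose(z.grad.numpy(), np.ones((N, D), np.float32))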
import numpy as np
import megengine as mge
import megengine.autodiff as ad
import megengine.optimizer as optimizer
from megengine import Parameter, tensor
from megengine.core.tensor.raw_tensor import RawTensor
from megengine.module import Module
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter(1.23, dtype=np.float32)
def forward(self, x):
x = x * self.a
return x
def test_save_load():
net = Simple()
optim = optimizer.SGD(net.parameters(), lr=1.0, momentum=0.9)
optim.clear_grad()
gm = ad.GradManager().attach(net.parameters())
data = tensor([2.34])
with gm:
loss = net(data)
gm.backward(loss)
optim.step()
model_name = "simple.pkl"
print("save to {}".format(model_name))
mge.save(
{
"name": "simple",
"state_dict": net.state_dict(),
"opt_state": optim.state_dict(),
},
model_name,
)
# Load param to cpu
checkpoint = mge.load(model_name, map_location="cpu0")
device_save = mge.get_default_device()
mge.set_default_device("cpu0")
net = Simple()
net.load_state_dict(checkpoint["state_dict"])
optim = optimizer.SGD(net.parameters(), lr=1.0, momentum=0.9)
optim.load_state_dict(checkpoint["opt_state"])
print("load done")
with gm:
loss = net([1.23])
gm.backward(loss)
optim.step()
# Restore device
mge.set_default_device(device_save)
|
[
"megengine.get_default_device",
"megengine.load",
"megengine.tensor",
"megengine.set_default_device",
"megengine.autodiff.GradManager",
"megengine.Parameter"
] |
[((636, 650), 'megengine.tensor', 'tensor', (['[2.34]'], {}), '([2.34])\n', (642, 650), False, 'from megengine import Parameter, tensor\n'), ((1031, 1072), 'megengine.load', 'mge.load', (['model_name'], {'map_location': '"""cpu0"""'}), "(model_name, map_location='cpu0')\n", (1039, 1072), True, 'import megengine as mge\n'), ((1091, 1115), 'megengine.get_default_device', 'mge.get_default_device', ([], {}), '()\n', (1113, 1115), True, 'import megengine as mge\n'), ((1120, 1150), 'megengine.set_default_device', 'mge.set_default_device', (['"""cpu0"""'], {}), "('cpu0')\n", (1142, 1150), True, 'import megengine as mge\n'), ((1470, 1505), 'megengine.set_default_device', 'mge.set_default_device', (['device_save'], {}), '(device_save)\n', (1492, 1505), True, 'import megengine as mge\n'), ((339, 372), 'megengine.Parameter', 'Parameter', (['(1.23)'], {'dtype': 'np.float32'}), '(1.23, dtype=np.float32)\n', (348, 372), False, 'from megengine import Parameter, tensor\n'), ((582, 598), 'megengine.autodiff.GradManager', 'ad.GradManager', ([], {}), '()\n', (596, 598), True, 'import megengine.autodiff as ad\n')]
|
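The round trip above can be reduced to a state-dict equality check: saving and reloading should leave every parameter value untouched. A small sketch, assuming the Simple module from the sample is in scope and that state_dict() values behave like NumPy arrays:

import numpy as np
import megengine as mge

net = Simple()
before = {k: np.array(v) for k, v in net.state_dict().items()}

mge.save({"state_dict": net.state_dict()}, "roundtrip.pkl")

restored = Simple()
restored.load_state_dict(mge.load("roundtrip.pkl", map_location="cpu0")["state_dict"])
for k, v in restored.state_dict().items():
    np.testing.assert_allclose(np.array(v), before[k])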
from datetime import datetime
from os import getenv
from typing import Optional
from fastapi import FastAPI
import strawberry
from strawberry.asgi import GraphQL
from sqlmodel import create_engine, SQLModel
from api.models import PostQL
from api.crud import create_post, get_posts
ENGINE = create_engine(getenv("DATABASE_URL"))
@strawberry.type
class Query:
@strawberry.field
def post(self, slug: Optional[str] = None) -> list[PostQL]:
return get_posts(ENGINE, slug)
@strawberry.type
class Mutation:
@strawberry.field
def add_post(self, slug: str, title: str, content: str, published: bool) -> PostQL:
return create_post(
ENGINE,
PostQL(
slug=slug,
title=title,
content=content,
published=published,
published_at=datetime.now(),
),
)
schema = strawberry.Schema(query=Query, mutation=Mutation)
SQLModel.metadata.create_all(ENGINE)
graphql_app = GraphQL(schema)
app = FastAPI()
app.add_route("/graphql", graphql_app)
app.add_websocket_route("/graphql", graphql_app)
|
[
"sqlmodel.SQLModel.metadata.create_all"
] |
[((908, 957), 'strawberry.Schema', 'strawberry.Schema', ([], {'query': 'Query', 'mutation': 'Mutation'}), '(query=Query, mutation=Mutation)\n', (925, 957), False, 'import strawberry\n'), ((959, 995), 'sqlmodel.SQLModel.metadata.create_all', 'SQLModel.metadata.create_all', (['ENGINE'], {}), '(ENGINE)\n', (987, 995), False, 'from sqlmodel import create_engine, SQLModel\n'), ((1011, 1026), 'strawberry.asgi.GraphQL', 'GraphQL', (['schema'], {}), '(schema)\n', (1018, 1026), False, 'from strawberry.asgi import GraphQL\n'), ((1034, 1043), 'fastapi.FastAPI', 'FastAPI', ([], {}), '()\n', (1041, 1043), False, 'from fastapi import FastAPI\n'), ((307, 329), 'os.getenv', 'getenv', (['"""DATABASE_URL"""'], {}), "('DATABASE_URL')\n", (313, 329), False, 'from os import getenv\n'), ((464, 487), 'api.crud.get_posts', 'get_posts', (['ENGINE', 'slug'], {}), '(ENGINE, slug)\n', (473, 487), False, 'from api.crud import create_post, get_posts\n'), ((856, 870), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (868, 870), False, 'from datetime import datetime\n')]
|
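PostQL, get_posts and create_post come from the project's own api.models and api.crud modules, which are not shown. One plausible shape, purely illustrative and assuming PostQL doubles as a SQLModel table model, would be:

from datetime import datetime
from typing import Optional
from sqlmodel import Field, Session, SQLModel, select

class PostQL(SQLModel, table=True):  # hypothetical: the real model lives in api.models
    id: Optional[int] = Field(default=None, primary_key=True)
    slug: str
    title: str
    content: str
    published: bool
    published_at: datetime

def get_posts(engine, slug: Optional[str] = None) -> list[PostQL]:
    # hypothetical api.crud.get_posts: optionally filter by slug
    with Session(engine) as session:
        statement = select(PostQL)
        if slug is not None:
            statement = statement.where(PostQL.slug == slug)
        return session.exec(statement).all()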
from popol.db.sqlmodel import models
from sqlmodel import Field
class Account(models.Model, table=True):
username: str = Field(max_length=255, nullable=False)
password: str = Field(max_length=255, nullable=False)
class Counter(models.Model, table=True):
value: int
|
[
"sqlmodel.Field"
] |
[((127, 164), 'sqlmodel.Field', 'Field', ([], {'max_length': '(255)', 'nullable': '(False)'}), '(max_length=255, nullable=False)\n', (132, 164), False, 'from sqlmodel import Field\n'), ((185, 222), 'sqlmodel.Field', 'Field', ([], {'max_length': '(255)', 'nullable': '(False)'}), '(max_length=255, nullable=False)\n', (190, 222), False, 'from sqlmodel import Field\n')]
|
#!/usr/bin/env python
r"""
This example shows the use of the `dw_tl_he_genyeoh` hyperelastic term, whose
contribution to the deformation energy density per unit reference volume is
given by
.. math::
W = K \, \left( \overline I_1 - 3 \right)^{p}
where :math:`\overline I_1` is the first main invariant of the deviatoric part
of the right Cauchy-Green deformation tensor :math:`\ull{C}` and `K` and `p`
are its parameters.
This term may be used to implement the generalized Yeoh hyperelastic material
model [1] by adding three such terms:
.. math::
W =
K_1 \, \left( \overline I_1 - 3 \right)^{m}
+K_2 \, \left( \overline I_1 - 3 \right)^{p}
+K_3 \, \left( \overline I_1 - 3 \right)^{q}
where the coefficients :math:`K_1, K_2, K_3` and exponents :math:`m, p, q` are
material parameters. Only a single term is used in this example for the sake of
simplicity.
Components of the second Piola-Kirchhoff stress are in the case of an
incompressible material
.. math::
S_{ij} = 2 \, \pdiff{W}{C_{ij}} - p \, F^{-1}_{ik} \, F^{-T}_{kj} \;,
where :math:`p` is the hydrostatic pressure.
The large deformation is described using the total Lagrangian formulation in
this example. The incompressibility is treated by mixed displacement-pressure
formulation. The weak formulation is:
Find the displacement field :math:`\ul{u}` and pressure field :math:`p`
such that:
.. math::
\intl{\Omega\suz}{} \ull{S}\eff(\ul{u}, p) : \ull{E}(\ul{v})
\difd{V} = 0
\;, \quad \forall \ul{v} \;,
\intl{\Omega\suz}{} q\, (J(\ul{u})-1) \difd{V} = 0
\;, \quad \forall q \;.
The following formula holds for the axial true (Cauchy) stress in the case of
uniaxial stress:
.. math::
\sigma(\lambda) =
\frac{2}{3} \, m \, K_1 \,
\left( \lambda^2 + \frac{2}{\lambda} - 3 \right)^{m-1} \,
\left( \lambda - \frac{1}{\lambda^2} \right) \;,
where :math:`\lambda = l/l_0` is the prescribed stretch (:math:`l_0` and
:math:`l` being the original and deformed specimen length respectively).
The boundary conditions are set so that a state of uniaxial stress is achieved,
i.e. appropriate components of displacement are fixed on the "Left", "Bottom",
and "Near" faces and a monotonously increasing displacement is prescribed on
the "Right" face. This prescribed displacement is then used to calculate
:math:`\lambda` and to convert the second Piola-Kirchhoff stress to the true
(Cauchy) stress.
Note on material parameters
---------------------------
The three-term generalized Yeoh model is meant to be used for modelling of
filled rubbers. The following choice of parameters is suggested [1] based on
experimental data and stability considerations:
:math:`K_1 > 0`,
:math:`K_2 < 0`,
:math:`K_3 > 0`,
:math:`0.7 < m < 1`,
:math:`m < p < q`.
Usage Examples
--------------
Default options::
$ python examples/large_deformation/gen_yeoh_tl_up_interactive.py
To show a comparison of stress against the analytic formula::
$ python examples/large_deformation/gen_yeoh_tl_up_interactive.py -p
Using different mesh fineness::
$ python examples/large_deformation/gen_yeoh_tl_up_interactive.py \
--shape "5, 5, 5"
Different dimensions of the computational domain::
$ python examples/large_deformation/gen_yeoh_tl_up_interactive.py \
--dims "2, 1, 3"
Different length of time interval and/or number of time steps::
$ python examples/large_deformation/gen_yeoh_tl_up_interactive.py \
-t 0,15,21
Use higher approximation order (the ``-t`` option to decrease the time step is
required for convergence here)::
$ python examples/large_deformation/gen_yeoh_tl_up_interactive.py \
--order 2 -t 0,2,21
Change material parameters::
$ python examples/large_deformation/gen_yeoh_tl_up_interactive.py -m 2,1
View the results using ``resview.py``
-------------------------------------
Show pressure on deformed mesh (use PgDn/PgUp to jump forward/back)::
$ python resview.py --fields=p:f1:wu:p1 domain.??.vtk
Show the axial component of stress (second Piola-Kirchhoff)::
$ python resview.py --fields=stress:c0 domain.??.vtk
[1] <NAME>, <NAME>, <NAME>, <NAME>.
Busfield. A Constitutive Model For Both Low and High Strain Nonlinearities In
Highly Filled Elastomers And Implementation With User-Defined Material
Subroutines In Abaqus. Rubber Chemistry And Technology, Vol. 92, No. 4, Pp.
653-686 (2019)
"""
from __future__ import print_function, absolute_import
import argparse
import sys
SFEPY_DIR = '.'
sys.path.append(SFEPY_DIR)
import matplotlib.pyplot as plt
import numpy as np
from sfepy.base.base import IndexedStruct, Struct
from sfepy.discrete import (
FieldVariable, Material, Integral, Function, Equation, Equations, Problem)
from sfepy.discrete.conditions import Conditions, EssentialBC
from sfepy.discrete.fem import FEDomain, Field
from sfepy.homogenization.utils import define_box_regions
from sfepy.mesh.mesh_generators import gen_block_mesh
from sfepy.solvers.ls import ScipyDirect
from sfepy.solvers.nls import Newton
from sfepy.solvers.ts_solvers import SimpleTimeSteppingSolver
from sfepy.terms import Term
DIMENSION = 3
def get_displacement(ts, coors, bc=None, problem=None):
"""
Define the time-dependent displacement.
"""
out = 1. * ts.time * coors[:, 0]
return out
def _get_analytic_stress(stretches, coef, exp):
out = np.array([
2 * coef * exp * (stretch**2 + 2 / stretch - 3)**(exp - 1)
* (stretch - stretch**-2)
if (stretch**2 + 2 / stretch > 3) else 0.
for stretch in stretches])
return out
def plot_graphs(
material_parameters, global_stress, global_displacement,
undeformed_length):
"""
Plot a comparison of the nominal stress computed by the FEM and using the
analytic formula.
Parameters
----------
material_parameters : list or tuple of float
The K_1 coefficient and exponent m.
global_displacement
The total displacement for each time step, from the FEM.
global_stress
The true (Cauchy) stress for each time step, from the FEM.
undeformed_length : float
The length of the undeformed specimen.
"""
coef, exp = material_parameters
stretch = 1 + np.array(global_displacement) / undeformed_length
# axial stress values
stress_fem_2pk = np.array([sig for sig in global_stress])
stress_fem = stress_fem_2pk * stretch
stress_analytic = _get_analytic_stress(stretch, coef, exp)
fig, (ax_stress, ax_difference) = plt.subplots(nrows=2, sharex=True)
ax_stress.plot(stretch, stress_fem, '.-', label='FEM')
ax_stress.plot(stretch, stress_analytic, '--', label='analytic')
ax_difference.plot(stretch, stress_fem - stress_analytic, '.-')
ax_stress.legend(loc='best').set_draggable(True)
ax_stress.set_ylabel(r'nominal stress $\mathrm{[Pa]}$')
ax_stress.grid()
ax_difference.set_ylabel(r'difference in nominal stress $\mathrm{[Pa]}$')
ax_difference.set_xlabel(r'stretch $\mathrm{[-]}$')
ax_difference.grid()
plt.tight_layout()
plt.show()
def stress_strain(
out, problem, _state, order=1, global_stress=None,
global_displacement=None, **_):
"""
Compute the stress and the strain and add them to the output.
Parameters
----------
out : dict
Holds the results of the finite element computation.
problem : sfepy.discrete.Problem
order : int
The approximation order of the displacement field.
global_displacement
Total displacement for each time step, current value will be appended.
global_stress
The true (Cauchy) stress for each time step, current value will be
appended.
Returns
-------
out : dict
"""
strain = problem.evaluate(
'dw_tl_he_genyeoh.%d.Omega(m1.par, v, u)' % (2*order),
mode='el_avg', term_mode='strain', copy_materials=False)
out['green_strain'] = Struct(
name='output_data', mode='cell', data=strain, dofs=None)
stress_1 = problem.evaluate(
'dw_tl_he_genyeoh.%d.Omega(m1.par, v, u)' % (2*order),
mode='el_avg', term_mode='stress', copy_materials=False)
stress_p = problem.evaluate(
'dw_tl_bulk_pressure.%d.Omega(v, u, p)' % (2*order),
mode='el_avg', term_mode='stress', copy_materials=False)
stress = stress_1 + stress_p
out['stress'] = Struct(
name='output_data', mode='cell', data=stress, dofs=None)
global_stress.append(stress[0, 0, 0, 0])
global_displacement.append(get_displacement(
problem.ts, np.array([[1., 0, 0]]))[0])
return out
def main(cli_args):
dims = parse_argument_list(cli_args.dims, float)
shape = parse_argument_list(cli_args.shape, int)
centre = parse_argument_list(cli_args.centre, float)
material_parameters = parse_argument_list(cli_args.material_parameters,
float)
order = cli_args.order
ts_vals = cli_args.ts.split(',')
ts = {
't0' : float(ts_vals[0]), 't1' : float(ts_vals[1]),
'n_step' : int(ts_vals[2])}
do_plot = cli_args.plot
### Mesh and regions ###
mesh = gen_block_mesh(
dims, shape, centre, name='block', verbose=False)
domain = FEDomain('domain', mesh)
omega = domain.create_region('Omega', 'all')
lbn, rtf = domain.get_mesh_bounding_box()
box_regions = define_box_regions(3, lbn, rtf)
regions = dict([
[r, domain.create_region(r, box_regions[r][0], box_regions[r][1])]
for r in box_regions])
### Fields ###
scalar_field = Field.from_args(
'fu', np.float64, 'scalar', omega, approx_order=order-1)
vector_field = Field.from_args(
'fv', np.float64, 'vector', omega, approx_order=order)
u = FieldVariable('u', 'unknown', vector_field, history=1)
v = FieldVariable('v', 'test', vector_field, primary_var_name='u')
p = FieldVariable('p', 'unknown', scalar_field, history=1)
q = FieldVariable('q', 'test', scalar_field, primary_var_name='p')
### Material ###
coefficient, exponent = material_parameters
m_1 = Material(
'm1', par=[coefficient, exponent],
)
### Boundary conditions ###
x_sym = EssentialBC('x_sym', regions['Left'], {'u.0' : 0.0})
y_sym = EssentialBC('y_sym', regions['Near'], {'u.1' : 0.0})
z_sym = EssentialBC('z_sym', regions['Bottom'], {'u.2' : 0.0})
disp_fun = Function('disp_fun', get_displacement)
displacement = EssentialBC(
'displacement', regions['Right'], {'u.0' : disp_fun})
ebcs = Conditions([x_sym, y_sym, z_sym, displacement])
### Terms and equations ###
integral = Integral('i', order=2*order+1)
term_1 = Term.new(
'dw_tl_he_genyeoh(m1.par, v, u)',
integral, omega, m1=m_1, v=v, u=u)
term_pressure = Term.new(
'dw_tl_bulk_pressure(v, u, p)',
integral, omega, v=v, u=u, p=p)
term_volume_change = Term.new(
'dw_tl_volume(q, u)',
integral, omega, q=q, u=u, term_mode='volume')
term_volume = Term.new(
'dw_volume_integrate(q)',
integral, omega, q=q)
eq_balance = Equation('balance', term_1 + term_pressure)
eq_volume = Equation('volume', term_volume_change - term_volume)
equations = Equations([eq_balance, eq_volume])
### Solvers ###
ls = ScipyDirect({})
nls_status = IndexedStruct()
nls = Newton(
{'i_max' : 20},
lin_solver=ls, status=nls_status
)
### Problem ###
pb = Problem('hyper', equations=equations)
pb.set_bcs(ebcs=ebcs)
pb.set_ics(ics=Conditions([]))
tss = SimpleTimeSteppingSolver(ts, nls=nls, context=pb)
pb.set_solver(tss)
### Solution ###
axial_stress = []
axial_displacement = []
def stress_strain_fun(*args, **kwargs):
return stress_strain(
*args, order=order, global_stress=axial_stress,
global_displacement=axial_displacement, **kwargs)
pb.solve(save_results=True, post_process_hook=stress_strain_fun)
if do_plot:
plot_graphs(
material_parameters, axial_stress, axial_displacement,
undeformed_length=dims[0])
def parse_argument_list(cli_arg, type_fun=None, value_separator=','):
"""
Split the command-line argument into a list of items of given type.
Parameters
----------
cli_arg : str
type_fun : function
A function to be called on each substring of `cli_arg`; default: str.
value_separator : str
"""
if type_fun is None:
type_fun = str
out = [type_fun(value) for value in cli_arg.split(value_separator)]
return out
def parse_args():
"""Parse command line arguments."""
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
'--order', type=int, default=1, help='The approximation order of the '
'displacement field [default: %(default)s]')
parser.add_argument(
'-m', '--material-parameters', default='0.5, 0.9',
help='Material parameters - coefficient and exponent - of a single '
'term of the generalized Yeoh hyperelastic model. '
'[default: %(default)s]')
parser.add_argument(
'--dims', default="1.0, 1.0, 1.0",
help='Dimensions of the block [default: %(default)s]')
parser.add_argument(
'--shape', default='2, 2, 2',
help='Shape (counts of nodes in x, y, z) of the block [default: '
'%(default)s]')
parser.add_argument(
'--centre', default='0.5, 0.5, 0.5',
help='Centre of the block [default: %(default)s]')
parser.add_argument(
'-p', '--plot', action='store_true', default=False,
help='Whether to plot a comparison with analytical formula.')
parser.add_argument(
'-t', '--ts',
type=str, default='0.0,2.0,11',
help='Start time, end time, and number of time steps [default: '
'"%(default)s"]')
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
main(args)
|
[
"sfepy.mesh.mesh_generators.gen_block_mesh",
"sfepy.solvers.ts_solvers.SimpleTimeSteppingSolver",
"sfepy.homogenization.utils.define_box_regions",
"sfepy.solvers.nls.Newton",
"sfepy.discrete.Problem",
"sfepy.discrete.Integral",
"sfepy.discrete.fem.Field.from_args",
"sfepy.base.base.Struct",
"sfepy.discrete.fem.FEDomain",
"sfepy.discrete.conditions.EssentialBC",
"sfepy.solvers.ls.ScipyDirect",
"sfepy.discrete.conditions.Conditions",
"sfepy.base.base.IndexedStruct",
"sfepy.discrete.Equation",
"sfepy.terms.Term.new",
"sfepy.discrete.FieldVariable",
"sfepy.discrete.Material",
"sfepy.discrete.Function",
"sfepy.discrete.Equations"
] |
[((4494, 4520), 'sys.path.append', 'sys.path.append', (['SFEPY_DIR'], {}), '(SFEPY_DIR)\n', (4509, 4520), False, 'import sys\n'), ((5366, 5546), 'numpy.array', 'np.array', (['[(2 * coef * exp * (stretch ** 2 + 2 / stretch - 3) ** (exp - 1) * (stretch -\n stretch ** -2) if stretch ** 2 + 2 / stretch > 3 else 0.0) for stretch in\n stretches]'], {}), '([(2 * coef * exp * (stretch ** 2 + 2 / stretch - 3) ** (exp - 1) *\n (stretch - stretch ** -2) if stretch ** 2 + 2 / stretch > 3 else 0.0) for\n stretch in stretches])\n', (5374, 5546), True, 'import numpy as np\n'), ((6333, 6373), 'numpy.array', 'np.array', (['[sig for sig in global_stress]'], {}), '([sig for sig in global_stress])\n', (6341, 6373), True, 'import numpy as np\n'), ((6518, 6552), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(2)', 'sharex': '(True)'}), '(nrows=2, sharex=True)\n', (6530, 6552), True, 'import matplotlib.pyplot as plt\n'), ((7050, 7068), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (7066, 7068), True, 'import matplotlib.pyplot as plt\n'), ((7073, 7083), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7081, 7083), True, 'import matplotlib.pyplot as plt\n'), ((7944, 8007), 'sfepy.base.base.Struct', 'Struct', ([], {'name': '"""output_data"""', 'mode': '"""cell"""', 'data': 'strain', 'dofs': 'None'}), "(name='output_data', mode='cell', data=strain, dofs=None)\n", (7950, 8007), False, 'from sfepy.base.base import IndexedStruct, Struct\n'), ((8392, 8455), 'sfepy.base.base.Struct', 'Struct', ([], {'name': '"""output_data"""', 'mode': '"""cell"""', 'data': 'stress', 'dofs': 'None'}), "(name='output_data', mode='cell', data=stress, dofs=None)\n", (8398, 8455), False, 'from sfepy.base.base import IndexedStruct, Struct\n'), ((9179, 9243), 'sfepy.mesh.mesh_generators.gen_block_mesh', 'gen_block_mesh', (['dims', 'shape', 'centre'], {'name': '"""block"""', 'verbose': '(False)'}), "(dims, shape, centre, name='block', verbose=False)\n", (9193, 9243), False, 'from sfepy.mesh.mesh_generators import gen_block_mesh\n'), ((9266, 9290), 'sfepy.discrete.fem.FEDomain', 'FEDomain', (['"""domain"""', 'mesh'], {}), "('domain', mesh)\n", (9274, 9290), False, 'from sfepy.discrete.fem import FEDomain, Field\n'), ((9406, 9437), 'sfepy.homogenization.utils.define_box_regions', 'define_box_regions', (['(3)', 'lbn', 'rtf'], {}), '(3, lbn, rtf)\n', (9424, 9437), False, 'from sfepy.homogenization.utils import define_box_regions\n'), ((9604, 9678), 'sfepy.discrete.fem.Field.from_args', 'Field.from_args', (['"""fu"""', 'np.float64', '"""scalar"""', 'omega'], {'approx_order': '(order - 1)'}), "('fu', np.float64, 'scalar', omega, approx_order=order - 1)\n", (9619, 9678), False, 'from sfepy.discrete.fem import FEDomain, Field\n'), ((9705, 9775), 'sfepy.discrete.fem.Field.from_args', 'Field.from_args', (['"""fv"""', 'np.float64', '"""vector"""', 'omega'], {'approx_order': 'order'}), "('fv', np.float64, 'vector', omega, approx_order=order)\n", (9720, 9775), False, 'from sfepy.discrete.fem import FEDomain, Field\n'), ((9794, 9848), 'sfepy.discrete.FieldVariable', 'FieldVariable', (['"""u"""', '"""unknown"""', 'vector_field'], {'history': '(1)'}), "('u', 'unknown', vector_field, history=1)\n", (9807, 9848), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem\n'), ((9857, 9919), 'sfepy.discrete.FieldVariable', 'FieldVariable', (['"""v"""', '"""test"""', 'vector_field'], {'primary_var_name': '"""u"""'}), "('v', 'test', vector_field, 
primary_var_name='u')\n", (9870, 9919), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem\n'), ((9928, 9982), 'sfepy.discrete.FieldVariable', 'FieldVariable', (['"""p"""', '"""unknown"""', 'scalar_field'], {'history': '(1)'}), "('p', 'unknown', scalar_field, history=1)\n", (9941, 9982), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem\n'), ((9991, 10053), 'sfepy.discrete.FieldVariable', 'FieldVariable', (['"""q"""', '"""test"""', 'scalar_field'], {'primary_var_name': '"""p"""'}), "('q', 'test', scalar_field, primary_var_name='p')\n", (10004, 10053), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem\n'), ((10135, 10178), 'sfepy.discrete.Material', 'Material', (['"""m1"""'], {'par': '[coefficient, exponent]'}), "('m1', par=[coefficient, exponent])\n", (10143, 10178), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem\n'), ((10239, 10290), 'sfepy.discrete.conditions.EssentialBC', 'EssentialBC', (['"""x_sym"""', "regions['Left']", "{'u.0': 0.0}"], {}), "('x_sym', regions['Left'], {'u.0': 0.0})\n", (10250, 10290), False, 'from sfepy.discrete.conditions import Conditions, EssentialBC\n'), ((10304, 10355), 'sfepy.discrete.conditions.EssentialBC', 'EssentialBC', (['"""y_sym"""', "regions['Near']", "{'u.1': 0.0}"], {}), "('y_sym', regions['Near'], {'u.1': 0.0})\n", (10315, 10355), False, 'from sfepy.discrete.conditions import Conditions, EssentialBC\n'), ((10369, 10422), 'sfepy.discrete.conditions.EssentialBC', 'EssentialBC', (['"""z_sym"""', "regions['Bottom']", "{'u.2': 0.0}"], {}), "('z_sym', regions['Bottom'], {'u.2': 0.0})\n", (10380, 10422), False, 'from sfepy.discrete.conditions import Conditions, EssentialBC\n'), ((10439, 10477), 'sfepy.discrete.Function', 'Function', (['"""disp_fun"""', 'get_displacement'], {}), "('disp_fun', get_displacement)\n", (10447, 10477), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem\n'), ((10497, 10561), 'sfepy.discrete.conditions.EssentialBC', 'EssentialBC', (['"""displacement"""', "regions['Right']", "{'u.0': disp_fun}"], {}), "('displacement', regions['Right'], {'u.0': disp_fun})\n", (10508, 10561), False, 'from sfepy.discrete.conditions import Conditions, EssentialBC\n'), ((10583, 10630), 'sfepy.discrete.conditions.Conditions', 'Conditions', (['[x_sym, y_sym, z_sym, displacement]'], {}), '([x_sym, y_sym, z_sym, displacement])\n', (10593, 10630), False, 'from sfepy.discrete.conditions import Conditions, EssentialBC\n'), ((10679, 10713), 'sfepy.discrete.Integral', 'Integral', (['"""i"""'], {'order': '(2 * order + 1)'}), "('i', order=2 * order + 1)\n", (10687, 10713), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem\n'), ((10724, 10801), 'sfepy.terms.Term.new', 'Term.new', (['"""dw_tl_he_genyeoh(m1.par, v, u)"""', 'integral', 'omega'], {'m1': 'm_1', 'v': 'v', 'u': 'u'}), "('dw_tl_he_genyeoh(m1.par, v, u)', integral, omega, m1=m_1, v=v, u=u)\n", (10732, 10801), False, 'from sfepy.terms import Term\n'), ((10839, 10911), 'sfepy.terms.Term.new', 'Term.new', (['"""dw_tl_bulk_pressure(v, u, p)"""', 'integral', 'omega'], {'v': 'v', 'u': 'u', 'p': 'p'}), "('dw_tl_bulk_pressure(v, u, p)', integral, omega, v=v, u=u, p=p)\n", (10847, 10911), False, 'from sfepy.terms import Term\n'), ((10955, 11032), 
'sfepy.terms.Term.new', 'Term.new', (['"""dw_tl_volume(q, u)"""', 'integral', 'omega'], {'q': 'q', 'u': 'u', 'term_mode': '"""volume"""'}), "('dw_tl_volume(q, u)', integral, omega, q=q, u=u, term_mode='volume')\n", (10963, 11032), False, 'from sfepy.terms import Term\n'), ((11068, 11124), 'sfepy.terms.Term.new', 'Term.new', (['"""dw_volume_integrate(q)"""', 'integral', 'omega'], {'q': 'q'}), "('dw_volume_integrate(q)', integral, omega, q=q)\n", (11076, 11124), False, 'from sfepy.terms import Term\n'), ((11160, 11203), 'sfepy.discrete.Equation', 'Equation', (['"""balance"""', '(term_1 + term_pressure)'], {}), "('balance', term_1 + term_pressure)\n", (11168, 11203), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem\n'), ((11220, 11272), 'sfepy.discrete.Equation', 'Equation', (['"""volume"""', '(term_volume_change - term_volume)'], {}), "('volume', term_volume_change - term_volume)\n", (11228, 11272), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem\n'), ((11289, 11323), 'sfepy.discrete.Equations', 'Equations', (['[eq_balance, eq_volume]'], {}), '([eq_balance, eq_volume])\n', (11298, 11323), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem\n'), ((11354, 11369), 'sfepy.solvers.ls.ScipyDirect', 'ScipyDirect', (['{}'], {}), '({})\n', (11365, 11369), False, 'from sfepy.solvers.ls import ScipyDirect\n'), ((11387, 11402), 'sfepy.base.base.IndexedStruct', 'IndexedStruct', ([], {}), '()\n', (11400, 11402), False, 'from sfepy.base.base import IndexedStruct, Struct\n'), ((11413, 11468), 'sfepy.solvers.nls.Newton', 'Newton', (["{'i_max': 20}"], {'lin_solver': 'ls', 'status': 'nls_status'}), "({'i_max': 20}, lin_solver=ls, status=nls_status)\n", (11419, 11468), False, 'from sfepy.solvers.nls import Newton\n'), ((11522, 11559), 'sfepy.discrete.Problem', 'Problem', (['"""hyper"""'], {'equations': 'equations'}), "('hyper', equations=equations)\n", (11529, 11559), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem\n'), ((11631, 11680), 'sfepy.solvers.ts_solvers.SimpleTimeSteppingSolver', 'SimpleTimeSteppingSolver', (['ts'], {'nls': 'nls', 'context': 'pb'}), '(ts, nls=nls, context=pb)\n', (11655, 11680), False, 'from sfepy.solvers.ts_solvers import SimpleTimeSteppingSolver\n'), ((12729, 12832), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '__doc__', 'formatter_class': 'argparse.RawDescriptionHelpFormatter'}), '(description=__doc__, formatter_class=argparse.\n RawDescriptionHelpFormatter)\n', (12752, 12832), False, 'import argparse\n'), ((6235, 6264), 'numpy.array', 'np.array', (['global_displacement'], {}), '(global_displacement)\n', (6243, 6264), True, 'import numpy as np\n'), ((11605, 11619), 'sfepy.discrete.conditions.Conditions', 'Conditions', (['[]'], {}), '([])\n', (11615, 11619), False, 'from sfepy.discrete.conditions import Conditions, EssentialBC\n'), ((8580, 8603), 'numpy.array', 'np.array', (['[[1.0, 0, 0]]'], {}), '([[1.0, 0, 0]])\n', (8588, 8603), True, 'import numpy as np\n')]
|
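The docstring of the example above notes that the full generalized Yeoh model is obtained by summing three dw_tl_he_genyeoh terms. A sketch of how that could be assembled by reusing the objects built in main() (integral, omega, v, u, term_pressure); the coefficients and exponents below are placeholders chosen only to respect the stability constraints listed in the docstring, not fitted values.

m_1 = Material('m1', par=[0.5, 0.9])    # K_1 > 0, 0.7 < m < 1
m_2 = Material('m2', par=[-0.1, 1.2])   # K_2 < 0, m < p
m_3 = Material('m3', par=[0.05, 2.0])   # K_3 > 0, p < q

term_1 = Term.new('dw_tl_he_genyeoh(m1.par, v, u)', integral, omega, m1=m_1, v=v, u=u)
term_2 = Term.new('dw_tl_he_genyeoh(m2.par, v, u)', integral, omega, m2=m_2, v=v, u=u)
term_3 = Term.new('dw_tl_he_genyeoh(m3.par, v, u)', integral, omega, m3=m_3, v=v, u=u)

eq_balance = Equation('balance', term_1 + term_2 + term_3 + term_pressure)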
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# Copyright (c) 2020 <NAME>
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
"""EfficientNet Series
EfficientNet: `"EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks"
<https://arxiv.org/abs/1905.11946>`_
References:
https://github.com/facebookresearch/pycls/blob/main/pycls/models/effnet.py
https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/efficientnet.py
https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/mobilenetv3.py
"""
import math
from numbers import Real
from typing import Any, Callable, Mapping, Sequence, Union
import megengine.hub as hub
import megengine.module as M
from basecls.layers import (
SE,
DropPath,
activation,
build_head,
conv2d,
init_weights,
make_divisible,
norm2d,
)
from basecls.utils import recursive_update, registers
from .mbnet import MBConv
from .resnet import AnyStage, SimpleStem
__all__ = ["FuseMBConv", "EffNet"]
class FuseMBConv(M.Module):
"""Fusing the proj conv1x1 and depthwise conv into a conv2d.
Args:
w_in: input width.
w_out: output width.
stride: stride of conv.
kernel: kernel of conv.
exp_r: expansion ratio.
se_r: SE ratio.
has_skip: whether apply skip connection.
drop_path_prob: drop path probability.
norm_name: normalization function.
act_name: activation function.
"""
def __init__(
self,
w_in: int,
w_out: int,
stride: int,
kernel: int,
exp_r: float,
se_r: float,
has_skip: bool,
drop_path_prob: float,
norm_name: str,
act_name: str,
**kwargs,
):
super().__init__()
# Expansion
w_mid = w_in
w_exp = int(w_in * exp_r)
if exp_r != 1.0:
self.exp = conv2d(w_in, w_exp, kernel, stride=stride)
self.exp_bn = norm2d(norm_name, w_exp)
self.exp_act = activation(act_name)
w_mid = w_exp
# SE
if se_r > 0.0:
w_se = int(w_in * se_r)
self.se = SE(w_mid, w_se, act_name)
# PWConv
self.proj = conv2d(
w_mid, w_out, 1 if exp_r != 1.0 else kernel, stride=1 if exp_r != 1.0 else stride
)
self.proj_bn = norm2d(norm_name, w_out)
self.has_proj_act = exp_r == 1.0
if self.has_proj_act:
self.proj_act = activation(act_name)
# Skip
self.has_skip = has_skip and w_in == w_out and stride == 1
if self.has_skip:
self.drop_path = DropPath(drop_path_prob)
def forward(self, x):
x_p = x
if getattr(self, "exp", None) is not None:
x = self.exp(x)
x = self.exp_bn(x)
x = self.exp_act(x)
if getattr(self, "se", None) is not None:
x = self.se(x)
x = self.proj(x)
x = self.proj_bn(x)
if self.has_proj_act:
x = self.proj_act(x)
if self.has_skip:
x = self.drop_path(x)
x = x + x_p
return x
@registers.models.register()
class EffNet(M.Module):
"""EfficientNet model.
Args:
stem_w: stem width.
block_name: block name.
depths: depth for each stage (number of blocks in the stage).
widths: width for each stage (width of each block in the stage).
strides: strides for each stage (applies to the first block of each stage).
kernels: kernel sizes for each stage.
exp_rs: expansion ratios for MBConv blocks in each stage.
        se_rs: Squeeze-and-Excitation (SE) ratios, a single float or one per stage. Default: ``0.0``
drop_path_prob: drop path probability. Default: ``0.0``
depth_mult: depth multiplier. Default: ``1.0``
width_mult: width multiplier. Default: ``1.0``
omit_mult: omit multiplier for stem width, head width, the first stage depth and
the last stage depth, enabled in EfficientNet-Lite. Default: ``False``
norm_name: normalization function. Default: ``"BN"``
act_name: activation function. Default: ``"silu"``
head: head args. Default: ``None``
"""
def __init__(
self,
stem_w: int,
block_name: Union[Union[str, Callable], Sequence[Union[str, Callable]]],
depths: Sequence[int],
widths: Sequence[int],
strides: Sequence[int],
kernels: Sequence[int],
exp_rs: Union[float, Sequence[Union[float, Sequence[float]]]] = 1.0,
se_rs: Union[float, Sequence[Union[float, Sequence[float]]]] = 0.0,
drop_path_prob: float = 0.0,
depth_mult: float = 1.0,
width_mult: float = 1.0,
omit_mult: bool = False,
norm_name: str = "BN",
act_name: str = "silu",
head: Mapping[str, Any] = None,
):
super().__init__()
depths = [
d if omit_mult and i in (0, len(depths) - 1) else math.ceil(d * depth_mult)
for i, d in enumerate(depths)
]
self.depths = depths
stem_w = stem_w if omit_mult else make_divisible(stem_w * width_mult, round_limit=0.9)
self.stem = SimpleStem(3, stem_w, norm_name, act_name)
if isinstance(block_name, (str, Callable)):
block_name = [block_name] * len(depths)
block_func = [self.get_block_func(bn) for bn in block_name]
widths = [make_divisible(w * width_mult, round_limit=0.9) for w in widths]
if isinstance(exp_rs, Real):
exp_rs = [exp_rs] * len(depths)
if isinstance(se_rs, Real):
se_rs = [se_rs] * len(depths)
drop_path_prob_iter = (i / sum(depths) * drop_path_prob for i in range(sum(depths)))
drop_path_probs = [[next(drop_path_prob_iter) for _ in range(d)] for d in depths]
model_args = [depths, widths, strides, block_func, kernels, exp_rs, se_rs, drop_path_probs]
prev_w = stem_w
for i, (d, w, s, bf, k, exp_r, se_r, dp_p) in enumerate(zip(*model_args)):
stage = AnyStage(
prev_w,
w,
s,
d,
bf,
kernel=k,
exp_r=exp_r,
se_r=se_r,
se_from_exp=False,
se_act_name=act_name,
se_approx=False,
se_rd_fn=int,
has_proj_act=False,
has_skip=True,
drop_path_prob=dp_p,
norm_name=norm_name,
act_name=act_name,
)
setattr(self, f"s{i + 1}", stage)
prev_w = w
if head:
if head.get("width", 0) > 0 and not omit_mult:
head["width"] = make_divisible(head["width"] * width_mult, round_limit=0.9)
self.head = build_head(prev_w, head, norm_name, act_name)
self.apply(init_weights)
def forward(self, x):
x = self.stem(x)
for i in range(len(self.depths)):
stage = getattr(self, f"s{i + 1}")
x = stage(x)
if getattr(self, "head", None) is not None:
x = self.head(x)
return x
@staticmethod
def get_block_func(name: Union[str, Callable]):
"""Retrieves the block function by name."""
if callable(name):
return name
if isinstance(name, str):
block_funcs = {
"FuseMBConv": FuseMBConv,
"MBConv": MBConv,
}
if name in block_funcs.keys():
return block_funcs[name]
raise ValueError(f"Block '{name}' not supported")
def _build_effnet(**kwargs):
model_args = dict(
stem_w=32,
block_name=MBConv,
depths=[1, 2, 2, 3, 3, 4, 1],
widths=[16, 24, 40, 80, 112, 192, 320],
strides=[1, 2, 2, 2, 1, 2, 1],
kernels=[3, 3, 5, 3, 5, 5, 3],
exp_rs=[1, 6, 6, 6, 6, 6, 6],
se_rs=0.25,
drop_path_prob=0.2,
head=dict(name="ClsHead", width=1280, dropout_prob=0.2),
)
recursive_update(model_args, kwargs)
return EffNet(**model_args)
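# Worked example (added note, not part of the original file): the compound scaling
# in EffNet.__init__ maps the B0 baseline above to the larger variants, e.g. with
# depth_mult=1.8 and width_mult=1.4 (the B4 configuration registered below):
#   depths: d -> math.ceil(d * 1.8), so [1, 2, 2, 3, 3, 4, 1] -> [2, 4, 4, 6, 6, 8, 2]
#   widths: w -> make_divisible(w * 1.4, round_limit=0.9), so stem_w 32 -> 48
# With omit_mult=True (the *_lite builders) the stem/head widths and the first and
# last stage depths keep their baseline values.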
def _build_effnet_lite(**kwargs):
model_args = dict(se_rs=0.0, omit_mult=True, act_name="relu6")
recursive_update(model_args, kwargs)
return _build_effnet(**model_args)
def _build_effnetv2(**kwargs):
model_args = dict(
stem_w=32,
block_name=[FuseMBConv, FuseMBConv, FuseMBConv, MBConv, MBConv, MBConv],
depths=[1, 2, 2, 3, 5, 8],
widths=[16, 32, 48, 96, 112, 192],
strides=[1, 2, 2, 2, 1, 2],
kernels=[3, 3, 3, 3, 3, 3],
exp_rs=[1, 4, 4, 4, 6, 6],
se_rs=[0, 0, 0, 0.25, 0.25, 0.25],
)
recursive_update(model_args, kwargs)
return _build_effnet(**model_args)
@registers.models.register()
@hub.pretrained(
"https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b0/effnet_b0.pkl"
)
def effnet_b0(**kwargs):
model_args = dict(depth_mult=1.0, width_mult=1.0)
recursive_update(model_args, kwargs)
return _build_effnet(**model_args)
@registers.models.register()
@hub.pretrained(
"https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b1/effnet_b1.pkl"
)
def effnet_b1(**kwargs):
model_args = dict(depth_mult=1.1, width_mult=1.0)
recursive_update(model_args, kwargs)
return _build_effnet(**model_args)
@registers.models.register()
@hub.pretrained(
"https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b2/effnet_b2.pkl"
)
def effnet_b2(**kwargs):
model_args = dict(depth_mult=1.2, width_mult=1.1, head=dict(dropout_prob=0.3))
recursive_update(model_args, kwargs)
return _build_effnet(**model_args)
@registers.models.register()
@hub.pretrained(
"https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b3/effnet_b3.pkl"
)
def effnet_b3(**kwargs):
model_args = dict(depth_mult=1.4, width_mult=1.2, head=dict(dropout_prob=0.3))
recursive_update(model_args, kwargs)
return _build_effnet(**model_args)
@registers.models.register()
@hub.pretrained(
"https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b4/effnet_b4.pkl"
)
def effnet_b4(**kwargs):
model_args = dict(depth_mult=1.8, width_mult=1.4, head=dict(dropout_prob=0.4))
recursive_update(model_args, kwargs)
return _build_effnet(**model_args)
@registers.models.register()
@hub.pretrained(
"https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b5/effnet_b5.pkl"
)
def effnet_b5(**kwargs):
model_args = dict(depth_mult=2.2, width_mult=1.6, head=dict(dropout_prob=0.4))
recursive_update(model_args, kwargs)
return _build_effnet(**model_args)
@registers.models.register()
@hub.pretrained(
"https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b6/effnet_b6.pkl"
)
def effnet_b6(**kwargs):
model_args = dict(depth_mult=2.6, width_mult=1.8, head=dict(dropout_prob=0.5))
recursive_update(model_args, kwargs)
return _build_effnet(**model_args)
@registers.models.register()
@hub.pretrained(
"https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b7/effnet_b7.pkl"
)
def effnet_b7(**kwargs):
model_args = dict(depth_mult=3.1, width_mult=2.0, head=dict(dropout_prob=0.5))
recursive_update(model_args, kwargs)
return _build_effnet(**model_args)
@registers.models.register()
@hub.pretrained(
"https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b8/effnet_b8.pkl"
)
def effnet_b8(**kwargs):
model_args = dict(depth_mult=3.6, width_mult=2.2, head=dict(dropout_prob=0.5))
recursive_update(model_args, kwargs)
return _build_effnet(**model_args)
@registers.models.register()
@hub.pretrained(
"https://data.megengine.org.cn/research/basecls/models/effnet/effnet_l2/effnet_l2.pkl"
)
def effnet_l2(**kwargs):
model_args = dict(depth_mult=5.3, width_mult=4.3, head=dict(dropout_prob=0.5))
recursive_update(model_args, kwargs)
return _build_effnet(**model_args)
@registers.models.register()
@hub.pretrained(
"https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b0_lite/effnet_b0_lite.pkl"
)
def effnet_b0_lite(**kwargs):
model_args = dict(depth_mult=1.0, width_mult=1.0)
recursive_update(model_args, kwargs)
return _build_effnet_lite(**model_args)
@registers.models.register()
@hub.pretrained(
"https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b1_lite/effnet_b1_lite.pkl"
)
def effnet_b1_lite(**kwargs):
model_args = dict(depth_mult=1.1, width_mult=1.0)
recursive_update(model_args, kwargs)
return _build_effnet_lite(**model_args)
@registers.models.register()
@hub.pretrained(
"https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b2_lite/effnet_b2_lite.pkl"
)
def effnet_b2_lite(**kwargs):
model_args = dict(depth_mult=1.2, width_mult=1.1, head=dict(dropout_prob=0.3))
recursive_update(model_args, kwargs)
return _build_effnet_lite(**model_args)
@registers.models.register()
@hub.pretrained(
"https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b3_lite/effnet_b3_lite.pkl"
)
def effnet_b3_lite(**kwargs):
model_args = dict(depth_mult=1.4, width_mult=1.2, head=dict(dropout_prob=0.3))
recursive_update(model_args, kwargs)
return _build_effnet_lite(**model_args)
@registers.models.register()
@hub.pretrained(
"https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b4_lite/effnet_b4_lite.pkl"
)
def effnet_b4_lite(**kwargs):
model_args = dict(depth_mult=1.8, width_mult=1.4, head=dict(dropout_prob=0.3))
recursive_update(model_args, kwargs)
return _build_effnet_lite(**model_args)
@registers.models.register()
@hub.pretrained(
"https://data.megengine.org.cn/research/basecls/models/effnet/effnetv2_b0/effnetv2_b0.pkl"
)
def effnetv2_b0(**kwargs):
model_args = dict(depth_mult=1.0, width_mult=1.0)
recursive_update(model_args, kwargs)
return _build_effnetv2(**model_args)
@registers.models.register()
@hub.pretrained(
"https://data.megengine.org.cn/research/basecls/models/effnet/effnetv2_b1/effnetv2_b1.pkl"
)
def effnetv2_b1(**kwargs):
model_args = dict(depth_mult=1.1, width_mult=1.0)
recursive_update(model_args, kwargs)
return _build_effnetv2(**model_args)
@registers.models.register()
@hub.pretrained(
"https://data.megengine.org.cn/research/basecls/models/effnet/effnetv2_b2/effnetv2_b2.pkl"
)
def effnetv2_b2(**kwargs):
model_args = dict(depth_mult=1.2, width_mult=1.1, head=dict(dropout_prob=0.3))
recursive_update(model_args, kwargs)
return _build_effnetv2(**model_args)
@registers.models.register()
@hub.pretrained(
"https://data.megengine.org.cn/research/basecls/models/effnet/effnetv2_b3/effnetv2_b3.pkl"
)
def effnetv2_b3(**kwargs):
model_args = dict(depth_mult=1.4, width_mult=1.2, head=dict(dropout_prob=0.3))
recursive_update(model_args, kwargs)
return _build_effnetv2(**model_args)
@registers.models.register()
@hub.pretrained(
"https://data.megengine.org.cn/research/basecls/models/effnet/effnetv2_s/effnetv2_s.pkl"
)
def effnetv2_s(**kwargs):
model_args = dict(stem_w=24, depths=[2, 4, 4, 6, 9, 15], widths=[24, 48, 64, 128, 160, 256])
recursive_update(model_args, kwargs)
return _build_effnetv2(**model_args)
@registers.models.register()
@hub.pretrained(
"https://data.megengine.org.cn/research/basecls/models/effnet/effnetv2_m/effnetv2_m.pkl"
)
def effnetv2_m(**kwargs):
model_args = dict(
stem_w=24,
block_name=[FuseMBConv, FuseMBConv, FuseMBConv, MBConv, MBConv, MBConv, MBConv],
depths=[3, 5, 5, 7, 14, 18, 5],
widths=[24, 48, 80, 160, 176, 304, 512],
strides=[1, 2, 2, 2, 1, 2, 1],
kernels=[3, 3, 3, 3, 3, 3, 3],
exp_rs=[1, 4, 4, 4, 6, 6, 6],
se_rs=[0, 0, 0, 0.25, 0.25, 0.25, 0.25],
head=dict(dropout_prob=0.3),
)
recursive_update(model_args, kwargs)
return _build_effnetv2(**model_args)
@registers.models.register()
@hub.pretrained(
"https://data.megengine.org.cn/research/basecls/models/effnet/effnetv2_l/effnetv2_l.pkl"
)
def effnetv2_l(**kwargs):
model_args = dict(
stem_w=32,
block_name=[FuseMBConv, FuseMBConv, FuseMBConv, MBConv, MBConv, MBConv, MBConv],
depths=[4, 7, 7, 10, 19, 25, 7],
widths=[32, 64, 96, 192, 224, 384, 640],
strides=[1, 2, 2, 2, 1, 2, 1],
kernels=[3, 3, 3, 3, 3, 3, 3],
exp_rs=[1, 4, 4, 4, 6, 6, 6],
se_rs=[0, 0, 0, 0.25, 0.25, 0.25, 0.25],
head=dict(dropout_prob=0.4),
)
recursive_update(model_args, kwargs)
return _build_effnetv2(**model_args)
|
[
"megengine.hub.pretrained"
] |
[((3301, 3328), 'basecls.utils.registers.models.register', 'registers.models.register', ([], {}), '()\n', (3326, 3328), False, 'from basecls.utils import recursive_update, registers\n'), ((8976, 9003), 'basecls.utils.registers.models.register', 'registers.models.register', ([], {}), '()\n', (9001, 9003), False, 'from basecls.utils import recursive_update, registers\n'), ((9005, 9117), 'megengine.hub.pretrained', 'hub.pretrained', (['"""https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b0/effnet_b0.pkl"""'], {}), "(\n 'https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b0/effnet_b0.pkl'\n )\n", (9019, 9117), True, 'import megengine.hub as hub\n'), ((9276, 9303), 'basecls.utils.registers.models.register', 'registers.models.register', ([], {}), '()\n', (9301, 9303), False, 'from basecls.utils import recursive_update, registers\n'), ((9305, 9417), 'megengine.hub.pretrained', 'hub.pretrained', (['"""https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b1/effnet_b1.pkl"""'], {}), "(\n 'https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b1/effnet_b1.pkl'\n )\n", (9319, 9417), True, 'import megengine.hub as hub\n'), ((9576, 9603), 'basecls.utils.registers.models.register', 'registers.models.register', ([], {}), '()\n', (9601, 9603), False, 'from basecls.utils import recursive_update, registers\n'), ((9605, 9717), 'megengine.hub.pretrained', 'hub.pretrained', (['"""https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b2/effnet_b2.pkl"""'], {}), "(\n 'https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b2/effnet_b2.pkl'\n )\n", (9619, 9717), True, 'import megengine.hub as hub\n'), ((9905, 9932), 'basecls.utils.registers.models.register', 'registers.models.register', ([], {}), '()\n', (9930, 9932), False, 'from basecls.utils import recursive_update, registers\n'), ((9934, 10046), 'megengine.hub.pretrained', 'hub.pretrained', (['"""https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b3/effnet_b3.pkl"""'], {}), "(\n 'https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b3/effnet_b3.pkl'\n )\n", (9948, 10046), True, 'import megengine.hub as hub\n'), ((10234, 10261), 'basecls.utils.registers.models.register', 'registers.models.register', ([], {}), '()\n', (10259, 10261), False, 'from basecls.utils import recursive_update, registers\n'), ((10263, 10375), 'megengine.hub.pretrained', 'hub.pretrained', (['"""https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b4/effnet_b4.pkl"""'], {}), "(\n 'https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b4/effnet_b4.pkl'\n )\n", (10277, 10375), True, 'import megengine.hub as hub\n'), ((10563, 10590), 'basecls.utils.registers.models.register', 'registers.models.register', ([], {}), '()\n', (10588, 10590), False, 'from basecls.utils import recursive_update, registers\n'), ((10592, 10704), 'megengine.hub.pretrained', 'hub.pretrained', (['"""https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b5/effnet_b5.pkl"""'], {}), "(\n 'https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b5/effnet_b5.pkl'\n )\n", (10606, 10704), True, 'import megengine.hub as hub\n'), ((10892, 10919), 'basecls.utils.registers.models.register', 'registers.models.register', ([], {}), '()\n', (10917, 10919), False, 'from basecls.utils import recursive_update, registers\n'), ((10921, 11033), 'megengine.hub.pretrained', 'hub.pretrained', 
(['"""https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b6/effnet_b6.pkl"""'], {}), "(\n 'https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b6/effnet_b6.pkl'\n )\n", (10935, 11033), True, 'import megengine.hub as hub\n'), ((11221, 11248), 'basecls.utils.registers.models.register', 'registers.models.register', ([], {}), '()\n', (11246, 11248), False, 'from basecls.utils import recursive_update, registers\n'), ((11250, 11362), 'megengine.hub.pretrained', 'hub.pretrained', (['"""https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b7/effnet_b7.pkl"""'], {}), "(\n 'https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b7/effnet_b7.pkl'\n )\n", (11264, 11362), True, 'import megengine.hub as hub\n'), ((11550, 11577), 'basecls.utils.registers.models.register', 'registers.models.register', ([], {}), '()\n', (11575, 11577), False, 'from basecls.utils import recursive_update, registers\n'), ((11579, 11691), 'megengine.hub.pretrained', 'hub.pretrained', (['"""https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b8/effnet_b8.pkl"""'], {}), "(\n 'https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b8/effnet_b8.pkl'\n )\n", (11593, 11691), True, 'import megengine.hub as hub\n'), ((11879, 11906), 'basecls.utils.registers.models.register', 'registers.models.register', ([], {}), '()\n', (11904, 11906), False, 'from basecls.utils import recursive_update, registers\n'), ((11908, 12020), 'megengine.hub.pretrained', 'hub.pretrained', (['"""https://data.megengine.org.cn/research/basecls/models/effnet/effnet_l2/effnet_l2.pkl"""'], {}), "(\n 'https://data.megengine.org.cn/research/basecls/models/effnet/effnet_l2/effnet_l2.pkl'\n )\n", (11922, 12020), True, 'import megengine.hub as hub\n'), ((12208, 12235), 'basecls.utils.registers.models.register', 'registers.models.register', ([], {}), '()\n', (12233, 12235), False, 'from basecls.utils import recursive_update, registers\n'), ((12237, 12359), 'megengine.hub.pretrained', 'hub.pretrained', (['"""https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b0_lite/effnet_b0_lite.pkl"""'], {}), "(\n 'https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b0_lite/effnet_b0_lite.pkl'\n )\n", (12251, 12359), True, 'import megengine.hub as hub\n'), ((12528, 12555), 'basecls.utils.registers.models.register', 'registers.models.register', ([], {}), '()\n', (12553, 12555), False, 'from basecls.utils import recursive_update, registers\n'), ((12557, 12679), 'megengine.hub.pretrained', 'hub.pretrained', (['"""https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b1_lite/effnet_b1_lite.pkl"""'], {}), "(\n 'https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b1_lite/effnet_b1_lite.pkl'\n )\n", (12571, 12679), True, 'import megengine.hub as hub\n'), ((12848, 12875), 'basecls.utils.registers.models.register', 'registers.models.register', ([], {}), '()\n', (12873, 12875), False, 'from basecls.utils import recursive_update, registers\n'), ((12877, 12999), 'megengine.hub.pretrained', 'hub.pretrained', (['"""https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b2_lite/effnet_b2_lite.pkl"""'], {}), "(\n 'https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b2_lite/effnet_b2_lite.pkl'\n )\n", (12891, 12999), True, 'import megengine.hub as hub\n'), ((13197, 13224), 'basecls.utils.registers.models.register', 'registers.models.register', ([], {}), '()\n', (13222, 13224), False, 'from basecls.utils import 
recursive_update, registers\n'), ((13226, 13348), 'megengine.hub.pretrained', 'hub.pretrained', (['"""https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b3_lite/effnet_b3_lite.pkl"""'], {}), "(\n 'https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b3_lite/effnet_b3_lite.pkl'\n )\n", (13240, 13348), True, 'import megengine.hub as hub\n'), ((13546, 13573), 'basecls.utils.registers.models.register', 'registers.models.register', ([], {}), '()\n', (13571, 13573), False, 'from basecls.utils import recursive_update, registers\n'), ((13575, 13697), 'megengine.hub.pretrained', 'hub.pretrained', (['"""https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b4_lite/effnet_b4_lite.pkl"""'], {}), "(\n 'https://data.megengine.org.cn/research/basecls/models/effnet/effnet_b4_lite/effnet_b4_lite.pkl'\n )\n", (13589, 13697), True, 'import megengine.hub as hub\n'), ((13895, 13922), 'basecls.utils.registers.models.register', 'registers.models.register', ([], {}), '()\n', (13920, 13922), False, 'from basecls.utils import recursive_update, registers\n'), ((13924, 14040), 'megengine.hub.pretrained', 'hub.pretrained', (['"""https://data.megengine.org.cn/research/basecls/models/effnet/effnetv2_b0/effnetv2_b0.pkl"""'], {}), "(\n 'https://data.megengine.org.cn/research/basecls/models/effnet/effnetv2_b0/effnetv2_b0.pkl'\n )\n", (13938, 14040), True, 'import megengine.hub as hub\n'), ((14203, 14230), 'basecls.utils.registers.models.register', 'registers.models.register', ([], {}), '()\n', (14228, 14230), False, 'from basecls.utils import recursive_update, registers\n'), ((14232, 14348), 'megengine.hub.pretrained', 'hub.pretrained', (['"""https://data.megengine.org.cn/research/basecls/models/effnet/effnetv2_b1/effnetv2_b1.pkl"""'], {}), "(\n 'https://data.megengine.org.cn/research/basecls/models/effnet/effnetv2_b1/effnetv2_b1.pkl'\n )\n", (14246, 14348), True, 'import megengine.hub as hub\n'), ((14511, 14538), 'basecls.utils.registers.models.register', 'registers.models.register', ([], {}), '()\n', (14536, 14538), False, 'from basecls.utils import recursive_update, registers\n'), ((14540, 14656), 'megengine.hub.pretrained', 'hub.pretrained', (['"""https://data.megengine.org.cn/research/basecls/models/effnet/effnetv2_b2/effnetv2_b2.pkl"""'], {}), "(\n 'https://data.megengine.org.cn/research/basecls/models/effnet/effnetv2_b2/effnetv2_b2.pkl'\n )\n", (14554, 14656), True, 'import megengine.hub as hub\n'), ((14848, 14875), 'basecls.utils.registers.models.register', 'registers.models.register', ([], {}), '()\n', (14873, 14875), False, 'from basecls.utils import recursive_update, registers\n'), ((14877, 14993), 'megengine.hub.pretrained', 'hub.pretrained', (['"""https://data.megengine.org.cn/research/basecls/models/effnet/effnetv2_b3/effnetv2_b3.pkl"""'], {}), "(\n 'https://data.megengine.org.cn/research/basecls/models/effnet/effnetv2_b3/effnetv2_b3.pkl'\n )\n", (14891, 14993), True, 'import megengine.hub as hub\n'), ((15185, 15212), 'basecls.utils.registers.models.register', 'registers.models.register', ([], {}), '()\n', (15210, 15212), False, 'from basecls.utils import recursive_update, registers\n'), ((15214, 15328), 'megengine.hub.pretrained', 'hub.pretrained', (['"""https://data.megengine.org.cn/research/basecls/models/effnet/effnetv2_s/effnetv2_s.pkl"""'], {}), "(\n 'https://data.megengine.org.cn/research/basecls/models/effnet/effnetv2_s/effnetv2_s.pkl'\n )\n", (15228, 15328), True, 'import megengine.hub as hub\n'), ((15533, 15560), 
'basecls.utils.registers.models.register', 'registers.models.register', ([], {}), '()\n', (15558, 15560), False, 'from basecls.utils import recursive_update, registers\n'), ((15562, 15676), 'megengine.hub.pretrained', 'hub.pretrained', (['"""https://data.megengine.org.cn/research/basecls/models/effnet/effnetv2_m/effnetv2_m.pkl"""'], {}), "(\n 'https://data.megengine.org.cn/research/basecls/models/effnet/effnetv2_m/effnetv2_m.pkl'\n )\n", (15576, 15676), True, 'import megengine.hub as hub\n'), ((16212, 16239), 'basecls.utils.registers.models.register', 'registers.models.register', ([], {}), '()\n', (16237, 16239), False, 'from basecls.utils import recursive_update, registers\n'), ((16241, 16355), 'megengine.hub.pretrained', 'hub.pretrained', (['"""https://data.megengine.org.cn/research/basecls/models/effnet/effnetv2_l/effnetv2_l.pkl"""'], {}), "(\n 'https://data.megengine.org.cn/research/basecls/models/effnet/effnetv2_l/effnetv2_l.pkl'\n )\n", (16255, 16355), True, 'import megengine.hub as hub\n'), ((8251, 8287), 'basecls.utils.recursive_update', 'recursive_update', (['model_args', 'kwargs'], {}), '(model_args, kwargs)\n', (8267, 8287), False, 'from basecls.utils import recursive_update, registers\n'), ((8427, 8463), 'basecls.utils.recursive_update', 'recursive_update', (['model_args', 'kwargs'], {}), '(model_args, kwargs)\n', (8443, 8463), False, 'from basecls.utils import recursive_update, registers\n'), ((8897, 8933), 'basecls.utils.recursive_update', 'recursive_update', (['model_args', 'kwargs'], {}), '(model_args, kwargs)\n', (8913, 8933), False, 'from basecls.utils import recursive_update, registers\n'), ((9197, 9233), 'basecls.utils.recursive_update', 'recursive_update', (['model_args', 'kwargs'], {}), '(model_args, kwargs)\n', (9213, 9233), False, 'from basecls.utils import recursive_update, registers\n'), ((9497, 9533), 'basecls.utils.recursive_update', 'recursive_update', (['model_args', 'kwargs'], {}), '(model_args, kwargs)\n', (9513, 9533), False, 'from basecls.utils import recursive_update, registers\n'), ((9826, 9862), 'basecls.utils.recursive_update', 'recursive_update', (['model_args', 'kwargs'], {}), '(model_args, kwargs)\n', (9842, 9862), False, 'from basecls.utils import recursive_update, registers\n'), ((10155, 10191), 'basecls.utils.recursive_update', 'recursive_update', (['model_args', 'kwargs'], {}), '(model_args, kwargs)\n', (10171, 10191), False, 'from basecls.utils import recursive_update, registers\n'), ((10484, 10520), 'basecls.utils.recursive_update', 'recursive_update', (['model_args', 'kwargs'], {}), '(model_args, kwargs)\n', (10500, 10520), False, 'from basecls.utils import recursive_update, registers\n'), ((10813, 10849), 'basecls.utils.recursive_update', 'recursive_update', (['model_args', 'kwargs'], {}), '(model_args, kwargs)\n', (10829, 10849), False, 'from basecls.utils import recursive_update, registers\n'), ((11142, 11178), 'basecls.utils.recursive_update', 'recursive_update', (['model_args', 'kwargs'], {}), '(model_args, kwargs)\n', (11158, 11178), False, 'from basecls.utils import recursive_update, registers\n'), ((11471, 11507), 'basecls.utils.recursive_update', 'recursive_update', (['model_args', 'kwargs'], {}), '(model_args, kwargs)\n', (11487, 11507), False, 'from basecls.utils import recursive_update, registers\n'), ((11800, 11836), 'basecls.utils.recursive_update', 'recursive_update', (['model_args', 'kwargs'], {}), '(model_args, kwargs)\n', (11816, 11836), False, 'from basecls.utils import recursive_update, registers\n'), ((12129, 12165), 
'basecls.utils.recursive_update', 'recursive_update', (['model_args', 'kwargs'], {}), '(model_args, kwargs)\n', (12145, 12165), False, 'from basecls.utils import recursive_update, registers\n'), ((12444, 12480), 'basecls.utils.recursive_update', 'recursive_update', (['model_args', 'kwargs'], {}), '(model_args, kwargs)\n', (12460, 12480), False, 'from basecls.utils import recursive_update, registers\n'), ((12764, 12800), 'basecls.utils.recursive_update', 'recursive_update', (['model_args', 'kwargs'], {}), '(model_args, kwargs)\n', (12780, 12800), False, 'from basecls.utils import recursive_update, registers\n'), ((13113, 13149), 'basecls.utils.recursive_update', 'recursive_update', (['model_args', 'kwargs'], {}), '(model_args, kwargs)\n', (13129, 13149), False, 'from basecls.utils import recursive_update, registers\n'), ((13462, 13498), 'basecls.utils.recursive_update', 'recursive_update', (['model_args', 'kwargs'], {}), '(model_args, kwargs)\n', (13478, 13498), False, 'from basecls.utils import recursive_update, registers\n'), ((13811, 13847), 'basecls.utils.recursive_update', 'recursive_update', (['model_args', 'kwargs'], {}), '(model_args, kwargs)\n', (13827, 13847), False, 'from basecls.utils import recursive_update, registers\n'), ((14122, 14158), 'basecls.utils.recursive_update', 'recursive_update', (['model_args', 'kwargs'], {}), '(model_args, kwargs)\n', (14138, 14158), False, 'from basecls.utils import recursive_update, registers\n'), ((14430, 14466), 'basecls.utils.recursive_update', 'recursive_update', (['model_args', 'kwargs'], {}), '(model_args, kwargs)\n', (14446, 14466), False, 'from basecls.utils import recursive_update, registers\n'), ((14767, 14803), 'basecls.utils.recursive_update', 'recursive_update', (['model_args', 'kwargs'], {}), '(model_args, kwargs)\n', (14783, 14803), False, 'from basecls.utils import recursive_update, registers\n'), ((15104, 15140), 'basecls.utils.recursive_update', 'recursive_update', (['model_args', 'kwargs'], {}), '(model_args, kwargs)\n', (15120, 15140), False, 'from basecls.utils import recursive_update, registers\n'), ((15452, 15488), 'basecls.utils.recursive_update', 'recursive_update', (['model_args', 'kwargs'], {}), '(model_args, kwargs)\n', (15468, 15488), False, 'from basecls.utils import recursive_update, registers\n'), ((16131, 16167), 'basecls.utils.recursive_update', 'recursive_update', (['model_args', 'kwargs'], {}), '(model_args, kwargs)\n', (16147, 16167), False, 'from basecls.utils import recursive_update, registers\n'), ((16811, 16847), 'basecls.utils.recursive_update', 'recursive_update', (['model_args', 'kwargs'], {}), '(model_args, kwargs)\n', (16827, 16847), False, 'from basecls.utils import recursive_update, registers\n'), ((2377, 2471), 'basecls.layers.conv2d', 'conv2d', (['w_mid', 'w_out', '(1 if exp_r != 1.0 else kernel)'], {'stride': '(1 if exp_r != 1.0 else stride)'}), '(w_mid, w_out, 1 if exp_r != 1.0 else kernel, stride=1 if exp_r != \n 1.0 else stride)\n', (2383, 2471), False, 'from basecls.layers import SE, DropPath, activation, build_head, conv2d, init_weights, make_divisible, norm2d\n'), ((2512, 2536), 'basecls.layers.norm2d', 'norm2d', (['norm_name', 'w_out'], {}), '(norm_name, w_out)\n', (2518, 2536), False, 'from basecls.layers import SE, DropPath, activation, build_head, conv2d, init_weights, make_divisible, norm2d\n'), ((2052, 2094), 'basecls.layers.conv2d', 'conv2d', (['w_in', 'w_exp', 'kernel'], {'stride': 'stride'}), '(w_in, w_exp, kernel, stride=stride)\n', (2058, 2094), False, 'from basecls.layers 
import SE, DropPath, activation, build_head, conv2d, init_weights, make_divisible, norm2d\n'), ((2121, 2145), 'basecls.layers.norm2d', 'norm2d', (['norm_name', 'w_exp'], {}), '(norm_name, w_exp)\n', (2127, 2145), False, 'from basecls.layers import SE, DropPath, activation, build_head, conv2d, init_weights, make_divisible, norm2d\n'), ((2173, 2193), 'basecls.layers.activation', 'activation', (['act_name'], {}), '(act_name)\n', (2183, 2193), False, 'from basecls.layers import SE, DropPath, activation, build_head, conv2d, init_weights, make_divisible, norm2d\n'), ((2314, 2339), 'basecls.layers.SE', 'SE', (['w_mid', 'w_se', 'act_name'], {}), '(w_mid, w_se, act_name)\n', (2316, 2339), False, 'from basecls.layers import SE, DropPath, activation, build_head, conv2d, init_weights, make_divisible, norm2d\n'), ((2636, 2656), 'basecls.layers.activation', 'activation', (['act_name'], {}), '(act_name)\n', (2646, 2656), False, 'from basecls.layers import SE, DropPath, activation, build_head, conv2d, init_weights, make_divisible, norm2d\n'), ((2794, 2818), 'basecls.layers.DropPath', 'DropPath', (['drop_path_prob'], {}), '(drop_path_prob)\n', (2802, 2818), False, 'from basecls.layers import SE, DropPath, activation, build_head, conv2d, init_weights, make_divisible, norm2d\n'), ((5292, 5344), 'basecls.layers.make_divisible', 'make_divisible', (['(stem_w * width_mult)'], {'round_limit': '(0.9)'}), '(stem_w * width_mult, round_limit=0.9)\n', (5306, 5344), False, 'from basecls.layers import SE, DropPath, activation, build_head, conv2d, init_weights, make_divisible, norm2d\n'), ((5599, 5646), 'basecls.layers.make_divisible', 'make_divisible', (['(w * width_mult)'], {'round_limit': '(0.9)'}), '(w * width_mult, round_limit=0.9)\n', (5613, 5646), False, 'from basecls.layers import SE, DropPath, activation, build_head, conv2d, init_weights, make_divisible, norm2d\n'), ((7014, 7059), 'basecls.layers.build_head', 'build_head', (['prev_w', 'head', 'norm_name', 'act_name'], {}), '(prev_w, head, norm_name, act_name)\n', (7024, 7059), False, 'from basecls.layers import SE, DropPath, activation, build_head, conv2d, init_weights, make_divisible, norm2d\n'), ((5142, 5167), 'math.ceil', 'math.ceil', (['(d * depth_mult)'], {}), '(d * depth_mult)\n', (5151, 5167), False, 'import math\n'), ((6930, 6989), 'basecls.layers.make_divisible', 'make_divisible', (["(head['width'] * width_mult)"], {'round_limit': '(0.9)'}), "(head['width'] * width_mult, round_limit=0.9)\n", (6944, 6989), False, 'from basecls.layers import SE, DropPath, activation, build_head, conv2d, init_weights, make_divisible, norm2d\n')]
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# pylint: disable=too-many-lines
from typing import Optional, Tuple, Union
import megengine._internal as mgb
from megengine._internal import CompGraph, CompNode
from ..core import Tensor, wrap_io_tensor
from ..core.graph import _use_default_if_none
from ..jit import barrier, mark_impure
from ..random import uniform
from ..utils.types import _pair, _pair_nonzero
from .debug_param import get_conv_execution_strategy
from .tensor import concat
from .utils import _decide_comp_node_and_comp_graph
@wrap_io_tensor
def linear(inp: Tensor, weight: Tensor, bias: Optional[Tensor] = None) -> Tensor:
"""Applies a linear transformation to the input.
Refer to :class:`~.Linear` for more information.
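    Example (an illustrative sketch; the shapes are arbitrary and only demonstrate
    the call pattern, with ``weight`` laid out as ``(out_features, in_features)``):
    .. testcode::
        import numpy as np
        from megengine import tensor
        import megengine.functional as F
        inp = tensor(np.arange(6, dtype=np.float32).reshape(2, 3))
        weight = tensor(np.ones((4, 3), dtype=np.float32))
        out = F.linear(inp, weight)
        print(out.numpy().shape)
    Outputs:
    .. testoutput::
        (2, 4)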
"""
orig_shape = inp.shape
inp = inp.reshape(-1, orig_shape[-1])
ret = mgb.opr.matrix_mul(inp, weight, transposeB=True)
ret = ret.reshape(orig_shape[:-1], weight.shape[0])
if bias is not None:
ret += bias
return ret
@wrap_io_tensor
def conv2d(
inp: Tensor,
weight: Tensor,
bias: Optional[Tensor] = None,
stride: Union[int, Tuple[int, int]] = 1,
padding: Union[int, Tuple[int, int]] = 0,
dilation: Union[int, Tuple[int, int]] = 1,
groups: int = 1,
conv_mode="CROSS_CORRELATION",
compute_mode="DEFAULT",
) -> Tensor:
"""2D convolution operation.
:param inp: The feature map of the convolution operation
:param weight: The convolution kernel
:param bias: The bias added to the result of convolution (if given)
:param stride: Stride of the 2D convolution operation. Default: 1
:param padding: Size of the paddings added to the input on both sides of its
spatial dimensions. Only zero-padding is supported. Default: 0
:param dilation: Dilation of the 2D convolution operation. Default: 1
:param groups: number of groups to divide input and output channels into,
so as to perform a "grouped convolution". When ``groups`` is not 1,
``in_channels`` and ``out_channels`` must be divisible by ``groups``,
and the shape of weight should be ``(groups, out_channel // groups,
in_channels // groups, height, width)``.
:type conv_mode: string or :class:`mgb.opr_param_defs.Convolution.Mode`
:param conv_mode: Supports 'CROSS_CORRELATION' or 'CONVOLUTION'. Default:
'CROSS_CORRELATION'.
:type compute_mode: string or
:class:`mgb.opr_param_defs.Convolution.ComputeMode`
:param compute_mode: When set to 'DEFAULT', no special requirements will be
placed on the precision of intermediate results. When set to 'FLOAT32',
Float32 would be used for accumulator and intermediate result, but only
effective when input and output are of Float16 dtype.
Refer to :class:`~.Conv2d` for more information.
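    Example (an illustrative sketch; the shapes below are chosen only for demonstration):
    .. testcode::
        import numpy as np
        from megengine import tensor
        import megengine.functional as F
        inp = tensor(np.ones((1, 3, 8, 8), dtype=np.float32))
        weight = tensor(np.ones((16, 3, 3, 3), dtype=np.float32))
        out = F.conv2d(inp, weight, stride=1, padding=1)
        print(out.numpy().shape)
    Outputs:
    .. testoutput::
        (1, 16, 8, 8)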
"""
ph, pw = _pair(padding)
sh, sw = _pair_nonzero(stride)
dh, dw = _pair_nonzero(dilation)
Sparse = mgb.opr_param_defs.Convolution.Sparse
sparse_type = Sparse.DENSE if groups == 1 else Sparse.GROUP
res = mgb.opr.convolution(
inp,
weight,
pad_h=ph,
pad_w=pw,
stride_h=sh,
stride_w=sw,
dilate_h=dh,
dilate_w=dw,
format="NCHW",
strategy=get_conv_execution_strategy(),
mode=conv_mode,
compute_mode=compute_mode,
sparse=sparse_type,
)
if bias is not None:
res += bias
return res
@wrap_io_tensor
def conv_transpose2d(
inp: Tensor,
weight: Tensor,
bias: Optional[Tensor] = None,
stride: Union[int, Tuple[int, int]] = 1,
padding: Union[int, Tuple[int, int]] = 0,
dilation: Union[int, Tuple[int, int]] = 1,
groups: int = 1,
conv_mode="CROSS_CORRELATION",
compute_mode="DEFAULT",
) -> Tensor:
"""2D transposed convolution operation.
:param inp: The feature map of the convolution operation
:param weight: The convolution kernel
:param bias: The bias added to the result of convolution (if given)
:param stride: Stride of the 2D convolution operation. Default: 1
:param padding: Size of the paddings added to the input on both sides of its
spatial dimensions. Only zero-padding is supported. Default: 0
:param dilation: Dilation of the 2D convolution operation. Default: 1
:param groups: number of groups to divide input and output channels into,
so as to perform a "grouped convolution". When ``groups`` is not 1,
``in_channels`` and ``out_channels`` must be divisible by ``groups``,
and the shape of weight should be ``(groups, out_channel // groups,
in_channels // groups, height, width)``. Default: 1
:type conv_mode: string or :class:`mgb.opr_param_defs.Convolution.Mode`
:param conv_mode: Supports 'CROSS_CORRELATION' or 'CONVOLUTION'. Default:
'CROSS_CORRELATION'.
:type compute_mode: string or
:class:`mgb.opr_param_defs.Convolution.ComputeMode`
:param compute_mode: When set to 'DEFAULT', no special requirements will be
placed on the precision of intermediate results. When set to 'FLOAT32',
Float32 would be used for accumulator and intermediate result, but only
effective when input and output are of Float16 dtype.
Refer to :class:`~.ConvTranspose2d` for more information.
"""
ph, pw = _pair(padding)
sh, sw = _pair_nonzero(stride)
dh, dw = _pair_nonzero(dilation)
Sparse = mgb.opr_param_defs.Convolution.Sparse
sparse_type = Sparse.DENSE if groups == 1 else Sparse.GROUP
res = mgb.opr.deconvolution(
inp,
weight,
pad_h=ph,
pad_w=pw,
stride_h=sh,
stride_w=sw,
dilate_h=dh,
dilate_w=dw,
format="NCHW",
strategy=get_conv_execution_strategy(),
mode=conv_mode,
compute_mode=compute_mode,
sparse=sparse_type,
)
if bias is not None:
res += bias
return res
@wrap_io_tensor
def max_pool2d(
inp: Tensor,
kernel_size: Union[int, Tuple[int, int]],
stride: Optional[Union[int, Tuple[int, int]]] = None,
padding: Union[int, Tuple[int, int]] = 0,
) -> Tensor:
"""Applies a 2D max pooling over an input.
:param inp: The input tensor.
:param kernel_size: The size of the window.
:param stride: The stride of the window. If not provided, its value is set to ``kernel_size``.
Default: None
:param padding: Implicit zero padding to be added on both sides. Default: 0
Refer to :class:`~.MaxPool2d` for more information.
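    Example (an illustrative sketch; a 4x4 map pooled with a 2x2 window and stride 2):
    .. testcode::
        import numpy as np
        from megengine import tensor
        import megengine.functional as F
        inp = tensor(np.arange(16, dtype=np.float32).reshape(1, 1, 4, 4))
        out = F.max_pool2d(inp, kernel_size=2, stride=2)
        print(out.numpy().shape)
    Outputs:
    .. testoutput::
        (1, 1, 2, 2)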
"""
kh, kw = _pair_nonzero(kernel_size)
sh, sw = _pair_nonzero(stride or kernel_size)
ph, pw = _pair(padding)
mode = mgb.opr_param_defs.Pooling.Mode.MAX
return mgb.opr.pooling(
inp,
mode=mode,
format="NCHW",
stride_h=sh,
stride_w=sw,
pad_h=ph,
pad_w=pw,
window_h=kh,
window_w=kw,
)
@wrap_io_tensor
def avg_pool2d(
inp: Tensor,
kernel_size: Union[int, Tuple[int, int]],
stride: Optional[Union[int, Tuple[int, int]]] = None,
padding: Union[int, Tuple[int, int]] = 0,
) -> Tensor:
""" Applies a 2D average pooling over an input.
:param inp: The input tensor.
:param kernel_size: The size of the window.
:param stride: The stride of the window. If not provided, its value is set to ``kernel_size``.
Default: None
:param padding: Implicit zero padding to be added on both sides. Default: 0
Refer to :class:`~.AvgPool2d` for more information.
"""
kh, kw = _pair_nonzero(kernel_size)
sh, sw = _pair_nonzero(stride or kernel_size)
ph, pw = _pair(padding)
mode = mgb.opr_param_defs.Pooling.Mode.AVERAGE
return mgb.opr.pooling(
inp,
mode=mode,
format="NCHW",
stride_h=sh,
stride_w=sw,
pad_h=ph,
pad_w=pw,
window_h=kh,
window_w=kw,
)
@wrap_io_tensor
def prelu(inp: Tensor, weight: Tensor) -> Tensor:
r"""
Applies the element-wise PReLU function.
Refer to :class:`~.PReLU` for more information.
"""
return mgb.opr.elemwise(inp, 0, mode="MAX") + weight * mgb.opr.elemwise(
inp, 0, mode="MIN"
)
@wrap_io_tensor
def leaky_relu(inp: Tensor, negative_slope: float = 0.01) -> Tensor:
r"""
Applies the element-wise leaky_relu function
Refer to :class:`~.LeakyReLU` for more information.
"""
return mgb.opr.elemwise(inp, 0, mode="MAX") + negative_slope * mgb.opr.elemwise(
inp, 0, mode="MIN"
)
@wrap_io_tensor
def flatten(inp: Tensor, start_axis: int = 0, end_axis: int = -1) -> Tensor:
r"""
Reshapes the tensor by flattening the sub-tensor from dimension ``start_axis`` to dimension ``end_axis``.
:param inp: The input tensor.
:param start_axis: The start dimension that the sub-tensor to be flattened. Default: 0
:param end_axis: The end dimension that the sub-tensor to be flattened. Default: -1
Examples:
.. testcode::
import numpy as np
from megengine import tensor
import megengine.functional as F
inp_shape = (2, 2, 3, 3)
inp = tensor(
np.arange(36, dtype=np.int32).reshape(inp_shape),
)
oup = F.flatten(inp, 2)
print(inp.numpy().shape)
print(oup.numpy().shape)
Outputs:
.. testoutput::
(2, 2, 3, 3)
(2, 2, 9)
"""
target_shape = tuple(inp.shape[i] for i in range(start_axis)) + (-1,)
if end_axis != -1:
target_shape += (inp.shape[end_axis + 1 :],)
return inp.reshape(*target_shape)
def _get_softmax_axis(ndim: int) -> int:
if ndim in (0, 1, 3):
return 0
return 1
@wrap_io_tensor
def softmax(inp: Tensor, axis: Optional[int] = None) -> Tensor:
r"""
Applies a softmax function. Softmax is defined as:
.. math::
\text{Softmax}(x_{i}) = \frac{\exp(x_i)}{\sum_j \exp(x_j)}
It is applied to all elements along axis, and will re-scale them so that
the elements lie in the range `[0, 1]` and sum to 1.
See :class:`~megengine.module.activation.Softmax` for more details.
:param inp: The input tensor.
:param axis: An axis along which softmax will be applied. By default,
softmax will apply along the highest ranked axis.
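    Example (an illustrative sketch; equal logits map to equal probabilities):
    .. testcode::
        import numpy as np
        from megengine import tensor
        import megengine.functional as F
        x = tensor(np.ones((1, 4), dtype=np.float32))
        out = F.softmax(x)
        print(out.numpy())
    Outputs:
    .. testoutput::
        [[0.25 0.25 0.25 0.25]]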
"""
if axis is None:
axis = _get_softmax_axis(len(inp.imm_shape))
offset = mgb.opr.zero_grad(inp.max(axis=axis, keepdims=True))
inp = inp - offset
down = mgb.opr.elem.exp(inp).sum(axis=axis, keepdims=True)
return mgb.opr.elem.exp(inp) / down
@wrap_io_tensor
def batch_norm2d(
inp: Tensor,
running_mean: Tensor,
running_var: Tensor,
weight: Optional[Tensor] = None,
bias: Optional[Tensor] = None,
training: bool = False,
momentum: float = 0.9,
eps: float = 1e-5,
) -> Tensor:
"""Applies batch normalization to the input.
:param inp: input tensor.
:param running_mean: tensor to store running mean.
:param running_var: tensor to store running variance.
:param weight: scaling tensor in the learnable affine parameters.
See :math:`\gamma` in :class:`~.BatchNorm2d`
:param bias: bias tensor in the learnable affine parameters.
See :math:`\beta` in :class:`~.BatchNorm2d`
:param training: a boolean value to indicate whether batch norm is performed
        in training mode. Default: ``False``
:param momentum: the value used for the ``running_mean`` and ``running_var``
computation.
Default: 0.9
:param eps: a value added to the denominator for numerical stability.
Default: 1e-5.
Refer to :class:`~.BatchNorm2d` and :class:`~.BatchNorm1d` for more information.
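    Example (an illustrative sketch of an inference-mode call; the running
    statistics below are dummy values shaped ``(1, C, 1, 1)``):
    .. testcode::
        import numpy as np
        from megengine import tensor
        import megengine.functional as F
        inp = tensor(np.ones((2, 3, 4, 4), dtype=np.float32))
        running_mean = tensor(np.zeros((1, 3, 1, 1), dtype=np.float32))
        running_var = tensor(np.ones((1, 3, 1, 1), dtype=np.float32))
        out = F.batch_norm2d(inp, running_mean, running_var, training=False)
        print(out.numpy().shape)
    Outputs:
    .. testoutput::
        (2, 3, 4, 4)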
"""
inp = mgb.opr.mark_no_broadcast_elemwise(inp)
_channels = inp.imm_shape[1]
_ndim = len(inp.imm_shape)
_param_shape = (1, _channels) + (1,) * (_ndim - 2)
assert _ndim == 4, "only 4D tensor supported"
if weight is not None:
weight = weight.reshape(*_param_shape)
else:
weight = mgb.make_immutable(*_use_default_if_none(None, None), 1.0).broadcast(
*_param_shape
)
if bias is not None:
bias = bias.reshape(*_param_shape)
else:
bias = mgb.make_immutable(*_use_default_if_none(None, None), 0.0).broadcast(
*_param_shape
)
FwdMode = mgb.opr_param_defs.BN.FwdMode
fwdmode = FwdMode.TRAINING if training else FwdMode.INFERENCE
avg_factor = 1 - momentum
if running_mean is not None and running_var is not None:
if training:
inp = barrier(inp)
output = mgb.opr.batch_norm(
inp,
weight,
bias,
running_mean,
running_var,
param_dim="DIM_1C11",
fwd_mode=fwdmode,
epsilon=eps,
avg_factor=avg_factor,
)[-1]
if training:
mark_impure(output)
else:
output = mgb.opr.batch_norm_no_statistic(
inp,
weight,
bias,
param_dim="DIM_1C11",
fwd_mode=fwdmode,
epsilon=eps,
avg_factor=avg_factor,
)[-1]
return output
def one_hot(inp: Tensor, num_classes: int = -1) -> Tensor:
r"""
Perform one-hot encoding for the input tensor.
:param inp: input tensor
:param num_classes: number of classes denotes the last dimension of the output tensor
Examples:
.. testcode::
import numpy as np
from megengine import tensor
import megengine.functional as F
inp = tensor(np.arange(1, 4, dtype=np.int32))
out = F.one_hot(inp)
print(out.numpy())
Outputs:
.. testoutput::
[[0 1 0 0]
[0 0 1 0]
[0 0 0 1]]
"""
comp_node, comp_graph = _decide_comp_node_and_comp_graph(inp)
if num_classes == -1:
num_classes = inp.max() + 1
zeros = mgb.make_immutable(value=0, comp_node=comp_node, comp_graph=comp_graph)
zeros_symvar = zeros.broadcast(inp.shapeof(), num_classes)
ones = mgb.make_immutable(value=1, comp_node=comp_node, comp_graph=comp_graph)
ones_symvar = ones.broadcast(inp.shapeof(), 1)
return Tensor(
mgb.opr.indexing_set_one_hot(
zeros_symvar, axis=len(inp.shapeof()), index=inp, value=ones_symvar
)
)
@wrap_io_tensor
def warp_perspective(
inp: Tensor,
M: Tensor,
dsize: Union[Tuple[int, int], int, Tensor],
border_mode: str = "REPLICATE",
border_val: float = 0.0,
interp_mode: str = "LINEAR",
):
r"""
Applies perspective transformation to batched 2D images.
The input images are transformed to the output images by the transformation matrix:
.. math::
\text{output}(n, c, h, w) = \text{input} \left( n, c,
\frac{M_{00}h + M_{01}w + M_{02}}{M_{20}h + M_{21}w + M_{22}},
\frac{M_{10}h + M_{11}w + M_{12}}{M_{20}h + M_{21}w + M_{22}}
\right)
:param inp: input image
:param M: (batch, 3, 3) transformation matrix
:param dsize: (h, w) size of the output image
:param border_mode: pixel extrapolation method. Default: ``"REPLICATE"``
:param border_val: value used in case of a constant border. Default: ``0``
:param interp_mode: interpolation methods. Default: ``"LINEAR"``
Examples:
.. testcode::
import numpy as np
from megengine import tensor
import megengine.functional as F
inp_shape = (1, 1, 4, 4)
inp = tensor(np.arange(16, dtype=np.float32).reshape(inp_shape))
M_shape = (1, 3, 3)
        # M defines a translation: dst(1, 1, h, w) = src(1, 1, h+1, w+1)
M = tensor(np.array([[1., 0., 1.],
[0., 1., 1.],
[0., 0., 1.]], dtype=np.float32).reshape(M_shape))
out = F.warp_perspective(inp, M, (2, 2))
print(out.numpy())
Outputs:
.. testoutput::
[[[[ 5. 6.]
[ 9. 10.]]]]
"""
return mgb.opr.warp_perspective(
inp,
M,
dsize,
bmode=border_mode,
border_val=border_val,
imode=interp_mode,
format="NCHW",
)
@wrap_io_tensor
def eye(
n: int,
m: Optional[int] = None,
*,
dtype=None,
device: Optional[CompNode] = None,
comp_graph: Optional[CompGraph] = None
) -> Tensor:
"""
Fills the 2-dimensional input :class:`SymbolVar` with the identity matrix.
:param n: The number of rows
:param m: The number of columns, default to None
:param dtype: The data type, default to None
:param device: Compute node of the matrix, defaults to None
:param comp_graph: Compute graph of the matrix, defaults to None
:return: The eye matrix
Examples:
.. testcode::
import numpy as np
import megengine.functional as F
data_shape = (4, 6)
n, m = data_shape
out = F.eye(n, m, dtype=np.float32)
print(out.numpy())
Outputs:
.. testoutput::
[[1. 0. 0. 0. 0. 0.]
[0. 1. 0. 0. 0. 0.]
[0. 0. 1. 0. 0. 0.]
[0. 0. 0. 1. 0. 0.]]
"""
device, comp_graph = _use_default_if_none(device, comp_graph)
if m is None:
m = n
return mgb.opr.eye((n, m), dtype=dtype, comp_node=device, comp_graph=comp_graph)
@wrap_io_tensor
def matrix_mul(inp1: Tensor, inp2: Tensor) -> Tensor:
"""
Performs a matrix multiplication of the matrices ``inp1`` and ``inp2``
:param inp1: The first matrix to be multiplied (a, b)
:param inp2: The second matrix to be multiplied (b, c)
:return: The output tensor (a, c)
Examples:
.. testcode::
import numpy as np
from megengine import tensor
import megengine.functional as F
shape_1 = (2, 3)
        shape_2 = (3, 2)
data1 = tensor(np.arange(0, 6, dtype=np.float32).reshape(2, 3))
data2 = tensor(np.arange(0, 6, dtype=np.float32).reshape(3, 2))
out = F.matrix_mul(data1, data2)
print(out.numpy())
Outputs:
.. testoutput::
[[10. 13.]
[28. 40.]]
"""
return mgb.opr.matrix_mul(inp1, inp2)
@wrap_io_tensor
def batched_matrix_mul(inp1: Tensor, inp2: Tensor) -> Tensor:
"""
    Performs a batched multiplication of the batched matrices ``inp1`` and ``inp2``
:param inp1: The first batch matrix to be multiplied (n, a, b)
:param inp2: The second batch matrix to be multiplied (n, b, c)
:return: The output batch (n, a, c)
Examples:
.. testcode::
import numpy as np
from megengine import tensor
import megengine.functional as F
batch_size = 3
shape_1 = (batch_size, 2, 3)
shape_2 = (batch_size, 3, 4)
data1 = tensor(
np.arange(0, batch_size * 6, dtype=np.float32).reshape(batch_size, 2, 3))
data2 = tensor(
np.arange(0, batch_size * 12, dtype=np.float32).reshape(batch_size, 3, 4))
out = F.batched_matrix_mul(data1, data2)
print(out.numpy())
Outputs:
.. testoutput::
[[[ 20. 23. 26. 29.]
[ 56. 68. 80. 92.]]
[[ 344. 365. 386. 407.]
[ 488. 518. 548. 578.]]
[[1100. 1139. 1178. 1217.]
[1352. 1400. 1448. 1496.]]]
"""
return mgb.opr.batched_matrix_mul(inp1, inp2)
@wrap_io_tensor
def interpolate(
inp: Tensor,
size: Optional[Union[int, Tuple[int, int]]] = None,
scale_factor: Optional[Union[float, Tuple[float, float]]] = None,
mode: str = "BILINEAR",
align_corners: bool = None,
) -> Tensor:
r"""
Down/up samples the input tensor to either the given :attr:`size` or the given
:attr:`scale_factor`
:param inp: input tensor
:param size: size of the output tensor. Default: ``None``
:param scale_factor: scaling factor of the output tensor. Default: ``None``
:param mode: interpolation methods, acceptable values are:
'bilinear'(default), 'linear', 'nearest' (todo), 'cubic' (todo), 'area' (todo)
Examples:
.. testcode::
import numpy as np
from megengine import tensor
import megengine.functional as F
from megengine.test import assertTensorClose
inp = tensor(np.arange(1, 5, dtype=np.float32).reshape(1, 1, 2, 2))
out = F.interpolate(inp, [4, 4], align_corners=False)
print(out.numpy())
out2 = F.interpolate(inp, scale_factor=2.)
assertTensorClose(out.numpy(), out2.numpy())
Outputs:
.. testoutput::
[[[[1. 1.25 1.75 2. ]
[1.5 1.75 2.25 2.5 ]
[2.5 2.75 3.25 3.5 ]
[3. 3.25 3.75 4. ]]]]
"""
mode = mode.upper()
if mode not in ["BILINEAR", "LINEAR"]:
raise ValueError("interpolate only support bilinear mode")
if mode not in ["BILINEAR", "LINEAR"]:
if align_corners is not None:
raise ValueError(
"align_corners option can only be set in the bilinear/linear interpolating mode"
)
else:
if align_corners is None:
align_corners = False
if mode == "LINEAR":
inp = mgb.opr.add_axis(inp, 3)
if len(inp.imm_shape) != 4:
raise ValueError("shape of input tensor must correspond to the operartion mode")
if size is None:
if scale_factor is None:
raise ValueError("scale_factor must not be None when size is None")
if isinstance(scale_factor, (float, int)):
scale_factor = float(scale_factor)
if mode == "LINEAR":
scale_factor = (scale_factor, float(1))
else:
scale_factor = (scale_factor, scale_factor)
else:
if mode == "LINEAR":
raise ValueError(
"under LINEAR mode, scale_factor can only be single value"
)
assert len(scale_factor) == 2, "shape of scale_factor must be equal to (2, )"
assert isinstance(scale_factor[0], float) and isinstance(
scale_factor[1], float
), "scale_factor must be float type"
dsize = tuple(
mgb.opr.elemwise(inp.shape[i + 2] * scale_factor[i], mode="FLOOR")
for i in range(2)
)
dsize = mgb.opr.concat([dsize[0], dsize[1]], axis=0)
else:
if scale_factor is not None:
raise ValueError("scale_factor must be None when size is provided")
if isinstance(size, int):
size = (size, 1)
else:
if mode == "LINEAR":
raise ValueError("under LINEAR mode, size can only be single value")
dsize = size
oh, ow = dsize[0], dsize[1]
ih, iw = inp.shape[2], inp.shape[3]
if align_corners:
hscale = (ih - 1.0) / (oh - 1.0)
wscale = 1.0 * iw / ow
if mode != "LINEAR":
wscale = (iw - 1.0) / (ow - 1.0)
row0 = mgb.opr.concat([wscale, [0, 0]], axis=0).reshape(1, 3)
row1 = mgb.opr.concat([[0], hscale, [0]], axis=0).reshape(1, 3)
weight = mgb.opr.concat([row0, row1, [[0, 0, 1]]], axis=0).reshape(1, 3, 3)
weight = mgb.opr.broadcast(weight, (inp.shape[0], 3, 3))
else:
hscale = 1.0 * ih / oh
wscale = 1.0 * iw / ow
row0 = mgb.opr.concat([wscale, [0], 0.5 * wscale - 0.5], axis=0).reshape(1, 3)
row1 = mgb.opr.concat([[0], hscale, 0.5 * hscale - 0.5], axis=0).reshape(1, 3)
weight = mgb.opr.concat([row0, row1, [[0, 0, 1]]], axis=0).reshape(1, 3, 3)
weight = mgb.opr.broadcast(weight, (inp.shape[0], 3, 3))
ret = mgb.opr.warp_perspective(inp, weight, dsize, imode="LINEAR", format="NCHW")
if mode == "LINEAR":
ret = mgb.opr.reshape(ret, ret.shape[0:3])
return ret
@wrap_io_tensor
def dropout(inp: Tensor, drop_prob: float, rescale: bool = True) -> Tensor:
"""
Returns a new tensor where each of the elements are randomly set to zero
with probability P = ``drop_prob``. Optionally rescale the output tensor.
:param inp: The input tensor
:param drop_prob: The probability to drop (set to zero) a single element
:param rescale: The default behavior of ``dropout`` during training is to rescale the output,
then it can be replaced by an :class:`~.Identity` during inference, default to True.
:return: The output tensor
Examples:
.. testcode::
import numpy as np
import megengine as mge
import megengine.functional as F
from megengine import tensor
data = tensor(np.ones(10, dtype=np.float32))
out = F.dropout(data, 1./3.)
print(out.numpy())
Outputs:
.. testoutput::
:options: +SKIP
[1.5 1.5 0. 1.5 1.5 1.5 1.5 1.5 1.5 1.5]
"""
assert 0 <= drop_prob < 1
rv = uniform(inp.shape)
mask = rv > drop_prob
inp *= mask.astype(inp.dtype)
if rescale:
inp *= 1 / (1 - drop_prob)
return inp
@wrap_io_tensor
def identity(inp: Tensor) -> Tensor:
"""applies an identity transform to the input tensor.
:param inp: The input tensor
"""
return mgb.opr.identity(inp)
@wrap_io_tensor
def embedding(
input: Tensor,
weight: Tensor,
padding_idx: Optional[int] = None,
max_norm: Optional[float] = None,
norm_type: Optional[float] = None,
):
"""
Applies lookup table for embedding.
:param input: the tensor with indices.
:param weight: the learnable weights which embedding from.
    :param padding_idx: should be set to None, not supported for now.
    :param max_norm: should be set to None, not supported for now.
    :param norm_type: should be set to None, not supported for now.
Refer to :class:`~.Embedding` for more information.
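    Example (an illustrative sketch; a 4x3 embedding table looked up with two indices):
    .. testcode::
        import numpy as np
        from megengine import tensor
        import megengine.functional as F
        weight = tensor(np.arange(12, dtype=np.float32).reshape(4, 3))
        index = tensor(np.array([0, 2], dtype=np.int32))
        out = F.embedding(index, weight)
        print(out.numpy().shape)
    Outputs:
    .. testoutput::
        (2, 3)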
"""
if padding_idx is not None:
raise ValueError("Not support padding_idx Now!")
if max_norm is not None or norm_type is not None:
raise ValueError("Not support weight normlization Now!")
return mgb.opr.advanced_indexing(weight)[input.reshape(-1), :].reshape(
input.shape, weight.shape[-1]
)
@wrap_io_tensor
def roi_pooling(
input: Tensor,
rois: Tensor,
output_shape: Union[int, tuple, list],
mode: str = "max",
scale: float = 1.0,
) -> Tensor:
"""
Apply roi pooling on input feature
:param input: tensor that represents the input feature, (N, C, H, W) images
:param rois: (K, 5) boxes. First column is the index into N. The other 4 columns are xyxy
:param output_shape: (height, width) of output rois feature
:param mode: "max" or "average", use max/average align just like max/average pooling. Default: ``"max"``
:param scale: scale the input boxes by this number. Default: 1.0
:return: (K, C, output_shape[0], output_shape[1]) feature of rois
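    Example (an illustrative sketch; a single ROI pooled to a 7x7 grid over dummy features):
    .. testcode::
        import numpy as np
        from megengine import tensor
        import megengine.functional as F
        feat = tensor(np.ones((1, 1, 128, 128), dtype=np.float32))
        rois = tensor(np.array([[0, 0, 0, 32, 32]], dtype=np.float32))
        out = F.roi_pooling(feat, rois, (7, 7))
        print(out.numpy().shape)
    Outputs:
    .. testoutput::
        (1, 1, 7, 7)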
"""
assert mode in ["max", "average"], "only max/average mode is supported"
if isinstance(output_shape, int):
output_shape = (output_shape, output_shape)
return mgb.opr.roi_pooling(
input, rois, output_shape, mode=mode.upper(), scale=scale
)
@wrap_io_tensor
def roi_align(
input: Tensor,
rois: Tensor,
output_shape: Union[int, tuple, list],
mode: str = "average",
spatial_scale: float = 1.0,
sample_points: Union[int, tuple, list] = 2,
aligned: bool = True,
) -> Tensor:
"""
Apply roi align on input feature
:param input: tensor that represents the input feature, (N, C, H, W) images
:param rois: (N, 5) boxes. First column is the index into N. The other 4 columns are xyxy
:param output_shape: (height, width) shape of output rois feature.
:param mode: "max" or "average", use max/average align just like max/average pooling. Default: ``"average"``
:param spatial_scale: scale the input boxes by this number. Default: 1.0
:param sample_points: number of inputs samples to take for each output sample.
0 to take samples densely. Default: 2
    :param aligned: whether to align the input feature; with `aligned=True`,
we first appropriately scale the ROI and then shift it by -0.5. Default: True
"""
assert mode in ["max", "average"], "only max/average mode is supported"
if isinstance(output_shape, int):
output_shape = (output_shape, output_shape)
pooled_height, pooled_width = output_shape
if isinstance(sample_points, int):
sample_points = (sample_points, sample_points)
sample_height, sample_width = sample_points
offset = 0.5 if aligned else 0.0
return mgb.opr.roi_align(
input,
rois,
mode=mode.upper(),
spatial_scale=spatial_scale,
offset=offset,
pooled_height=pooled_height,
pooled_width=pooled_width,
sample_height=sample_height,
sample_width=sample_width,
)
@wrap_io_tensor
def assert_equal(
get: Tensor, expect: Tensor, max_err: float = 1e-4, verbose: bool = False
) -> Tensor:
r"""
Asserts that ``get`` equals to ``expect``, and returns value of ``expect``.
:param get: tensor to be checked.
:param expect: tensor with expected values.
:param max_err: tolerance that two float values are asserted equal. Default: 1e-4
:param verbose: whether to print details if two tensors are not equal. Default: False
Examples:
.. testcode::
import megengine.functional as F
from megengine import tensor
get = tensor([1.0, 2.0])
max_err = 0.1
expect = get + max_err / 2.0
val = F.assert_equal(expect, get, max_err=max_err)
print(val.numpy())
Outputs:
.. testoutput::
[1.05 2.05]
"""
return mgb.opr.assert_equal(get, expect, maxerr=max_err, verbose=verbose)
@wrap_io_tensor
def indexing_one_hot(
src: Tensor, index: Tensor, axis: int = 1, keepdims=False
) -> Tensor:
r"""
One-hot indexing for some axis.
:param src: input data tensor.
:param index: index tensor.
    :param axis: the axis of ``src`` along which ``index`` selects values. Default: 1
    :param keepdims: whether to retain the indexed axis in the result instead of removing it. Default: ``False``
Examples:
.. testcode::
import megengine.functional as F
from megengine import tensor
src = tensor([[1.0, 2.0]])
index = tensor([0])
val = F.indexing_one_hot(src, index)
print(val.numpy())
.. testoutput::
[1.]
"""
return mgb.opr.indexing_one_hot(src, axis, index, keepdims=keepdims)
|
[
"megengine._internal.opr.elem.exp",
"megengine._internal.opr.pooling",
"megengine._internal.opr.add_axis",
"megengine._internal.opr.assert_equal",
"megengine._internal.opr.elemwise",
"megengine._internal.opr.reshape",
"megengine._internal.opr.indexing_one_hot",
"megengine._internal.opr.batch_norm",
"megengine._internal.opr.advanced_indexing",
"megengine._internal.opr.concat",
"megengine._internal.opr.warp_perspective",
"megengine._internal.make_immutable",
"megengine._internal.opr.mark_no_broadcast_elemwise",
"megengine._internal.opr.identity",
"megengine._internal.opr.batched_matrix_mul",
"megengine._internal.opr.batch_norm_no_statistic",
"megengine._internal.opr.eye",
"megengine._internal.opr.matrix_mul",
"megengine._internal.opr.broadcast"
] |
[((1169, 1217), 'megengine._internal.opr.matrix_mul', 'mgb.opr.matrix_mul', (['inp', 'weight'], {'transposeB': '(True)'}), '(inp, weight, transposeB=True)\n', (1187, 1217), True, 'import megengine._internal as mgb\n'), ((7082, 7204), 'megengine._internal.opr.pooling', 'mgb.opr.pooling', (['inp'], {'mode': 'mode', 'format': '"""NCHW"""', 'stride_h': 'sh', 'stride_w': 'sw', 'pad_h': 'ph', 'pad_w': 'pw', 'window_h': 'kh', 'window_w': 'kw'}), "(inp, mode=mode, format='NCHW', stride_h=sh, stride_w=sw,\n pad_h=ph, pad_w=pw, window_h=kh, window_w=kw)\n", (7097, 7204), True, 'import megengine._internal as mgb\n'), ((8075, 8197), 'megengine._internal.opr.pooling', 'mgb.opr.pooling', (['inp'], {'mode': 'mode', 'format': '"""NCHW"""', 'stride_h': 'sh', 'stride_w': 'sw', 'pad_h': 'ph', 'pad_w': 'pw', 'window_h': 'kh', 'window_w': 'kw'}), "(inp, mode=mode, format='NCHW', stride_h=sh, stride_w=sw,\n pad_h=ph, pad_w=pw, window_h=kh, window_w=kw)\n", (8090, 8197), True, 'import megengine._internal as mgb\n'), ((12094, 12133), 'megengine._internal.opr.mark_no_broadcast_elemwise', 'mgb.opr.mark_no_broadcast_elemwise', (['inp'], {}), '(inp)\n', (12128, 12133), True, 'import megengine._internal as mgb\n'), ((14310, 14381), 'megengine._internal.make_immutable', 'mgb.make_immutable', ([], {'value': '(0)', 'comp_node': 'comp_node', 'comp_graph': 'comp_graph'}), '(value=0, comp_node=comp_node, comp_graph=comp_graph)\n', (14328, 14381), True, 'import megengine._internal as mgb\n'), ((14457, 14528), 'megengine._internal.make_immutable', 'mgb.make_immutable', ([], {'value': '(1)', 'comp_node': 'comp_node', 'comp_graph': 'comp_graph'}), '(value=1, comp_node=comp_node, comp_graph=comp_graph)\n', (14475, 14528), True, 'import megengine._internal as mgb\n'), ((16421, 16541), 'megengine._internal.opr.warp_perspective', 'mgb.opr.warp_perspective', (['inp', 'M', 'dsize'], {'bmode': 'border_mode', 'border_val': 'border_val', 'imode': 'interp_mode', 'format': '"""NCHW"""'}), "(inp, M, dsize, bmode=border_mode, border_val=\n border_val, imode=interp_mode, format='NCHW')\n", (16445, 16541), True, 'import megengine._internal as mgb\n'), ((17671, 17744), 'megengine._internal.opr.eye', 'mgb.opr.eye', (['(n, m)'], {'dtype': 'dtype', 'comp_node': 'device', 'comp_graph': 'comp_graph'}), '((n, m), dtype=dtype, comp_node=device, comp_graph=comp_graph)\n', (17682, 17744), True, 'import megengine._internal as mgb\n'), ((18554, 18584), 'megengine._internal.opr.matrix_mul', 'mgb.opr.matrix_mul', (['inp1', 'inp2'], {}), '(inp1, inp2)\n', (18572, 18584), True, 'import megengine._internal as mgb\n'), ((19745, 19783), 'megengine._internal.opr.batched_matrix_mul', 'mgb.opr.batched_matrix_mul', (['inp1', 'inp2'], {}), '(inp1, inp2)\n', (19771, 19783), True, 'import megengine._internal as mgb\n'), ((24039, 24114), 'megengine._internal.opr.warp_perspective', 'mgb.opr.warp_perspective', (['inp', 'weight', 'dsize'], {'imode': '"""LINEAR"""', 'format': '"""NCHW"""'}), "(inp, weight, dsize, imode='LINEAR', format='NCHW')\n", (24063, 24114), True, 'import megengine._internal as mgb\n'), ((25556, 25577), 'megengine._internal.opr.identity', 'mgb.opr.identity', (['inp'], {}), '(inp)\n', (25572, 25577), True, 'import megengine._internal as mgb\n'), ((30069, 30135), 'megengine._internal.opr.assert_equal', 'mgb.opr.assert_equal', (['get', 'expect'], {'maxerr': 'max_err', 'verbose': 'verbose'}), '(get, expect, maxerr=max_err, verbose=verbose)\n', (30089, 30135), True, 'import megengine._internal as mgb\n'), ((30828, 30889), 
'megengine._internal.opr.indexing_one_hot', 'mgb.opr.indexing_one_hot', (['src', 'axis', 'index'], {'keepdims': 'keepdims'}), '(src, axis, index, keepdims=keepdims)\n', (30852, 30889), True, 'import megengine._internal as mgb\n'), ((8468, 8504), 'megengine._internal.opr.elemwise', 'mgb.opr.elemwise', (['inp', '(0)'], {'mode': '"""MAX"""'}), "(inp, 0, mode='MAX')\n", (8484, 8504), True, 'import megengine._internal as mgb\n'), ((8789, 8825), 'megengine._internal.opr.elemwise', 'mgb.opr.elemwise', (['inp', '(0)'], {'mode': '"""MAX"""'}), "(inp, 0, mode='MAX')\n", (8805, 8825), True, 'import megengine._internal as mgb\n'), ((10915, 10936), 'megengine._internal.opr.elem.exp', 'mgb.opr.elem.exp', (['inp'], {}), '(inp)\n', (10931, 10936), True, 'import megengine._internal as mgb\n'), ((21594, 21618), 'megengine._internal.opr.add_axis', 'mgb.opr.add_axis', (['inp', '(3)'], {}), '(inp, 3)\n', (21610, 21618), True, 'import megengine._internal as mgb\n'), ((22711, 22755), 'megengine._internal.opr.concat', 'mgb.opr.concat', (['[dsize[0], dsize[1]]'], {'axis': '(0)'}), '([dsize[0], dsize[1]], axis=0)\n', (22725, 22755), True, 'import megengine._internal as mgb\n'), ((23585, 23632), 'megengine._internal.opr.broadcast', 'mgb.opr.broadcast', (['weight', '(inp.shape[0], 3, 3)'], {}), '(weight, (inp.shape[0], 3, 3))\n', (23602, 23632), True, 'import megengine._internal as mgb\n'), ((23980, 24027), 'megengine._internal.opr.broadcast', 'mgb.opr.broadcast', (['weight', '(inp.shape[0], 3, 3)'], {}), '(weight, (inp.shape[0], 3, 3))\n', (23997, 24027), True, 'import megengine._internal as mgb\n'), ((24154, 24190), 'megengine._internal.opr.reshape', 'mgb.opr.reshape', (['ret', 'ret.shape[0:3]'], {}), '(ret, ret.shape[0:3])\n', (24169, 24190), True, 'import megengine._internal as mgb\n'), ((8516, 8552), 'megengine._internal.opr.elemwise', 'mgb.opr.elemwise', (['inp', '(0)'], {'mode': '"""MIN"""'}), "(inp, 0, mode='MIN')\n", (8532, 8552), True, 'import megengine._internal as mgb\n'), ((8845, 8881), 'megengine._internal.opr.elemwise', 'mgb.opr.elemwise', (['inp', '(0)'], {'mode': '"""MIN"""'}), "(inp, 0, mode='MIN')\n", (8861, 8881), True, 'import megengine._internal as mgb\n'), ((10852, 10873), 'megengine._internal.opr.elem.exp', 'mgb.opr.elem.exp', (['inp'], {}), '(inp)\n', (10868, 10873), True, 'import megengine._internal as mgb\n'), ((12985, 13130), 'megengine._internal.opr.batch_norm', 'mgb.opr.batch_norm', (['inp', 'weight', 'bias', 'running_mean', 'running_var'], {'param_dim': '"""DIM_1C11"""', 'fwd_mode': 'fwdmode', 'epsilon': 'eps', 'avg_factor': 'avg_factor'}), "(inp, weight, bias, running_mean, running_var, param_dim=\n 'DIM_1C11', fwd_mode=fwdmode, epsilon=eps, avg_factor=avg_factor)\n", (13003, 13130), True, 'import megengine._internal as mgb\n'), ((13329, 13459), 'megengine._internal.opr.batch_norm_no_statistic', 'mgb.opr.batch_norm_no_statistic', (['inp', 'weight', 'bias'], {'param_dim': '"""DIM_1C11"""', 'fwd_mode': 'fwdmode', 'epsilon': 'eps', 'avg_factor': 'avg_factor'}), "(inp, weight, bias, param_dim='DIM_1C11',\n fwd_mode=fwdmode, epsilon=eps, avg_factor=avg_factor)\n", (13360, 13459), True, 'import megengine._internal as mgb\n'), ((22588, 22654), 'megengine._internal.opr.elemwise', 'mgb.opr.elemwise', (['(inp.shape[i + 2] * scale_factor[i])'], {'mode': '"""FLOOR"""'}), "(inp.shape[i + 2] * scale_factor[i], mode='FLOOR')\n", (22604, 22654), True, 'import megengine._internal as mgb\n'), ((23357, 23397), 'megengine._internal.opr.concat', 'mgb.opr.concat', (['[wscale, [0, 0]]'], {'axis': '(0)'}), 
'([wscale, [0, 0]], axis=0)\n', (23371, 23397), True, 'import megengine._internal as mgb\n'), ((23427, 23469), 'megengine._internal.opr.concat', 'mgb.opr.concat', (['[[0], hscale, [0]]'], {'axis': '(0)'}), '([[0], hscale, [0]], axis=0)\n', (23441, 23469), True, 'import megengine._internal as mgb\n'), ((23501, 23550), 'megengine._internal.opr.concat', 'mgb.opr.concat', (['[row0, row1, [[0, 0, 1]]]'], {'axis': '(0)'}), '([row0, row1, [[0, 0, 1]]], axis=0)\n', (23515, 23550), True, 'import megengine._internal as mgb\n'), ((23720, 23777), 'megengine._internal.opr.concat', 'mgb.opr.concat', (['[wscale, [0], 0.5 * wscale - 0.5]'], {'axis': '(0)'}), '([wscale, [0], 0.5 * wscale - 0.5], axis=0)\n', (23734, 23777), True, 'import megengine._internal as mgb\n'), ((23807, 23864), 'megengine._internal.opr.concat', 'mgb.opr.concat', (['[[0], hscale, 0.5 * hscale - 0.5]'], {'axis': '(0)'}), '([[0], hscale, 0.5 * hscale - 0.5], axis=0)\n', (23821, 23864), True, 'import megengine._internal as mgb\n'), ((23896, 23945), 'megengine._internal.opr.concat', 'mgb.opr.concat', (['[row0, row1, [[0, 0, 1]]]'], {'axis': '(0)'}), '([row0, row1, [[0, 0, 1]]], axis=0)\n', (23910, 23945), True, 'import megengine._internal as mgb\n'), ((26397, 26430), 'megengine._internal.opr.advanced_indexing', 'mgb.opr.advanced_indexing', (['weight'], {}), '(weight)\n', (26422, 26430), True, 'import megengine._internal as mgb\n')]
|
from fastapi import APIRouter, Depends, HTTPException, Query, Path
from sqlmodel import Session, select
from sqlalchemy.exc import IntegrityError
from typing import List, Any
import datetime as dt
from app.src.common.utils import profiling_api
from app.src.models.product import (
Product,
ProductRead,
ProductCreate,
ProductUpdate,
ProductReadwithTypeAndTags,
)
from app.src.db.engine import get_session
from app.src.api.endpoints.product_type import get_producttype_or_404
from app.src.api.endpoints.tags import get_tag_or_404, get_tag_by_name_or_404
from app.src.common.security import get_current_user
from app.src.models.app_user import AppUser
from app.src.models.tag import Tag
from app.src.logger import logger
router = APIRouter()
async def get_product_or_404(
*,
session: Session = Depends(get_session),
product_id: int = Path(..., ge=1),
current_user: AppUser = Depends(get_current_user),
):
start_time = dt.datetime.now()
try:
db_product = session.get(Product, product_id)
if db_product:
return {
"db_product": db_product,
"username": current_user.username,
"start_time": start_time,
}
else:
logger.error("Product not found")
logger.exception("Product not found")
raise HTTPException(status_code=404, detail="Product not found")
except KeyError:
logger.error("Product not found")
logger.exception("KeyError: Product not found")
raise HTTPException(status_code=400, detail="Product not found")
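# Note (descriptive, added for clarity): get_product_or_404 returns a dict of the form
#   {"db_product": <Product>, "username": <str>, "start_time": <datetime>}
# rather than the bare model, so the endpoints below read db_product["db_product"] and
# forward the remaining keys to profiling_api for timing.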
@router.get("/", response_model=List[ProductReadwithTypeAndTags])
async def read_all_products(
session: Session = Depends(get_session),
offset: int = 0,
limit: int = Query(default=100, lte=100),
current_user: AppUser = Depends(get_current_user),
) -> Any:
"""
Retrieve all products
"""
start_time = dt.datetime.now()
products = session.exec(select(Product).offset(offset).limit(limit)).all()
profiling_api("Product:get:all", start_time, current_user.username)
return products
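# A minimal usage sketch (assumption: this router is mounted under a "/products"
# prefix and the request carries valid credentials for get_current_user):
#   GET /products/?offset=0&limit=10
#   -> 200 OK with a JSON list serialized via ProductReadwithTypeAndTags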
@router.get("/{product_id}", response_model=ProductReadwithTypeAndTags)
async def read_product(
*, product_id: int, db_product: Product = Depends(get_product_or_404)
):
"""
Get the product type by id
"""
    # The commented lines below have been replaced by the new Depends
    # Note: the product_id parameter of get_product_or_404 is taken from the path
# p = session.get(Product, product_id)
# if not p:
# raise HTTPException(
# status_code=404,
# detail="Product type not found"
# )
profiling_api(
f"Product:read:by_id:{product_id}",
db_product["start_time"],
db_product["username"],
)
return db_product["db_product"]
@router.post("/", response_model=ProductRead)
async def create_product(
*,
session: Session = Depends(get_session),
product: ProductCreate,
current_user: AppUser = Depends(get_current_user),
) -> Any:
"""
Create a new single product
"""
start_time = dt.datetime.now()
    # Check that the product type exists
_ = await get_producttype_or_404(producttype_id=product.type_id, session=session)
    # Check for integrity or other errors
try:
db_product = Product.from_orm(product)
session.add(db_product)
session.commit()
profiling_api("Product:insert:single", start_time, current_user.username)
except IntegrityError:
logger.error("Impossible to create product with same name")
logger.exception("Integrity Error: Impossible to create product with same name")
raise HTTPException(
status_code=404, detail="Impossible to create product with same name"
)
session.refresh(db_product)
return db_product
@router.patch("/update/{product_id}", response_model=ProductRead)
async def update_product_by_id(
*,
product_id: int,
session: Session = Depends(get_session),
product: ProductUpdate,
db_product: Product = Depends(get_product_or_404),
):
"""
    Modify an existing product by id
"""
    # The commented lines below have been replaced by the new Depends
    # Note: the product_id parameter of get_product_or_404 is taken from the path
# db_product = session.get(Product, product_id)
# if not db_product:
# raise HTTPException(status_code=404, detail="Product not found")
existing_product = db_product["db_product"]
pr_data = product.dict(exclude_unset=True)
for key, value in pr_data.items():
setattr(existing_product, key, value)
session.add(existing_product)
session.commit()
session.refresh(existing_product)
profiling_api(
f"Product:update:by_id:{product_id}",
db_product["start_time"],
db_product["username"],
)
return existing_product
@router.patch("/update/by_name/{product_name}", response_model=ProductRead)
async def update_product_by_name(
*,
session: Session = Depends(get_session),
product_name: str,
product: ProductUpdate,
current_user: AppUser = Depends(get_current_user),
):
"""
Modify an existing product by name
"""
start_time = dt.datetime.now()
    db_product = session.exec(
        select(Product).where(Product.name == product_name)
    ).one_or_none()
if not db_product:
raise HTTPException(
status_code=404, detail="Product not found, impossible to update"
)
pr_data = product.dict(exclude_unset=True) # to use the nullable data
for key, value in pr_data.items():
setattr(db_product, key, value)
session.add(db_product)
session.commit()
session.refresh(db_product)
profiling_api(
f"Product:update:by_name:{product_name}",
start_time,
current_user.username,
)
return db_product
@router.patch(
"/update/{product_id}/add_tag_by_id/{tag_id}",
response_model=ProductReadwithTypeAndTags,
)
async def update_product_add_tag_by_id(
*,
product_id: int,
session: Session = Depends(get_session),
db_product: Product = Depends(get_product_or_404),
db_tag: Tag = Depends(get_tag_or_404),
):
"""
Add tag to product
"""
existing_product = db_product["db_product"]
existing_tag = db_tag["db_tag"]
existing_product.tags.append(existing_tag)
session.add(existing_product)
session.commit()
session.refresh(existing_product)
profiling_api(
f"Product:update:add_tag:by_id:{product_id}",
db_product["start_time"],
db_product["username"],
)
return existing_product
@router.patch(
"/update/{product_id}/add_tag_by_name/{tag_name}",
response_model=ProductReadwithTypeAndTags,
)
async def update_product_add_tag_by_name(
*,
product_id: int,
session: Session = Depends(get_session),
db_product: Product = Depends(get_product_or_404),
db_tag: Tag = Depends(get_tag_by_name_or_404),
):
"""
Add tag to product
"""
existing_product = db_product["db_product"]
existing_tag = db_tag["db_tag"]
existing_product.tags.append(existing_tag)
session.add(existing_product)
session.commit()
session.refresh(existing_product)
profiling_api(
f"Product:update:add_tag:by_name:{product_id}",
db_product["start_time"],
db_product["username"],
)
return existing_product
@router.patch(
"/update/{product_id}/remove_tag_by_id/{tag_id}",
response_model=ProductReadwithTypeAndTags,
)
async def update_product_remove_tag_by_id(
*,
product_id: int,
session: Session = Depends(get_session),
db_product: Product = Depends(get_product_or_404),
db_tag: Tag = Depends(get_tag_or_404),
):
"""
Remove tag from product
"""
existing_product = db_product["db_product"]
existing_tag = db_tag["db_tag"]
try:
existing_product.tags.remove(existing_tag)
session.add(existing_product)
session.commit()
session.refresh(existing_product)
profiling_api(
f"Product:update:remove_tag:by_id:{product_id}",
db_product["start_time"],
db_product["username"],
)
except Exception as message:
logger.error(message)
logger.exception(message)
raise HTTPException(
status_code=404,
detail="Impossible to remove the tag: product or tag not existing",
)
return existing_product
@router.patch(
"/update/{product_id}/remove_tag_by_name/{tag_name}",
response_model=ProductReadwithTypeAndTags,
)
async def update_product_remove_tag_by_name(
*,
product_id: int,
session: Session = Depends(get_session),
db_product: Product = Depends(get_product_or_404),
db_tag: Tag = Depends(get_tag_by_name_or_404),
):
"""
Remove tag from product
"""
existing_db_product = db_product["db_product"]
existing_db_tag = db_tag["db_tag"]
    existing_db_product.tags.remove(existing_db_tag)
session.add(existing_db_product)
session.commit()
session.refresh(existing_db_product)
profiling_api(
f"Product:update:remove_tag:by_name:{product_id}",
db_product["start_time"],
db_product["username"],
)
    return existing_db_product
@router.delete("/{product_id}")
async def delete_product(
*,
product_id: int,
session: Session = Depends(get_session),
db_product: Product = Depends(get_product_or_404),
):
"""
Delete and remove an existing product by id; it must be >= 1
"""
    # The commented-out lines below were replaced by the new Depends
    # Note: the product_id parameter of get_product_or_404 is taken from the path
# product = session.get(Product, product_id)
# if not product:
# raise HTTPException(
# status_code=404, detail="Product not found, impossible to remove"
# )
existing_db_product = db_product["db_product"]
session.delete(existing_db_product)
session.commit()
profiling_api(
        f"Product:delete:by_id:{product_id}",
db_product["start_time"],
db_product["username"],
)
return {"ok": True}
|
[
"sqlmodel.select"
] |
[((751, 762), 'fastapi.APIRouter', 'APIRouter', ([], {}), '()\n', (760, 762), False, 'from fastapi import APIRouter, Depends, HTTPException, Query, Path\n'), ((825, 845), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (832, 845), False, 'from fastapi import APIRouter, Depends, HTTPException, Query, Path\n'), ((869, 884), 'fastapi.Path', 'Path', (['...'], {'ge': '(1)'}), '(..., ge=1)\n', (873, 884), False, 'from fastapi import APIRouter, Depends, HTTPException, Query, Path\n'), ((914, 939), 'fastapi.Depends', 'Depends', (['get_current_user'], {}), '(get_current_user)\n', (921, 939), False, 'from fastapi import APIRouter, Depends, HTTPException, Query, Path\n'), ((961, 978), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (976, 978), True, 'import datetime as dt\n'), ((1734, 1754), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (1741, 1754), False, 'from fastapi import APIRouter, Depends, HTTPException, Query, Path\n'), ((1794, 1821), 'fastapi.Query', 'Query', ([], {'default': '(100)', 'lte': '(100)'}), '(default=100, lte=100)\n', (1799, 1821), False, 'from fastapi import APIRouter, Depends, HTTPException, Query, Path\n'), ((1851, 1876), 'fastapi.Depends', 'Depends', (['get_current_user'], {}), '(get_current_user)\n', (1858, 1876), False, 'from fastapi import APIRouter, Depends, HTTPException, Query, Path\n'), ((1947, 1964), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (1962, 1964), True, 'import datetime as dt\n'), ((2048, 2115), 'app.src.common.utils.profiling_api', 'profiling_api', (['"""Product:get:all"""', 'start_time', 'current_user.username'], {}), "('Product:get:all', start_time, current_user.username)\n", (2061, 2115), False, 'from app.src.common.utils import profiling_api\n'), ((2280, 2307), 'fastapi.Depends', 'Depends', (['get_product_or_404'], {}), '(get_product_or_404)\n', (2287, 2307), False, 'from fastapi import APIRouter, Depends, HTTPException, Query, Path\n'), ((2683, 2786), 'app.src.common.utils.profiling_api', 'profiling_api', (['f"""Product:read:by_id:{product_id}"""', "db_product['start_time']", "db_product['username']"], {}), "(f'Product:read:by_id:{product_id}', db_product['start_time'],\n db_product['username'])\n", (2696, 2786), False, 'from app.src.common.utils import profiling_api\n'), ((2954, 2974), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (2961, 2974), False, 'from fastapi import APIRouter, Depends, HTTPException, Query, Path\n'), ((3032, 3057), 'fastapi.Depends', 'Depends', (['get_current_user'], {}), '(get_current_user)\n', (3039, 3057), False, 'from fastapi import APIRouter, Depends, HTTPException, Query, Path\n'), ((3134, 3151), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (3149, 3151), True, 'import datetime as dt\n'), ((4023, 4043), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (4030, 4043), False, 'from fastapi import APIRouter, Depends, HTTPException, Query, Path\n'), ((4099, 4126), 'fastapi.Depends', 'Depends', (['get_product_or_404'], {}), '(get_product_or_404)\n', (4106, 4126), False, 'from fastapi import APIRouter, Depends, HTTPException, Query, Path\n'), ((4755, 4861), 'app.src.common.utils.profiling_api', 'profiling_api', (['f"""Product:update:by_id:{product_id}"""', "db_product['start_time']", "db_product['username']"], {}), "(f'Product:update:by_id:{product_id}', db_product['start_time'\n ], db_product['username'])\n", (4768, 4861), False, 'from app.src.common.utils import profiling_api\n'), 
((5058, 5078), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (5065, 5078), False, 'from fastapi import APIRouter, Depends, HTTPException, Query, Path\n'), ((5159, 5184), 'fastapi.Depends', 'Depends', (['get_current_user'], {}), '(get_current_user)\n', (5166, 5184), False, 'from fastapi import APIRouter, Depends, HTTPException, Query, Path\n'), ((5261, 5278), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (5276, 5278), True, 'import datetime as dt\n'), ((5747, 5841), 'app.src.common.utils.profiling_api', 'profiling_api', (['f"""Product:update:by_name:{product_name}"""', 'start_time', 'current_user.username'], {}), "(f'Product:update:by_name:{product_name}', start_time,\n current_user.username)\n", (5760, 5841), False, 'from app.src.common.utils import profiling_api\n'), ((6099, 6119), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (6106, 6119), False, 'from fastapi import APIRouter, Depends, HTTPException, Query, Path\n'), ((6147, 6174), 'fastapi.Depends', 'Depends', (['get_product_or_404'], {}), '(get_product_or_404)\n', (6154, 6174), False, 'from fastapi import APIRouter, Depends, HTTPException, Query, Path\n'), ((6194, 6217), 'fastapi.Depends', 'Depends', (['get_tag_or_404'], {}), '(get_tag_or_404)\n', (6201, 6217), False, 'from fastapi import APIRouter, Depends, HTTPException, Query, Path\n'), ((6491, 6605), 'app.src.common.utils.profiling_api', 'profiling_api', (['f"""Product:update:add_tag:by_id:{product_id}"""', "db_product['start_time']", "db_product['username']"], {}), "(f'Product:update:add_tag:by_id:{product_id}', db_product[\n 'start_time'], db_product['username'])\n", (6504, 6605), False, 'from app.src.common.utils import profiling_api\n'), ((6874, 6894), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (6881, 6894), False, 'from fastapi import APIRouter, Depends, HTTPException, Query, Path\n'), ((6922, 6949), 'fastapi.Depends', 'Depends', (['get_product_or_404'], {}), '(get_product_or_404)\n', (6929, 6949), False, 'from fastapi import APIRouter, Depends, HTTPException, Query, Path\n'), ((6969, 7000), 'fastapi.Depends', 'Depends', (['get_tag_by_name_or_404'], {}), '(get_tag_by_name_or_404)\n', (6976, 7000), False, 'from fastapi import APIRouter, Depends, HTTPException, Query, Path\n'), ((7274, 7390), 'app.src.common.utils.profiling_api', 'profiling_api', (['f"""Product:update:add_tag:by_name:{product_id}"""', "db_product['start_time']", "db_product['username']"], {}), "(f'Product:update:add_tag:by_name:{product_id}', db_product[\n 'start_time'], db_product['username'])\n", (7287, 7390), False, 'from app.src.common.utils import profiling_api\n'), ((7659, 7679), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (7666, 7679), False, 'from fastapi import APIRouter, Depends, HTTPException, Query, Path\n'), ((7707, 7734), 'fastapi.Depends', 'Depends', (['get_product_or_404'], {}), '(get_product_or_404)\n', (7714, 7734), False, 'from fastapi import APIRouter, Depends, HTTPException, Query, Path\n'), ((7754, 7777), 'fastapi.Depends', 'Depends', (['get_tag_or_404'], {}), '(get_tag_or_404)\n', (7761, 7777), False, 'from fastapi import APIRouter, Depends, HTTPException, Query, Path\n'), ((8737, 8757), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (8744, 8757), False, 'from fastapi import APIRouter, Depends, HTTPException, Query, Path\n'), ((8785, 8812), 'fastapi.Depends', 'Depends', (['get_product_or_404'], {}), '(get_product_or_404)\n', (8792, 8812), 
False, 'from fastapi import APIRouter, Depends, HTTPException, Query, Path\n'), ((8832, 8863), 'fastapi.Depends', 'Depends', (['get_tag_by_name_or_404'], {}), '(get_tag_by_name_or_404)\n', (8839, 8863), False, 'from fastapi import APIRouter, Depends, HTTPException, Query, Path\n'), ((9151, 9270), 'app.src.common.utils.profiling_api', 'profiling_api', (['f"""Product:update:remove_tag:by_name:{product_id}"""', "db_product['start_time']", "db_product['username']"], {}), "(f'Product:update:remove_tag:by_name:{product_id}', db_product\n ['start_time'], db_product['username'])\n", (9164, 9270), False, 'from app.src.common.utils import profiling_api\n'), ((9430, 9450), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (9437, 9450), False, 'from fastapi import APIRouter, Depends, HTTPException, Query, Path\n'), ((9478, 9505), 'fastapi.Depends', 'Depends', (['get_product_or_404'], {}), '(get_product_or_404)\n', (9485, 9505), False, 'from fastapi import APIRouter, Depends, HTTPException, Query, Path\n'), ((10039, 10153), 'app.src.common.utils.profiling_api', 'profiling_api', (['f"""Product:update:add_tag:by_id:{product_id}"""', "db_product['start_time']", "db_product['username']"], {}), "(f'Product:update:add_tag:by_id:{product_id}', db_product[\n 'start_time'], db_product['username'])\n", (10052, 10153), False, 'from app.src.common.utils import profiling_api\n'), ((3205, 3276), 'app.src.api.endpoints.product_type.get_producttype_or_404', 'get_producttype_or_404', ([], {'producttype_id': 'product.type_id', 'session': 'session'}), '(producttype_id=product.type_id, session=session)\n', (3227, 3276), False, 'from app.src.api.endpoints.product_type import get_producttype_or_404\n'), ((3348, 3373), 'app.src.models.product.Product.from_orm', 'Product.from_orm', (['product'], {}), '(product)\n', (3364, 3373), False, 'from app.src.models.product import Product, ProductRead, ProductCreate, ProductUpdate, ProductReadwithTypeAndTags\n'), ((3439, 3512), 'app.src.common.utils.profiling_api', 'profiling_api', (['"""Product:insert:single"""', 'start_time', 'current_user.username'], {}), "('Product:insert:single', start_time, current_user.username)\n", (3452, 3512), False, 'from app.src.common.utils import profiling_api\n'), ((5405, 5490), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': '(404)', 'detail': '"""Product not found, impossible to update"""'}), "(status_code=404, detail='Product not found, impossible to update'\n )\n", (5418, 5490), False, 'from fastapi import APIRouter, Depends, HTTPException, Query, Path\n'), ((8084, 8201), 'app.src.common.utils.profiling_api', 'profiling_api', (['f"""Product:update:remove_tag:by_id:{product_id}"""', "db_product['start_time']", "db_product['username']"], {}), "(f'Product:update:remove_tag:by_id:{product_id}', db_product[\n 'start_time'], db_product['username'])\n", (8097, 8201), False, 'from app.src.common.utils import profiling_api\n'), ((1261, 1294), 'app.src.logger.logger.error', 'logger.error', (['"""Product not found"""'], {}), "('Product not found')\n", (1273, 1294), False, 'from app.src.logger import logger\n'), ((1307, 1344), 'app.src.logger.logger.exception', 'logger.exception', (['"""Product not found"""'], {}), "('Product not found')\n", (1323, 1344), False, 'from app.src.logger import logger\n'), ((1363, 1421), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': '(404)', 'detail': '"""Product not found"""'}), "(status_code=404, detail='Product not found')\n", (1376, 1421), False, 'from fastapi import 
APIRouter, Depends, HTTPException, Query, Path\n'), ((1451, 1484), 'app.src.logger.logger.error', 'logger.error', (['"""Product not found"""'], {}), "('Product not found')\n", (1463, 1484), False, 'from app.src.logger import logger\n'), ((1493, 1540), 'app.src.logger.logger.exception', 'logger.exception', (['"""KeyError: Product not found"""'], {}), "('KeyError: Product not found')\n", (1509, 1540), False, 'from app.src.logger import logger\n'), ((1555, 1613), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': '(400)', 'detail': '"""Product not found"""'}), "(status_code=400, detail='Product not found')\n", (1568, 1613), False, 'from fastapi import APIRouter, Depends, HTTPException, Query, Path\n'), ((3548, 3607), 'app.src.logger.logger.error', 'logger.error', (['"""Impossible to create product with same name"""'], {}), "('Impossible to create product with same name')\n", (3560, 3607), False, 'from app.src.logger import logger\n'), ((3616, 3701), 'app.src.logger.logger.exception', 'logger.exception', (['"""Integrity Error: Impossible to create product with same name"""'], {}), "('Integrity Error: Impossible to create product with same name'\n )\n", (3632, 3701), False, 'from app.src.logger import logger\n'), ((3711, 3800), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': '(404)', 'detail': '"""Impossible to create product with same name"""'}), "(status_code=404, detail=\n 'Impossible to create product with same name')\n", (3724, 3800), False, 'from fastapi import APIRouter, Depends, HTTPException, Query, Path\n'), ((8285, 8306), 'app.src.logger.logger.error', 'logger.error', (['message'], {}), '(message)\n', (8297, 8306), False, 'from app.src.logger import logger\n'), ((8315, 8340), 'app.src.logger.logger.exception', 'logger.exception', (['message'], {}), '(message)\n', (8331, 8340), False, 'from app.src.logger import logger\n'), ((8355, 8458), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': '(404)', 'detail': '"""Impossible to remove the tag: product or tag not existing"""'}), "(status_code=404, detail=\n 'Impossible to remove the tag: product or tag not existing')\n", (8368, 8458), False, 'from fastapi import APIRouter, Depends, HTTPException, Query, Path\n'), ((5309, 5324), 'sqlmodel.select', 'select', (['Product'], {}), '(Product)\n', (5315, 5324), False, 'from sqlmodel import Session, select\n'), ((1993, 2008), 'sqlmodel.select', 'select', (['Product'], {}), '(Product)\n', (1999, 2008), False, 'from sqlmodel import Session, select\n')]
|
# Copyright 2021 Modelyst LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from itertools import chain
from json import dumps
from pathlib import Path
from typing import List
from uuid import UUID
import typer
from prettytable import ALL, PrettyTable
from sqlalchemy import update
from sqlmodel import Session, select
import dbgen.cli.styles as styles
from dbgen.cli.options import config_option, model_arg_option
from dbgen.cli.utils import test_connection, validate_model_str
from dbgen.configuration import initialize
from dbgen.core.metadata import ModelEntity
model_app = typer.Typer(name='model', no_args_is_help=True)
@model_app.command('list')
def list_models(config_file: Path = config_option, tags: List[str] = typer.Option(None, '-t')):
# Notify of config file
if config_file:
_, meta_conn = initialize(config_file)
test_connection(meta_conn)
meta_engine = meta_conn.get_engine()
tags = tags or []
statement = select( # type: ignore
ModelEntity.id,
ModelEntity.name,
ModelEntity.created_at,
ModelEntity.last_run,
ModelEntity.tags,
) # type: ignore
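    # Optionally narrow the listing to models whose tag array overlaps the
    # requested tags (Postgres array overlap operator &&)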
if tags:
statement = statement.where(ModelEntity.tags.op('&&')(tags)) # type: ignore
columns = ['id', 'name', 'created_at', 'last_run', 'tags']
table = PrettyTable(field_names=columns, align='l', hrules=ALL)
with Session(meta_engine) as session:
result = session.exec(statement)
for model_id, model_name, created_at, last_run, tags in result:
table.add_row((model_id, model_name, created_at, last_run, tags))
styles.theme_typer_print(str(table))
@model_app.command('tag')
def tag(model_id: UUID, tags: List[str], config_file: Path = config_option):
# Notify of config file
if config_file:
_, meta_conn = initialize(config_file)
test_connection(meta_conn)
meta_engine = meta_conn.get_engine()
with Session(meta_engine) as session:
existing_tags = session.exec(select(ModelEntity.tags).where(ModelEntity.id == model_id)).one_or_none()
if existing_tags is None:
raise typer.BadParameter(f"Invalid model_id, no model with ID {model_id}")
new_tags = set(chain(existing_tags, tags))
session.execute(update(ModelEntity).values(tags=new_tags).where(ModelEntity.id == model_id))
session.commit()
@model_app.command('serialize')
def model_serialize(
model_str: str = model_arg_option,
out_file: Path = typer.Option(
None, '-o', '--out', help='Path to write the serialized model to in json format'
),
config_file: Path = config_option,
):
model = validate_model_str(model_str)
# Notify of config file
if config_file:
_, meta_conn = initialize(config_file)
test_connection(meta_conn)
meta_engine = meta_conn.get_engine()
with Session(meta_engine) as session:
model_row = model._get_model_row()
# Check for existing row and if found grab its created_at
created_at = session.exec(
select(ModelEntity.created_at).where(ModelEntity.id == model.uuid)
).one_or_none()
if created_at is None:
session.merge(model_row)
session.commit()
styles.good_typer_print(f"Loaded model {model.name!r} into the database with ID {model.uuid}")
else:
styles.good_typer_print(f"Model {model.name!r} already existed with ID {model.uuid}")
if out_file:
out_file.write_text(dumps(model_row.graph_json))
styles.good_typer_print(f"Wrote serialized graph to {out_file}")
@model_app.command('export')
def model_export(
model_id: UUID,
out_file: Path = typer.Option(
'model.json', '-o', '--out', help='Path to write the serialized model to in json format'
),
config_file: Path = config_option,
):
# Notify of config file
if config_file:
_, meta_conn = initialize(config_file)
test_connection(meta_conn)
meta_engine = meta_conn.get_engine()
with Session(meta_engine) as session:
# Check for existing row and if found grab its created_at
graph_json = session.exec(
select(ModelEntity.graph_json).where(ModelEntity.id == model_id)
).one_or_none()
if not graph_json:
raise ValueError(f"Invalid model_id: No model found with model_id {model_id}")
out_file.write_text(dumps(graph_json))
styles.good_typer_print(f"Wrote serialized graph to {out_file}")
|
[
"sqlmodel.Session",
"sqlmodel.select"
] |
[((1102, 1149), 'typer.Typer', 'typer.Typer', ([], {'name': '"""model"""', 'no_args_is_help': '(True)'}), "(name='model', no_args_is_help=True)\n", (1113, 1149), False, 'import typer\n'), ((1248, 1272), 'typer.Option', 'typer.Option', (['None', '"""-t"""'], {}), "(None, '-t')\n", (1260, 1272), False, 'import typer\n'), ((1375, 1401), 'dbgen.cli.utils.test_connection', 'test_connection', (['meta_conn'], {}), '(meta_conn)\n', (1390, 1401), False, 'from dbgen.cli.utils import test_connection, validate_model_str\n'), ((1481, 1589), 'sqlmodel.select', 'select', (['ModelEntity.id', 'ModelEntity.name', 'ModelEntity.created_at', 'ModelEntity.last_run', 'ModelEntity.tags'], {}), '(ModelEntity.id, ModelEntity.name, ModelEntity.created_at,\n ModelEntity.last_run, ModelEntity.tags)\n', (1487, 1589), False, 'from sqlmodel import Session, select\n'), ((1838, 1893), 'prettytable.PrettyTable', 'PrettyTable', ([], {'field_names': 'columns', 'align': '"""l"""', 'hrules': 'ALL'}), "(field_names=columns, align='l', hrules=ALL)\n", (1849, 1893), False, 'from prettytable import ALL, PrettyTable\n'), ((2372, 2398), 'dbgen.cli.utils.test_connection', 'test_connection', (['meta_conn'], {}), '(meta_conn)\n', (2387, 2398), False, 'from dbgen.cli.utils import test_connection, validate_model_str\n'), ((3008, 3107), 'typer.Option', 'typer.Option', (['None', '"""-o"""', '"""--out"""'], {'help': '"""Path to write the serialized model to in json format"""'}), "(None, '-o', '--out', help=\n 'Path to write the serialized model to in json format')\n", (3020, 3107), False, 'import typer\n'), ((3172, 3201), 'dbgen.cli.utils.validate_model_str', 'validate_model_str', (['model_str'], {}), '(model_str)\n', (3190, 3201), False, 'from dbgen.cli.utils import test_connection, validate_model_str\n'), ((3302, 3328), 'dbgen.cli.utils.test_connection', 'test_connection', (['meta_conn'], {}), '(meta_conn)\n', (3317, 3328), False, 'from dbgen.cli.utils import test_connection, validate_model_str\n'), ((4212, 4319), 'typer.Option', 'typer.Option', (['"""model.json"""', '"""-o"""', '"""--out"""'], {'help': '"""Path to write the serialized model to in json format"""'}), "('model.json', '-o', '--out', help=\n 'Path to write the serialized model to in json format')\n", (4224, 4319), False, 'import typer\n'), ((4472, 4498), 'dbgen.cli.utils.test_connection', 'test_connection', (['meta_conn'], {}), '(meta_conn)\n', (4487, 4498), False, 'from dbgen.cli.utils import test_connection, validate_model_str\n'), ((4951, 5015), 'dbgen.cli.styles.good_typer_print', 'styles.good_typer_print', (['f"""Wrote serialized graph to {out_file}"""'], {}), "(f'Wrote serialized graph to {out_file}')\n", (4974, 5015), True, 'import dbgen.cli.styles as styles\n'), ((1347, 1370), 'dbgen.configuration.initialize', 'initialize', (['config_file'], {}), '(config_file)\n', (1357, 1370), False, 'from dbgen.configuration import initialize\n'), ((1903, 1923), 'sqlmodel.Session', 'Session', (['meta_engine'], {}), '(meta_engine)\n', (1910, 1923), False, 'from sqlmodel import Session, select\n'), ((2344, 2367), 'dbgen.configuration.initialize', 'initialize', (['config_file'], {}), '(config_file)\n', (2354, 2367), False, 'from dbgen.configuration import initialize\n'), ((2450, 2470), 'sqlmodel.Session', 'Session', (['meta_engine'], {}), '(meta_engine)\n', (2457, 2470), False, 'from sqlmodel import Session, select\n'), ((3274, 3297), 'dbgen.configuration.initialize', 'initialize', (['config_file'], {}), '(config_file)\n', (3284, 3297), False, 'from dbgen.configuration import 
initialize\n'), ((3379, 3399), 'sqlmodel.Session', 'Session', (['meta_engine'], {}), '(meta_engine)\n', (3386, 3399), False, 'from sqlmodel import Session, select\n'), ((4057, 4121), 'dbgen.cli.styles.good_typer_print', 'styles.good_typer_print', (['f"""Wrote serialized graph to {out_file}"""'], {}), "(f'Wrote serialized graph to {out_file}')\n", (4080, 4121), True, 'import dbgen.cli.styles as styles\n'), ((4444, 4467), 'dbgen.configuration.initialize', 'initialize', (['config_file'], {}), '(config_file)\n', (4454, 4467), False, 'from dbgen.configuration import initialize\n'), ((4550, 4570), 'sqlmodel.Session', 'Session', (['meta_engine'], {}), '(meta_engine)\n', (4557, 4570), False, 'from sqlmodel import Session, select\n'), ((4928, 4945), 'json.dumps', 'dumps', (['graph_json'], {}), '(graph_json)\n', (4933, 4945), False, 'from json import dumps\n'), ((2646, 2714), 'typer.BadParameter', 'typer.BadParameter', (['f"""Invalid model_id, no model with ID {model_id}"""'], {}), "(f'Invalid model_id, no model with ID {model_id}')\n", (2664, 2714), False, 'import typer\n'), ((2738, 2764), 'itertools.chain', 'chain', (['existing_tags', 'tags'], {}), '(existing_tags, tags)\n', (2743, 2764), False, 'from itertools import chain\n'), ((3768, 3867), 'dbgen.cli.styles.good_typer_print', 'styles.good_typer_print', (['f"""Loaded model {model.name!r} into the database with ID {model.uuid}"""'], {}), "(\n f'Loaded model {model.name!r} into the database with ID {model.uuid}')\n", (3791, 3867), True, 'import dbgen.cli.styles as styles\n'), ((3889, 3979), 'dbgen.cli.styles.good_typer_print', 'styles.good_typer_print', (['f"""Model {model.name!r} already existed with ID {model.uuid}"""'], {}), "(\n f'Model {model.name!r} already existed with ID {model.uuid}')\n", (3912, 3979), True, 'import dbgen.cli.styles as styles\n'), ((4020, 4047), 'json.dumps', 'dumps', (['model_row.graph_json'], {}), '(model_row.graph_json)\n', (4025, 4047), False, 'from json import dumps\n'), ((1714, 1739), 'dbgen.core.metadata.ModelEntity.tags.op', 'ModelEntity.tags.op', (['"""&&"""'], {}), "('&&')\n", (1733, 1739), False, 'from dbgen.core.metadata import ModelEntity\n'), ((2520, 2544), 'sqlmodel.select', 'select', (['ModelEntity.tags'], {}), '(ModelEntity.tags)\n', (2526, 2544), False, 'from sqlmodel import Session, select\n'), ((2790, 2809), 'sqlalchemy.update', 'update', (['ModelEntity'], {}), '(ModelEntity)\n', (2796, 2809), False, 'from sqlalchemy import update\n'), ((3568, 3598), 'sqlmodel.select', 'select', (['ModelEntity.created_at'], {}), '(ModelEntity.created_at)\n', (3574, 3598), False, 'from sqlmodel import Session, select\n'), ((4696, 4726), 'sqlmodel.select', 'select', (['ModelEntity.graph_json'], {}), '(ModelEntity.graph_json)\n', (4702, 4726), False, 'from sqlmodel import Session, select\n')]
|
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# pylint: disable=import-error,no-name-in-module,no-member
from test.traced_module.test_tflite import _test_convert_result
from test.utils import ConvBn2dOpr, ConvBnRelu2dOpr, ConvOpr, ConvRelu2dOpr, LinearOpr
import megengine as mge
import megengine.module as M
import numpy as np
from megengine.core.tensor import dtype
from megengine.core.tensor.dtype import _builtin_quant_dtypes
from megengine.module.quant_dequant import QuantStub
from megengine.quantization.quantize import quantize_qat
from megengine.quantization.utils import create_qparams
from megengine.traced_module.fake_quant import FakeQuantize
from .tm_utils import get_traced_module
max_error = 1e-4
tmp_file = "test_model"
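# Helper: quantize `net` with QAT and build `num_inp` input tensors carrying
# qint8 quantization parameters that match `inp_dtype`.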
def get_qat_net(inp_dtype, net, num_inp=1, shape=(1, 16, 32, 32)):
qat_net = quantize_qat(net)
inps = []
for _ in range(num_inp):
data1 = mge.tensor(np.random.random(shape)) * 16
data1 = data1.astype(inp_dtype)
inp1 = mge.tensor(dtype.convert_from_qint8(data1.numpy()))
inp1.qparams.scale = mge.tensor(dtype.get_scale(inp_dtype))
inp1.qparams.dtype_meta = dtype._builtin_quant_dtypes["qint8"]
inps.append(inp1)
return qat_net, inps
def test_qat_conv_qint8():
class QConvOpr(M.Module):
def __init__(self):
super().__init__()
self.normal_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), padding=(3, 1), dilation=(2, 2),
)
self.normal_conv.bias = mge.Parameter(
np.random.random(self.normal_conv.bias.shape).astype(np.float32)
)
def forward(self, x):
x = self.normal_conv(x)
return x
net = QConvOpr()
qat_net = quantize_qat(net)
inp_dtype = dtype.qint8(16.0 / 128)
data = mge.tensor(np.random.random((1, 3, 224, 224))) * 16
data = data.astype(inp_dtype)
inp = mge.tensor(dtype.convert_from_qint8(data.numpy()))
inp.qparams.scale = mge.tensor(dtype.get_scale(inp_dtype))
inp.qparams.dtype_meta = dtype._builtin_quant_dtypes["qint8"]
traced_module, tm_result = get_traced_module(qat_net, inp)
print(traced_module.flatten().graph)
inp = inp.astype(inp_dtype)
out_dtype = traced_module.graph.outputs[0].qparams
scale = out_dtype.scale.numpy()
_test_convert_result(
inp,
traced_module,
tm_result,
scale=scale,
require_quantize=True,
max_err=max_error,
)
def test_qat_convrelu():
net = ConvRelu2dOpr()
qat_net = quantize_qat(net)
inp_dtype = dtype.qint8(16.0 / 128)
data = mge.tensor(np.random.random((1, 3, 224, 224))) * 16
data = data.astype(inp_dtype)
inp = mge.tensor(dtype.convert_from_qint8(data.numpy()))
inp.qparams.scale = mge.tensor(dtype.get_scale(inp_dtype))
inp.qparams.dtype_meta = dtype._builtin_quant_dtypes["qint8"]
traced_module, tm_result = get_traced_module(qat_net, inp)
inp = inp.astype(inp_dtype)
out_dtype = traced_module.graph.outputs[0].qparams
scale = out_dtype.scale.numpy()
_test_convert_result(
inp,
traced_module,
tm_result,
scale=scale,
require_quantize=True,
max_err=max_error,
)
def test_qat_convbn():
net = ConvBn2dOpr()
net.eval()
qat_net = quantize_qat(net)
inp_dtype = dtype.qint8(16.0 / 128)
data = mge.tensor(np.random.random((1, 3, 224, 224))) * 16
data = data.astype(inp_dtype)
inp = mge.tensor(dtype.convert_from_qint8(data.numpy()))
inp.qparams.scale = mge.tensor(dtype.get_scale(inp_dtype))
inp.qparams.dtype_meta = dtype._builtin_quant_dtypes["qint8"]
traced_module, tm_result = get_traced_module(qat_net, inp)
inp = inp.astype(inp_dtype)
out_dtype = traced_module.graph.outputs[0].qparams
scale = out_dtype.scale.numpy()
_test_convert_result(
inp,
traced_module,
tm_result,
scale=scale,
require_quantize=True,
max_err=max_error,
)
def test_qat_convbnrelu():
net = ConvBnRelu2dOpr()
net.eval()
qat_net = quantize_qat(net)
inp_dtype = dtype.qint8(16.0 / 128)
data = mge.tensor(np.random.random((1, 3, 224, 224))) * 16
data = data.astype(inp_dtype)
inp = mge.tensor(dtype.convert_from_qint8(data.numpy()))
inp.qparams.scale = mge.tensor(dtype.get_scale(inp_dtype))
inp.qparams.dtype_meta = dtype._builtin_quant_dtypes["qint8"]
traced_module, tm_result = get_traced_module(qat_net, inp)
inp = inp.astype(inp_dtype)
out_dtype = traced_module.graph.outputs[0].qparams
scale = out_dtype.scale.numpy()
_test_convert_result(
inp,
traced_module,
tm_result,
scale=scale,
require_quantize=True,
max_err=max_error,
)
def test_deconv_qint8():
net = ConvOpr("tflite_transpose")
qat_net = quantize_qat(net)
inp_dtype = dtype.qint8(16.0 / 128)
data = mge.tensor(np.random.random((1, 3, 64, 64))) * 16
data = data.astype(inp_dtype)
inp = mge.tensor(dtype.convert_from_qint8(data.numpy()))
inp.qparams.scale = mge.tensor(dtype.get_scale(inp_dtype))
inp.qparams.dtype_meta = dtype._builtin_quant_dtypes["qint8"]
traced_module, tm_result = get_traced_module(qat_net, inp)
print(traced_module.flatten().graph)
inp = inp.astype(inp_dtype)
out_dtype = traced_module.graph.outputs[0].qparams
scale = out_dtype.scale.numpy()
_test_convert_result(
inp,
traced_module,
tm_result,
scale=scale,
require_quantize=True,
max_err=max_error,
)
def test_linear():
net = LinearOpr()
inp_dtype = dtype.qint8(16.0 / 128.0)
qat_net, inps = get_qat_net(inp_dtype, net, shape=(10, 100))
traced_module, tm_result = get_traced_module(qat_net, inps[0])
print(traced_module.flatten().graph)
out_dtype = traced_module.graph.outputs[0].qparams
scale = out_dtype.scale.numpy()
inp = inps[0].astype(inp_dtype)
_test_convert_result(
inp,
traced_module,
tm_result,
scale=scale,
require_quantize=True,
max_err=max_error,
)
def test_add():
class ElemwiseOpr(M.Module):
def __init__(self,):
super().__init__()
self.data = np.ones((2, 3, 224, 224)).astype(np.float32)
self.data1 = np.random.random((1, 3, 1, 1)).astype(np.float32)
self.add1 = M.Elemwise("add")
self.add2 = M.Elemwise("add")
self.add3 = M.Elemwise("add")
scale = mge.tensor((16.0 / 128.0))
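            # Two QuantStubs fake-quantize the scalar constant and self.data1
            # to qint8 before they are fed to the elementwise adds.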
self.quant_stub = QuantStub()
self.quant_stub.act_fake_quant = FakeQuantize(
_builtin_quant_dtypes["qint8"]
)
self.quant_stub.act_fake_quant.set_qparams(
create_qparams(
dtype_meta=_builtin_quant_dtypes["qint8"],
scale=scale,
zero_point=None,
)
)
self.quant_stub1 = QuantStub()
self.quant_stub1.act_fake_quant = FakeQuantize(
_builtin_quant_dtypes["qint8"]
)
self.quant_stub1.act_fake_quant.set_qparams(
create_qparams(
dtype_meta=_builtin_quant_dtypes["qint8"],
scale=scale,
zero_point=None,
)
)
def forward(self, a):
n = self.quant_stub(mge.tensor(np.float32(10)))
data1 = self.quant_stub1(mge.tensor(self.data1))
x = self.add1(a, n)
y = self.add2(a, data1)
z = self.add3(x, y)
return z
net = ElemwiseOpr()
inp_dtype = dtype.qint8(16.0 / 128.0)
qat_net, inps = get_qat_net(inp_dtype, net, shape=(1, 3, 1, 1))
traced_module, tm_result = get_traced_module(qat_net, inps[0])
print(traced_module.flatten().graph)
out_dtype = traced_module.graph.outputs[0].qparams
scale = out_dtype.scale.numpy()
inp = inps[0].astype(inp_dtype)
_test_convert_result(
inp,
traced_module,
tm_result,
scale=scale,
require_quantize=True,
max_err=max_error,
)
|
[
"megengine.quantization.quantize.quantize_qat",
"megengine.module.Elemwise",
"megengine.traced_module.fake_quant.FakeQuantize",
"megengine.tensor",
"megengine.quantization.utils.create_qparams",
"megengine.core.tensor.dtype.get_scale",
"megengine.module.quant_dequant.QuantStub",
"megengine.core.tensor.dtype.qint8",
"megengine.module.Conv2d"
] |
[((1132, 1149), 'megengine.quantization.quantize.quantize_qat', 'quantize_qat', (['net'], {}), '(net)\n', (1144, 1149), False, 'from megengine.quantization.quantize import quantize_qat\n'), ((2064, 2081), 'megengine.quantization.quantize.quantize_qat', 'quantize_qat', (['net'], {}), '(net)\n', (2076, 2081), False, 'from megengine.quantization.quantize import quantize_qat\n'), ((2099, 2122), 'megengine.core.tensor.dtype.qint8', 'dtype.qint8', (['(16.0 / 128)'], {}), '(16.0 / 128)\n', (2110, 2122), False, 'from megengine.core.tensor import dtype\n'), ((2642, 2752), 'test.traced_module.test_tflite._test_convert_result', '_test_convert_result', (['inp', 'traced_module', 'tm_result'], {'scale': 'scale', 'require_quantize': '(True)', 'max_err': 'max_error'}), '(inp, traced_module, tm_result, scale=scale,\n require_quantize=True, max_err=max_error)\n', (2662, 2752), False, 'from test.traced_module.test_tflite import _test_convert_result\n'), ((2841, 2856), 'test.utils.ConvRelu2dOpr', 'ConvRelu2dOpr', ([], {}), '()\n', (2854, 2856), False, 'from test.utils import ConvBn2dOpr, ConvBnRelu2dOpr, ConvOpr, ConvRelu2dOpr, LinearOpr\n'), ((2871, 2888), 'megengine.quantization.quantize.quantize_qat', 'quantize_qat', (['net'], {}), '(net)\n', (2883, 2888), False, 'from megengine.quantization.quantize import quantize_qat\n'), ((2905, 2928), 'megengine.core.tensor.dtype.qint8', 'dtype.qint8', (['(16.0 / 128)'], {}), '(16.0 / 128)\n', (2916, 2928), False, 'from megengine.core.tensor import dtype\n'), ((3407, 3517), 'test.traced_module.test_tflite._test_convert_result', '_test_convert_result', (['inp', 'traced_module', 'tm_result'], {'scale': 'scale', 'require_quantize': '(True)', 'max_err': 'max_error'}), '(inp, traced_module, tm_result, scale=scale,\n require_quantize=True, max_err=max_error)\n', (3427, 3517), False, 'from test.traced_module.test_tflite import _test_convert_result\n'), ((3604, 3617), 'test.utils.ConvBn2dOpr', 'ConvBn2dOpr', ([], {}), '()\n', (3615, 3617), False, 'from test.utils import ConvBn2dOpr, ConvBnRelu2dOpr, ConvOpr, ConvRelu2dOpr, LinearOpr\n'), ((3647, 3664), 'megengine.quantization.quantize.quantize_qat', 'quantize_qat', (['net'], {}), '(net)\n', (3659, 3664), False, 'from megengine.quantization.quantize import quantize_qat\n'), ((3681, 3704), 'megengine.core.tensor.dtype.qint8', 'dtype.qint8', (['(16.0 / 128)'], {}), '(16.0 / 128)\n', (3692, 3704), False, 'from megengine.core.tensor import dtype\n'), ((4183, 4293), 'test.traced_module.test_tflite._test_convert_result', '_test_convert_result', (['inp', 'traced_module', 'tm_result'], {'scale': 'scale', 'require_quantize': '(True)', 'max_err': 'max_error'}), '(inp, traced_module, tm_result, scale=scale,\n require_quantize=True, max_err=max_error)\n', (4203, 4293), False, 'from test.traced_module.test_tflite import _test_convert_result\n'), ((4384, 4401), 'test.utils.ConvBnRelu2dOpr', 'ConvBnRelu2dOpr', ([], {}), '()\n', (4399, 4401), False, 'from test.utils import ConvBn2dOpr, ConvBnRelu2dOpr, ConvOpr, ConvRelu2dOpr, LinearOpr\n'), ((4431, 4448), 'megengine.quantization.quantize.quantize_qat', 'quantize_qat', (['net'], {}), '(net)\n', (4443, 4448), False, 'from megengine.quantization.quantize import quantize_qat\n'), ((4465, 4488), 'megengine.core.tensor.dtype.qint8', 'dtype.qint8', (['(16.0 / 128)'], {}), '(16.0 / 128)\n', (4476, 4488), False, 'from megengine.core.tensor import dtype\n'), ((4967, 5077), 'test.traced_module.test_tflite._test_convert_result', '_test_convert_result', (['inp', 'traced_module', 'tm_result'], {'scale': 
'scale', 'require_quantize': '(True)', 'max_err': 'max_error'}), '(inp, traced_module, tm_result, scale=scale,\n require_quantize=True, max_err=max_error)\n', (4987, 5077), False, 'from test.traced_module.test_tflite import _test_convert_result\n'), ((5166, 5193), 'test.utils.ConvOpr', 'ConvOpr', (['"""tflite_transpose"""'], {}), "('tflite_transpose')\n", (5173, 5193), False, 'from test.utils import ConvBn2dOpr, ConvBnRelu2dOpr, ConvOpr, ConvRelu2dOpr, LinearOpr\n'), ((5208, 5225), 'megengine.quantization.quantize.quantize_qat', 'quantize_qat', (['net'], {}), '(net)\n', (5220, 5225), False, 'from megengine.quantization.quantize import quantize_qat\n'), ((5243, 5266), 'megengine.core.tensor.dtype.qint8', 'dtype.qint8', (['(16.0 / 128)'], {}), '(16.0 / 128)\n', (5254, 5266), False, 'from megengine.core.tensor import dtype\n'), ((5784, 5894), 'test.traced_module.test_tflite._test_convert_result', '_test_convert_result', (['inp', 'traced_module', 'tm_result'], {'scale': 'scale', 'require_quantize': '(True)', 'max_err': 'max_error'}), '(inp, traced_module, tm_result, scale=scale,\n require_quantize=True, max_err=max_error)\n', (5804, 5894), False, 'from test.traced_module.test_tflite import _test_convert_result\n'), ((5977, 5988), 'test.utils.LinearOpr', 'LinearOpr', ([], {}), '()\n', (5986, 5988), False, 'from test.utils import ConvBn2dOpr, ConvBnRelu2dOpr, ConvOpr, ConvRelu2dOpr, LinearOpr\n'), ((6005, 6030), 'megengine.core.tensor.dtype.qint8', 'dtype.qint8', (['(16.0 / 128.0)'], {}), '(16.0 / 128.0)\n', (6016, 6030), False, 'from megengine.core.tensor import dtype\n'), ((6335, 6445), 'test.traced_module.test_tflite._test_convert_result', '_test_convert_result', (['inp', 'traced_module', 'tm_result'], {'scale': 'scale', 'require_quantize': '(True)', 'max_err': 'max_error'}), '(inp, traced_module, tm_result, scale=scale,\n require_quantize=True, max_err=max_error)\n', (6355, 6445), False, 'from test.traced_module.test_tflite import _test_convert_result\n'), ((8073, 8098), 'megengine.core.tensor.dtype.qint8', 'dtype.qint8', (['(16.0 / 128.0)'], {}), '(16.0 / 128.0)\n', (8084, 8098), False, 'from megengine.core.tensor import dtype\n'), ((8406, 8516), 'test.traced_module.test_tflite._test_convert_result', '_test_convert_result', (['inp', 'traced_module', 'tm_result'], {'scale': 'scale', 'require_quantize': '(True)', 'max_err': 'max_error'}), '(inp, traced_module, tm_result, scale=scale,\n require_quantize=True, max_err=max_error)\n', (8426, 8516), False, 'from test.traced_module.test_tflite import _test_convert_result\n'), ((2316, 2342), 'megengine.core.tensor.dtype.get_scale', 'dtype.get_scale', (['inp_dtype'], {}), '(inp_dtype)\n', (2331, 2342), False, 'from megengine.core.tensor import dtype\n'), ((3122, 3148), 'megengine.core.tensor.dtype.get_scale', 'dtype.get_scale', (['inp_dtype'], {}), '(inp_dtype)\n', (3137, 3148), False, 'from megengine.core.tensor import dtype\n'), ((3898, 3924), 'megengine.core.tensor.dtype.get_scale', 'dtype.get_scale', (['inp_dtype'], {}), '(inp_dtype)\n', (3913, 3924), False, 'from megengine.core.tensor import dtype\n'), ((4682, 4708), 'megengine.core.tensor.dtype.get_scale', 'dtype.get_scale', (['inp_dtype'], {}), '(inp_dtype)\n', (4697, 4708), False, 'from megengine.core.tensor import dtype\n'), ((5458, 5484), 'megengine.core.tensor.dtype.get_scale', 'dtype.get_scale', (['inp_dtype'], {}), '(inp_dtype)\n', (5473, 5484), False, 'from megengine.core.tensor import dtype\n'), ((1397, 1423), 'megengine.core.tensor.dtype.get_scale', 'dtype.get_scale', (['inp_dtype'], 
{}), '(inp_dtype)\n', (1412, 1423), False, 'from megengine.core.tensor import dtype\n'), ((1696, 1762), 'megengine.module.Conv2d', 'M.Conv2d', (['(3)', '(30)', '(3)'], {'stride': '(2, 3)', 'padding': '(3, 1)', 'dilation': '(2, 2)'}), '(3, 30, 3, stride=(2, 3), padding=(3, 1), dilation=(2, 2))\n', (1704, 1762), True, 'import megengine.module as M\n'), ((2145, 2179), 'numpy.random.random', 'np.random.random', (['(1, 3, 224, 224)'], {}), '((1, 3, 224, 224))\n', (2161, 2179), True, 'import numpy as np\n'), ((2951, 2985), 'numpy.random.random', 'np.random.random', (['(1, 3, 224, 224)'], {}), '((1, 3, 224, 224))\n', (2967, 2985), True, 'import numpy as np\n'), ((3727, 3761), 'numpy.random.random', 'np.random.random', (['(1, 3, 224, 224)'], {}), '((1, 3, 224, 224))\n', (3743, 3761), True, 'import numpy as np\n'), ((4511, 4545), 'numpy.random.random', 'np.random.random', (['(1, 3, 224, 224)'], {}), '((1, 3, 224, 224))\n', (4527, 4545), True, 'import numpy as np\n'), ((5289, 5321), 'numpy.random.random', 'np.random.random', (['(1, 3, 64, 64)'], {}), '((1, 3, 64, 64))\n', (5305, 5321), True, 'import numpy as np\n'), ((6776, 6793), 'megengine.module.Elemwise', 'M.Elemwise', (['"""add"""'], {}), "('add')\n", (6786, 6793), True, 'import megengine.module as M\n'), ((6818, 6835), 'megengine.module.Elemwise', 'M.Elemwise', (['"""add"""'], {}), "('add')\n", (6828, 6835), True, 'import megengine.module as M\n'), ((6860, 6877), 'megengine.module.Elemwise', 'M.Elemwise', (['"""add"""'], {}), "('add')\n", (6870, 6877), True, 'import megengine.module as M\n'), ((6899, 6923), 'megengine.tensor', 'mge.tensor', (['(16.0 / 128.0)'], {}), '(16.0 / 128.0)\n', (6909, 6923), True, 'import megengine as mge\n'), ((6956, 6967), 'megengine.module.quant_dequant.QuantStub', 'QuantStub', ([], {}), '()\n', (6965, 6967), False, 'from megengine.module.quant_dequant import QuantStub\n'), ((7013, 7057), 'megengine.traced_module.fake_quant.FakeQuantize', 'FakeQuantize', (["_builtin_quant_dtypes['qint8']"], {}), "(_builtin_quant_dtypes['qint8'])\n", (7025, 7057), False, 'from megengine.traced_module.fake_quant import FakeQuantize\n'), ((7372, 7383), 'megengine.module.quant_dequant.QuantStub', 'QuantStub', ([], {}), '()\n', (7381, 7383), False, 'from megengine.module.quant_dequant import QuantStub\n'), ((7430, 7474), 'megengine.traced_module.fake_quant.FakeQuantize', 'FakeQuantize', (["_builtin_quant_dtypes['qint8']"], {}), "(_builtin_quant_dtypes['qint8'])\n", (7442, 7474), False, 'from megengine.traced_module.fake_quant import FakeQuantize\n'), ((1220, 1243), 'numpy.random.random', 'np.random.random', (['shape'], {}), '(shape)\n', (1236, 1243), True, 'import numpy as np\n'), ((7160, 7251), 'megengine.quantization.utils.create_qparams', 'create_qparams', ([], {'dtype_meta': "_builtin_quant_dtypes['qint8']", 'scale': 'scale', 'zero_point': 'None'}), "(dtype_meta=_builtin_quant_dtypes['qint8'], scale=scale,\n zero_point=None)\n", (7174, 7251), False, 'from megengine.quantization.utils import create_qparams\n'), ((7578, 7669), 'megengine.quantization.utils.create_qparams', 'create_qparams', ([], {'dtype_meta': "_builtin_quant_dtypes['qint8']", 'scale': 'scale', 'zero_point': 'None'}), "(dtype_meta=_builtin_quant_dtypes['qint8'], scale=scale,\n zero_point=None)\n", (7592, 7669), False, 'from megengine.quantization.utils import create_qparams\n'), ((7887, 7909), 'megengine.tensor', 'mge.tensor', (['self.data1'], {}), '(self.data1)\n', (7897, 7909), True, 'import megengine as mge\n'), ((6632, 6657), 'numpy.ones', 'np.ones', (['(2, 3, 224, 
224)'], {}), '((2, 3, 224, 224))\n', (6639, 6657), True, 'import numpy as np\n'), ((6702, 6732), 'numpy.random.random', 'np.random.random', (['(1, 3, 1, 1)'], {}), '((1, 3, 1, 1))\n', (6718, 6732), True, 'import numpy as np\n'), ((7833, 7847), 'numpy.float32', 'np.float32', (['(10)'], {}), '(10)\n', (7843, 7847), True, 'import numpy as np\n'), ((1861, 1906), 'numpy.random.random', 'np.random.random', (['self.normal_conv.bias.shape'], {}), '(self.normal_conv.bias.shape)\n', (1877, 1906), True, 'import numpy as np\n')]
|
# mixed formulation
# 07.08.2009
#!
#! Homogenization: Linear Elasticity
#! =================================
#$ \centerline{Example input file, \today}
#! Homogenization of heterogeneous linear elastic material - mixed formulation
import numpy as nm
import sfepy.discrete.fem.periodic as per
from sfepy.mechanics.matcoefs import stiffness_from_youngpoisson_mixed, bulk_from_youngpoisson
from sfepy.homogenization.utils import define_box_regions, get_box_volume
import sfepy.homogenization.coefs_base as cb
from sfepy import data_dir
from sfepy.base.base import Struct
from sfepy.homogenization.recovery import compute_micro_u, compute_stress_strain_u, compute_mac_stress_part, add_stress_p
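# Recover the microscopic displacement/pressure fields and the stresses in the
# periodic cell from the correctors and the macroscopic strain.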
def recovery_le( pb, corrs, macro ):
out = {}
dim = corrs['corrs_le']['u_00'].shape[1]
mic_u = - compute_micro_u( corrs['corrs_le'], macro['strain'], 'u', dim )
mic_p = - compute_micro_u( corrs['corrs_le'], macro['strain'], 'p', dim )
out['u_mic'] = Struct( name = 'output_data',
mode = 'vertex', data = mic_u,
var_name = 'u', dofs = None )
out['p_mic'] = Struct( name = 'output_data',
mode = 'cell', data = mic_p[:,nm.newaxis,
:,nm.newaxis],
var_name = 'p', dofs = None )
stress_Y, strain_Y = compute_stress_strain_u( pb, 'i', 'Y', 'mat.D', 'u', mic_u )
stress_Y += compute_mac_stress_part( pb, 'i', 'Y', 'mat.D', 'u', macro['strain'] )
add_stress_p( stress_Y, pb, 'i', 'Y', 'p', mic_p )
strain = macro['strain'] + strain_Y
out['cauchy_strain'] = Struct( name = 'output_data',
mode = 'cell', data = strain,
dofs = None )
out['cauchy_stress'] = Struct( name = 'output_data',
mode = 'cell', data = stress_Y,
dofs = None )
return out
#! Mesh
#! ----
dim = 3
filename_mesh = data_dir + '/meshes/3d/matrix_fiber.mesh'
region_lbn = (0, 0, 0)
region_rtf = (1, 1, 1)
#! Regions
#! -------
#! Regions, edges, ...
regions = {
'Y' : 'all',
'Ym' : 'cells of group 1',
'Yc' : 'cells of group 2',
}
regions.update( define_box_regions( dim, region_lbn, region_rtf ) )
#! Materials
#! ---------
materials = {
'mat' : ({'D' : {'Ym': stiffness_from_youngpoisson_mixed(dim, 7.0e9, 0.4),
'Yc': stiffness_from_youngpoisson_mixed(dim, 70.0e9, 0.2)},
'gamma': {'Ym': 1.0/bulk_from_youngpoisson(7.0e9, 0.4),
'Yc': 1.0/bulk_from_youngpoisson(70.0e9, 0.2)}},),
}
#! Fields
#! ------
#! Scalar field for corrector basis functions.
fields = {
'corrector_u' : ('real', dim, 'Y', 1),
'corrector_p' : ('real', 1, 'Y', 0),
}
#! Variables
#! ---------
#! Unknown and corresponding test variables. Parameter fields
#! used for evaluation of homogenized coefficients.
variables = {
'u' : ('unknown field', 'corrector_u'),
'v' : ('test field', 'corrector_u', 'u'),
'p' : ('unknown field', 'corrector_p'),
'q' : ('test field', 'corrector_p', 'p'),
'Pi' : ('parameter field', 'corrector_u', 'u'),
'Pi1u' : ('parameter field', 'corrector_u', '(set-to-None)'),
'Pi2u' : ('parameter field', 'corrector_u', '(set-to-None)'),
'Pi1p' : ('parameter field', 'corrector_p', '(set-to-None)'),
'Pi2p' : ('parameter field', 'corrector_p', '(set-to-None)'),
}
#! Functions
functions = {
'match_x_plane' : (per.match_x_plane,),
'match_y_plane' : (per.match_y_plane,),
'match_z_plane' : (per.match_z_plane,),
}
#! Boundary Conditions
#! -------------------
#! Fixed nodes.
ebcs = {
'fixed_u' : ('Corners', {'u.all' : 0.0}),
}
if dim == 3:
epbcs = {
'periodic_x' : (['Left', 'Right'], {'u.all' : 'u.all'}, 'match_x_plane'),
'periodic_y' : (['Near', 'Far'], {'u.all' : 'u.all'}, 'match_y_plane'),
'periodic_z' : (['Top', 'Bottom'], {'u.all' : 'u.all'}, 'match_z_plane'),
}
else:
epbcs = {
'periodic_x' : (['Left', 'Right'], {'u.all' : 'u.all'}, 'match_x_plane'),
'periodic_y' : (['Bottom', 'Top'], {'u.all' : 'u.all'}, 'match_y_plane'),
}
all_periodic = ['periodic_%s' % ii for ii in ['x', 'y', 'z'][:dim] ]
#! Integrals
#! ---------
#! Define the integral type Volume/Surface and quadrature rule.
integrals = {
'i' : 2,
}
#! Options
#! -------
#! Various problem-specific options.
options = {
'coefs' : 'coefs',
'requirements' : 'requirements',
'ls' : 'ls', # linear solver to use
'volume' : { #'variables' : ['u'],
#'expression' : 'd_volume.i.Y( u )',
'value' : get_box_volume( dim, region_lbn, region_rtf ),
},
'output_dir' : 'output',
'coefs_filename' : 'coefs_le_up',
'recovery_hook' : 'recovery_le',
}
#! Equations
#! ---------
#! Equations for corrector functions.
equation_corrs = {
'balance_of_forces' :
""" dw_lin_elastic.i.Y( mat.D, v, u )
- dw_stokes.i.Y( v, p ) =
- dw_lin_elastic.i.Y( mat.D, v, Pi )""",
'pressure constraint' :
"""- dw_stokes.i.Y( u, q )
- dw_volume_dot.i.Y( mat.gamma, q, p ) =
+ dw_stokes.i.Y( Pi, q )""",
}
#! Expressions for homogenized linear elastic coefficients.
expr_coefs = {
'Q1' : """dw_lin_elastic.i.Y( mat.D, Pi1u, Pi2u )""",
'Q2' : """dw_volume_dot.i.Y( mat.gamma, Pi1p, Pi2p )""",
}
#! Coefficients
#! ------------
#! Definition of homogenized acoustic coefficients.
def set_elastic_u(variables, ir, ic, mode, pis, corrs_rs):
mode2var = {'row' : 'Pi1u', 'col' : 'Pi2u'}
val = pis.states[ir, ic]['u'] + corrs_rs.states[ir, ic]['u']
variables[mode2var[mode]].set_data(val)
coefs = {
'elastic_u' : {
'requires' : ['pis', 'corrs_rs'],
'expression' : expr_coefs['Q1'],
'set_variables' : set_elastic_u,
'class' : cb.CoefSymSym,
},
'elastic_p' : {
'requires' : ['corrs_rs'],
'expression' : expr_coefs['Q2'],
'set_variables' : [('Pi1p', 'corrs_rs', 'p'), ('Pi2p', 'corrs_rs', 'p')],
'class' : cb.CoefSymSym,
},
'D' : {
'requires' : ['c.elastic_u', 'c.elastic_p'],
'class' : cb.CoefSum,
},
'filenames' : {},
}
requirements = {
'pis' : {
'variables' : ['u'],
'class' : cb.ShapeDimDim,
},
'corrs_rs' : {
'requires' : ['pis'],
'ebcs' : ['fixed_u'],
'epbcs' : all_periodic,
'equations' : equation_corrs,
'set_variables' : [('Pi', 'pis', 'u')],
'class' : cb.CorrDimDim,
'save_name' : 'corrs_le',
'dump_variables' : ['u', 'p'],
'is_linear' : True,
},
}
#! Solvers
#! -------
#! Define linear and nonlinear solver.
solvers = {
'ls' : ('ls.scipy_direct', {}),
'newton' : ('nls.newton', {
'i_max' : 1,
'eps_a' : 1e-4,
})
}
|
[
"sfepy.homogenization.utils.get_box_volume",
"sfepy.mechanics.matcoefs.stiffness_from_youngpoisson_mixed",
"sfepy.homogenization.recovery.compute_stress_strain_u",
"sfepy.mechanics.matcoefs.bulk_from_youngpoisson",
"sfepy.homogenization.utils.define_box_regions",
"sfepy.base.base.Struct",
"sfepy.homogenization.recovery.compute_micro_u",
"sfepy.homogenization.recovery.compute_mac_stress_part",
"sfepy.homogenization.recovery.add_stress_p"
] |
[((968, 1046), 'sfepy.base.base.Struct', 'Struct', ([], {'name': '"""output_data"""', 'mode': '"""vertex"""', 'data': 'mic_u', 'var_name': '"""u"""', 'dofs': 'None'}), "(name='output_data', mode='vertex', data=mic_u, var_name='u', dofs=None)\n", (974, 1046), False, 'from sfepy.base.base import Struct\n'), ((1132, 1243), 'sfepy.base.base.Struct', 'Struct', ([], {'name': '"""output_data"""', 'mode': '"""cell"""', 'data': 'mic_p[:, nm.newaxis, :, nm.newaxis]', 'var_name': '"""p"""', 'dofs': 'None'}), "(name='output_data', mode='cell', data=mic_p[:, nm.newaxis, :, nm.\n newaxis], var_name='p', dofs=None)\n", (1138, 1243), False, 'from sfepy.base.base import Struct\n'), ((1384, 1442), 'sfepy.homogenization.recovery.compute_stress_strain_u', 'compute_stress_strain_u', (['pb', '"""i"""', '"""Y"""', '"""mat.D"""', '"""u"""', 'mic_u'], {}), "(pb, 'i', 'Y', 'mat.D', 'u', mic_u)\n", (1407, 1442), False, 'from sfepy.homogenization.recovery import compute_micro_u, compute_stress_strain_u, compute_mac_stress_part, add_stress_p\n'), ((1461, 1529), 'sfepy.homogenization.recovery.compute_mac_stress_part', 'compute_mac_stress_part', (['pb', '"""i"""', '"""Y"""', '"""mat.D"""', '"""u"""', "macro['strain']"], {}), "(pb, 'i', 'Y', 'mat.D', 'u', macro['strain'])\n", (1484, 1529), False, 'from sfepy.homogenization.recovery import compute_micro_u, compute_stress_strain_u, compute_mac_stress_part, add_stress_p\n'), ((1536, 1584), 'sfepy.homogenization.recovery.add_stress_p', 'add_stress_p', (['stress_Y', 'pb', '"""i"""', '"""Y"""', '"""p"""', 'mic_p'], {}), "(stress_Y, pb, 'i', 'Y', 'p', mic_p)\n", (1548, 1584), False, 'from sfepy.homogenization.recovery import compute_micro_u, compute_stress_strain_u, compute_mac_stress_part, add_stress_p\n'), ((1656, 1719), 'sfepy.base.base.Struct', 'Struct', ([], {'name': '"""output_data"""', 'mode': '"""cell"""', 'data': 'strain', 'dofs': 'None'}), "(name='output_data', mode='cell', data=strain, dofs=None)\n", (1662, 1719), False, 'from sfepy.base.base import Struct\n'), ((1827, 1892), 'sfepy.base.base.Struct', 'Struct', ([], {'name': '"""output_data"""', 'mode': '"""cell"""', 'data': 'stress_Y', 'dofs': 'None'}), "(name='output_data', mode='cell', data=stress_Y, dofs=None)\n", (1833, 1892), False, 'from sfepy.base.base import Struct\n'), ((2271, 2318), 'sfepy.homogenization.utils.define_box_regions', 'define_box_regions', (['dim', 'region_lbn', 'region_rtf'], {}), '(dim, region_lbn, region_rtf)\n', (2289, 2318), False, 'from sfepy.homogenization.utils import define_box_regions, get_box_volume\n'), ((806, 867), 'sfepy.homogenization.recovery.compute_micro_u', 'compute_micro_u', (["corrs['corrs_le']", "macro['strain']", '"""u"""', 'dim'], {}), "(corrs['corrs_le'], macro['strain'], 'u', dim)\n", (821, 867), False, 'from sfepy.homogenization.recovery import compute_micro_u, compute_stress_strain_u, compute_mac_stress_part, add_stress_p\n'), ((884, 945), 'sfepy.homogenization.recovery.compute_micro_u', 'compute_micro_u', (["corrs['corrs_le']", "macro['strain']", '"""p"""', 'dim'], {}), "(corrs['corrs_le'], macro['strain'], 'p', dim)\n", (899, 945), False, 'from sfepy.homogenization.recovery import compute_micro_u, compute_stress_strain_u, compute_mac_stress_part, add_stress_p\n'), ((4731, 4774), 'sfepy.homogenization.utils.get_box_volume', 'get_box_volume', (['dim', 'region_lbn', 'region_rtf'], {}), '(dim, region_lbn, region_rtf)\n', (4745, 4774), False, 'from sfepy.homogenization.utils import define_box_regions, get_box_volume\n'), ((2390, 2447), 
'sfepy.mechanics.matcoefs.stiffness_from_youngpoisson_mixed', 'stiffness_from_youngpoisson_mixed', (['dim', '(7000000000.0)', '(0.4)'], {}), '(dim, 7000000000.0, 0.4)\n', (2423, 2447), False, 'from sfepy.mechanics.matcoefs import stiffness_from_youngpoisson_mixed, bulk_from_youngpoisson\n'), ((2469, 2527), 'sfepy.mechanics.matcoefs.stiffness_from_youngpoisson_mixed', 'stiffness_from_youngpoisson_mixed', (['dim', '(70000000000.0)', '(0.2)'], {}), '(dim, 70000000000.0, 0.2)\n', (2502, 2527), False, 'from sfepy.mechanics.matcoefs import stiffness_from_youngpoisson_mixed, bulk_from_youngpoisson\n'), ((2557, 2598), 'sfepy.mechanics.matcoefs.bulk_from_youngpoisson', 'bulk_from_youngpoisson', (['(7000000000.0)', '(0.4)'], {}), '(7000000000.0, 0.4)\n', (2579, 2598), False, 'from sfepy.mechanics.matcoefs import stiffness_from_youngpoisson_mixed, bulk_from_youngpoisson\n'), ((2627, 2669), 'sfepy.mechanics.matcoefs.bulk_from_youngpoisson', 'bulk_from_youngpoisson', (['(70000000000.0)', '(0.2)'], {}), '(70000000000.0, 0.2)\n', (2649, 2669), False, 'from sfepy.mechanics.matcoefs import stiffness_from_youngpoisson_mixed, bulk_from_youngpoisson\n')]
|
from sqlalchemy.orm import Session
from sqlmodel import select
from sqlalchemy.exc import SQLAlchemyError
from typing import Any
# import sys
#
# sys.path.append("..")
from app.db import models, pagination, session_scope
from app.util import passutil, schemas
from app.logs import fastapi_logger
from app.crud import get_user, get_user_password
class CRUDLogin:
def check_username_password(self, email: str, password: str) -> Any:
""" Verify Password"""
db_user_info = get_user_password(email=email)
return passutil.verify_password(str(password),
str(db_user_info.password))
def check_active_session(self, session_id: str):
""" check for active session """
try:
with session_scope() as db:
statement = select(models.UsersLoginAttempt).where(
models.UsersLoginAttempt.session_id == session_id)
results = db.exec(statement)
data = results.one()
return data
except SQLAlchemyError as e:
fastapi_logger.exception("check_active_session")
return None
def login_user(self, user: schemas.UserLogIn, session_id: str) -> Any:
""" Login Attempt Record """
try:
with session_scope() as db:
db_session = models.UsersLoginAttempt(
email=user.email,
session_id=session_id,
ip_address=user.ip_address,
browser=user.browser,
status="logged_in")
db.add(db_session)
db.commit()
db.refresh(db_session)
return db_session
except SQLAlchemyError as e:
fastapi_logger.exception("login_user")
return None
def active_user(self, session_id: str) -> Any:
""" check for active user"""
try:
with session_scope() as db:
statement = select(models.UsersLoginAttempt).where(
models.UsersLoginAttempt.session_id == session_id)
results = db.exec(statement)
db_session = results.one()
db_session.status = "active"
db.add(db_session)
db.commit()
db.refresh(db_session)
return db_session
except SQLAlchemyError as e:
fastapi_logger.exception("active_user")
return None
def logoff_user(self, session_id: str) -> Any:
""" Logging off Record"""
try:
with session_scope() as db:
statement = select(models.UsersLoginAttempt).where(
models.UsersLoginAttempt.session_id == session_id)
results = db.exec(statement)
db_session = results.one()
db_session.status = "logged_off"
db.add(db_session)
db.commit()
db.refresh(db_session)
return db_session
except SQLAlchemyError as e:
fastapi_logger.exception("logoff_user")
return None
crud_login = CRUDLogin()
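# Usage sketch (illustrative only, not part of the original module): a FastAPI
# route could combine the methods above. The route path and the `password`
# field on schemas.UserLogIn are assumptions made for this example.
#
#     @router.post("/login/")
#     def login(user: schemas.UserLogIn, session_id: str):
#         if not crud_login.check_username_password(user.email, user.password):
#             raise HTTPException(status_code=401, detail="Invalid credentials")
#         return crud_login.login_user(user, session_id)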
|
[
"sqlmodel.select"
] |
[((511, 541), 'app.crud.get_user_password', 'get_user_password', ([], {'email': 'email'}), '(email=email)\n', (528, 541), False, 'from app.crud import get_user, get_user_password\n'), ((799, 814), 'app.db.session_scope', 'session_scope', ([], {}), '()\n', (812, 814), False, 'from app.db import models, pagination, session_scope\n'), ((1127, 1175), 'app.logs.fastapi_logger.exception', 'fastapi_logger.exception', (['"""check_active_session"""'], {}), "('check_active_session')\n", (1151, 1175), False, 'from app.logs import fastapi_logger\n'), ((1349, 1364), 'app.db.session_scope', 'session_scope', ([], {}), '()\n', (1362, 1364), False, 'from app.db import models, pagination, session_scope\n'), ((1402, 1541), 'app.db.models.UsersLoginAttempt', 'models.UsersLoginAttempt', ([], {'email': 'user.email', 'session_id': 'session_id', 'ip_address': 'user.ip_address', 'browser': 'user.browser', 'status': '"""logged_in"""'}), "(email=user.email, session_id=session_id,\n ip_address=user.ip_address, browser=user.browser, status='logged_in')\n", (1426, 1541), False, 'from app.db import models, pagination, session_scope\n'), ((1835, 1873), 'app.logs.fastapi_logger.exception', 'fastapi_logger.exception', (['"""login_user"""'], {}), "('login_user')\n", (1859, 1873), False, 'from app.logs import fastapi_logger\n'), ((2023, 2038), 'app.db.session_scope', 'session_scope', ([], {}), '()\n', (2036, 2038), False, 'from app.db import models, pagination, session_scope\n'), ((2516, 2555), 'app.logs.fastapi_logger.exception', 'fastapi_logger.exception', (['"""active_user"""'], {}), "('active_user')\n", (2540, 2555), False, 'from app.logs import fastapi_logger\n'), ((2702, 2717), 'app.db.session_scope', 'session_scope', ([], {}), '()\n', (2715, 2717), False, 'from app.db import models, pagination, session_scope\n'), ((3199, 3238), 'app.logs.fastapi_logger.exception', 'fastapi_logger.exception', (['"""logoff_user"""'], {}), "('logoff_user')\n", (3223, 3238), False, 'from app.logs import fastapi_logger\n'), ((851, 883), 'sqlmodel.select', 'select', (['models.UsersLoginAttempt'], {}), '(models.UsersLoginAttempt)\n', (857, 883), False, 'from sqlmodel import select\n'), ((2075, 2107), 'sqlmodel.select', 'select', (['models.UsersLoginAttempt'], {}), '(models.UsersLoginAttempt)\n', (2081, 2107), False, 'from sqlmodel import select\n'), ((2754, 2786), 'sqlmodel.select', 'select', (['models.UsersLoginAttempt'], {}), '(models.UsersLoginAttempt)\n', (2760, 2786), False, 'from sqlmodel import select\n')]
|
#!/usr/bin/env python
"""
Dispersion analysis of a heterogeneous finite scale periodic cell.
The periodic cell mesh has to contain two subdomains Y1 (with the cell ids 1),
Y2 (with the cell ids 2), so that different material properties can be defined
in each of the subdomains (see ``--pars`` option). The command line parameters
can be given in any consistent unit set, for example the basic SI units. The
``--unit-multipliers`` option can be used to rescale the input units to ones
more suitable to the simulation, for example to prevent having different
matrix blocks with large differences of matrix entries magnitudes. The results
are then in the rescaled units.
Usage Examples
--------------
Default material parameters, a square periodic cell with a spherical inclusion,
logs also standard pressure dilatation and shear waves, no eigenvectors::
python examples/linear_elasticity/dispersion_analysis.py meshes/2d/special/circle_in_square.mesh --log-std-waves --eigs-only
As above, with custom eigenvalue solver parameters, and different number of
eigenvalues, mesh size and units used in the calculation::
python examples/linear_elasticity/dispersion_analysis.py meshes/2d/special/circle_in_square.mesh --solver-conf="kind='eig.scipy', method='eigsh', tol=1e-10, maxiter=1000, which='LM', sigma=0" --log-std-waves -n 5 --range=0,640,101 --mode=omega --unit-multipliers=1e-6,1e-2,1e-3 --mesh-size=1e-2 --eigs-only
Default material parameters, a square periodic cell with a square inclusion,
and a very small mesh to allow comparing the omega and kappa modes (full matrix
solver required!)::
python examples/linear_elasticity/dispersion_analysis.py meshes/2d/square_2m.mesh --solver-conf="kind='eig.scipy', method='eigh'" --log-std-waves -n 10 --range=0,640,101 --mesh-size=1e-2 --mode=omega --eigs-only --no-legends --unit-multipliers=1e-6,1e-2,1e-3 -o output/omega
python examples/linear_elasticity/dispersion_analysis.py meshes/2d/square_2m.mesh --solver-conf="kind='eig.qevp', method='companion', mode='inverted', solver={kind='eig.scipy', method='eig'}" --log-std-waves -n 500 --range=0,4000000,1001 --mesh-size=1e-2 --mode=kappa --eigs-only --no-legends --unit-multipliers=1e-6,1e-2,1e-3 -o output/kappa
View/compare the resulting logs::
python script/plot_logs.py output/omega/frequencies.txt --no-legends -g 1 -o mode-omega.png
python script/plot_logs.py output/kappa/wave-numbers.txt --no-legends -o mode-kappa.png
python script/plot_logs.py output/kappa/wave-numbers.txt --no-legends --swap-axes -o mode-kappa-t.png
In contrast to the heterogeneous square periodic cell, a homogeneous
square periodic cell (the region Y2 is empty)::
python examples/linear_elasticity/dispersion_analysis.py meshes/2d/square_1m.mesh --solver-conf="kind='eig.scipy', method='eigh'" --log-std-waves -n 10 --range=0,640,101 --mesh-size=1e-2 --mode=omega --eigs-only --no-legends --unit-multipliers=1e-6,1e-2,1e-3 -o output/omega-h
python script/plot_logs.py output/omega-h/frequencies.txt --no-legends -g 1 -o mode-omega-h.png
Use the Brillouin stepper::
python examples/linear_elasticity/dispersion_analysis.py meshes/2d/special/circle_in_square.mesh --log-std-waves -n=60 --eigs-only --no-legends --stepper=brillouin
python script/plot_logs.py output/frequencies.txt -g 0 --rc="'font.size':14, 'lines.linewidth' : 3, 'lines.markersize' : 4" -o brillouin-stepper-kappas.png
python script/plot_logs.py output/frequencies.txt -g 1 --no-legends --rc="'font.size':14, 'lines.linewidth' : 3, 'lines.markersize' : 4" -o brillouin-stepper-omegas.png
Additional arguments can be passed to the problem configuration's
:func:`define()` function using the ``--define-kwargs`` option. In this file,
only the mesh vertex separation parameter `mesh_eps` can be used::
python examples/linear_elasticity/dispersion_analysis.py meshes/2d/special/circle_in_square.mesh --log-std-waves --eigs-only --define-kwargs="mesh_eps=1e-10" --save-regions
"""
from __future__ import absolute_import
import os
import sys
sys.path.append('.')
import gc
from copy import copy
from argparse import ArgumentParser, RawDescriptionHelpFormatter
import numpy as nm
import matplotlib.pyplot as plt
from sfepy.base.base import import_file, output, Struct
from sfepy.base.conf import dict_from_string, ProblemConf
from sfepy.base.ioutils import ensure_path, remove_files_patterns, save_options
from sfepy.base.log import Log
from sfepy.discrete.fem import MeshIO
from sfepy.mechanics.matcoefs import stiffness_from_youngpoisson as stiffness
import sfepy.mechanics.matcoefs as mc
from sfepy.mechanics.units import apply_unit_multipliers
import sfepy.discrete.fem.periodic as per
from sfepy.discrete.fem.meshio import convert_complex_output
from sfepy.homogenization.utils import define_box_regions
from sfepy.discrete import Problem
from sfepy.mechanics.tensors import get_von_mises_stress
from sfepy.solvers import Solver
from sfepy.solvers.ts import get_print_info, TimeStepper
from sfepy.linalg.utils import output_array_stats, max_diff_csr
def apply_units(pars, unit_multipliers):
new_pars = apply_unit_multipliers(pars,
['stress', 'one', 'density',
                                    'stress', 'one', 'density'],
unit_multipliers)
return new_pars
def compute_von_mises(out, pb, state, extend=False, wmag=None, wdir=None):
"""
Calculate the von Mises stress.
"""
stress = pb.evaluate('ev_cauchy_stress.i.Omega(m.D, u)', mode='el_avg')
vms = get_von_mises_stress(stress.squeeze())
vms.shape = (vms.shape[0], 1, 1, 1)
out['von_mises_stress'] = Struct(name='output_data', mode='cell',
data=vms)
return out
def define(filename_mesh, pars, approx_order, refinement_level, solver_conf,
plane='strain', post_process=False, mesh_eps=1e-8):
io = MeshIO.any_from_filename(filename_mesh)
bbox = io.read_bounding_box()
dim = bbox.shape[1]
options = {
'absolute_mesh_path' : True,
'refinement_level' : refinement_level,
'allow_empty_regions' : True,
'post_process_hook' : 'compute_von_mises' if post_process else None,
}
fields = {
'displacement': ('complex', dim, 'Omega', approx_order),
}
young1, poisson1, density1, young2, poisson2, density2 = pars
materials = {
'm' : ({
'D' : {'Y1' : stiffness(dim, young=young1, poisson=poisson1,
plane=plane),
'Y2' : stiffness(dim, young=young2, poisson=poisson2,
plane=plane)},
'density' : {'Y1' : density1, 'Y2' : density2},
},),
'wave' : 'get_wdir',
}
variables = {
'u' : ('unknown field', 'displacement', 0),
'v' : ('test field', 'displacement', 'u'),
}
regions = {
'Omega' : 'all',
'Y1': 'cells of group 1',
'Y2': 'cells of group 2',
}
regions.update(define_box_regions(dim,
bbox[0], bbox[1], mesh_eps))
ebcs = {
}
if dim == 3:
epbcs = {
'periodic_x' : (['Left', 'Right'], {'u.all' : 'u.all'},
'match_x_plane'),
'periodic_y' : (['Near', 'Far'], {'u.all' : 'u.all'},
'match_y_plane'),
'periodic_z' : (['Top', 'Bottom'], {'u.all' : 'u.all'},
'match_z_plane'),
}
else:
epbcs = {
'periodic_x' : (['Left', 'Right'], {'u.all' : 'u.all'},
'match_y_line'),
'periodic_y' : (['Bottom', 'Top'], {'u.all' : 'u.all'},
'match_x_line'),
}
per.set_accuracy(mesh_eps)
functions = {
'match_x_plane' : (per.match_x_plane,),
'match_y_plane' : (per.match_y_plane,),
'match_z_plane' : (per.match_z_plane,),
'match_x_line' : (per.match_x_line,),
'match_y_line' : (per.match_y_line,),
'get_wdir' : (get_wdir,),
}
integrals = {
'i' : 2 * approx_order,
}
equations = {
'K' : 'dw_lin_elastic.i.Omega(m.D, v, u)',
'S' : 'dw_elastic_wave.i.Omega(m.D, wave.vec, v, u)',
'R' : """1j * dw_elastic_wave_cauchy.i.Omega(m.D, wave.vec, u, v)
- 1j * dw_elastic_wave_cauchy.i.Omega(m.D, wave.vec, v, u)""",
'M' : 'dw_volume_dot.i.Omega(m.density, v, u)',
}
solver_0 = solver_conf.copy()
solver_0['name'] = 'eig'
return locals()
def get_wdir(ts, coors, mode=None,
equations=None, term=None, problem=None, wdir=None, **kwargs):
if mode == 'special':
return {'vec' : wdir}
def set_wave_dir(pb, wdir):
materials = pb.get_materials()
wave_mat = materials['wave']
wave_mat.set_extra_args(wdir=wdir)
def save_materials(output_dir, pb, options):
stiffness = pb.evaluate('ev_volume_integrate_mat.2.Omega(m.D, u)',
mode='el_avg', copy_materials=False, verbose=False)
young, poisson = mc.youngpoisson_from_stiffness(stiffness,
plane=options.plane)
density = pb.evaluate('ev_volume_integrate_mat.2.Omega(m.density, u)',
mode='el_avg', copy_materials=False, verbose=False)
out = {}
out['young'] = Struct(name='young', mode='cell',
data=young[..., None, None])
out['poisson'] = Struct(name='poisson', mode='cell',
data=poisson[..., None, None])
out['density'] = Struct(name='density', mode='cell', data=density)
materials_filename = os.path.join(output_dir, 'materials.vtk')
pb.save_state(materials_filename, out=out)
def get_std_wave_fun(pb, options):
stiffness = pb.evaluate('ev_volume_integrate_mat.2.Omega(m.D, u)',
mode='el_avg', copy_materials=False, verbose=False)
young, poisson = mc.youngpoisson_from_stiffness(stiffness,
plane=options.plane)
density = pb.evaluate('ev_volume_integrate_mat.2.Omega(m.density, u)',
mode='el_avg', copy_materials=False, verbose=False)
lam, mu = mc.lame_from_youngpoisson(young, poisson,
plane=options.plane)
alam = nm.average(lam)
amu = nm.average(mu)
adensity = nm.average(density)
cp = nm.sqrt((alam + 2.0 * amu) / adensity)
cs = nm.sqrt(amu / adensity)
output('average p-wave speed:', cp)
output('average shear wave speed:', cs)
log_names = [r'$\omega_p$', r'$\omega_s$']
log_plot_kwargs = [{'ls' : '--', 'color' : 'k'},
{'ls' : '--', 'color' : 'gray'}]
if options.mode == 'omega':
fun = lambda wmag, wdir: (cp * wmag, cs * wmag)
else:
fun = lambda wmag, wdir: (wmag / cp, wmag / cs)
return fun, log_names, log_plot_kwargs
def get_stepper(rng, pb, options):
if options.stepper == 'linear':
stepper = TimeStepper(rng[0], rng[1], dt=None, n_step=rng[2])
return stepper
bbox = pb.domain.mesh.get_bounding_box()
bzone = 2.0 * nm.pi / (bbox[1] - bbox[0])
num = rng[2] // 3
class BrillouinStepper(Struct):
"""
Step over 1. Brillouin zone in xy plane.
"""
def __init__(self, t0, t1, dt=None, n_step=None, step=None, **kwargs):
Struct.__init__(self, t0=t0, t1=t1, dt=dt, n_step=n_step, step=step)
self.n_digit, self.format, self.suffix = get_print_info(self.n_step)
def __iter__(self):
ts = TimeStepper(0, bzone[0], dt=None, n_step=num)
for ii, val in ts:
yield ii, val, nm.array([1.0, 0.0])
if ii == (num-2): break
ts = TimeStepper(0, bzone[1], dt=None, n_step=num)
for ii, k1 in ts:
wdir = nm.array([bzone[0], k1])
val = nm.linalg.norm(wdir)
wdir = wdir / val
yield num + ii, val, wdir
if ii == (num-2): break
wdir = nm.array([bzone[0], bzone[1]])
val = nm.linalg.norm(wdir)
wdir = wdir / val
ts = TimeStepper(0, 1, dt=None, n_step=num)
for ii, _ in ts:
yield 2 * num + ii, val * (1.0 - float(ii)/(num-1)), wdir
stepper = BrillouinStepper(0, 1, n_step=rng[2])
return stepper
def save_eigenvectors(filename, svecs, wmag, wdir, pb):
if svecs is None: return
variables = pb.get_variables()
# Make full eigenvectors (add DOFs fixed by boundary conditions).
vecs = nm.empty((variables.di.ptr[-1], svecs.shape[1]),
dtype=svecs.dtype)
for ii in range(svecs.shape[1]):
vecs[:, ii] = variables.make_full_vec(svecs[:, ii])
# Save the eigenvectors.
out = {}
state = pb.create_state()
pp_name = pb.conf.options.get('post_process_hook')
pp = getattr(pb.conf.funmod, pp_name if pp_name is not None else '',
lambda out, *args, **kwargs: out)
for ii in range(svecs.shape[1]):
state.set_full(vecs[:, ii])
aux = state.create_output_dict()
aux2 = {}
pp(aux2, pb, state, wmag=wmag, wdir=wdir)
aux.update(convert_complex_output(aux2))
out.update({key + '%03d' % ii : aux[key] for key in aux})
pb.save_state(filename, out=out)
def assemble_matrices(define, mod, pars, set_wave_dir, options, wdir=None):
"""
Assemble the blocks of dispersion eigenvalue problem matrices.
"""
define_dict = define(filename_mesh=options.mesh_filename,
pars=pars,
approx_order=options.order,
refinement_level=options.refine,
solver_conf=options.solver_conf,
plane=options.plane,
post_process=options.post_process,
**options.define_kwargs)
conf = ProblemConf.from_dict(define_dict, mod)
pb = Problem.from_conf(conf)
pb.dispersion_options = options
pb.set_output_dir(options.output_dir)
dim = pb.domain.shape.dim
# Set the normalized wave vector direction to the material(s).
if wdir is None:
wdir = nm.asarray(options.wave_dir[:dim], dtype=nm.float64)
wdir = wdir / nm.linalg.norm(wdir)
set_wave_dir(pb, wdir)
bbox = pb.domain.mesh.get_bounding_box()
size = (bbox[1] - bbox[0]).max()
scaling0 = apply_unit_multipliers([1.0], ['length'],
options.unit_multipliers)[0]
scaling = scaling0
if options.mesh_size is not None:
scaling *= options.mesh_size / size
output('scaling factor of periodic cell mesh coordinates:', scaling)
output('new mesh size with applied unit multipliers:', scaling * size)
pb.domain.mesh.coors[:] *= scaling
pb.set_mesh_coors(pb.domain.mesh.coors, update_fields=True)
bzone = 2.0 * nm.pi / (scaling * size)
output('1. Brillouin zone size:', bzone * scaling0)
output('1. Brillouin zone size with applied unit multipliers:', bzone)
pb.time_update()
pb.update_materials()
# Assemble the matrices.
mtxs = {}
for key, eq in pb.equations.iteritems():
mtxs[key] = mtx = pb.mtx_a.copy()
mtx = eq.evaluate(mode='weak', dw_mode='matrix', asm_obj=mtx)
mtx.eliminate_zeros()
output_array_stats(mtx.data, 'nonzeros in %s' % key)
output('symmetry checks:')
output('%s - %s^T:' % (key, key), max_diff_csr(mtx, mtx.T))
output('%s - %s^H:' % (key, key), max_diff_csr(mtx, mtx.H))
return pb, wdir, bzone, mtxs
def setup_n_eigs(options, pb, mtxs):
"""
Setup the numbers of eigenvalues based on options and numbers of DOFs.
"""
solver_n_eigs = n_eigs = options.n_eigs
n_dof = mtxs['K'].shape[0]
if options.mode == 'omega':
if options.n_eigs > n_dof:
n_eigs = n_dof
solver_n_eigs = None
else:
if options.n_eigs > 2 * n_dof:
n_eigs = 2 * n_dof
solver_n_eigs = None
return solver_n_eigs, n_eigs
def build_evp_matrices(mtxs, val, mode, pb):
"""
Build the matrices of the dispersion eigenvalue problem.
"""
if mode == 'omega':
mtx_a = mtxs['K'] + val**2 * mtxs['S'] + val * mtxs['R']
output('A - A^H:', max_diff_csr(mtx_a, mtx_a.H))
evp_mtxs = (mtx_a, mtxs['M'])
else:
evp_mtxs = (mtxs['S'], mtxs['R'], mtxs['K'] - val**2 * mtxs['M'])
return evp_mtxs
def process_evp_results(eigs, svecs, val, wdir, bzone, pb, mtxs, options,
std_wave_fun=None):
"""
Transform eigenvalues to either omegas or kappas, depending on `mode`.
Transform eigenvectors, if available, depending on `mode`.
Return also the values to log.
"""
if options.mode == 'omega':
omegas = nm.sqrt(eigs)
output('eigs, omegas:')
for ii, om in enumerate(omegas):
output('{:>3}. {: .10e}, {:.10e}'.format(ii, eigs[ii], om))
if options.stepper == 'linear':
out = tuple(eigs) + tuple(omegas)
else:
out = tuple(val * wdir) + tuple(omegas)
if std_wave_fun is not None:
out = out + std_wave_fun(val, wdir)
return omegas, svecs, out
else:
kappas = eigs.copy()
rks = kappas.copy()
# Mask modes far from 1. Brillouin zone.
max_kappa = 1.2 * bzone
kappas[kappas.real > max_kappa] = nm.nan
# Mask non-physical modes.
kappas[kappas.real < 0] = nm.nan
kappas[nm.abs(kappas.imag) > 1e-10] = nm.nan
out = tuple(kappas.real)
output('raw kappas, masked real part:',)
for ii, kr in enumerate(kappas.real):
output('{:>3}. {: 23.5e}, {:.10e}'.format(ii, rks[ii], kr))
if svecs is not None:
n_dof = mtxs['K'].shape[0]
# Select only vectors corresponding to physical modes.
ii = nm.isfinite(kappas.real)
svecs = svecs[:n_dof, ii]
if std_wave_fun is not None:
out = out + tuple(ii if ii <= max_kappa else nm.nan
for ii in std_wave_fun(val, wdir))
return kappas, svecs, out
helps = {
'pars' :
'material parameters in Y1, Y2 subdomains in basic units'
' [default: %(default)s]',
'conf' :
'if given, an alternative problem description file with apply_units() and'
' define() functions [default: %(default)s]',
'define_kwargs' : 'additional keyword arguments passed to define()',
'mesh_size' :
'desired mesh size (max. of bounding box dimensions) in basic units'
' - the input periodic cell mesh is rescaled to this size'
' [default: %(default)s]',
'unit_multipliers' :
'basic unit multipliers (time, length, mass) [default: %(default)s]',
'plane' :
'for 2D problems, plane strain or stress hypothesis selection'
' [default: %(default)s]',
'wave_dir' : 'the wave vector direction (will be normalized)'
' [default: %(default)s]',
'mode' : 'solution mode: omega = solve a generalized EVP for omega,'
' kappa = solve a quadratic generalized EVP for kappa'
' [default: %(default)s]',
'stepper' : 'the range stepper. For "brillouin", only the number'
' of items from --range is used'
' [default: %(default)s]',
'range' : 'the wave vector magnitude / frequency range'
' (like numpy.linspace) depending on the mode option'
' [default: %(default)s]',
'order' : 'displacement field approximation order [default: %(default)s]',
'refine' : 'number of uniform mesh refinements [default: %(default)s]',
'n_eigs' : 'the number of eigenvalues to compute [default: %(default)s]',
'eigs_only' : 'compute only eigenvalues, not eigenvectors',
'post_process' : 'post-process eigenvectors',
'solver_conf' : 'eigenvalue problem solver configuration options'
' [default: %(default)s]',
'save_regions' : 'save defined regions into'
' <output_directory>/regions.vtk',
'save_materials' : 'save material parameters into'
' <output_directory>/materials.vtk',
'log_std_waves' : 'log also standard pressure dilatation and shear waves',
'no_legends' :
'do not show legends in the log plots',
'no_show' :
'do not show the log figure',
'silent' : 'do not print messages to screen',
'clear' :
'clear old solution files from output directory',
'output_dir' :
'output directory [default: %(default)s]',
'mesh_filename' :
'input periodic cell mesh file name [default: %(default)s]',
}
def main():
# Aluminium and epoxy.
default_pars = '70e9,0.35,2.799e3, 3.8e9,0.27,1.142e3'
default_solver_conf = ("kind='eig.scipy',method='eigsh',tol=1.0e-5,"
"maxiter=1000,which='LM',sigma=0.0")
parser = ArgumentParser(description=__doc__,
formatter_class=RawDescriptionHelpFormatter)
parser.add_argument('--pars', metavar='young1,poisson1,density1'
',young2,poisson2,density2',
action='store', dest='pars',
default=default_pars, help=helps['pars'])
parser.add_argument('--conf', metavar='filename',
action='store', dest='conf',
default=None, help=helps['conf'])
parser.add_argument('--define-kwargs', metavar='dict-like',
action='store', dest='define_kwargs',
default=None, help=helps['define_kwargs'])
parser.add_argument('--mesh-size', type=float, metavar='float',
action='store', dest='mesh_size',
default=None, help=helps['mesh_size'])
parser.add_argument('--unit-multipliers',
metavar='c_time,c_length,c_mass',
action='store', dest='unit_multipliers',
default='1.0,1.0,1.0', help=helps['unit_multipliers'])
parser.add_argument('--plane', action='store', dest='plane',
choices=['strain', 'stress'],
default='strain', help=helps['plane'])
parser.add_argument('--wave-dir', metavar='float,float[,float]',
action='store', dest='wave_dir',
default='1.0,0.0,0.0', help=helps['wave_dir'])
parser.add_argument('--mode', action='store', dest='mode',
choices=['omega', 'kappa'],
default='omega', help=helps['mode'])
parser.add_argument('--stepper', action='store', dest='stepper',
choices=['linear', 'brillouin'],
default='linear', help=helps['stepper'])
parser.add_argument('--range', metavar='start,stop,count',
action='store', dest='range',
default='0,6.4,33', help=helps['range'])
parser.add_argument('--order', metavar='int', type=int,
action='store', dest='order',
default=1, help=helps['order'])
parser.add_argument('--refine', metavar='int', type=int,
action='store', dest='refine',
default=0, help=helps['refine'])
parser.add_argument('-n', '--n-eigs', metavar='int', type=int,
action='store', dest='n_eigs',
default=6, help=helps['n_eigs'])
group = parser.add_mutually_exclusive_group()
group.add_argument('--eigs-only',
action='store_true', dest='eigs_only',
default=False, help=helps['eigs_only'])
group.add_argument('--post-process',
action='store_true', dest='post_process',
default=False, help=helps['post_process'])
parser.add_argument('--solver-conf', metavar='dict-like',
action='store', dest='solver_conf',
default=default_solver_conf, help=helps['solver_conf'])
parser.add_argument('--save-regions',
action='store_true', dest='save_regions',
default=False, help=helps['save_regions'])
parser.add_argument('--save-materials',
action='store_true', dest='save_materials',
default=False, help=helps['save_materials'])
parser.add_argument('--log-std-waves',
action='store_true', dest='log_std_waves',
default=False, help=helps['log_std_waves'])
parser.add_argument('--no-legends',
action='store_false', dest='show_legends',
default=True, help=helps['no_legends'])
parser.add_argument('--no-show',
action='store_false', dest='show',
default=True, help=helps['no_show'])
parser.add_argument('--silent',
action='store_true', dest='silent',
default=False, help=helps['silent'])
parser.add_argument('-c', '--clear',
action='store_true', dest='clear',
default=False, help=helps['clear'])
parser.add_argument('-o', '--output-dir', metavar='path',
action='store', dest='output_dir',
default='output', help=helps['output_dir'])
parser.add_argument('mesh_filename', default='',
help=helps['mesh_filename'])
options = parser.parse_args()
output_dir = options.output_dir
output.set_output(filename=os.path.join(output_dir,'output_log.txt'),
combined=options.silent == False)
if options.conf is not None:
mod = import_file(options.conf)
else:
mod = sys.modules[__name__]
apply_units = mod.apply_units
define = mod.define
set_wave_dir = mod.set_wave_dir
setup_n_eigs = mod.setup_n_eigs
build_evp_matrices = mod.build_evp_matrices
save_materials = mod.save_materials
get_std_wave_fun = mod.get_std_wave_fun
get_stepper = mod.get_stepper
process_evp_results = mod.process_evp_results
options.pars = [float(ii) for ii in options.pars.split(',')]
options.unit_multipliers = [float(ii)
for ii in options.unit_multipliers.split(',')]
options.wave_dir = [float(ii)
for ii in options.wave_dir.split(',')]
aux = options.range.split(',')
options.range = [float(aux[0]), float(aux[1]), int(aux[2])]
options.solver_conf = dict_from_string(options.solver_conf)
options.define_kwargs = dict_from_string(options.define_kwargs)
if options.clear:
remove_files_patterns(output_dir,
['*.h5', '*.vtk', '*.txt'],
ignores=['output_log.txt'],
verbose=True)
filename = os.path.join(output_dir, 'options.txt')
ensure_path(filename)
save_options(filename, [('options', vars(options))],
quote_command_line=True)
pars = apply_units(options.pars, options.unit_multipliers)
output('material parameters with applied unit multipliers:')
output(pars)
if options.mode == 'omega':
rng = copy(options.range)
rng[:2] = apply_unit_multipliers(options.range[:2],
['wave_number', 'wave_number'],
options.unit_multipliers)
output('wave number range with applied unit multipliers:', rng)
else:
if options.stepper == 'brillouin':
raise ValueError('Cannot use "brillouin" stepper in kappa mode!')
rng = copy(options.range)
rng[:2] = apply_unit_multipliers(options.range[:2],
['frequency', 'frequency'],
options.unit_multipliers)
output('frequency range with applied unit multipliers:', rng)
pb, wdir, bzone, mtxs = assemble_matrices(define, mod, pars, set_wave_dir,
options)
dim = pb.domain.shape.dim
if dim != 2:
options.plane = 'strain'
if options.save_regions:
pb.save_regions_as_groups(os.path.join(output_dir, 'regions'))
if options.save_materials:
save_materials(output_dir, pb, options)
conf = pb.solver_confs['eig']
eig_solver = Solver.any_from_conf(conf)
n_eigs, options.n_eigs = setup_n_eigs(options, pb, mtxs)
get_color = lambda ii: plt.cm.viridis((float(ii) / (options.n_eigs - 1)))
plot_kwargs = [{'color' : get_color(ii), 'ls' : '', 'marker' : 'o'}
for ii in range(options.n_eigs)]
get_color_dim = lambda ii: plt.cm.viridis((float(ii) / (dim-1)))
plot_kwargs_dim = [{'color' : get_color_dim(ii), 'ls' : '', 'marker' : 'o'}
for ii in range(dim)]
log_names = []
log_plot_kwargs = []
if options.log_std_waves:
std_wave_fun, log_names, log_plot_kwargs = get_std_wave_fun(
pb, options)
else:
std_wave_fun = None
stepper = get_stepper(rng, pb, options)
if options.mode == 'omega':
eigenshapes_filename = os.path.join(output_dir,
'frequency-eigenshapes-%s.vtk'
% stepper.suffix)
if options.stepper == 'linear':
log = Log([[r'$\lambda_{%d}$' % ii for ii in range(options.n_eigs)],
[r'$\omega_{%d}$'
% ii for ii in range(options.n_eigs)] + log_names],
plot_kwargs=[plot_kwargs, plot_kwargs + log_plot_kwargs],
formats=[['{:.5e}'] * options.n_eigs,
['{:.5e}'] * (options.n_eigs + len(log_names))],
yscales=['linear', 'linear'],
xlabels=[r'$\kappa$', r'$\kappa$'],
ylabels=[r'eigenvalues $\lambda_i$',
r'frequencies $\omega_i$'],
show_legends=options.show_legends,
is_plot=options.show,
log_filename=os.path.join(output_dir, 'frequencies.txt'),
aggregate=1000, sleep=0.1)
else:
log = Log([[r'$\kappa_{%d}$'% ii for ii in range(dim)],
[r'$\omega_{%d}$'
% ii for ii in range(options.n_eigs)] + log_names],
plot_kwargs=[plot_kwargs_dim,
plot_kwargs + log_plot_kwargs],
formats=[['{:.5e}'] * dim,
['{:.5e}'] * (options.n_eigs + len(log_names))],
yscales=['linear', 'linear'],
xlabels=[r'', r''],
ylabels=[r'wave vector $\kappa$',
r'frequencies $\omega_i$'],
show_legends=options.show_legends,
is_plot=options.show,
log_filename=os.path.join(output_dir, 'frequencies.txt'),
aggregate=1000, sleep=0.1)
for aux in stepper:
if options.stepper == 'linear':
iv, wmag = aux
else:
iv, wmag, wdir = aux
output('step %d: wave vector %s' % (iv, wmag * wdir))
if options.stepper == 'brillouin':
pb, _, bzone, mtxs = assemble_matrices(
define, mod, pars, set_wave_dir, options, wdir=wdir)
evp_mtxs = build_evp_matrices(mtxs, wmag, options.mode, pb)
if options.eigs_only:
eigs = eig_solver(*evp_mtxs, n_eigs=n_eigs,
eigenvectors=False)
svecs = None
else:
eigs, svecs = eig_solver(*evp_mtxs, n_eigs=n_eigs,
eigenvectors=True)
omegas, svecs, out = process_evp_results(
eigs, svecs, wmag, wdir, bzone, pb, mtxs, options,
std_wave_fun=std_wave_fun
)
if options.stepper == 'linear':
log(*out, x=[wmag, wmag])
else:
log(*out, x=[iv, iv])
save_eigenvectors(eigenshapes_filename % iv, svecs, wmag, wdir, pb)
gc.collect()
log(save_figure=os.path.join(output_dir, 'frequencies.png'))
log(finished=True)
else:
eigenshapes_filename = os.path.join(output_dir,
'wave-number-eigenshapes-%s.vtk'
% stepper.suffix)
log = Log([[r'$\kappa_{%d}$' % ii for ii in range(options.n_eigs)]
+ log_names],
plot_kwargs=[plot_kwargs + log_plot_kwargs],
formats=[['{:.5e}'] * (options.n_eigs + len(log_names))],
yscales=['linear'],
xlabels=[r'$\omega$'],
ylabels=[r'wave numbers $\kappa_i$'],
show_legends=options.show_legends,
is_plot=options.show,
log_filename=os.path.join(output_dir, 'wave-numbers.txt'),
aggregate=1000, sleep=0.1)
for io, omega in stepper:
output('step %d: frequency %s' % (io, omega))
evp_mtxs = build_evp_matrices(mtxs, omega, options.mode, pb)
if options.eigs_only:
eigs = eig_solver(*evp_mtxs, n_eigs=n_eigs,
eigenvectors=False)
svecs = None
else:
eigs, svecs = eig_solver(*evp_mtxs, n_eigs=n_eigs,
eigenvectors=True)
kappas, svecs, out = process_evp_results(
eigs, svecs, omega, wdir, bzone, pb, mtxs, options,
std_wave_fun=std_wave_fun
)
log(*out, x=[omega])
save_eigenvectors(eigenshapes_filename % io, svecs, kappas, wdir,
pb)
gc.collect()
log(save_figure=os.path.join(output_dir, 'wave-numbers.png'))
log(finished=True)
if __name__ == '__main__':
main()
|
[
"sfepy.base.conf.dict_from_string",
"sfepy.solvers.ts.TimeStepper",
"sfepy.linalg.utils.output_array_stats",
"sfepy.base.ioutils.remove_files_patterns",
"sfepy.base.base.Struct.__init__",
"sfepy.base.ioutils.ensure_path",
"sfepy.discrete.fem.periodic.set_accuracy",
"sfepy.mechanics.matcoefs.lame_from_youngpoisson",
"sfepy.base.base.Struct",
"sfepy.base.conf.ProblemConf.from_dict",
"sfepy.discrete.fem.meshio.convert_complex_output",
"sfepy.base.base.output",
"sfepy.discrete.Problem.from_conf",
"sfepy.base.base.import_file",
"sfepy.solvers.ts.get_print_info",
"sfepy.discrete.fem.MeshIO.any_from_filename",
"sfepy.linalg.utils.max_diff_csr",
"sfepy.solvers.Solver.any_from_conf",
"sfepy.mechanics.matcoefs.stiffness_from_youngpoisson",
"sfepy.homogenization.utils.define_box_regions",
"sfepy.mechanics.matcoefs.youngpoisson_from_stiffness",
"sfepy.mechanics.units.apply_unit_multipliers"
] |
[((4031, 4051), 'sys.path.append', 'sys.path.append', (['"""."""'], {}), "('.')\n", (4046, 4051), False, 'import sys\n'), ((5102, 5210), 'sfepy.mechanics.units.apply_unit_multipliers', 'apply_unit_multipliers', (['pars', "['stress', 'one', 'density', 'stress', 'one', 'density']", 'unit_multipliers'], {}), "(pars, ['stress', 'one', 'density', 'stress', 'one',\n 'density'], unit_multipliers)\n", (5124, 5210), False, 'from sfepy.mechanics.units import apply_unit_multipliers\n'), ((5666, 5715), 'sfepy.base.base.Struct', 'Struct', ([], {'name': '"""output_data"""', 'mode': '"""cell"""', 'data': 'vms'}), "(name='output_data', mode='cell', data=vms)\n", (5672, 5715), False, 'from sfepy.base.base import import_file, output, Struct\n'), ((5919, 5958), 'sfepy.discrete.fem.MeshIO.any_from_filename', 'MeshIO.any_from_filename', (['filename_mesh'], {}), '(filename_mesh)\n', (5943, 5958), False, 'from sfepy.discrete.fem import MeshIO\n'), ((7812, 7838), 'sfepy.discrete.fem.periodic.set_accuracy', 'per.set_accuracy', (['mesh_eps'], {}), '(mesh_eps)\n', (7828, 7838), True, 'import sfepy.discrete.fem.periodic as per\n'), ((9143, 9205), 'sfepy.mechanics.matcoefs.youngpoisson_from_stiffness', 'mc.youngpoisson_from_stiffness', (['stiffness'], {'plane': 'options.plane'}), '(stiffness, plane=options.plane)\n', (9173, 9205), True, 'import sfepy.mechanics.matcoefs as mc\n'), ((9444, 9506), 'sfepy.base.base.Struct', 'Struct', ([], {'name': '"""young"""', 'mode': '"""cell"""', 'data': 'young[..., None, None]'}), "(name='young', mode='cell', data=young[..., None, None])\n", (9450, 9506), False, 'from sfepy.base.base import import_file, output, Struct\n'), ((9554, 9620), 'sfepy.base.base.Struct', 'Struct', ([], {'name': '"""poisson"""', 'mode': '"""cell"""', 'data': 'poisson[..., None, None]'}), "(name='poisson', mode='cell', data=poisson[..., None, None])\n", (9560, 9620), False, 'from sfepy.base.base import import_file, output, Struct\n'), ((9670, 9719), 'sfepy.base.base.Struct', 'Struct', ([], {'name': '"""density"""', 'mode': '"""cell"""', 'data': 'density'}), "(name='density', mode='cell', data=density)\n", (9676, 9719), False, 'from sfepy.base.base import import_file, output, Struct\n'), ((9745, 9786), 'os.path.join', 'os.path.join', (['output_dir', '"""materials.vtk"""'], {}), "(output_dir, 'materials.vtk')\n", (9757, 9786), False, 'import os\n'), ((10042, 10104), 'sfepy.mechanics.matcoefs.youngpoisson_from_stiffness', 'mc.youngpoisson_from_stiffness', (['stiffness'], {'plane': 'options.plane'}), '(stiffness, plane=options.plane)\n', (10072, 10104), True, 'import sfepy.mechanics.matcoefs as mc\n'), ((10325, 10387), 'sfepy.mechanics.matcoefs.lame_from_youngpoisson', 'mc.lame_from_youngpoisson', (['young', 'poisson'], {'plane': 'options.plane'}), '(young, poisson, plane=options.plane)\n', (10350, 10387), True, 'import sfepy.mechanics.matcoefs as mc\n'), ((10439, 10454), 'numpy.average', 'nm.average', (['lam'], {}), '(lam)\n', (10449, 10454), True, 'import numpy as nm\n'), ((10465, 10479), 'numpy.average', 'nm.average', (['mu'], {}), '(mu)\n', (10475, 10479), True, 'import numpy as nm\n'), ((10495, 10514), 'numpy.average', 'nm.average', (['density'], {}), '(density)\n', (10505, 10514), True, 'import numpy as nm\n'), ((10525, 10563), 'numpy.sqrt', 'nm.sqrt', (['((alam + 2.0 * amu) / adensity)'], {}), '((alam + 2.0 * amu) / adensity)\n', (10532, 10563), True, 'import numpy as nm\n'), ((10573, 10596), 'numpy.sqrt', 'nm.sqrt', (['(amu / adensity)'], {}), '(amu / adensity)\n', (10580, 10596), True, 'import numpy as 
nm\n'), ((10601, 10636), 'sfepy.base.base.output', 'output', (['"""average p-wave speed:"""', 'cp'], {}), "('average p-wave speed:', cp)\n", (10607, 10636), False, 'from sfepy.base.base import import_file, output, Struct\n'), ((10641, 10680), 'sfepy.base.base.output', 'output', (['"""average shear wave speed:"""', 'cs'], {}), "('average shear wave speed:', cs)\n", (10647, 10680), False, 'from sfepy.base.base import import_file, output, Struct\n'), ((12742, 12809), 'numpy.empty', 'nm.empty', (['(variables.di.ptr[-1], svecs.shape[1])'], {'dtype': 'svecs.dtype'}), '((variables.di.ptr[-1], svecs.shape[1]), dtype=svecs.dtype)\n', (12750, 12809), True, 'import numpy as nm\n'), ((14111, 14150), 'sfepy.base.conf.ProblemConf.from_dict', 'ProblemConf.from_dict', (['define_dict', 'mod'], {}), '(define_dict, mod)\n', (14132, 14150), False, 'from sfepy.base.conf import dict_from_string, ProblemConf\n'), ((14161, 14184), 'sfepy.discrete.Problem.from_conf', 'Problem.from_conf', (['conf'], {}), '(conf)\n', (14178, 14184), False, 'from sfepy.discrete import Problem\n'), ((14836, 14904), 'sfepy.base.base.output', 'output', (['"""scaling factor of periodic cell mesh coordinates:"""', 'scaling'], {}), "('scaling factor of periodic cell mesh coordinates:', scaling)\n", (14842, 14904), False, 'from sfepy.base.base import import_file, output, Struct\n'), ((14909, 14979), 'sfepy.base.base.output', 'output', (['"""new mesh size with applied unit multipliers:"""', '(scaling * size)'], {}), "('new mesh size with applied unit multipliers:', scaling * size)\n", (14915, 14979), False, 'from sfepy.base.base import import_file, output, Struct\n'), ((15131, 15182), 'sfepy.base.base.output', 'output', (['"""1. Brillouin zone size:"""', '(bzone * scaling0)'], {}), "('1. Brillouin zone size:', bzone * scaling0)\n", (15137, 15182), False, 'from sfepy.base.base import import_file, output, Struct\n'), ((15187, 15257), 'sfepy.base.base.output', 'output', (['"""1. Brillouin zone size with applied unit multipliers:"""', 'bzone'], {}), "('1. 
Brillouin zone size with applied unit multipliers:', bzone)\n", (15193, 15257), False, 'from sfepy.base.base import import_file, output, Struct\n'), ((21060, 21145), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '__doc__', 'formatter_class': 'RawDescriptionHelpFormatter'}), '(description=__doc__, formatter_class=RawDescriptionHelpFormatter\n )\n', (21074, 21145), False, 'from argparse import ArgumentParser, RawDescriptionHelpFormatter\n'), ((26795, 26832), 'sfepy.base.conf.dict_from_string', 'dict_from_string', (['options.solver_conf'], {}), '(options.solver_conf)\n', (26811, 26832), False, 'from sfepy.base.conf import dict_from_string, ProblemConf\n'), ((26861, 26900), 'sfepy.base.conf.dict_from_string', 'dict_from_string', (['options.define_kwargs'], {}), '(options.define_kwargs)\n', (26877, 26900), False, 'from sfepy.base.conf import dict_from_string, ProblemConf\n'), ((27142, 27181), 'os.path.join', 'os.path.join', (['output_dir', '"""options.txt"""'], {}), "(output_dir, 'options.txt')\n", (27154, 27181), False, 'import os\n'), ((27186, 27207), 'sfepy.base.ioutils.ensure_path', 'ensure_path', (['filename'], {}), '(filename)\n', (27197, 27207), False, 'from sfepy.base.ioutils import ensure_path, remove_files_patterns, save_options\n'), ((27375, 27435), 'sfepy.base.base.output', 'output', (['"""material parameters with applied unit multipliers:"""'], {}), "('material parameters with applied unit multipliers:')\n", (27381, 27435), False, 'from sfepy.base.base import import_file, output, Struct\n'), ((27440, 27452), 'sfepy.base.base.output', 'output', (['pars'], {}), '(pars)\n', (27446, 27452), False, 'from sfepy.base.base import import_file, output, Struct\n'), ((28674, 28700), 'sfepy.solvers.Solver.any_from_conf', 'Solver.any_from_conf', (['conf'], {}), '(conf)\n', (28694, 28700), False, 'from sfepy.solvers import Solver\n'), ((7046, 7097), 'sfepy.homogenization.utils.define_box_regions', 'define_box_regions', (['dim', 'bbox[0]', 'bbox[1]', 'mesh_eps'], {}), '(dim, bbox[0], bbox[1], mesh_eps)\n', (7064, 7097), False, 'from sfepy.homogenization.utils import define_box_regions\n'), ((11128, 11179), 'sfepy.solvers.ts.TimeStepper', 'TimeStepper', (['rng[0]', 'rng[1]'], {'dt': 'None', 'n_step': 'rng[2]'}), '(rng[0], rng[1], dt=None, n_step=rng[2])\n', (11139, 11179), False, 'from sfepy.solvers.ts import get_print_info, TimeStepper\n'), ((14397, 14449), 'numpy.asarray', 'nm.asarray', (['options.wave_dir[:dim]'], {'dtype': 'nm.float64'}), '(options.wave_dir[:dim], dtype=nm.float64)\n', (14407, 14449), True, 'import numpy as nm\n'), ((14618, 14685), 'sfepy.mechanics.units.apply_unit_multipliers', 'apply_unit_multipliers', (['[1.0]', "['length']", 'options.unit_multipliers'], {}), "([1.0], ['length'], options.unit_multipliers)\n", (14640, 14685), False, 'from sfepy.mechanics.units import apply_unit_multipliers\n'), ((15545, 15597), 'sfepy.linalg.utils.output_array_stats', 'output_array_stats', (['mtx.data', "('nonzeros in %s' % key)"], {}), "(mtx.data, 'nonzeros in %s' % key)\n", (15563, 15597), False, 'from sfepy.linalg.utils import output_array_stats, max_diff_csr\n'), ((15607, 15633), 'sfepy.base.base.output', 'output', (['"""symmetry checks:"""'], {}), "('symmetry checks:')\n", (15613, 15633), False, 'from sfepy.base.base import import_file, output, Struct\n'), ((17054, 17067), 'numpy.sqrt', 'nm.sqrt', (['eigs'], {}), '(eigs)\n', (17061, 17067), True, 'import numpy as nm\n'), ((17077, 17100), 'sfepy.base.base.output', 'output', (['"""eigs, omegas:"""'], {}), "('eigs, 
omegas:')\n", (17083, 17100), False, 'from sfepy.base.base import import_file, output, Struct\n'), ((17860, 17899), 'sfepy.base.base.output', 'output', (['"""raw kappas, masked real part:"""'], {}), "('raw kappas, masked real part:')\n", (17866, 17899), False, 'from sfepy.base.base import import_file, output, Struct\n'), ((25966, 25991), 'sfepy.base.base.import_file', 'import_file', (['options.conf'], {}), '(options.conf)\n', (25977, 25991), False, 'from sfepy.base.base import import_file, output, Struct\n'), ((26932, 27040), 'sfepy.base.ioutils.remove_files_patterns', 'remove_files_patterns', (['output_dir', "['*.h5', '*.vtk', '*.txt']"], {'ignores': "['output_log.txt']", 'verbose': '(True)'}), "(output_dir, ['*.h5', '*.vtk', '*.txt'], ignores=[\n 'output_log.txt'], verbose=True)\n", (26953, 27040), False, 'from sfepy.base.ioutils import ensure_path, remove_files_patterns, save_options\n'), ((27500, 27519), 'copy.copy', 'copy', (['options.range'], {}), '(options.range)\n', (27504, 27519), False, 'from copy import copy\n'), ((27538, 27641), 'sfepy.mechanics.units.apply_unit_multipliers', 'apply_unit_multipliers', (['options.range[:2]', "['wave_number', 'wave_number']", 'options.unit_multipliers'], {}), "(options.range[:2], ['wave_number', 'wave_number'],\n options.unit_multipliers)\n", (27560, 27641), False, 'from sfepy.mechanics.units import apply_unit_multipliers\n'), ((27728, 27791), 'sfepy.base.base.output', 'output', (['"""wave number range with applied unit multipliers:"""', 'rng'], {}), "('wave number range with applied unit multipliers:', rng)\n", (27734, 27791), False, 'from sfepy.base.base import import_file, output, Struct\n'), ((27939, 27958), 'copy.copy', 'copy', (['options.range'], {}), '(options.range)\n', (27943, 27958), False, 'from copy import copy\n'), ((27977, 28076), 'sfepy.mechanics.units.apply_unit_multipliers', 'apply_unit_multipliers', (['options.range[:2]', "['frequency', 'frequency']", 'options.unit_multipliers'], {}), "(options.range[:2], ['frequency', 'frequency'],\n options.unit_multipliers)\n", (27999, 28076), False, 'from sfepy.mechanics.units import apply_unit_multipliers\n'), ((28163, 28224), 'sfepy.base.base.output', 'output', (['"""frequency range with applied unit multipliers:"""', 'rng'], {}), "('frequency range with applied unit multipliers:', rng)\n", (28169, 28224), False, 'from sfepy.base.base import import_file, output, Struct\n'), ((29476, 29549), 'os.path.join', 'os.path.join', (['output_dir', "('frequency-eigenshapes-%s.vtk' % stepper.suffix)"], {}), "(output_dir, 'frequency-eigenshapes-%s.vtk' % stepper.suffix)\n", (29488, 29549), False, 'import os\n'), ((32758, 32833), 'os.path.join', 'os.path.join', (['output_dir', "('wave-number-eigenshapes-%s.vtk' % stepper.suffix)"], {}), "(output_dir, 'wave-number-eigenshapes-%s.vtk' % stepper.suffix)\n", (32770, 32833), False, 'import os\n'), ((11520, 11588), 'sfepy.base.base.Struct.__init__', 'Struct.__init__', (['self'], {'t0': 't0', 't1': 't1', 'dt': 'dt', 'n_step': 'n_step', 'step': 'step'}), '(self, t0=t0, t1=t1, dt=dt, n_step=n_step, step=step)\n', (11535, 11588), False, 'from sfepy.base.base import import_file, output, Struct\n'), ((11643, 11670), 'sfepy.solvers.ts.get_print_info', 'get_print_info', (['self.n_step'], {}), '(self.n_step)\n', (11657, 11670), False, 'from sfepy.solvers.ts import get_print_info, TimeStepper\n'), ((11717, 11762), 'sfepy.solvers.ts.TimeStepper', 'TimeStepper', (['(0)', 'bzone[0]'], {'dt': 'None', 'n_step': 'num'}), '(0, bzone[0], dt=None, n_step=num)\n', (11728, 
11762), False, 'from sfepy.solvers.ts import get_print_info, TimeStepper\n'), ((11904, 11949), 'sfepy.solvers.ts.TimeStepper', 'TimeStepper', (['(0)', 'bzone[1]'], {'dt': 'None', 'n_step': 'num'}), '(0, bzone[1], dt=None, n_step=num)\n', (11915, 11949), False, 'from sfepy.solvers.ts import get_print_info, TimeStepper\n'), ((12207, 12237), 'numpy.array', 'nm.array', (['[bzone[0], bzone[1]]'], {}), '([bzone[0], bzone[1]])\n', (12215, 12237), True, 'import numpy as nm\n'), ((12256, 12276), 'numpy.linalg.norm', 'nm.linalg.norm', (['wdir'], {}), '(wdir)\n', (12270, 12276), True, 'import numpy as nm\n'), ((12324, 12362), 'sfepy.solvers.ts.TimeStepper', 'TimeStepper', (['(0)', '(1)'], {'dt': 'None', 'n_step': 'num'}), '(0, 1, dt=None, n_step=num)\n', (12335, 12362), False, 'from sfepy.solvers.ts import get_print_info, TimeStepper\n'), ((13382, 13410), 'sfepy.discrete.fem.meshio.convert_complex_output', 'convert_complex_output', (['aux2'], {}), '(aux2)\n', (13404, 13410), False, 'from sfepy.discrete.fem.meshio import convert_complex_output\n'), ((14472, 14492), 'numpy.linalg.norm', 'nm.linalg.norm', (['wdir'], {}), '(wdir)\n', (14486, 14492), True, 'import numpy as nm\n'), ((15676, 15700), 'sfepy.linalg.utils.max_diff_csr', 'max_diff_csr', (['mtx', 'mtx.T'], {}), '(mtx, mtx.T)\n', (15688, 15700), False, 'from sfepy.linalg.utils import output_array_stats, max_diff_csr\n'), ((15744, 15768), 'sfepy.linalg.utils.max_diff_csr', 'max_diff_csr', (['mtx', 'mtx.H'], {}), '(mtx, mtx.H)\n', (15756, 15768), False, 'from sfepy.linalg.utils import output_array_stats, max_diff_csr\n'), ((16522, 16550), 'sfepy.linalg.utils.max_diff_csr', 'max_diff_csr', (['mtx_a', 'mtx_a.H'], {}), '(mtx_a, mtx_a.H)\n', (16534, 16550), False, 'from sfepy.linalg.utils import output_array_stats, max_diff_csr\n'), ((18173, 18197), 'numpy.isfinite', 'nm.isfinite', (['kappas.real'], {}), '(kappas.real)\n', (18184, 18197), True, 'import numpy as nm\n'), ((25819, 25861), 'os.path.join', 'os.path.join', (['output_dir', '"""output_log.txt"""'], {}), "(output_dir, 'output_log.txt')\n", (25831, 25861), False, 'import os\n'), ((28505, 28540), 'os.path.join', 'os.path.join', (['output_dir', '"""regions"""'], {}), "(output_dir, 'regions')\n", (28517, 28540), False, 'import os\n'), ((31563, 31616), 'sfepy.base.base.output', 'output', (["('step %d: wave vector %s' % (iv, wmag * wdir))"], {}), "('step %d: wave vector %s' % (iv, wmag * wdir))\n", (31569, 31616), False, 'from sfepy.base.base import import_file, output, Struct\n'), ((32606, 32618), 'gc.collect', 'gc.collect', ([], {}), '()\n', (32616, 32618), False, 'import gc\n'), ((33566, 33611), 'sfepy.base.base.output', 'output', (["('step %d: frequency %s' % (io, omega))"], {}), "('step %d: frequency %s' % (io, omega))\n", (33572, 33611), False, 'from sfepy.base.base import import_file, output, Struct\n'), ((34348, 34360), 'gc.collect', 'gc.collect', ([], {}), '()\n', (34358, 34360), False, 'import gc\n'), ((12003, 12027), 'numpy.array', 'nm.array', (['[bzone[0], k1]'], {}), '([bzone[0], k1])\n', (12011, 12027), True, 'import numpy as nm\n'), ((12050, 12070), 'numpy.linalg.norm', 'nm.linalg.norm', (['wdir'], {}), '(wdir)\n', (12064, 12070), True, 'import numpy as nm\n'), ((17780, 17799), 'numpy.abs', 'nm.abs', (['kappas.imag'], {}), '(kappas.imag)\n', (17786, 17799), True, 'import numpy as nm\n'), ((32644, 32687), 'os.path.join', 'os.path.join', (['output_dir', '"""frequencies.png"""'], {}), "(output_dir, 'frequencies.png')\n", (32656, 32687), False, 'import os\n'), ((33429, 33473), 
'os.path.join', 'os.path.join', (['output_dir', '"""wave-numbers.txt"""'], {}), "(output_dir, 'wave-numbers.txt')\n", (33441, 33473), False, 'import os\n'), ((34386, 34430), 'os.path.join', 'os.path.join', (['output_dir', '"""wave-numbers.png"""'], {}), "(output_dir, 'wave-numbers.png')\n", (34398, 34430), False, 'import os\n'), ((6454, 6513), 'sfepy.mechanics.matcoefs.stiffness_from_youngpoisson', 'stiffness', (['dim'], {'young': 'young1', 'poisson': 'poisson1', 'plane': 'plane'}), '(dim, young=young1, poisson=poisson1, plane=plane)\n', (6463, 6513), True, 'from sfepy.mechanics.matcoefs import stiffness_from_youngpoisson as stiffness\n'), ((6577, 6636), 'sfepy.mechanics.matcoefs.stiffness_from_youngpoisson', 'stiffness', (['dim'], {'young': 'young2', 'poisson': 'poisson2', 'plane': 'plane'}), '(dim, young=young2, poisson=poisson2, plane=plane)\n', (6586, 6636), True, 'from sfepy.mechanics.matcoefs import stiffness_from_youngpoisson as stiffness\n'), ((30413, 30456), 'os.path.join', 'os.path.join', (['output_dir', '"""frequencies.txt"""'], {}), "(output_dir, 'frequencies.txt')\n", (30425, 30456), False, 'import os\n'), ((31296, 31339), 'os.path.join', 'os.path.join', (['output_dir', '"""frequencies.txt"""'], {}), "(output_dir, 'frequencies.txt')\n", (31308, 31339), False, 'import os\n'), ((11825, 11845), 'numpy.array', 'nm.array', (['[1.0, 0.0]'], {}), '([1.0, 0.0])\n', (11833, 11845), True, 'import numpy as nm\n')]
|
import json
from sfm.utils import validate_signature, calc_signature
from sfm.dependencies import get_db
from sfm.models import WorkItemCreate, Project, CommitCreate, WorkItem, WorkItemUpdate
from typing import List, Optional
from sqlmodel import Session, select, and_
from fastapi import APIRouter, HTTPException, Depends, Path, Header, Request, Query
from opencensus.ext.azure.log_exporter import AzureLogHandler
from sfm.config import get_settings
from sfm.logger import create_logger
from .github_functions import (
webhook_project_processor,
deployment_processor,
pull_request_processor,
populate_past_github,
defect_processor,
reopened_processor,
unlabeled_processor,
)
app_settings = get_settings()
logger = create_logger(__name__)
router = APIRouter()
async def fetch_github_payload(request):
    # Request.body() and Request.json() are coroutines in Starlette/FastAPI,
    # so they must be awaited before the raw bytes and parsed payload are used.
    raw = await request.body()
    signature = request.headers.get("X-Hub-Signature-256")
    proj_auth_token = validate_signature(signature, raw)
    payload = await request.json()
    event_type = request.headers.get("X-Github-Event")
    return payload, event_type, proj_auth_token
@router.post("/github_webhooks/") # pragma: no cover
async def webhook_handler(request: Request, db: Session = Depends(get_db)):
"""
## Github Webhook Handler
Awaits incoming payload from Github Webhooks and parses the data.
Currently, endpoint processes two different event types: "Deployment" and "Pull Request".
The payload data is parsed and data needed to calculate the DORA metrics is stored in the db tables.
"""
if app_settings.GITHUB_WEBHOOK_SECRET in ["", "XXXXXXXXXXX"]:
raise HTTPException(
status_code=412,
detail="Missing github webhook secret. Please specify GITHUB_WEBHOOK_SECRET and try again",
)
# if app_settings.ENV != "test":
    payload, event_type, proj_auth_token = await fetch_github_payload(request)
# gather common payload object properties
if event_type != "push": # push events are the exception to common properties
repository = payload.get("repository")
else: # TODO: pull in push event information
pass
if event_type != "repository":
project_name = repository.get("name")
print("THE PROJECT NAME: ", project_name)
project_db = db.exec(
select(Project).where(Project.name == project_name)
).first()
if not project_db:
logger.debug("A matching project was not found in the database")
raise HTTPException(
status_code=404, detail="Matching project not found in db"
)
if event_type == "repository":
action = payload.get("action")
webhook_project_processor(db, repository, action)
elif event_type == "deployment":
deployment = payload.get("deployment")
deployment_processor(db, deployment, project_db, proj_auth_token)
elif event_type == "pull_request":
pull_request = payload.get("pull_request")
if (
pull_request["head"]["repo"]["default_branch"] == "main"
): # process only pull requests to main
pull_request_processor(db, pull_request, project_db, proj_auth_token)
elif event_type == "issues":
action = payload.get("action")
issue = payload.get("issue")
if action == "closed":
defect_processor(db, issue, project_db, proj_auth_token, closed=True)
elif action == "labeled" and "production defect" in [
lbl["name"] for lbl in issue["labels"]
]:
defect_processor(db, issue, project_db, proj_auth_token, closed=False)
elif action == "reopened":
reopened_processor(db, issue, proj_auth_token)
elif action == "unlabeled" and "production defect" not in [
lbl["name"] for lbl in issue["labels"]
]:
unlabeled_processor(db, issue, proj_auth_token)
else:
logger.debug("Issues event type passed that is unhandled")
else:
logger.warning("Event type not handled")
return {"code": "event type not handled"}
# raise HTTPException(status_code=404, detail="Event type not handled.")
return {"code": "success"}
@router.get("/github_populate")
def populate_past_data(
org: str,
db: Session = Depends(get_db),
include_only_list: Optional[List[str]] = Query(None),
):
"""
## Github Backpopulate
Queries the GitHub API to populate projects and work items that already exist in specified repos.
"include_only_list" is a list of repo names (as strings) that you wish use to populate the database.
If "include_only_list" is populated, only projects in this list will be populated
"""
proj_intended_not_found = populate_past_github(db, org, include_only_list)
in_database = db.exec(select(Project)).all()
proj_name_in_db = [proj.name for proj in in_database]
not_found_projects = []
if include_only_list is not None:
for repo in include_only_list:
if repo not in proj_name_in_db:
not_found_projects.append(repo)
if proj_intended_not_found != [] or not_found_projects != []: # pragma: no cover
included_projects = []
for proj in include_only_list:
if proj not in proj_intended_not_found and proj in proj_name_in_db:
included_projects.append(proj)
return {
"projects_included": included_projects,
"projects_not_included": proj_intended_not_found,
"project_not_found": not_found_projects,
}
else:
return {"code": "success"}
|
[
"sqlmodel.select"
] |
[((721, 735), 'sfm.config.get_settings', 'get_settings', ([], {}), '()\n', (733, 735), False, 'from sfm.config import get_settings\n'), ((747, 770), 'sfm.logger.create_logger', 'create_logger', (['__name__'], {}), '(__name__)\n', (760, 770), False, 'from sfm.logger import create_logger\n'), ((782, 793), 'fastapi.APIRouter', 'APIRouter', ([], {}), '()\n', (791, 793), False, 'from fastapi import APIRouter, HTTPException, Depends, Path, Header, Request, Query\n'), ((937, 971), 'sfm.utils.validate_signature', 'validate_signature', (['signature', 'raw'], {}), '(signature, raw)\n', (955, 971), False, 'from sfm.utils import validate_signature, calc_signature\n'), ((1221, 1236), 'fastapi.Depends', 'Depends', (['get_db'], {}), '(get_db)\n', (1228, 1236), False, 'from fastapi import APIRouter, HTTPException, Depends, Path, Header, Request, Query\n'), ((4325, 4340), 'fastapi.Depends', 'Depends', (['get_db'], {}), '(get_db)\n', (4332, 4340), False, 'from fastapi import APIRouter, HTTPException, Depends, Path, Header, Request, Query\n'), ((4387, 4398), 'fastapi.Query', 'Query', (['None'], {}), '(None)\n', (4392, 4398), False, 'from fastapi import APIRouter, HTTPException, Depends, Path, Header, Request, Query\n'), ((1636, 1768), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': '(412)', 'detail': '"""Missing github webhook secret. Please specify GITHUB_WEBHOOK_SECRET and try again"""'}), "(status_code=412, detail=\n 'Missing github webhook secret. Please specify GITHUB_WEBHOOK_SECRET and try again'\n )\n", (1649, 1768), False, 'from fastapi import APIRouter, HTTPException, Depends, Path, Header, Request, Query\n'), ((2513, 2586), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': '(404)', 'detail': '"""Matching project not found in db"""'}), "(status_code=404, detail='Matching project not found in db')\n", (2526, 2586), False, 'from fastapi import APIRouter, HTTPException, Depends, Path, Header, Request, Query\n'), ((4846, 4861), 'sqlmodel.select', 'select', (['Project'], {}), '(Project)\n', (4852, 4861), False, 'from sqlmodel import Session, select, and_\n'), ((2320, 2335), 'sqlmodel.select', 'select', (['Project'], {}), '(Project)\n', (2326, 2335), False, 'from sqlmodel import Session, select, and_\n')]
|
from pydantic.types import Optional
from sqlmodel import Field, Relationship, SQLModel
from api.public.team.models import Team
class HeroBase(SQLModel):
name: str
secret_name: str
age: Optional[int] = None
team_id: Optional[int] = Field(default=None, foreign_key="team.id")
class Config:
schema_extra = {
"example": {
"id": 1,
"name": "<NAME>",
"secret_name": "<NAME>",
"age": 27,
"team_id": 1,
}
}
class Hero(HeroBase, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
team: Optional[Team] = Relationship(back_populates="heroes")
class HeroCreate(HeroBase):
pass
class HeroRead(HeroBase):
id: int
class HeroUpdate(HeroBase):
name: Optional[str] = None
secret_name: Optional[str] = None
age: Optional[int] = None
team_id: Optional[int] = None
class Config:
schema_extra = {
"example": {
"name": "<NAME>",
"secret_name": "<NAME>",
"age": 27,
"team_id": 1,
}
}
|
[
"sqlmodel.Relationship",
"sqlmodel.Field"
] |
[((251, 293), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'foreign_key': '"""team.id"""'}), "(default=None, foreign_key='team.id')\n", (256, 293), False, 'from sqlmodel import Field, Relationship, SQLModel\n'), ((604, 641), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (609, 641), False, 'from sqlmodel import Field, Relationship, SQLModel\n'), ((670, 707), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""heroes"""'}), "(back_populates='heroes')\n", (682, 707), False, 'from sqlmodel import Field, Relationship, SQLModel\n')]
|
from typing import Optional
from sqlmodel import Field, Session, SQLModel, create_engine
class Hero(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str
secret_name: str
age: Optional[int] = None
sqlite_file_name = "database.db"
sqlite_url = f"sqlite:///{sqlite_file_name}"
engine = create_engine(sqlite_url, echo=True)
def create_db_and_tables():
SQLModel.metadata.create_all(engine)
def create_heroes():
hero_1 = Hero(name="Deadpond", secret_name="<NAME>")
hero_2 = Hero(name="Spider-Boy", secret_name="<NAME>")
hero_3 = Hero(name="Rusty-Man", secret_name="<NAME>", age=48)
print("Before interacting with the database")
print("Hero 1:", hero_1)
print("Hero 2:", hero_2)
print("Hero 3:", hero_3)
with Session(engine) as session:
session.add(hero_1)
session.add(hero_2)
session.add(hero_3)
print("After adding to the session")
print("Hero 1:", hero_1)
print("Hero 2:", hero_2)
print("Hero 3:", hero_3)
session.commit()
print("After committing the session")
print("Hero 1:", hero_1)
print("Hero 2:", hero_2)
print("Hero 3:", hero_3)
print("After committing the session, show IDs")
print("Hero 1 ID:", hero_1.id)
print("Hero 2 ID:", hero_2.id)
print("Hero 3 ID:", hero_3.id)
print("After committing the session, show names")
print("Hero 1 name:", hero_1.name)
print("Hero 2 name:", hero_2.name)
print("Hero 3 name:", hero_3.name)
session.refresh(hero_1)
session.refresh(hero_2)
session.refresh(hero_3)
print("After refreshing the heroes")
print("Hero 1:", hero_1)
print("Hero 2:", hero_2)
print("Hero 3:", hero_3)
print("After the session closes")
print("Hero 1:", hero_1)
print("Hero 2:", hero_2)
print("Hero 3:", hero_3)
def main():
create_db_and_tables()
create_heroes()
if __name__ == "__main__":
main()
|
[
"sqlmodel.Session",
"sqlmodel.SQLModel.metadata.create_all",
"sqlmodel.create_engine",
"sqlmodel.Field"
] |
[((343, 379), 'sqlmodel.create_engine', 'create_engine', (['sqlite_url'], {'echo': '(True)'}), '(sqlite_url, echo=True)\n', (356, 379), False, 'from sqlmodel import Field, Session, SQLModel, create_engine\n'), ((150, 187), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (155, 187), False, 'from sqlmodel import Field, Session, SQLModel, create_engine\n'), ((414, 450), 'sqlmodel.SQLModel.metadata.create_all', 'SQLModel.metadata.create_all', (['engine'], {}), '(engine)\n', (442, 450), False, 'from sqlmodel import Field, Session, SQLModel, create_engine\n'), ((804, 819), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (811, 819), False, 'from sqlmodel import Field, Session, SQLModel, create_engine\n')]
|
from fastapi.exceptions import HTTPException
import pytest
from sqlmodel import select, Session
from sqlmodel.main import SQLModel
from sfm.routes.projects import crud
from tests.conftest import hashed_token1, hashed_token2
from sfm.models import Project, ProjectCreate, ProjectUpdate
# get_all
def test_get_all(db, session: Session):
"""test that the crud function works as expected"""
response = crud.get_all(db)
assert response is not None
assert response[0].name == "Test Project 1"
assert response[0].lead_name == "<NAME>"
assert response[0].lead_email == "<EMAIL>"
assert response[0].description == "A test project for testing"
assert response[0].location == "Strangeville"
assert response[0].repo_url == "github.com/starkEnterprises"
assert response[0].on_prem is False
assert response[0].project_auth_token_hashed == hashed_token1
assert response[1].name == "Test Project 2"
assert response[1].lead_name == "<NAME>"
assert response[1].lead_email == "<EMAIL>"
assert response[1].description == "A second test project for testing"
assert response[1].location == "Kohler"
assert response[1].repo_url == "github.com/pgaGolf"
assert response[1].on_prem is False
assert response[1].project_auth_token_hashed == hashed_token2
"""
Test that the function raises an error when there are
no projects in the table
"""
session.delete(session.get(Project, 1))
session.delete(session.get(Project, 2))
session.commit()
# SQLModel.metadata.drop_all(engine)
with pytest.raises(Exception) as ex:
crud.get_all(db)
assert ex.value.message == "Projects not found"
# get_by_id
def test_get_by_id(db):
"""test that the crud function works as expected"""
response = crud.get_by_id(db, project_id=1)
assert response is not None
assert response.name == "Test Project 1"
assert response.lead_name == "<NAME>"
assert response.lead_email == "<EMAIL>"
assert response.description == "A test project for testing"
assert response.location == "Strangeville"
assert response.repo_url == "github.com/starkEnterprises"
assert response.on_prem is False
assert response.project_auth_token_hashed == hashed_token1
"""
    Testing that the crud function raises an exception when the project
    with matching id does not exist in the DB
"""
with pytest.raises(Exception) as ex:
crud.get_by_id(db, project_id=15)
assert ex.value.message == "Project not found"
# create_project
def test_create(db):
"""Testing that the project works as expected"""
project_data = ProjectCreate(
**{
"name": "Test Project 3",
"lead_name": "<NAME>",
"lead_email": "<EMAIL>",
"description": "A test project for testing creation",
"location": "Bikini Gotham",
"repo_url": "github.com/crustyEnterprises",
"on_prem": True,
}
)
response = crud.create_project(db, project_data, admin_key="admin_key")
assert len(response) == 2
assert response[0].name == "Test Project 3"
assert response[0].lead_name == "<NAME>"
assert response[0].lead_email == "<EMAIL>"
assert response[0].description == "A test project for testing creation"
assert response[0].location == "Bikini Gotham"
assert response[0].repo_url == "github.com/crustyEnterprises"
assert response[0].on_prem is True
assert response[0].project_auth_token_hashed is not None
"""
Testing that the function raises an error if the project name already
exists in the database
"""
with pytest.raises(Exception) as ex:
response = crud.create_project(db, project_data, admin_key="admin_key")
assert ex.value.message == "Database entry already exists"
"""
Testing that the project raises an exception when the admin_key
is incorrect
"""
with pytest.raises(Exception) as ex:
crud.create_project(db, project_data, admin_key="Shmadmin_key")
assert ex.value.message == "Credentials are incorrect"
# delete_project
def test_delete_project(db):
"""Testing that the crud function works as expected"""
response = crud.delete_project(db, project_id=1, admin_key="admin_key")
assert response is True
projects = db.exec(select(Project)).all()
for project in projects:
assert project.id != 1
"""
Testing that the crud function raises an exception when the project
with matching id does not exist in the database
"""
with pytest.raises(Exception) as ex:
crud.delete_project(db, project_id=15, admin_key="admin_key")
assert ex.value.message == "Project not found"
"""
Testing that the project raises an exception when the admin_key
is incorrect
"""
with pytest.raises(Exception) as ex:
crud.delete_project(db, project_id=1, admin_key="Shmadmin_key")
assert ex.value.message == "Credentials are incorrect"
# refresh_project_key
def test_refresh_project_key(db):
"""Testing that the crud function works as expected"""
response = crud.refresh_project_key(db, project_id=1, admin_key="admin_key")
assert response is not False
assert response != "Catalyst"
# testing that refreshing key did not change project details
project_test = db.get(Project, 1)
assert project_test.name == "Test Project 1"
assert project_test.lead_name == "<NAME>"
assert project_test.lead_email == "<EMAIL>"
assert project_test.description == "A test project for testing"
assert project_test.location == "Strangeville"
assert project_test.repo_url == "github.com/starkEnterprises"
assert project_test.on_prem is False
"""
Testing that the crud function raises an exception when the project
with matching id does not exist in the database
"""
with pytest.raises(Exception) as ex:
crud.refresh_project_key(db, project_id=15, admin_key="admin_key")
assert ex.value.message == "Project not found"
"""
Testing that the project raises an exception when the admin_key
is incorrect
"""
with pytest.raises(Exception) as ex:
crud.refresh_project_key(db, project_id=1, admin_key="Shmadmin_key")
assert ex.value.message == "Credentials are incorrect"
# update_project
def test_update_project(db):
"""Testing that the project works as expected"""
update_dict = {
"name": "New Test Project 1",
"lead_name": "Strong Squid",
"repo_url": "github.com/SquidEnterprises",
}
    # vvv causes unset params to become default (exclude_unset didn't help)
updated_project = ProjectUpdate(**update_dict)
response = crud.update_project(
db, project_id=1, project_data=updated_project, admin_key="admin_key"
)
assert response is not None
assert response.name == "New Test Project 1"
assert response.lead_name == "Strong Squid"
assert response.lead_email == "<EMAIL>"
assert response.description == "A test project for testing"
assert response.location == "Strangeville"
assert response.repo_url == "github.com/SquidEnterprises"
assert response.on_prem is False
assert response.project_auth_token_hashed == hashed_token1
"""
Testing that the crud function raises an exception when the
project with matching id does not exist in the database
"""
with pytest.raises(Exception) as ex:
crud.update_project(
db,
project_id=15,
project_data="placeholder",
admin_key="admin_key",
)
assert ex.value.message == "Project not found"
"""
Testing that the project raises an exception when the admin_key
is incorrect
"""
with pytest.raises(Exception) as ex:
crud.update_project(
db,
project_id=1,
project_data="placeholder",
admin_key="Shmadmin_key",
)
assert ex.value.message == "Credentials are incorrect"
|
[
"sqlmodel.select"
] |
[((409, 425), 'sfm.routes.projects.crud.get_all', 'crud.get_all', (['db'], {}), '(db)\n', (421, 425), False, 'from sfm.routes.projects import crud\n'), ((1792, 1824), 'sfm.routes.projects.crud.get_by_id', 'crud.get_by_id', (['db'], {'project_id': '(1)'}), '(db, project_id=1)\n', (1806, 1824), False, 'from sfm.routes.projects import crud\n'), ((2644, 2891), 'sfm.models.ProjectCreate', 'ProjectCreate', ([], {}), "(**{'name': 'Test Project 3', 'lead_name': '<NAME>',\n 'lead_email': '<EMAIL>', 'description':\n 'A test project for testing creation', 'location': 'Bikini Gotham',\n 'repo_url': 'github.com/crustyEnterprises', 'on_prem': True})\n", (2657, 2891), False, 'from sfm.models import Project, ProjectCreate, ProjectUpdate\n'), ((3005, 3065), 'sfm.routes.projects.crud.create_project', 'crud.create_project', (['db', 'project_data'], {'admin_key': '"""admin_key"""'}), "(db, project_data, admin_key='admin_key')\n", (3024, 3065), False, 'from sfm.routes.projects import crud\n'), ((4236, 4296), 'sfm.routes.projects.crud.delete_project', 'crud.delete_project', (['db'], {'project_id': '(1)', 'admin_key': '"""admin_key"""'}), "(db, project_id=1, admin_key='admin_key')\n", (4255, 4296), False, 'from sfm.routes.projects import crud\n'), ((5148, 5213), 'sfm.routes.projects.crud.refresh_project_key', 'crud.refresh_project_key', (['db'], {'project_id': '(1)', 'admin_key': '"""admin_key"""'}), "(db, project_id=1, admin_key='admin_key')\n", (5172, 5213), False, 'from sfm.routes.projects import crud\n'), ((6699, 6727), 'sfm.models.ProjectUpdate', 'ProjectUpdate', ([], {}), '(**update_dict)\n', (6712, 6727), False, 'from sfm.models import Project, ProjectCreate, ProjectUpdate\n'), ((6743, 6837), 'sfm.routes.projects.crud.update_project', 'crud.update_project', (['db'], {'project_id': '(1)', 'project_data': 'updated_project', 'admin_key': '"""admin_key"""'}), "(db, project_id=1, project_data=updated_project,\n admin_key='admin_key')\n", (6762, 6837), False, 'from sfm.routes.projects import crud\n'), ((1570, 1594), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (1583, 1594), False, 'import pytest\n'), ((1610, 1626), 'sfm.routes.projects.crud.get_all', 'crud.get_all', (['db'], {}), '(db)\n', (1622, 1626), False, 'from sfm.routes.projects import crud\n'), ((2403, 2427), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (2416, 2427), False, 'import pytest\n'), ((2443, 2476), 'sfm.routes.projects.crud.get_by_id', 'crud.get_by_id', (['db'], {'project_id': '(15)'}), '(db, project_id=15)\n', (2457, 2476), False, 'from sfm.routes.projects import crud\n'), ((3657, 3681), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (3670, 3681), False, 'import pytest\n'), ((3708, 3768), 'sfm.routes.projects.crud.create_project', 'crud.create_project', (['db', 'project_data'], {'admin_key': '"""admin_key"""'}), "(db, project_data, admin_key='admin_key')\n", (3727, 3768), False, 'from sfm.routes.projects import crud\n'), ((3947, 3971), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (3960, 3971), False, 'import pytest\n'), ((3987, 4050), 'sfm.routes.projects.crud.create_project', 'crud.create_project', (['db', 'project_data'], {'admin_key': '"""Shmadmin_key"""'}), "(db, project_data, admin_key='Shmadmin_key')\n", (4006, 4050), False, 'from sfm.routes.projects import crud\n'), ((4581, 4605), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (4594, 4605), False, 'import pytest\n'), ((4621, 4682), 
'sfm.routes.projects.crud.delete_project', 'crud.delete_project', (['db'], {'project_id': '(15)', 'admin_key': '"""admin_key"""'}), "(db, project_id=15, admin_key='admin_key')\n", (4640, 4682), False, 'from sfm.routes.projects import crud\n'), ((4849, 4873), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (4862, 4873), False, 'import pytest\n'), ((4889, 4952), 'sfm.routes.projects.crud.delete_project', 'crud.delete_project', (['db'], {'project_id': '(1)', 'admin_key': '"""Shmadmin_key"""'}), "(db, project_id=1, admin_key='Shmadmin_key')\n", (4908, 4952), False, 'from sfm.routes.projects import crud\n'), ((5904, 5928), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (5917, 5928), False, 'import pytest\n'), ((5944, 6010), 'sfm.routes.projects.crud.refresh_project_key', 'crud.refresh_project_key', (['db'], {'project_id': '(15)', 'admin_key': '"""admin_key"""'}), "(db, project_id=15, admin_key='admin_key')\n", (5968, 6010), False, 'from sfm.routes.projects import crud\n'), ((6177, 6201), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (6190, 6201), False, 'import pytest\n'), ((6217, 6285), 'sfm.routes.projects.crud.refresh_project_key', 'crud.refresh_project_key', (['db'], {'project_id': '(1)', 'admin_key': '"""Shmadmin_key"""'}), "(db, project_id=1, admin_key='Shmadmin_key')\n", (6241, 6285), False, 'from sfm.routes.projects import crud\n'), ((7445, 7469), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (7458, 7469), False, 'import pytest\n'), ((7485, 7578), 'sfm.routes.projects.crud.update_project', 'crud.update_project', (['db'], {'project_id': '(15)', 'project_data': '"""placeholder"""', 'admin_key': '"""admin_key"""'}), "(db, project_id=15, project_data='placeholder',\n admin_key='admin_key')\n", (7504, 7578), False, 'from sfm.routes.projects import crud\n'), ((7800, 7824), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (7813, 7824), False, 'import pytest\n'), ((7840, 7936), 'sfm.routes.projects.crud.update_project', 'crud.update_project', (['db'], {'project_id': '(1)', 'project_data': '"""placeholder"""', 'admin_key': '"""Shmadmin_key"""'}), "(db, project_id=1, project_data='placeholder', admin_key\n ='Shmadmin_key')\n", (7859, 7936), False, 'from sfm.routes.projects import crud\n'), ((4348, 4363), 'sqlmodel.select', 'select', (['Project'], {}), '(Project)\n', (4354, 4363), False, 'from sqlmodel import select, Session\n')]
|
from datetime import datetime
try:
from humps.main import depascalize
from sqlalchemy import Column, DateTime
from sqlalchemy.orm.decl_api import declared_attr
from sqlmodel import Field, SQLModel
except ImportError:
raise RuntimeError(
"SQLModel is not installed. Please install it with `pip install sqlmodel pyhumps`"
)
class Model(SQLModel):
"""
Abstract model providing `id`, `date_created` and `date_updated` fields.
And also automatic table naming to `snake_case`.
"""
id: int = Field(primary_key=True)
date_created: datetime = Field(sa_column=Column(DateTime, default=datetime.utcnow))
date_updated: datetime = Field(sa_column=Column(DateTime, onupdate=datetime.utcnow))
@declared_attr
def __tablename__(cls):
"""
Convert Pascal class name style to `snake_case`
"""
return depascalize(cls.__name__)
|
[
"sqlmodel.Field"
] |
[((542, 565), 'sqlmodel.Field', 'Field', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (547, 565), False, 'from sqlmodel import Field, SQLModel\n'), ((886, 911), 'humps.main.depascalize', 'depascalize', (['cls.__name__'], {}), '(cls.__name__)\n', (897, 911), False, 'from humps.main import depascalize\n'), ((611, 652), 'sqlalchemy.Column', 'Column', (['DateTime'], {'default': 'datetime.utcnow'}), '(DateTime, default=datetime.utcnow)\n', (617, 652), False, 'from sqlalchemy import Column, DateTime\n'), ((699, 741), 'sqlalchemy.Column', 'Column', (['DateTime'], {'onupdate': 'datetime.utcnow'}), '(DateTime, onupdate=datetime.utcnow)\n', (705, 741), False, 'from sqlalchemy import Column, DateTime\n')]
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine.functional as F
import megengine.module as M
import official.vision.classification.resnet.model as resnet
class ASPP(M.Module):
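    """Atrous Spatial Pyramid Pooling: a 1x1 conv, three 3x3 dilated convs
    (rates 6/12/18 scaled by `dr`) and a global-average-pooling branch,
    concatenated and fused by a final 1x1 conv."""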
def __init__(self, in_channels, out_channels, dr=1):
super().__init__()
self.conv1 = M.Sequential(
M.Conv2d(
in_channels, out_channels, 1, 1, padding=0, dilation=dr, bias=False
),
M.BatchNorm2d(out_channels),
M.ReLU(),
)
self.conv2 = M.Sequential(
M.Conv2d(
in_channels,
out_channels,
3,
1,
padding=6 * dr,
dilation=6 * dr,
bias=False,
),
M.BatchNorm2d(out_channels),
M.ReLU(),
)
self.conv3 = M.Sequential(
M.Conv2d(
in_channels,
out_channels,
3,
1,
padding=12 * dr,
dilation=12 * dr,
bias=False,
),
M.BatchNorm2d(out_channels),
M.ReLU(),
)
self.conv4 = M.Sequential(
M.Conv2d(
in_channels,
out_channels,
3,
1,
padding=18 * dr,
dilation=18 * dr,
bias=False,
),
M.BatchNorm2d(out_channels),
M.ReLU(),
)
self.conv_gp = M.Sequential(
M.Conv2d(in_channels, out_channels, 1, 1, 0, bias=False),
M.BatchNorm2d(out_channels),
M.ReLU(),
)
self.conv_out = M.Sequential(
M.Conv2d(out_channels * 5, out_channels, 1, 1, padding=0, bias=False),
M.BatchNorm2d(out_channels),
M.ReLU(),
)
def forward(self, x):
conv1 = self.conv1(x)
conv31 = self.conv2(x)
conv32 = self.conv3(x)
conv33 = self.conv4(x)
gp = F.mean(x, [2, 3], True)
gp = self.conv_gp(gp)
gp = F.nn.interpolate(gp, (x.shape[2], x.shape[3]))
out = F.concat([conv1, conv31, conv32, conv33, gp], axis=1)
out = self.conv_out(out)
return out
class DeepLabV3Plus(M.Module):
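    """DeepLab v3+ head: backbone features pass through ASPP, are upsampled and
    fused with the low-level `res2` features, then projected to per-class
    logits and upsampled back to the input resolution."""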
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
self.output_stride = 16
self.sub_output_stride = self.output_stride // 4
self.num_classes = cfg.num_classes
self.aspp = ASPP(
in_channels=2048, out_channels=256, dr=16 // self.output_stride
)
self.dropout = M.Dropout(0.5)
self.upstage1 = M.Sequential(
M.Conv2d(256, 48, 1, 1, padding=1 // 2, bias=False),
M.BatchNorm2d(48),
M.ReLU(),
)
self.upstage2 = M.Sequential(
M.Conv2d(256 + 48, 256, 3, 1, padding=1, bias=False),
M.BatchNorm2d(256),
M.ReLU(),
M.Dropout(0.5),
M.Conv2d(256, 256, 3, 1, padding=1, bias=False),
M.BatchNorm2d(256),
M.ReLU(),
M.Dropout(0.1),
)
self.conv_out = M.Conv2d(256, self.num_classes, 1, 1, padding=0)
for m in self.modules():
if isinstance(m, M.Conv2d):
M.init.msra_normal_(m.weight, mode="fan_out", nonlinearity="relu")
elif isinstance(m, M.BatchNorm2d):
M.init.ones_(m.weight)
M.init.zeros_(m.bias)
self.backbone = getattr(resnet, cfg.backbone)(
replace_stride_with_dilation=[False, False, True],
pretrained=cfg.backbone_pretrained,
)
del self.backbone.fc
def forward(self, x):
layers = self.backbone.extract_features(x)
up0 = self.aspp(layers["res5"])
up0 = self.dropout(up0)
up0 = F.nn.interpolate(up0, scale_factor=self.sub_output_stride)
up1 = self.upstage1(layers["res2"])
up1 = F.concat([up0, up1], 1)
up2 = self.upstage2(up1)
out = self.conv_out(up2)
out = F.nn.interpolate(out, scale_factor=4)
return out
|
[
"megengine.module.ReLU",
"megengine.module.init.ones_",
"megengine.module.init.msra_normal_",
"megengine.functional.nn.interpolate",
"megengine.module.init.zeros_",
"megengine.module.Conv2d",
"megengine.functional.mean",
"megengine.module.Dropout",
"megengine.module.BatchNorm2d",
"megengine.functional.concat"
] |
[((2386, 2409), 'megengine.functional.mean', 'F.mean', (['x', '[2, 3]', '(True)'], {}), '(x, [2, 3], True)\n', (2392, 2409), True, 'import megengine.functional as F\n'), ((2453, 2499), 'megengine.functional.nn.interpolate', 'F.nn.interpolate', (['gp', '(x.shape[2], x.shape[3])'], {}), '(gp, (x.shape[2], x.shape[3]))\n', (2469, 2499), True, 'import megengine.functional as F\n'), ((2515, 2568), 'megengine.functional.concat', 'F.concat', (['[conv1, conv31, conv32, conv33, gp]'], {'axis': '(1)'}), '([conv1, conv31, conv32, conv33, gp], axis=1)\n', (2523, 2568), True, 'import megengine.functional as F\n'), ((3002, 3016), 'megengine.module.Dropout', 'M.Dropout', (['(0.5)'], {}), '(0.5)\n', (3011, 3016), True, 'import megengine.module as M\n'), ((3548, 3596), 'megengine.module.Conv2d', 'M.Conv2d', (['(256)', 'self.num_classes', '(1)', '(1)'], {'padding': '(0)'}), '(256, self.num_classes, 1, 1, padding=0)\n', (3556, 3596), True, 'import megengine.module as M\n'), ((4249, 4307), 'megengine.functional.nn.interpolate', 'F.nn.interpolate', (['up0'], {'scale_factor': 'self.sub_output_stride'}), '(up0, scale_factor=self.sub_output_stride)\n', (4265, 4307), True, 'import megengine.functional as F\n'), ((4367, 4390), 'megengine.functional.concat', 'F.concat', (['[up0, up1]', '(1)'], {}), '([up0, up1], 1)\n', (4375, 4390), True, 'import megengine.functional as F\n'), ((4473, 4510), 'megengine.functional.nn.interpolate', 'F.nn.interpolate', (['out'], {'scale_factor': '(4)'}), '(out, scale_factor=4)\n', (4489, 4510), True, 'import megengine.functional as F\n'), ((657, 734), 'megengine.module.Conv2d', 'M.Conv2d', (['in_channels', 'out_channels', '(1)', '(1)'], {'padding': '(0)', 'dilation': 'dr', 'bias': '(False)'}), '(in_channels, out_channels, 1, 1, padding=0, dilation=dr, bias=False)\n', (665, 734), True, 'import megengine.module as M\n'), ((778, 805), 'megengine.module.BatchNorm2d', 'M.BatchNorm2d', (['out_channels'], {}), '(out_channels)\n', (791, 805), True, 'import megengine.module as M\n'), ((819, 827), 'megengine.module.ReLU', 'M.ReLU', ([], {}), '()\n', (825, 827), True, 'import megengine.module as M\n'), ((886, 976), 'megengine.module.Conv2d', 'M.Conv2d', (['in_channels', 'out_channels', '(3)', '(1)'], {'padding': '(6 * dr)', 'dilation': '(6 * dr)', 'bias': '(False)'}), '(in_channels, out_channels, 3, 1, padding=6 * dr, dilation=6 * dr,\n bias=False)\n', (894, 976), True, 'import megengine.module as M\n'), ((1113, 1140), 'megengine.module.BatchNorm2d', 'M.BatchNorm2d', (['out_channels'], {}), '(out_channels)\n', (1126, 1140), True, 'import megengine.module as M\n'), ((1154, 1162), 'megengine.module.ReLU', 'M.ReLU', ([], {}), '()\n', (1160, 1162), True, 'import megengine.module as M\n'), ((1221, 1313), 'megengine.module.Conv2d', 'M.Conv2d', (['in_channels', 'out_channels', '(3)', '(1)'], {'padding': '(12 * dr)', 'dilation': '(12 * dr)', 'bias': '(False)'}), '(in_channels, out_channels, 3, 1, padding=12 * dr, dilation=12 * dr,\n bias=False)\n', (1229, 1313), True, 'import megengine.module as M\n'), ((1450, 1477), 'megengine.module.BatchNorm2d', 'M.BatchNorm2d', (['out_channels'], {}), '(out_channels)\n', (1463, 1477), True, 'import megengine.module as M\n'), ((1491, 1499), 'megengine.module.ReLU', 'M.ReLU', ([], {}), '()\n', (1497, 1499), True, 'import megengine.module as M\n'), ((1558, 1650), 'megengine.module.Conv2d', 'M.Conv2d', (['in_channels', 'out_channels', '(3)', '(1)'], {'padding': '(18 * dr)', 'dilation': '(18 * dr)', 'bias': '(False)'}), '(in_channels, out_channels, 3, 1, padding=18 * dr, 
dilation=18 * dr,\n bias=False)\n', (1566, 1650), True, 'import megengine.module as M\n'), ((1787, 1814), 'megengine.module.BatchNorm2d', 'M.BatchNorm2d', (['out_channels'], {}), '(out_channels)\n', (1800, 1814), True, 'import megengine.module as M\n'), ((1828, 1836), 'megengine.module.ReLU', 'M.ReLU', ([], {}), '()\n', (1834, 1836), True, 'import megengine.module as M\n'), ((1897, 1953), 'megengine.module.Conv2d', 'M.Conv2d', (['in_channels', 'out_channels', '(1)', '(1)', '(0)'], {'bias': '(False)'}), '(in_channels, out_channels, 1, 1, 0, bias=False)\n', (1905, 1953), True, 'import megengine.module as M\n'), ((1967, 1994), 'megengine.module.BatchNorm2d', 'M.BatchNorm2d', (['out_channels'], {}), '(out_channels)\n', (1980, 1994), True, 'import megengine.module as M\n'), ((2008, 2016), 'megengine.module.ReLU', 'M.ReLU', ([], {}), '()\n', (2014, 2016), True, 'import megengine.module as M\n'), ((2078, 2147), 'megengine.module.Conv2d', 'M.Conv2d', (['(out_channels * 5)', 'out_channels', '(1)', '(1)'], {'padding': '(0)', 'bias': '(False)'}), '(out_channels * 5, out_channels, 1, 1, padding=0, bias=False)\n', (2086, 2147), True, 'import megengine.module as M\n'), ((2161, 2188), 'megengine.module.BatchNorm2d', 'M.BatchNorm2d', (['out_channels'], {}), '(out_channels)\n', (2174, 2188), True, 'import megengine.module as M\n'), ((2202, 2210), 'megengine.module.ReLU', 'M.ReLU', ([], {}), '()\n', (2208, 2210), True, 'import megengine.module as M\n'), ((3068, 3119), 'megengine.module.Conv2d', 'M.Conv2d', (['(256)', '(48)', '(1)', '(1)'], {'padding': '(1 // 2)', 'bias': '(False)'}), '(256, 48, 1, 1, padding=1 // 2, bias=False)\n', (3076, 3119), True, 'import megengine.module as M\n'), ((3133, 3150), 'megengine.module.BatchNorm2d', 'M.BatchNorm2d', (['(48)'], {}), '(48)\n', (3146, 3150), True, 'import megengine.module as M\n'), ((3164, 3172), 'megengine.module.ReLU', 'M.ReLU', ([], {}), '()\n', (3170, 3172), True, 'import megengine.module as M\n'), ((3235, 3287), 'megengine.module.Conv2d', 'M.Conv2d', (['(256 + 48)', '(256)', '(3)', '(1)'], {'padding': '(1)', 'bias': '(False)'}), '(256 + 48, 256, 3, 1, padding=1, bias=False)\n', (3243, 3287), True, 'import megengine.module as M\n'), ((3301, 3319), 'megengine.module.BatchNorm2d', 'M.BatchNorm2d', (['(256)'], {}), '(256)\n', (3314, 3319), True, 'import megengine.module as M\n'), ((3333, 3341), 'megengine.module.ReLU', 'M.ReLU', ([], {}), '()\n', (3339, 3341), True, 'import megengine.module as M\n'), ((3355, 3369), 'megengine.module.Dropout', 'M.Dropout', (['(0.5)'], {}), '(0.5)\n', (3364, 3369), True, 'import megengine.module as M\n'), ((3383, 3430), 'megengine.module.Conv2d', 'M.Conv2d', (['(256)', '(256)', '(3)', '(1)'], {'padding': '(1)', 'bias': '(False)'}), '(256, 256, 3, 1, padding=1, bias=False)\n', (3391, 3430), True, 'import megengine.module as M\n'), ((3444, 3462), 'megengine.module.BatchNorm2d', 'M.BatchNorm2d', (['(256)'], {}), '(256)\n', (3457, 3462), True, 'import megengine.module as M\n'), ((3476, 3484), 'megengine.module.ReLU', 'M.ReLU', ([], {}), '()\n', (3482, 3484), True, 'import megengine.module as M\n'), ((3498, 3512), 'megengine.module.Dropout', 'M.Dropout', (['(0.1)'], {}), '(0.1)\n', (3507, 3512), True, 'import megengine.module as M\n'), ((3687, 3753), 'megengine.module.init.msra_normal_', 'M.init.msra_normal_', (['m.weight'], {'mode': '"""fan_out"""', 'nonlinearity': '"""relu"""'}), "(m.weight, mode='fan_out', nonlinearity='relu')\n", (3706, 3753), True, 'import megengine.module as M\n'), ((3817, 3839), 'megengine.module.init.ones_', 
'M.init.ones_', (['m.weight'], {}), '(m.weight)\n', (3829, 3839), True, 'import megengine.module as M\n'), ((3856, 3877), 'megengine.module.init.zeros_', 'M.init.zeros_', (['m.bias'], {}), '(m.bias)\n', (3869, 3877), True, 'import megengine.module as M\n')]
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
from helpers import MLP
import megengine._internal as mgb
import megengine.functional as F
from megengine.core import Graph
from megengine.module import Linear, Module
from megengine.optimizer import SGD
from megengine.test import assertTensorClose
def test_compile_multi_times_eager():
return # XXX: rewrite or remove this test
data = Input("data", shape=(2, 28))
label = Input("label", shape=(2,), dtype=np.int32)
mlp = MLP()
opt = SGD(mlp.parameters(requires_grad=True), lr=0.01)
pred0 = mlp(data)
pred = F.softmax(pred0)
loss = F.square_loss(pred, label.reshape(2, 1))
opt.zero_grad()
grads = opt.backward(loss)
opt.step()
f0 = compile(pred, None)
f1 = compile([pred, loss], grads, copy=False)
for _ in range(3):
data = np.random.random((2, 28)).astype(np.float32)
label = np.random.randint(0, 10, (2,)).astype(np.float32)
out0 = f0(data=data)
out1 = f1(data=data, label=label)
assertTensorClose(out0[0], out1[0])
def test_compile_multi_times_static():
return # XXX: rewrite or remove this test
with Graph() as cg:
cg.set_option("eager_evaluation", False)
data = Input("data", shape=(2, 28))
label = Input("label", shape=(2,), dtype=np.int32)
mlp = MLP()
opt = SGD(mlp.parameters(requires_grad=True), lr=0.01)
pred0 = mlp(data)
pred = F.softmax(pred0)
loss = F.square_loss(pred, label.reshape(2, 1))
opt.zero_grad()
grads = opt.backward(loss)
opt.step()
f0 = compile(pred, None)
f1 = compile([pred, loss], grads, copy=True)
data = np.random.random((2, 28)).astype(np.float32)
label = np.random.randint(0, 10, (2,)).astype(np.float32)
out0 = f0(data=data)
out1 = f1(data=data, label=label)
assertTensorClose(out0[0], out1[0])
_ = compile([pred, loss], grads, copy=False)
with pytest.raises(mgb.MegBrainError):
f0(data=data)
|
[
"megengine.test.assertTensorClose",
"megengine.core.Graph",
"megengine.functional.softmax"
] |
[((853, 858), 'helpers.MLP', 'MLP', ([], {}), '()\n', (856, 858), False, 'from helpers import MLP\n'), ((952, 968), 'megengine.functional.softmax', 'F.softmax', (['pred0'], {}), '(pred0)\n', (961, 968), True, 'import megengine.functional as F\n'), ((1395, 1430), 'megengine.test.assertTensorClose', 'assertTensorClose', (['out0[0]', 'out1[0]'], {}), '(out0[0], out1[0])\n', (1412, 1430), False, 'from megengine.test import assertTensorClose\n'), ((1528, 1535), 'megengine.core.Graph', 'Graph', ([], {}), '()\n', (1533, 1535), False, 'from megengine.core import Graph\n'), ((1710, 1715), 'helpers.MLP', 'MLP', ([], {}), '()\n', (1713, 1715), False, 'from helpers import MLP\n'), ((1821, 1837), 'megengine.functional.softmax', 'F.softmax', (['pred0'], {}), '(pred0)\n', (1830, 1837), True, 'import megengine.functional as F\n'), ((2265, 2300), 'megengine.test.assertTensorClose', 'assertTensorClose', (['out0[0]', 'out1[0]'], {}), '(out0[0], out1[0])\n', (2282, 2300), False, 'from megengine.test import assertTensorClose\n'), ((2368, 2400), 'pytest.raises', 'pytest.raises', (['mgb.MegBrainError'], {}), '(mgb.MegBrainError)\n', (2381, 2400), False, 'import pytest\n'), ((1205, 1230), 'numpy.random.random', 'np.random.random', (['(2, 28)'], {}), '((2, 28))\n', (1221, 1230), True, 'import numpy as np\n'), ((1266, 1296), 'numpy.random.randint', 'np.random.randint', (['(0)', '(10)', '(2,)'], {}), '(0, 10, (2,))\n', (1283, 1296), True, 'import numpy as np\n'), ((2075, 2100), 'numpy.random.random', 'np.random.random', (['(2, 28)'], {}), '((2, 28))\n', (2091, 2100), True, 'import numpy as np\n'), ((2136, 2166), 'numpy.random.randint', 'np.random.randint', (['(0)', '(10)', '(2,)'], {}), '(0, 10, (2,))\n', (2153, 2166), True, 'import numpy as np\n')]
|
# Creating M:N (many-to-many) links between tables: Product and Tags
# https://sqlmodel.tiangolo.com/tutorial/many-to-many/
# and following
from typing import Optional, List
from sqlmodel import Field, SQLModel, Session,\
Relationship, create_engine, select
# Association (link) table between Tag and Product
class TagProductLink(SQLModel, table=True):
tag_id: Optional[int] = Field(
default=None, foreign_key="tag.id", primary_key=True
)
product_id: Optional[int] = Field(
default=None, foreign_key="product.id", primary_key=True
)
class Tag(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str
    # Many-to-many relationship with Product
products: List["Product"] =\
Relationship(back_populates="tags", link_model=TagProductLink)
class ProductType(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str
class Product(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str
product_type: Optional[int] = Field(default=None,
foreign_key="producttype.id")
    # Many-to-many relationship with Tag
tags: List["Tag"] =\
Relationship(back_populates="products", link_model=TagProductLink)
sqlite_file_name = "database.db"
sqlite_url = f"sqlite:///{sqlite_file_name}"
engine = create_engine(sqlite_url, echo=True)
def create_db_and_tables():
SQLModel.metadata.create_all(engine)
def create_entities():
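    """Seed tags, product types and two products, linking tags through the association table."""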
tag_offerta = Tag(name="Offerta")
tag_maionese = Tag(name="Con Maionese")
tag_no_maionese = Tag(name="Senza Maionese")
tipo_panino = ProductType(name="panino")
tipo_bibita = ProductType(name="bibita")
with Session(engine) as session:
session.add(tag_offerta)
session.add(tag_maionese)
session.add(tag_no_maionese)
session.add(tipo_panino)
session.add(tipo_bibita)
session.commit()
session.refresh(tag_offerta)
session.refresh(tag_maionese)
session.refresh(tag_no_maionese)
session.refresh(tipo_panino)
session.refresh(tipo_bibita)
hamburger = Product(
name="hamburger",
product_type=tipo_panino.id,
tags=[tag_offerta, tag_maionese]
)
coke = Product(
name="Coca Cola",
product_type=tipo_bibita.id,
tags=[tag_offerta]
)
session.add(hamburger)
session.add(coke)
session.commit()
session.refresh(hamburger)
session.refresh(coke)
print("Created :", hamburger)
print("Created :", coke)
def update_burger():
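    """Swap the hamburger's "Con Maionese" tag for "Senza Maionese" through the many-to-many relationship."""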
with Session(engine) as session:
tag_no_maionese = session.exec(
select(Tag).where(Tag.name == "Senza Maionese")
).one()
tag_maionese = session.exec(
select(Tag).where(Tag.name == "Con Maionese")
).one()
hamburger = session.exec(
select(Product).where(Product.name == "hamburger")
).one()
hamburger.tags.append(tag_no_maionese)
hamburger.tags.remove(tag_maionese)
session.add(hamburger)
session.commit()
print("Updated hamburger:", hamburger.tags)
print("Updated tags:", tag_maionese.products, tag_no_maionese.products)
def select_products():
with Session(engine) as session:
statement = select(Product, ProductType).\
where(Product.product_type == ProductType.id)
results = session.exec(statement)
for product, product_type in results:
print("product:", product, "product_type:",
product_type, "tags:", product.tags)
def main():
create_db_and_tables()
create_entities()
update_burger()
# select_products()
if __name__ == "__main__":
main()
|
[
"sqlmodel.SQLModel.metadata.create_all",
"sqlmodel.Session",
"sqlmodel.Field",
"sqlmodel.select",
"sqlmodel.create_engine",
"sqlmodel.Relationship"
] |
[((1400, 1436), 'sqlmodel.create_engine', 'create_engine', (['sqlite_url'], {'echo': '(True)'}), '(sqlite_url, echo=True)\n', (1413, 1436), False, 'from sqlmodel import Field, SQLModel, Session, Relationship, create_engine, select\n'), ((392, 451), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'foreign_key': '"""tag.id"""', 'primary_key': '(True)'}), "(default=None, foreign_key='tag.id', primary_key=True)\n", (397, 451), False, 'from sqlmodel import Field, SQLModel, Session, Relationship, create_engine, select\n'), ((498, 561), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'foreign_key': '"""product.id"""', 'primary_key': '(True)'}), "(default=None, foreign_key='product.id', primary_key=True)\n", (503, 561), False, 'from sqlmodel import Field, SQLModel, Session, Relationship, create_engine, select\n'), ((635, 672), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (640, 672), False, 'from sqlmodel import Field, SQLModel, Session, Relationship, create_engine, select\n'), ((761, 823), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""tags"""', 'link_model': 'TagProductLink'}), "(back_populates='tags', link_model=TagProductLink)\n", (773, 823), False, 'from sqlmodel import Field, SQLModel, Session, Relationship, create_engine, select\n'), ((891, 928), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (896, 928), False, 'from sqlmodel import Field, SQLModel, Session, Relationship, create_engine, select\n'), ((1006, 1043), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (1011, 1043), False, 'from sqlmodel import Field, SQLModel, Session, Relationship, create_engine, select\n'), ((1092, 1141), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'foreign_key': '"""producttype.id"""'}), "(default=None, foreign_key='producttype.id')\n", (1097, 1141), False, 'from sqlmodel import Field, SQLModel, Session, Relationship, create_engine, select\n'), ((1244, 1310), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""products"""', 'link_model': 'TagProductLink'}), "(back_populates='products', link_model=TagProductLink)\n", (1256, 1310), False, 'from sqlmodel import Field, SQLModel, Session, Relationship, create_engine, select\n'), ((1471, 1507), 'sqlmodel.SQLModel.metadata.create_all', 'SQLModel.metadata.create_all', (['engine'], {}), '(engine)\n', (1499, 1507), False, 'from sqlmodel import Field, SQLModel, Session, Relationship, create_engine, select\n'), ((1763, 1778), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (1770, 1778), False, 'from sqlmodel import Field, SQLModel, Session, Relationship, create_engine, select\n'), ((2717, 2732), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (2724, 2732), False, 'from sqlmodel import Field, SQLModel, Session, Relationship, create_engine, select\n'), ((3398, 3413), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (3405, 3413), False, 'from sqlmodel import Field, SQLModel, Session, Relationship, create_engine, select\n'), ((3446, 3474), 'sqlmodel.select', 'select', (['Product', 'ProductType'], {}), '(Product, ProductType)\n', (3452, 3474), False, 'from sqlmodel import Field, SQLModel, Session, Relationship, create_engine, select\n'), ((2797, 2808), 'sqlmodel.select', 'select', (['Tag'], {}), '(Tag)\n', (2803, 2808), False, 'from sqlmodel import Field, SQLModel, Session, 
Relationship, create_engine, select\n'), ((2910, 2921), 'sqlmodel.select', 'select', (['Tag'], {}), '(Tag)\n', (2916, 2921), False, 'from sqlmodel import Field, SQLModel, Session, Relationship, create_engine, select\n'), ((3018, 3033), 'sqlmodel.select', 'select', (['Product'], {}), '(Product)\n', (3024, 3033), False, 'from sqlmodel import Field, SQLModel, Session, Relationship, create_engine, select\n')]
|
"""Add countries
Revision ID: <KEY>
Revises: 423e059e8b64
Create Date: 2022-02-12 07:51:13.003045+00:00
"""
import sqlalchemy as sa
import sqlmodel
from alembic import op
# revision identifiers, used by Alembic.
revision = "<KEY>"
down_revision = "423e059e8b64"
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"countries",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("name", sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.PrimaryKeyConstraint("id"),
)
op.add_column("applications", sa.Column("country_id", sa.Integer(), nullable=False))
op.create_foreign_key(None, "applications", "countries", ["country_id"], ["id"])
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(None, "applications", type_="foreignkey")
op.drop_column("applications", "country_id")
op.drop_table("countries")
# ### end Alembic commands ###
|
[
"sqlmodel.sql.sqltypes.AutoString"
] |
[((701, 786), 'alembic.op.create_foreign_key', 'op.create_foreign_key', (['None', '"""applications"""', '"""countries"""', "['country_id']", "['id']"], {}), "(None, 'applications', 'countries', ['country_id'], ['id']\n )\n", (722, 786), False, 'from alembic import op\n'), ((906, 966), 'alembic.op.drop_constraint', 'op.drop_constraint', (['None', '"""applications"""'], {'type_': '"""foreignkey"""'}), "(None, 'applications', type_='foreignkey')\n", (924, 966), False, 'from alembic import op\n'), ((971, 1015), 'alembic.op.drop_column', 'op.drop_column', (['"""applications"""', '"""country_id"""'], {}), "('applications', 'country_id')\n", (985, 1015), False, 'from alembic import op\n'), ((1020, 1046), 'alembic.op.drop_table', 'op.drop_table', (['"""countries"""'], {}), "('countries')\n", (1033, 1046), False, 'from alembic import op\n'), ((571, 600), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""'], {}), "('id')\n", (594, 600), True, 'import sqlalchemy as sa\n'), ((453, 465), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (463, 465), True, 'import sqlalchemy as sa\n'), ((510, 544), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (542, 544), False, 'import sqlmodel\n'), ((666, 678), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (676, 678), True, 'import sqlalchemy as sa\n')]
|
from typing import Optional
from fastapi import FastAPI
from sqlmodel import Session, SQLModel, create_engine
from .abstractions import MultipleModels, register_endpoints
class HeroBase(SQLModel):
name: str
secret_name: str
age: Optional[int] = None
class HeroRead(HeroBase):
id: int
hero_models = MultipleModels(path="/heroes/", base=HeroBase, response=HeroRead)
Hero = hero_models.table # Shim to avoid changing tests.
sqlite_file_name = "database.db"
sqlite_url = f"sqlite:///{sqlite_file_name}"
connect_args = {"check_same_thread": False}
engine = create_engine(sqlite_url, echo=True, connect_args=connect_args)
def create_db_and_tables():
SQLModel.metadata.create_all(engine)
def get_session():
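    """Yield a database session; passed to register_endpoints as the session dependency."""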
with Session(engine) as session:
yield session
app = FastAPI()
register_endpoints(app, models=hero_models, get_session=get_session)
|
[
"sqlmodel.SQLModel.metadata.create_all",
"sqlmodel.Session",
"sqlmodel.create_engine"
] |
[((579, 642), 'sqlmodel.create_engine', 'create_engine', (['sqlite_url'], {'echo': '(True)', 'connect_args': 'connect_args'}), '(sqlite_url, echo=True, connect_args=connect_args)\n', (592, 642), False, 'from sqlmodel import Session, SQLModel, create_engine\n'), ((802, 811), 'fastapi.FastAPI', 'FastAPI', ([], {}), '()\n', (809, 811), False, 'from fastapi import FastAPI\n'), ((677, 713), 'sqlmodel.SQLModel.metadata.create_all', 'SQLModel.metadata.create_all', (['engine'], {}), '(engine)\n', (705, 713), False, 'from sqlmodel import Session, SQLModel, create_engine\n'), ((744, 759), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (751, 759), False, 'from sqlmodel import Session, SQLModel, create_engine\n')]
|
# Copyright 2021 Modelyst LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from random import shuffle
from typing import Optional, cast
import pytest
from sqlalchemy.future import Engine
from sqlmodel import Session, func, select
import tests.example.entities as entities
from dbgen.core.args import Const
from dbgen.core.entity import Entity
from dbgen.core.func import Import
from dbgen.core.generator import Generator
from dbgen.core.metadata import RunEntity
from dbgen.core.node.load import Load
from dbgen.core.node.query import BaseQuery
from dbgen.core.node.transforms import PyBlock
def transform_func(x):
return f"{x}-child"
@pytest.fixture(scope='function')
def basic_generator() -> Generator:
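    """Build a minimal Query -> PyBlock -> Load generator used by the graph tests."""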
Parent = entities.Parent
Child = entities.Child
select_stmt = select(Parent.label)
query = BaseQuery.from_select_statement(select_stmt)
assert isinstance(query.hash, str)
pyblock = PyBlock(function=transform_func, inputs=[query["label"]], outputs=["newnames"])
load = Child.load(insert=True, label=pyblock["newnames"], type=Const("child_type"))
assert isinstance(load.hash, str)
gen = Generator(name="test", extract=query, transforms=[pyblock], loads=[load])
return gen
def test_basic_graph_sort(basic_generator: Generator):
"""Ensure a simple Query->PyBlock->Load is sorted correctly."""
graph = basic_generator._computational_graph()
assert len(graph) == 3
sorted_nodes = basic_generator._sort_graph()
query, transform, load = sorted_nodes
assert isinstance(query, BaseQuery)
assert isinstance(transform, PyBlock)
assert isinstance(load, Load)
def test_basic_graph_in_place(basic_generator: Generator):
"""Ensure that changes to the output of ._sort_graph() are in place and affect the generator as well."""
query, transform, load = basic_generator._sort_graph()
assert isinstance(load, Load)
load.run({transform.hash: {"newnames": ("1", "2")}})
assert load._output == basic_generator._sorted_loads()[0]._output
assert isinstance(query, BaseQuery)
query.outputs.append("test")
assert basic_generator.extract == query
assert isinstance(transform, PyBlock)
import_to_add = Import(lib="numpy", lib_alias="np")
transform.env.imports.append(import_to_add)
assert basic_generator.transforms[0] == transform
assert basic_generator.transforms[0].env.imports == [import_to_add]
def test_sorted_loads():
"""Shuffle around the loads and make sure sorted_loads still works."""
val = Const("test")
gp_load = entities.GrandParent.load(label=val, type=val)
u_load = entities.Parent.load(label=val, type=Const("uncle"), grand_parent_id=gp_load)
p_load = entities.Parent.load(label=val, type=val, grand_parent_id=gp_load)
c_load = entities.Child.load(label=val, type=val, parent_id=p_load, uncle_id=u_load)
loads = [gp_load, c_load, p_load, u_load]
for _ in range(10):
shuffle(loads)
gen = Generator(name="test", loads=loads)
assert gen._sorted_loads() == [
gp_load,
*sorted((u_load, p_load), key=lambda x: x.hash),
c_load,
]
@pytest.mark.skip
def test_no_extractor(sql_engine: Engine, raw_connection):
"""Shuffle around the loads and make sure sorted_loads still works."""
entities.Parent.metadata.create_all(sql_engine)
pyblock = PyBlock(function=transform_func, inputs=[Const("test")], outputs=["newnames"])
p_load = entities.GrandParent.load(insert=True, label=pyblock["newnames"], type=Const("gp_type"))
gen = Generator(name="test", transforms=[pyblock], loads=[p_load])
gen.run(sql_engine)
with Session(sql_engine) as session:
session = cast(Session, session)
statement = select(entities.GrandParent).where(entities.GrandParent.label == "test-child")
result = session.exec(statement)
assert result.one()
@pytest.mark.database
def test_dumb_extractor(connection, sql_engine, recreate_meta):
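    """End-to-end run: seed users, query their ids and labels, transform the
    labels, and load the result back into `new_label` through a Generator."""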
class User(Entity, table=True):
__identifying__ = {"label"}
label: Optional[str]
new_label: Optional[str] = None
User.metadata.create_all(connection)
num_users = 100
sess = Session(connection)
users = [User(label=f"user_{i}") for i in range(num_users)]
user_le = User._get_load_entity()
for user in users:
user.id = user_le._get_hash(user.dict())
sess.add(user)
count = sess.exec(select(func.count(User.id))).one()
assert count == num_users
connection.commit()
statement = select(User.id, User.label)
query = BaseQuery.from_select_statement(statement)
assert query.length(connection=connection) == num_users
pyblock = PyBlock(function=transform_func, inputs=[query["label"]])
u_load = User.load(user=query["id"], new_label=pyblock["out"])
run = RunEntity()
sess.add(run)
sess.commit()
sess.refresh(run)
gen = Generator(
name="test",
extract=query,
transforms=[pyblock],
loads=[u_load],
batch_size=10000,
)
connection.commit()
gen.run(sql_engine, sql_engine, run_id=run.id, ordering=0)
|
[
"sqlmodel.func.count",
"sqlmodel.select",
"sqlmodel.Session"
] |
[((1168, 1200), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (1182, 1200), False, 'import pytest\n'), ((1311, 1331), 'sqlmodel.select', 'select', (['Parent.label'], {}), '(Parent.label)\n', (1317, 1331), False, 'from sqlmodel import Session, func, select\n'), ((1344, 1388), 'dbgen.core.node.query.BaseQuery.from_select_statement', 'BaseQuery.from_select_statement', (['select_stmt'], {}), '(select_stmt)\n', (1375, 1388), False, 'from dbgen.core.node.query import BaseQuery\n'), ((1442, 1521), 'dbgen.core.node.transforms.PyBlock', 'PyBlock', ([], {'function': 'transform_func', 'inputs': "[query['label']]", 'outputs': "['newnames']"}), "(function=transform_func, inputs=[query['label']], outputs=['newnames'])\n", (1449, 1521), False, 'from dbgen.core.node.transforms import PyBlock\n'), ((1659, 1732), 'dbgen.core.generator.Generator', 'Generator', ([], {'name': '"""test"""', 'extract': 'query', 'transforms': '[pyblock]', 'loads': '[load]'}), "(name='test', extract=query, transforms=[pyblock], loads=[load])\n", (1668, 1732), False, 'from dbgen.core.generator import Generator\n'), ((2727, 2762), 'dbgen.core.func.Import', 'Import', ([], {'lib': '"""numpy"""', 'lib_alias': '"""np"""'}), "(lib='numpy', lib_alias='np')\n", (2733, 2762), False, 'from dbgen.core.func import Import\n'), ((3049, 3062), 'dbgen.core.args.Const', 'Const', (['"""test"""'], {}), "('test')\n", (3054, 3062), False, 'from dbgen.core.args import Const\n'), ((3077, 3123), 'tests.example.entities.GrandParent.load', 'entities.GrandParent.load', ([], {'label': 'val', 'type': 'val'}), '(label=val, type=val)\n', (3102, 3123), True, 'import tests.example.entities as entities\n'), ((3228, 3294), 'tests.example.entities.Parent.load', 'entities.Parent.load', ([], {'label': 'val', 'type': 'val', 'grand_parent_id': 'gp_load'}), '(label=val, type=val, grand_parent_id=gp_load)\n', (3248, 3294), True, 'import tests.example.entities as entities\n'), ((3308, 3383), 'tests.example.entities.Child.load', 'entities.Child.load', ([], {'label': 'val', 'type': 'val', 'parent_id': 'p_load', 'uncle_id': 'u_load'}), '(label=val, type=val, parent_id=p_load, uncle_id=u_load)\n', (3327, 3383), True, 'import tests.example.entities as entities\n'), ((3837, 3884), 'tests.example.entities.Parent.metadata.create_all', 'entities.Parent.metadata.create_all', (['sql_engine'], {}), '(sql_engine)\n', (3872, 3884), True, 'import tests.example.entities as entities\n'), ((4090, 4150), 'dbgen.core.generator.Generator', 'Generator', ([], {'name': '"""test"""', 'transforms': '[pyblock]', 'loads': '[p_load]'}), "(name='test', transforms=[pyblock], loads=[p_load])\n", (4099, 4150), False, 'from dbgen.core.generator import Generator\n'), ((4728, 4747), 'sqlmodel.Session', 'Session', (['connection'], {}), '(connection)\n', (4735, 4747), False, 'from sqlmodel import Session, func, select\n'), ((5072, 5099), 'sqlmodel.select', 'select', (['User.id', 'User.label'], {}), '(User.id, User.label)\n', (5078, 5099), False, 'from sqlmodel import Session, func, select\n'), ((5112, 5154), 'dbgen.core.node.query.BaseQuery.from_select_statement', 'BaseQuery.from_select_statement', (['statement'], {}), '(statement)\n', (5143, 5154), False, 'from dbgen.core.node.query import BaseQuery\n'), ((5229, 5286), 'dbgen.core.node.transforms.PyBlock', 'PyBlock', ([], {'function': 'transform_func', 'inputs': "[query['label']]"}), "(function=transform_func, inputs=[query['label']])\n", (5236, 5286), False, 'from dbgen.core.node.transforms import PyBlock\n'), 
((5364, 5375), 'dbgen.core.metadata.RunEntity', 'RunEntity', ([], {}), '()\n', (5373, 5375), False, 'from dbgen.core.metadata import RunEntity\n'), ((5444, 5541), 'dbgen.core.generator.Generator', 'Generator', ([], {'name': '"""test"""', 'extract': 'query', 'transforms': '[pyblock]', 'loads': '[u_load]', 'batch_size': '(10000)'}), "(name='test', extract=query, transforms=[pyblock], loads=[u_load],\n batch_size=10000)\n", (5453, 5541), False, 'from dbgen.core.generator import Generator\n'), ((3462, 3476), 'random.shuffle', 'shuffle', (['loads'], {}), '(loads)\n', (3469, 3476), False, 'from random import shuffle\n'), ((3491, 3526), 'dbgen.core.generator.Generator', 'Generator', ([], {'name': '"""test"""', 'loads': 'loads'}), "(name='test', loads=loads)\n", (3500, 3526), False, 'from dbgen.core.generator import Generator\n'), ((4185, 4204), 'sqlmodel.Session', 'Session', (['sql_engine'], {}), '(sql_engine)\n', (4192, 4204), False, 'from sqlmodel import Session, func, select\n'), ((4235, 4257), 'typing.cast', 'cast', (['Session', 'session'], {}), '(Session, session)\n', (4239, 4257), False, 'from typing import Optional, cast\n'), ((1590, 1609), 'dbgen.core.args.Const', 'Const', (['"""child_type"""'], {}), "('child_type')\n", (1595, 1609), False, 'from dbgen.core.args import Const\n'), ((3174, 3188), 'dbgen.core.args.Const', 'Const', (['"""uncle"""'], {}), "('uncle')\n", (3179, 3188), False, 'from dbgen.core.args import Const\n'), ((4062, 4078), 'dbgen.core.args.Const', 'Const', (['"""gp_type"""'], {}), "('gp_type')\n", (4067, 4078), False, 'from dbgen.core.args import Const\n'), ((3940, 3953), 'dbgen.core.args.Const', 'Const', (['"""test"""'], {}), "('test')\n", (3945, 3953), False, 'from dbgen.core.args import Const\n'), ((4278, 4306), 'sqlmodel.select', 'select', (['entities.GrandParent'], {}), '(entities.GrandParent)\n', (4284, 4306), False, 'from sqlmodel import Session, func, select\n'), ((4974, 4993), 'sqlmodel.func.count', 'func.count', (['User.id'], {}), '(User.id)\n', (4984, 4993), False, 'from sqlmodel import Session, func, select\n')]
|
# 10.07.2007, c
# last revision: 25.03.2008
from sfepy import data_dir
filename_meshes = ['/meshes/3d/cube_medium_tetra.mesh',
'/meshes/3d/cube_medium_tetra.mesh',
'/meshes/3d/cube_medium_hexa.mesh']
filename_meshes = [data_dir + name for name in filename_meshes]
all_your_bases = [1, 2, 1]
filename_mesh = None
field_1 = {
'name' : '3_displacement',
'dtype' : 'real',
'shape' : (3,),
'region' : 'Omega',
'approx_order' : None,
}
def get_pars( dim, full = False ):
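    """Return the Lamé parameters (lam, mu); with full=True, return the
    corresponding isotropic stiffness matrix in symmetric (Voigt) storage."""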
import numpy as nm
    sym = (dim + 1) * dim // 2  # number of independent components of a symmetric tensor
lam = 1e1
mu = 1e0
o = nm.array( [1.] * dim + [0.] * (sym - dim), dtype = nm.float64 )
oot = nm.outer( o, o )
if full:
return lam * oot + mu * nm.diag( o + 1.0 )
else:
return lam, mu
material_1 = {
'name' : 'solid',
'values' : {
'lam' : get_pars( 3 )[0],
'mu' : get_pars( 3 )[1],
'Dijkl' : get_pars( 3, True ),
}
}
material_2 = {
'name' : 'spring',
'values' : {
'.pars' : {'stiffness' : 1e0, 'projection' : None},
}
}
variable_1 = {
'name' : 'u',
'kind' : 'unknown field',
'field' : '3_displacement',
'order' : 0,
}
variable_2 = {
'name' : 'v',
'kind' : 'test field',
'field' : '3_displacement',
'dual' : 'u',
}
region_1000 = {
'name' : 'Omega',
'select' : 'all',
}
region_1 = {
'name' : 'Bottom',
'select' : 'vertices in (z < -0.499)',
'kind' : 'facet',
}
region_2 = {
'name' : 'Top',
'select' : 'vertices in (z > 0.499)',
'kind' : 'facet',
}
ebc_1 = {
'name' : 'Load',
'region' : 'Top',
'dofs' : {'u.2' : 0.1},
}
integral_1 = {
'name' : 'i',
'order' : 2,
}
equations_iso = {
'balance_of_forces' :
"""dw_lin_elastic_iso.i.Omega( solid.lam, solid.mu, v, u )
= dw_point_lspring.i.Bottom( spring.pars, v, u )""",
}
equations_general = {
'balance_of_forces' :
"""dw_lin_elastic.i.Omega( solid.Dijkl, v, u )
= dw_point_lspring.i.Bottom( spring.pars, v, u )""",
}
solver_0 = {
'name' : 'ls',
'kind' : 'ls.scipy_direct',
}
solver_1 = {
'name' : 'newton',
'kind' : 'nls.newton',
'i_max' : 1,
'eps_a' : 1e-10,
}
from sfepy.base.testing import TestCommon
##
# 10.07.2007, c
class Test( TestCommon ):
tests = ['test_get_solution', 'test_linear_terms']
##
# 10.07.2007, c
def from_conf( conf, options ):
return Test( conf = conf, options = options )
from_conf = staticmethod( from_conf )
##
# c: 25.03.2008, r: 25.03.2008
def test_linear_terms( self ):
ok = True
for sols in self.solutions:
ok = ok and self.compare_vectors( sols[0], sols[1],
label1 = 'isotropic',
label2 = 'general' )
return ok
##
# c: 10.07.2007, r: 25.03.2008
def test_get_solution( self ):
from sfepy.applications import solve_pde
from sfepy.base.base import IndexedStruct
import os.path as op
ok = True
self.solutions = []
for ii, approx_order in enumerate(all_your_bases):
fname = filename_meshes[ii]
self.conf.filename_mesh = fname
fields = {'field_1' : {
'name' : '3_displacement',
'dtype' : 'real',
'shape' : (3,),
'region' : 'Omega',
'approx_order' : approx_order,
}
}
self.conf.edit('fields', fields)
self.report( 'mesh: %s, base: %s' % (fname, approx_order) )
status = IndexedStruct()
self.report( 'isotropic' )
self.conf.equations = self.conf.equations_iso
problem, state1 = solve_pde(self.conf, nls_status=status,
save_results=False)
converged = status.condition == 0
ok = ok and converged
self.report( 'converged: %s' % converged )
self.report( 'general' )
self.conf.equations = self.conf.equations_general
problem, state2 = solve_pde(self.conf, nls_status=status,
save_results=False)
converged = status.condition == 0
ok = ok and converged
self.report( 'converged: %s' % converged )
self.solutions.append((state1(), state2()))
name = op.join(self.options.out_dir,
'_'.join(('test_elasticity_small_strain',
op.splitext(op.basename(fname))[0],
'%d' % approx_order))
+ '.vtk')
problem.save_state(name, state1)
## trunk = op.join( self.options.out_dir,
## op.splitext( op.basename( fname ) )[0] )
## problem.save_field_meshes( trunk )
## problem.save_regions( trunk )
return ok
|
[
"sfepy.base.base.IndexedStruct",
"sfepy.applications.solve_pde"
] |
[((616, 677), 'numpy.array', 'nm.array', (['([1.0] * dim + [0.0] * (sym - dim))'], {'dtype': 'nm.float64'}), '([1.0] * dim + [0.0] * (sym - dim), dtype=nm.float64)\n', (624, 677), True, 'import numpy as nm\n'), ((690, 704), 'numpy.outer', 'nm.outer', (['o', 'o'], {}), '(o, o)\n', (698, 704), True, 'import numpy as nm\n'), ((3744, 3759), 'sfepy.base.base.IndexedStruct', 'IndexedStruct', ([], {}), '()\n', (3757, 3759), False, 'from sfepy.base.base import IndexedStruct\n'), ((3888, 3947), 'sfepy.applications.solve_pde', 'solve_pde', (['self.conf'], {'nls_status': 'status', 'save_results': '(False)'}), '(self.conf, nls_status=status, save_results=False)\n', (3897, 3947), False, 'from sfepy.applications import solve_pde\n'), ((4253, 4312), 'sfepy.applications.solve_pde', 'solve_pde', (['self.conf'], {'nls_status': 'status', 'save_results': '(False)'}), '(self.conf, nls_status=status, save_results=False)\n', (4262, 4312), False, 'from sfepy.applications import solve_pde\n'), ((752, 768), 'numpy.diag', 'nm.diag', (['(o + 1.0)'], {}), '(o + 1.0)\n', (759, 768), True, 'import numpy as nm\n'), ((4713, 4731), 'os.path.basename', 'op.basename', (['fname'], {}), '(fname)\n', (4724, 4731), True, 'import os.path as op\n')]
|
# -*- coding: utf-8 -*-
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine.functional as F
class Matcher:
def __init__(self, thresholds, labels, allow_low_quality_matches=False):
assert len(thresholds) + 1 == len(labels), "thresholds and labels are not matched"
assert all(low <= high for (low, high) in zip(thresholds[:-1], thresholds[1:]))
thresholds.append(float("inf"))
thresholds.insert(0, -float("inf"))
self.thresholds = thresholds
self.labels = labels
self.allow_low_quality_matches = allow_low_quality_matches
def __call__(self, matrix):
"""
        matrix(tensor): A two-dimensional tensor of shape (N, M), where N is the
        number of ground-truth boxes and M is the number of anchors in detection.
"""
assert len(matrix.shape) == 2
max_scores = matrix.max(axis=0)
match_indices = F.argmax(matrix, axis=0)
# default ignore label: -1
labels = F.full_like(match_indices, -1)
for label, low, high in zip(self.labels, self.thresholds[:-1], self.thresholds[1:]):
mask = (max_scores >= low) & (max_scores < high)
labels[mask] = label
if self.allow_low_quality_matches:
mask = (matrix == F.max(matrix, axis=1, keepdims=True)).sum(axis=0) > 0
labels[mask] = 1
return match_indices, labels
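# --- Editor's usage sketch (not part of the original file) ---
# A minimal illustration of driving the Matcher above; the thresholds, labels
# and matrix size are assumed values chosen for the demo, not a real config.
if __name__ == "__main__":
    import numpy as np
    import megengine as mge
    matcher = Matcher(thresholds=[0.3, 0.7], labels=[0, -1, 1], allow_low_quality_matches=True)
    # fake IoU-like matrix: 4 ground-truth boxes x 16 anchors
    iou = mge.tensor(np.random.rand(4, 16).astype("float32"))
    match_indices, labels = matcher(iou)
    # labels: 1 = positive, 0 = negative, -1 = ignored anchors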
|
[
"megengine.functional.max",
"megengine.functional.argmax",
"megengine.functional.full_like"
] |
[((1208, 1232), 'megengine.functional.argmax', 'F.argmax', (['matrix'], {'axis': '(0)'}), '(matrix, axis=0)\n', (1216, 1232), True, 'import megengine.functional as F\n'), ((1286, 1316), 'megengine.functional.full_like', 'F.full_like', (['match_indices', '(-1)'], {}), '(match_indices, -1)\n', (1297, 1316), True, 'import megengine.functional as F\n'), ((1579, 1615), 'megengine.functional.max', 'F.max', (['matrix'], {'axis': '(1)', 'keepdims': '(True)'}), '(matrix, axis=1, keepdims=True)\n', (1584, 1615), True, 'import megengine.functional as F\n')]
|
#!/usr/bin/env python
"""
Show terms use in problem description files in the given directory.
"""
from __future__ import absolute_import
import sys
import six
sys.path.append('.')
import os
from argparse import ArgumentParser
from sfepy.base.base import output, dict_from_keys_init, ordered_iteritems
from sfepy.base.conf import ProblemConf, get_standard_keywords
from sfepy.base.ioutils import locate_files
from sfepy.discrete.equations import parse_definition
from sfepy.terms import term_table
helps = {
'counts' : 'show terms use counts only',
'unused' : 'show unused terms only',
}
def main():
parser = ArgumentParser(description=__doc__)
parser.add_argument('--version', action='version', version='%(prog)s')
parser.add_argument('-c', '--counts',
action='store_true', dest='counts',
default=False, help=helps['counts'])
parser.add_argument('-u', '--unused',
action='store_true', dest='unused',
default=False, help=helps['unused'])
parser.add_argument('directory')
options = parser.parse_args()
pdf_dir = os.path.realpath(options.directory)
required, other = get_standard_keywords()
terms_use = dict_from_keys_init(term_table.keys(), set)
for filename in locate_files('*.py', pdf_dir):
base = filename.replace(pdf_dir, '').lstrip(os.path.sep)
output('trying "%s"...' % base)
try:
conf = ProblemConf.from_file(filename, required, other,
verbose=False)
except:
output('...failed')
continue
use = conf.options.get('use_equations', 'equations')
eqs_conf = getattr(conf, use)
for key, eq_conf in six.iteritems(eqs_conf):
term_descs = parse_definition(eq_conf)
for td in term_descs:
terms_use[td.name].add(base)
output('...ok')
output('...done')
if options.unused:
output('unused terms:')
unused = [name for name in terms_use.keys()
if len(terms_use[name]) == 0]
for name in sorted(unused):
output(' ' + name)
output('total: %d' % len(unused))
else:
output('terms use:')
for name, ex_names in ordered_iteritems(terms_use):
output('%s: %d' % (name, len(ex_names)))
if not options.counts:
for ex_name in sorted(ex_names):
output(' ' + ex_name)
if __name__ == '__main__':
main()
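# Example invocations (the script name and the examples directory below are
# assumed for illustration; adjust them to the actual checkout layout):
#   python show_terms_use.py sfepy/examples
#   python show_terms_use.py --counts sfepy/examples
#   python show_terms_use.py --unused sfepy/examples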
|
[
"sfepy.terms.term_table.keys",
"sfepy.base.conf.ProblemConf.from_file",
"sfepy.base.base.ordered_iteritems",
"sfepy.base.conf.get_standard_keywords",
"sfepy.discrete.equations.parse_definition",
"sfepy.base.base.output",
"sfepy.base.ioutils.locate_files"
] |
[((159, 179), 'sys.path.append', 'sys.path.append', (['"""."""'], {}), "('.')\n", (174, 179), False, 'import sys\n'), ((623, 658), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '__doc__'}), '(description=__doc__)\n', (637, 658), False, 'from argparse import ArgumentParser\n'), ((1146, 1181), 'os.path.realpath', 'os.path.realpath', (['options.directory'], {}), '(options.directory)\n', (1162, 1181), False, 'import os\n'), ((1205, 1228), 'sfepy.base.conf.get_standard_keywords', 'get_standard_keywords', ([], {}), '()\n', (1226, 1228), False, 'from sfepy.base.conf import ProblemConf, get_standard_keywords\n'), ((1311, 1340), 'sfepy.base.ioutils.locate_files', 'locate_files', (['"""*.py"""', 'pdf_dir'], {}), "('*.py', pdf_dir)\n", (1323, 1340), False, 'from sfepy.base.ioutils import locate_files\n'), ((1967, 1984), 'sfepy.base.base.output', 'output', (['"""...done"""'], {}), "('...done')\n", (1973, 1984), False, 'from sfepy.base.base import output, dict_from_keys_init, ordered_iteritems\n'), ((1266, 1283), 'sfepy.terms.term_table.keys', 'term_table.keys', ([], {}), '()\n', (1281, 1283), False, 'from sfepy.terms import term_table\n'), ((1415, 1446), 'sfepy.base.base.output', 'output', (['(\'trying "%s"...\' % base)'], {}), '(\'trying "%s"...\' % base)\n', (1421, 1446), False, 'from sfepy.base.base import output, dict_from_keys_init, ordered_iteritems\n'), ((1783, 1806), 'six.iteritems', 'six.iteritems', (['eqs_conf'], {}), '(eqs_conf)\n', (1796, 1806), False, 'import six\n'), ((1947, 1962), 'sfepy.base.base.output', 'output', (['"""...ok"""'], {}), "('...ok')\n", (1953, 1962), False, 'from sfepy.base.base import output, dict_from_keys_init, ordered_iteritems\n'), ((2017, 2040), 'sfepy.base.base.output', 'output', (['"""unused terms:"""'], {}), "('unused terms:')\n", (2023, 2040), False, 'from sfepy.base.base import output, dict_from_keys_init, ordered_iteritems\n'), ((2272, 2292), 'sfepy.base.base.output', 'output', (['"""terms use:"""'], {}), "('terms use:')\n", (2278, 2292), False, 'from sfepy.base.base import output, dict_from_keys_init, ordered_iteritems\n'), ((2323, 2351), 'sfepy.base.base.ordered_iteritems', 'ordered_iteritems', (['terms_use'], {}), '(terms_use)\n', (2340, 2351), False, 'from sfepy.base.base import output, dict_from_keys_init, ordered_iteritems\n'), ((1480, 1543), 'sfepy.base.conf.ProblemConf.from_file', 'ProblemConf.from_file', (['filename', 'required', 'other'], {'verbose': '(False)'}), '(filename, required, other, verbose=False)\n', (1501, 1543), False, 'from sfepy.base.conf import ProblemConf, get_standard_keywords\n'), ((1833, 1858), 'sfepy.discrete.equations.parse_definition', 'parse_definition', (['eq_conf'], {}), '(eq_conf)\n', (1849, 1858), False, 'from sfepy.discrete.equations import parse_definition\n'), ((2190, 2209), 'sfepy.base.base.output', 'output', (["(' ' + name)"], {}), "(' ' + name)\n", (2196, 2209), False, 'from sfepy.base.base import output, dict_from_keys_init, ordered_iteritems\n'), ((1614, 1633), 'sfepy.base.base.output', 'output', (['"""...failed"""'], {}), "('...failed')\n", (1620, 1633), False, 'from sfepy.base.base import output, dict_from_keys_init, ordered_iteritems\n'), ((2510, 2532), 'sfepy.base.base.output', 'output', (["(' ' + ex_name)"], {}), "(' ' + ex_name)\n", (2516, 2532), False, 'from sfepy.base.base import output, dict_from_keys_init, ordered_iteritems\n')]
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine import tensor
from megengine.core.autodiff.grad import Function, Grad
from megengine.core.tensor.dtype import QuantDtypeMeta
from megengine.core.tensor.utils import make_shape_tuple
from megengine.quantization.internal_fake_quant import *
from megengine.quantization.utils import (
QuantMode,
create_qparams,
fake_quant_tensor,
lsq_forward,
tqt_forward,
)
class TQT_numpy:
def __init__(self, lowerbound, upperbound):
super().__init__()
self.lowerbound = lowerbound
self.upperbound = upperbound
def forward(self, inp, scale):
t = 2 ** scale
# t = F.maximum(t, 1e-4)
inp_scaled = inp / t
inp_clipped = np.maximum(
np.minimum(inp_scaled, self.upperbound), self.lowerbound
)
inp_rounded = np.round(inp_clipped)
inp_flq = inp_rounded * t
self.saved_tensors = (inp_scaled, inp_rounded, t)
return inp_flq
def backward(self, grad_inp_flq):
(inp_scaled, inp_rounded, t) = self.saved_tensors
mask_clip = (inp_scaled < -0.5 + self.lowerbound) + (
inp_scaled > self.upperbound + 0.5
) # mask for accumulating the gradients of |data_scaled|>L
mask_quant = np.abs(
mask_clip - 1
) # mask for accumulating the gradients with |data_scaled|<=L
grad_quant = (
grad_inp_flq * mask_quant * (inp_rounded - inp_scaled)
) # gradient within |data_scaled|<=L
grad_clip = (
grad_inp_flq * mask_clip * inp_rounded
) # gradient with | data_scaled|>L
grad_s = grad_clip.sum() + grad_quant.sum()
# dL/ds = dL/dt * t * ln(2)
grad_s = grad_s * t * np.log(2)
grad_inp = grad_inp_flq * mask_quant
return grad_inp, grad_s
def test_tqt():
g = []
def cb(grad):
g.append(grad)
x = np.random.randint(-128, 128, size=(1, 2, 3, 4)).astype("float32")
s = np.random.rand(1) - 1
g_y = np.ones(shape=(1, 2, 3, 4), dtype="float32")
n = TQT_numpy(-127, 127)
y_np = n.forward(x, s)
g_x_np, g_s_np = n.backward(g_y)
x = mge.tensor(x, dtype="float32")
s = mge.tensor(s, dtype="float32")
g_y = mge.tensor(g_y, dtype="float32")
with Grad() as grad:
grad.wrt(x, s, callback=cb)
y = tqt_forward(-127, 127, x, s)
grad(y, g_y)
g_x, g_s = g
np.testing.assert_allclose(y.numpy(), y_np, rtol=1e-5, atol=1e-5)
np.testing.assert_allclose(g_x.numpy(), g_x_np, rtol=1e-5, atol=1e-5)
np.testing.assert_allclose(g_s.numpy(), g_s_np, rtol=5e-5, atol=5e-5)
def _save_to(self, name="grad"):
def callback(grad):
setattr(self, name, grad)
return callback
class Round(Function):
def forward(self, x):
return F.round(x)
def backward(self, output_grads):
return output_grads
def fake_quant_tensor_gt(inp, scale, zero_point, qmin, qmax):
oup = Round()(inp / scale) + zero_point
oup = F.minimum(F.maximum(oup, qmin), qmax)
oup = (oup - zero_point) * scale
return oup
def test_fakequant():
qmin = -126
qmax = 129
test_dtype = QuantDtypeMeta("test_qint8", None, "int8", qmin, qmax)
def run(zero_point, scale):
qparams = create_qparams(QuantMode.ASYMMERTIC, test_dtype, scale, zero_point)
inp_data = np.random.uniform(low=-512.0, high=512.0, size=(1, 32, 32, 32))
inp = tensor(inp_data, dtype=np.float32)
# test forward
oup = fake_quant_tensor(inp, qparams).numpy()
oup_gt = fake_quant_tensor_gt(inp, scale, zero_point, qmin, qmax).numpy()
assert np.allclose(oup, oup_gt)
assert oup.shape == oup_gt.shape
# test backward
x = tensor(inp_data, dtype=np.float32)
with Grad() as grad:
grad.wrt(x, callback=_save_to(x))
y = fake_quant_tensor(x, qparams)
grad(y, tensor(F.ones_like(x)))
x1 = tensor(inp_data, dtype=np.float32)
with Grad() as grad:
grad.wrt(x1, callback=_save_to(x1))
y1 = fake_quant_tensor_gt(x1, scale, zero_point, qmin, qmax)
grad(y1, tensor(F.ones_like(x1)))
assert np.allclose(x.grad.numpy(), x1.grad.numpy())
assert make_shape_tuple(x.grad.shape) == make_shape_tuple(x1.grad.shape)
# test nan
x = F.full((1, 32, 3, 3), np.nan)
y = fake_quant_tensor(x, qparams).numpy()
assert np.isnan(y).all()
zero_point = tensor([1.0], dtype=np.float32)
scale = tensor([4.0], dtype=np.float32)
run(zero_point, scale)
zero_point = tensor(1.0 * np.ones((1, 32, 1, 1)), dtype=np.float32)
scale = tensor(4.0 * np.ones((1, 32, 1, 1)), dtype=np.float32)
run(zero_point, scale)
class LSQ_numpy:
def __init__(self, lowerbound, upperbound):
super().__init__()
self.lowerbound = lowerbound
self.upperbound = upperbound
def forward(self, inp, scale, zero_point, grad_scale):
inp_scaled = inp / scale + zero_point
inp_clipped = np.maximum(
np.minimum(inp_scaled, self.upperbound), self.lowerbound
)
inp_rounded = np.floor(inp_clipped + 0.5)
inp_flq = (inp_rounded - zero_point) * scale
self.saved_tensors = (inp_scaled, inp_rounded, scale, grad_scale)
return inp_flq
def backward(self, grad_inp_flq):
(inp_scaled, inp_rounded, scale, grad_scale) = self.saved_tensors
ind_small = inp_scaled < self.lowerbound
ind_big = inp_scaled > self.upperbound
ind_middle = np.logical_xor(ind_small, ind_big)
ind_middle = np.abs(ind_middle - 1)
grad_s = (
ind_small * self.lowerbound
+ ind_big * self.upperbound
+ ind_middle * (-inp_scaled + inp_rounded)
)
grad_s = grad_s * grad_scale * grad_inp_flq
grad_s = grad_s.sum()
grad_inp = grad_inp_flq * ind_middle
return grad_inp, grad_s
def test_lsq():
g = []
def cb(grad):
g.append(grad)
# FIXME: use random number when LSQ is fixed
# x = np.random.randint(-128, 128, size=(1, 2, 3, 4)).astype("float32")
# s = np.random.rand(1)
x = np.array(
[
[
[
[4.0, 38.0, -121.0, 38.0],
[15.0, -115.0, -112.0, 24.0],
[23.0, -65.0, 109.0, -115.0],
],
[
[-66.0, -90.0, -45.0, -101.0],
[68.0, -98.0, 108.0, -79.0],
[54.0, 63.0, -10.0, -50.0],
],
]
],
dtype="float32",
)
s = np.array([0.02918224], dtype="float32")
eps = np.array([1e-5], dtype="float32")
s = np.abs(s) if np.abs(s) > eps else eps
zero_point = np.array([1.0], dtype="float32")
grad_s = np.array([2.0], dtype="float32")
g_y = np.ones(shape=(1, 2, 3, 4), dtype="float32")
n = LSQ_numpy(-127, 127)
y_np = n.forward(x, s, zero_point, grad_s)
g_x_np, g_s_np = n.backward(g_y)
x = mge.tensor(x, dtype="float32")
s = mge.tensor(s, dtype="float32")
zero_point = mge.tensor(zero_point, dtype="float32")
grad_s = mge.tensor(grad_s, dtype="float32")
g_y = mge.tensor(g_y, dtype="float32")
with Grad() as grad:
grad.wrt(x, s, callback=cb)
y = lsq_forward(-127, 127, x, s, zero_point, grad_s)
grad(y, g_y)
g_x, g_s = g
np.testing.assert_allclose(y.numpy(), y_np, rtol=1e-7, atol=1e-7)
np.testing.assert_allclose(g_x.numpy(), g_x_np, rtol=1e-7, atol=1e-7)
np.testing.assert_allclose(g_s.numpy(), g_s_np, rtol=5e-7, atol=5e-7)
|
[
"megengine.quantization.utils.fake_quant_tensor",
"megengine.functional.maximum",
"megengine.quantization.utils.lsq_forward",
"megengine.functional.full",
"megengine.core.tensor.utils.make_shape_tuple",
"megengine.quantization.utils.create_qparams",
"megengine.functional.ones_like",
"megengine.core.autodiff.grad.Grad",
"megengine.tensor",
"megengine.quantization.utils.tqt_forward",
"megengine.core.tensor.dtype.QuantDtypeMeta",
"megengine.functional.round"
] |
[((2467, 2511), 'numpy.ones', 'np.ones', ([], {'shape': '(1, 2, 3, 4)', 'dtype': '"""float32"""'}), "(shape=(1, 2, 3, 4), dtype='float32')\n", (2474, 2511), True, 'import numpy as np\n'), ((2615, 2645), 'megengine.tensor', 'mge.tensor', (['x'], {'dtype': '"""float32"""'}), "(x, dtype='float32')\n", (2625, 2645), True, 'import megengine as mge\n'), ((2654, 2684), 'megengine.tensor', 'mge.tensor', (['s'], {'dtype': '"""float32"""'}), "(s, dtype='float32')\n", (2664, 2684), True, 'import megengine as mge\n'), ((2695, 2727), 'megengine.tensor', 'mge.tensor', (['g_y'], {'dtype': '"""float32"""'}), "(g_y, dtype='float32')\n", (2705, 2727), True, 'import megengine as mge\n'), ((3625, 3679), 'megengine.core.tensor.dtype.QuantDtypeMeta', 'QuantDtypeMeta', (['"""test_qint8"""', 'None', '"""int8"""', 'qmin', 'qmax'], {}), "('test_qint8', None, 'int8', qmin, qmax)\n", (3639, 3679), False, 'from megengine.core.tensor.dtype import QuantDtypeMeta\n'), ((4958, 4989), 'megengine.tensor', 'tensor', (['[1.0]'], {'dtype': 'np.float32'}), '([1.0], dtype=np.float32)\n', (4964, 4989), False, 'from megengine import tensor\n'), ((5002, 5033), 'megengine.tensor', 'tensor', (['[4.0]'], {'dtype': 'np.float32'}), '([4.0], dtype=np.float32)\n', (5008, 5033), False, 'from megengine import tensor\n'), ((6683, 6900), 'numpy.array', 'np.array', (['[[[[4.0, 38.0, -121.0, 38.0], [15.0, -115.0, -112.0, 24.0], [23.0, -65.0, \n 109.0, -115.0]], [[-66.0, -90.0, -45.0, -101.0], [68.0, -98.0, 108.0, -\n 79.0], [54.0, 63.0, -10.0, -50.0]]]]'], {'dtype': '"""float32"""'}), "([[[[4.0, 38.0, -121.0, 38.0], [15.0, -115.0, -112.0, 24.0], [23.0,\n -65.0, 109.0, -115.0]], [[-66.0, -90.0, -45.0, -101.0], [68.0, -98.0, \n 108.0, -79.0], [54.0, 63.0, -10.0, -50.0]]]], dtype='float32')\n", (6691, 6900), True, 'import numpy as np\n'), ((7150, 7189), 'numpy.array', 'np.array', (['[0.02918224]'], {'dtype': '"""float32"""'}), "([0.02918224], dtype='float32')\n", (7158, 7189), True, 'import numpy as np\n'), ((7200, 7234), 'numpy.array', 'np.array', (['[1e-05]'], {'dtype': '"""float32"""'}), "([1e-05], dtype='float32')\n", (7208, 7234), True, 'import numpy as np\n'), ((7297, 7329), 'numpy.array', 'np.array', (['[1.0]'], {'dtype': '"""float32"""'}), "([1.0], dtype='float32')\n", (7305, 7329), True, 'import numpy as np\n'), ((7343, 7375), 'numpy.array', 'np.array', (['[2.0]'], {'dtype': '"""float32"""'}), "([2.0], dtype='float32')\n", (7351, 7375), True, 'import numpy as np\n'), ((7387, 7431), 'numpy.ones', 'np.ones', ([], {'shape': '(1, 2, 3, 4)', 'dtype': '"""float32"""'}), "(shape=(1, 2, 3, 4), dtype='float32')\n", (7394, 7431), True, 'import numpy as np\n'), ((7555, 7585), 'megengine.tensor', 'mge.tensor', (['x'], {'dtype': '"""float32"""'}), "(x, dtype='float32')\n", (7565, 7585), True, 'import megengine as mge\n'), ((7594, 7624), 'megengine.tensor', 'mge.tensor', (['s'], {'dtype': '"""float32"""'}), "(s, dtype='float32')\n", (7604, 7624), True, 'import megengine as mge\n'), ((7642, 7681), 'megengine.tensor', 'mge.tensor', (['zero_point'], {'dtype': '"""float32"""'}), "(zero_point, dtype='float32')\n", (7652, 7681), True, 'import megengine as mge\n'), ((7695, 7730), 'megengine.tensor', 'mge.tensor', (['grad_s'], {'dtype': '"""float32"""'}), "(grad_s, dtype='float32')\n", (7705, 7730), True, 'import megengine as mge\n'), ((7742, 7774), 'megengine.tensor', 'mge.tensor', (['g_y'], {'dtype': '"""float32"""'}), "(g_y, dtype='float32')\n", (7752, 7774), True, 'import megengine as mge\n'), ((1283, 1304), 'numpy.round', 'np.round', (['inp_clipped'], 
{}), '(inp_clipped)\n', (1291, 1304), True, 'import numpy as np\n'), ((1715, 1736), 'numpy.abs', 'np.abs', (['(mask_clip - 1)'], {}), '(mask_clip - 1)\n', (1721, 1736), True, 'import numpy as np\n'), ((2435, 2452), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (2449, 2452), True, 'import numpy as np\n'), ((2737, 2743), 'megengine.core.autodiff.grad.Grad', 'Grad', ([], {}), '()\n', (2741, 2743), False, 'from megengine.core.autodiff.grad import Function, Grad\n'), ((2801, 2829), 'megengine.quantization.utils.tqt_forward', 'tqt_forward', (['(-127)', '(127)', 'x', 's'], {}), '(-127, 127, x, s)\n', (2812, 2829), False, 'from megengine.quantization.utils import QuantMode, create_qparams, fake_quant_tensor, lsq_forward, tqt_forward\n'), ((3267, 3277), 'megengine.functional.round', 'F.round', (['x'], {}), '(x)\n', (3274, 3277), True, 'import megengine.functional as F\n'), ((3473, 3493), 'megengine.functional.maximum', 'F.maximum', (['oup', 'qmin'], {}), '(oup, qmin)\n', (3482, 3493), True, 'import megengine.functional as F\n'), ((3731, 3798), 'megengine.quantization.utils.create_qparams', 'create_qparams', (['QuantMode.ASYMMERTIC', 'test_dtype', 'scale', 'zero_point'], {}), '(QuantMode.ASYMMERTIC, test_dtype, scale, zero_point)\n', (3745, 3798), False, 'from megengine.quantization.utils import QuantMode, create_qparams, fake_quant_tensor, lsq_forward, tqt_forward\n'), ((3818, 3881), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-512.0)', 'high': '(512.0)', 'size': '(1, 32, 32, 32)'}), '(low=-512.0, high=512.0, size=(1, 32, 32, 32))\n', (3835, 3881), True, 'import numpy as np\n'), ((3896, 3930), 'megengine.tensor', 'tensor', (['inp_data'], {'dtype': 'np.float32'}), '(inp_data, dtype=np.float32)\n', (3902, 3930), False, 'from megengine import tensor\n'), ((4105, 4129), 'numpy.allclose', 'np.allclose', (['oup', 'oup_gt'], {}), '(oup, oup_gt)\n', (4116, 4129), True, 'import numpy as np\n'), ((4208, 4242), 'megengine.tensor', 'tensor', (['inp_data'], {'dtype': 'np.float32'}), '(inp_data, dtype=np.float32)\n', (4214, 4242), False, 'from megengine import tensor\n'), ((4422, 4456), 'megengine.tensor', 'tensor', (['inp_data'], {'dtype': 'np.float32'}), '(inp_data, dtype=np.float32)\n', (4428, 4456), False, 'from megengine import tensor\n'), ((4827, 4856), 'megengine.functional.full', 'F.full', (['(1, 32, 3, 3)', 'np.nan'], {}), '((1, 32, 3, 3), np.nan)\n', (4833, 4856), True, 'import megengine.functional as F\n'), ((5637, 5664), 'numpy.floor', 'np.floor', (['(inp_clipped + 0.5)'], {}), '(inp_clipped + 0.5)\n', (5645, 5664), True, 'import numpy as np\n'), ((6046, 6080), 'numpy.logical_xor', 'np.logical_xor', (['ind_small', 'ind_big'], {}), '(ind_small, ind_big)\n', (6060, 6080), True, 'import numpy as np\n'), ((6102, 6124), 'numpy.abs', 'np.abs', (['(ind_middle - 1)'], {}), '(ind_middle - 1)\n', (6108, 6124), True, 'import numpy as np\n'), ((7242, 7251), 'numpy.abs', 'np.abs', (['s'], {}), '(s)\n', (7248, 7251), True, 'import numpy as np\n'), ((7784, 7790), 'megengine.core.autodiff.grad.Grad', 'Grad', ([], {}), '()\n', (7788, 7790), False, 'from megengine.core.autodiff.grad import Function, Grad\n'), ((7848, 7896), 'megengine.quantization.utils.lsq_forward', 'lsq_forward', (['(-127)', '(127)', 'x', 's', 'zero_point', 'grad_s'], {}), '(-127, 127, x, s, zero_point, grad_s)\n', (7859, 7896), False, 'from megengine.quantization.utils import QuantMode, create_qparams, fake_quant_tensor, lsq_forward, tqt_forward\n'), ((1194, 1233), 'numpy.minimum', 'np.minimum', (['inp_scaled', 
'self.upperbound'], {}), '(inp_scaled, self.upperbound)\n', (1204, 1233), True, 'import numpy as np\n'), ((2193, 2202), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (2199, 2202), True, 'import numpy as np\n'), ((2361, 2408), 'numpy.random.randint', 'np.random.randint', (['(-128)', '(128)'], {'size': '(1, 2, 3, 4)'}), '(-128, 128, size=(1, 2, 3, 4))\n', (2378, 2408), True, 'import numpy as np\n'), ((4256, 4262), 'megengine.core.autodiff.grad.Grad', 'Grad', ([], {}), '()\n', (4260, 4262), False, 'from megengine.core.autodiff.grad import Function, Grad\n'), ((4334, 4363), 'megengine.quantization.utils.fake_quant_tensor', 'fake_quant_tensor', (['x', 'qparams'], {}), '(x, qparams)\n', (4351, 4363), False, 'from megengine.quantization.utils import QuantMode, create_qparams, fake_quant_tensor, lsq_forward, tqt_forward\n'), ((4470, 4476), 'megengine.core.autodiff.grad.Grad', 'Grad', ([], {}), '()\n', (4474, 4476), False, 'from megengine.core.autodiff.grad import Function, Grad\n'), ((4729, 4759), 'megengine.core.tensor.utils.make_shape_tuple', 'make_shape_tuple', (['x.grad.shape'], {}), '(x.grad.shape)\n', (4745, 4759), False, 'from megengine.core.tensor.utils import make_shape_tuple\n'), ((4763, 4794), 'megengine.core.tensor.utils.make_shape_tuple', 'make_shape_tuple', (['x1.grad.shape'], {}), '(x1.grad.shape)\n', (4779, 4794), False, 'from megengine.core.tensor.utils import make_shape_tuple\n'), ((5092, 5114), 'numpy.ones', 'np.ones', (['(1, 32, 1, 1)'], {}), '((1, 32, 1, 1))\n', (5099, 5114), True, 'import numpy as np\n'), ((5159, 5181), 'numpy.ones', 'np.ones', (['(1, 32, 1, 1)'], {}), '((1, 32, 1, 1))\n', (5166, 5181), True, 'import numpy as np\n'), ((5548, 5587), 'numpy.minimum', 'np.minimum', (['inp_scaled', 'self.upperbound'], {}), '(inp_scaled, self.upperbound)\n', (5558, 5587), True, 'import numpy as np\n'), ((7255, 7264), 'numpy.abs', 'np.abs', (['s'], {}), '(s)\n', (7261, 7264), True, 'import numpy as np\n'), ((3968, 3999), 'megengine.quantization.utils.fake_quant_tensor', 'fake_quant_tensor', (['inp', 'qparams'], {}), '(inp, qparams)\n', (3985, 3999), False, 'from megengine.quantization.utils import QuantMode, create_qparams, fake_quant_tensor, lsq_forward, tqt_forward\n'), ((4869, 4898), 'megengine.quantization.utils.fake_quant_tensor', 'fake_quant_tensor', (['x', 'qparams'], {}), '(x, qparams)\n', (4886, 4898), False, 'from megengine.quantization.utils import QuantMode, create_qparams, fake_quant_tensor, lsq_forward, tqt_forward\n'), ((4922, 4933), 'numpy.isnan', 'np.isnan', (['y'], {}), '(y)\n', (4930, 4933), True, 'import numpy as np\n'), ((4391, 4405), 'megengine.functional.ones_like', 'F.ones_like', (['x'], {}), '(x)\n', (4402, 4405), True, 'import megengine.functional as F\n'), ((4635, 4650), 'megengine.functional.ones_like', 'F.ones_like', (['x1'], {}), '(x1)\n', (4646, 4650), True, 'import megengine.functional as F\n')]
|
from enum import Enum
from typing import TYPE_CHECKING, Optional, Set
from sqlalchemy import Column
from sqlalchemy import Enum as SQLEnum
from sqlalchemy import ForeignKey, Integer
from sqlmodel import Field, Relationship, SQLModel
from .application import Status
if TYPE_CHECKING:
from .message import Message
class Group(Enum):
EVERYONE = "Everyone"
APPLICATION_COMPLETE = "Application - Complete"
APPLICATION_INCOMPLETE = "Application - Incomplete"
STATUS_ACCEPTED = "Status - Accepted"
STATUS_DENIED = "Status - Denied"
STATUS_PENDING = "Status - Pending"
@staticmethod
def completion_states() -> Set["Group"]:
return {Group.APPLICATION_COMPLETE, Group.APPLICATION_INCOMPLETE}
@staticmethod
def statuses() -> Set["Group"]:
return {Group.STATUS_ACCEPTED, Group.STATUS_DENIED, Group.STATUS_PENDING}
def to_status(self) -> Optional[Status]:
if self == Group.STATUS_ACCEPTED:
return Status.ACCEPTED
elif self == Group.STATUS_DENIED:
return Status.REJECTED
elif self == Group.STATUS_PENDING:
return Status.PENDING
else:
return None
class RecipientBase(SQLModel):
group: Group = Field(
sa_column=Column(
SQLEnum(Group),
nullable=False,
primary_key=True,
)
)
class Recipient(RecipientBase, table=True):
__tablename__ = "recipients"
message_id: int = Field(
sa_column=Column(
Integer(),
ForeignKey("messages.id", ondelete="CASCADE"),
primary_key=True,
)
)
message: "Message" = Relationship(back_populates="recipients")
class RecipientCreate(RecipientBase):
pass
class RecipientRead(RecipientBase):
pass
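# --- Editor's sketch (not part of the original module) ---
# A quick, database-free illustration of the Group helpers defined above.
def _demo_group_helpers() -> None:
    assert Group.STATUS_ACCEPTED in Group.statuses()
    assert Group.APPLICATION_COMPLETE in Group.completion_states()
    assert Group.STATUS_ACCEPTED.to_status() is Status.ACCEPTED
    assert Group.EVERYONE.to_status() is None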
|
[
"sqlmodel.Relationship"
] |
[((1659, 1700), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""recipients"""'}), "(back_populates='recipients')\n", (1671, 1700), False, 'from sqlmodel import Field, Relationship, SQLModel\n'), ((1281, 1295), 'sqlalchemy.Enum', 'SQLEnum', (['Group'], {}), '(Group)\n', (1288, 1295), True, 'from sqlalchemy import Enum as SQLEnum\n'), ((1518, 1527), 'sqlalchemy.Integer', 'Integer', ([], {}), '()\n', (1525, 1527), False, 'from sqlalchemy import ForeignKey, Integer\n'), ((1541, 1586), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""messages.id"""'], {'ondelete': '"""CASCADE"""'}), "('messages.id', ondelete='CASCADE')\n", (1551, 1586), False, 'from sqlalchemy import ForeignKey, Integer\n')]
|
from sqlmodel import SQLModel, Field, JSON, Relationship, VARCHAR
from pydantic import BaseModel
from datetime import datetime
from typing import List, Optional
from sqlalchemy import String, Column
class ServerCatagoryLink(SQLModel, table=True):
server_id: Optional[int] = Field(
default=None, foreign_key="server.id", primary_key=True
)
catagory_id: Optional[int] = Field(
default=None, foreign_key="catagory.id", primary_key=True
)
class ServerOrganizationLink(SQLModel, table=True):
server_id: Optional[int] = Field(
default=None, foreign_key="server.id", primary_key=True
)
organization_id: Optional[int] = Field(
default=None, foreign_key="organization.id", primary_key=True
)
class Catagory(SQLModel, table=True):
id: int = Field(primary_key=True)
title: str = Field(sa_column=Column("title", String(255), unique=True))
meta_ref: str = Field(sa_column=Column("meta_ref", String(255), unique=True))
color: str
servers: List["Server"] = Relationship(
back_populates="catagories", link_model=ServerCatagoryLink
)
class SaveCatagory(SQLModel):
title: str
color: str
class Organization(SQLModel, table=True):
id: int = Field(primary_key=True)
title: str = Field(sa_column=Column("title", String(255), unique=True))
parent_id: Optional[int] = Field(foreign_key="organization.id")
ref_title: str
description: str
servers: List["Server"] = Relationship(
back_populates="organizations", link_model=ServerOrganizationLink
)
class Server(SQLModel, table=True):
id: int = Field(primary_key=True)
scheme: str = Field(default="http")
domain_name: str = Field(sa_column=Column("domain_name", String(255), unique=True))
path: Optional[str]
agency: Optional[int]
organization: Optional[str]
status: str = "LOADING"
server_log: List["ServerLog"] = Relationship(back_populates="server")
server_reports: List["ServerReport"] = Relationship(back_populates="server")
clicks: int = 0
ipaddress: Optional[str]
response_time: Optional[int] = None
last_checked: Optional[datetime]
catagories: List["Catagory"] = Relationship(
back_populates="servers", link_model=ServerCatagoryLink
)
organizations: List["Organization"] = Relationship(
back_populates="servers", link_model=ServerOrganizationLink
)
class Config:
arbitrary_types_allowed = True
class ServerLog(SQLModel, table=True):
id: int = Field(primary_key=True)
datetime: datetime
server_id: Optional[int] = Field(default=None, foreign_key="server.id")
server: Optional[Server] = Relationship(back_populates="server_log")
response_code: Optional[int]
response_time: Optional[int]
ipaddress: Optional[str]
url: str
error: Optional[str]
class ServerReport(SQLModel, table=True):
id: int = Field(primary_key=True)
datetime: datetime
server_id: Optional[int] = Field(default=None, foreign_key="server.id")
server: Optional[Server] = Relationship(back_populates="server_reports")
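# --- Editor's sketch (hypothetical in-memory engine, not part of the original file) ---
# Shows one way the schema above could be created and a server linked to a catagory.
def _demo_create_schema() -> None:
    from sqlmodel import Session, create_engine
    engine = create_engine("sqlite://")  # assumed in-memory database for the demo
    SQLModel.metadata.create_all(engine)
    with Session(engine) as session:
        cat = Catagory(title="Mirrors", meta_ref="mirrors", color="#00aa00")
        srv = Server(domain_name="example.org")
        srv.catagories.append(cat)
        session.add(srv)
        session.commit()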
|
[
"sqlmodel.Field",
"sqlmodel.Relationship"
] |
[((280, 342), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'foreign_key': '"""server.id"""', 'primary_key': '(True)'}), "(default=None, foreign_key='server.id', primary_key=True)\n", (285, 342), False, 'from sqlmodel import SQLModel, Field, JSON, Relationship, VARCHAR\n'), ((390, 454), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'foreign_key': '"""catagory.id"""', 'primary_key': '(True)'}), "(default=None, foreign_key='catagory.id', primary_key=True)\n", (395, 454), False, 'from sqlmodel import SQLModel, Field, JSON, Relationship, VARCHAR\n'), ((554, 616), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'foreign_key': '"""server.id"""', 'primary_key': '(True)'}), "(default=None, foreign_key='server.id', primary_key=True)\n", (559, 616), False, 'from sqlmodel import SQLModel, Field, JSON, Relationship, VARCHAR\n'), ((668, 736), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'foreign_key': '"""organization.id"""', 'primary_key': '(True)'}), "(default=None, foreign_key='organization.id', primary_key=True)\n", (673, 736), False, 'from sqlmodel import SQLModel, Field, JSON, Relationship, VARCHAR\n'), ((805, 828), 'sqlmodel.Field', 'Field', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (810, 828), False, 'from sqlmodel import SQLModel, Field, JSON, Relationship, VARCHAR\n'), ((1032, 1104), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""catagories"""', 'link_model': 'ServerCatagoryLink'}), "(back_populates='catagories', link_model=ServerCatagoryLink)\n", (1044, 1104), False, 'from sqlmodel import SQLModel, Field, JSON, Relationship, VARCHAR\n'), ((1239, 1262), 'sqlmodel.Field', 'Field', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (1244, 1262), False, 'from sqlmodel import SQLModel, Field, JSON, Relationship, VARCHAR\n'), ((1370, 1406), 'sqlmodel.Field', 'Field', ([], {'foreign_key': '"""organization.id"""'}), "(foreign_key='organization.id')\n", (1375, 1406), False, 'from sqlmodel import SQLModel, Field, JSON, Relationship, VARCHAR\n'), ((1477, 1556), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""organizations"""', 'link_model': 'ServerOrganizationLink'}), "(back_populates='organizations', link_model=ServerOrganizationLink)\n", (1489, 1556), False, 'from sqlmodel import SQLModel, Field, JSON, Relationship, VARCHAR\n'), ((1623, 1646), 'sqlmodel.Field', 'Field', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (1628, 1646), False, 'from sqlmodel import SQLModel, Field, JSON, Relationship, VARCHAR\n'), ((1665, 1686), 'sqlmodel.Field', 'Field', ([], {'default': '"""http"""'}), "(default='http')\n", (1670, 1686), False, 'from sqlmodel import SQLModel, Field, JSON, Relationship, VARCHAR\n'), ((1921, 1958), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""server"""'}), "(back_populates='server')\n", (1933, 1958), False, 'from sqlmodel import SQLModel, Field, JSON, Relationship, VARCHAR\n'), ((2002, 2039), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""server"""'}), "(back_populates='server')\n", (2014, 2039), False, 'from sqlmodel import SQLModel, Field, JSON, Relationship, VARCHAR\n'), ((2201, 2270), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""servers"""', 'link_model': 'ServerCatagoryLink'}), "(back_populates='servers', link_model=ServerCatagoryLink)\n", (2213, 2270), False, 'from sqlmodel import SQLModel, Field, JSON, Relationship, VARCHAR\n'), ((2327, 2400), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': 
'"""servers"""', 'link_model': 'ServerOrganizationLink'}), "(back_populates='servers', link_model=ServerOrganizationLink)\n", (2339, 2400), False, 'from sqlmodel import SQLModel, Field, JSON, Relationship, VARCHAR\n'), ((2528, 2551), 'sqlmodel.Field', 'Field', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (2533, 2551), False, 'from sqlmodel import SQLModel, Field, JSON, Relationship, VARCHAR\n'), ((2606, 2650), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'foreign_key': '"""server.id"""'}), "(default=None, foreign_key='server.id')\n", (2611, 2650), False, 'from sqlmodel import SQLModel, Field, JSON, Relationship, VARCHAR\n'), ((2682, 2723), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""server_log"""'}), "(back_populates='server_log')\n", (2694, 2723), False, 'from sqlmodel import SQLModel, Field, JSON, Relationship, VARCHAR\n'), ((2916, 2939), 'sqlmodel.Field', 'Field', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (2921, 2939), False, 'from sqlmodel import SQLModel, Field, JSON, Relationship, VARCHAR\n'), ((2994, 3038), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'foreign_key': '"""server.id"""'}), "(default=None, foreign_key='server.id')\n", (2999, 3038), False, 'from sqlmodel import SQLModel, Field, JSON, Relationship, VARCHAR\n'), ((3070, 3115), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""server_reports"""'}), "(back_populates='server_reports')\n", (3082, 3115), False, 'from sqlmodel import SQLModel, Field, JSON, Relationship, VARCHAR\n'), ((878, 889), 'sqlalchemy.String', 'String', (['(255)'], {}), '(255)\n', (884, 889), False, 'from sqlalchemy import String, Column\n'), ((960, 971), 'sqlalchemy.String', 'String', (['(255)'], {}), '(255)\n', (966, 971), False, 'from sqlalchemy import String, Column\n'), ((1312, 1323), 'sqlalchemy.String', 'String', (['(255)'], {}), '(255)\n', (1318, 1323), False, 'from sqlalchemy import String, Column\n'), ((1748, 1759), 'sqlalchemy.String', 'String', (['(255)'], {}), '(255)\n', (1754, 1759), False, 'from sqlalchemy import String, Column\n')]
|
import typing as t
if t.TYPE_CHECKING:
from .other import DB_AccessToken, DB_APIKey
from .discussions import DB_Discussion
from datetime import datetime
from sqlmodel import SQLModel, Field, Relationship, Column, JSON
from ..extensions.tags import DB_Tag, DB_TagUser
class DB_User(SQLModel, table=True):
__tablename__ = 'users'
id: t.Optional[int] = Field(default=None, primary_key=True)
"""The ID of the user. This is handled by the database."""
username: str = Field(max_length=100, sa_column_kwargs={"unique": True})
"""The user's username."""
email: str = Field(max_length=150, sa_column_kwargs={"unique": True})
"""The user's E-mail address."""
is_email_confirmed: bool = Field(default=False)
"""Whether or not the user confirmed their E-mail address."""
password: str = Field(max_length=100)
"""The user's password (<PASSWORD>)."""
    avatar_url: t.Optional[str] = Field(max_length=100)
    """The file name of the user's avatar. Avatars are located in the `public/assets/avatars` directory of your forum root."""
preferences: t.Dict[str, bool] = Field(sa_column=Column(JSON), default={"notify_discussionRenamed_alert": True,"notify_postLiked_alert": True,"notify_discussionLocked_alert": True,"notify_postMentioned_alert": True,"notify_postMentioned_email": False,"notify_userMentioned_alert": True,"notify_userMentioned_email": False,"notify_newPost_alert": True, "notify_newPost_email": True, "notify_userSuspended_alert": True, "notify_userUnsuspended_alert": True, "followAfterReply": True, "discloseOnline": True, "indexProfile": True, "locale": None })
"""The user's preferences (e. g.: for notifications)."""
joined_at: t.Optional[datetime] = Field(default=None)
"""When did the user join the forum."""
last_seen_at: t.Optional[datetime] = Field(default=None)
"""When was the user last seen at."""
marked_all_as_read_at: t.Optional[datetime] = Field(default=None)
"""When did the user mark all discussions as read."""
read_notifications_at: t.Optional[datetime] = Field(default=None)
"""When did the user read their notifications."""
discussion_count: int = Field(default=0)
"""The user's discussion count."""
comment_count: int = Field(default=0)
"""The user's comment (post) count."""
access_tokens: t.List['DB_AccessToken'] = Relationship(back_populates='user')
"""List of access tokens belonging to this user."""
api_keys: t.List['DB_APIKey'] = Relationship(back_populates='user')
"""List of API keys that perform actions on behalf of this user."""
discussions: t.List['DB_Discussion'] = Relationship(back_populates='author')
"""List of discussions that this user made."""
tags: t.List['DB_Tag'] = Relationship(back_populates='users', link_model=DB_TagUser)
"""Tags that have relationship with this user."""
|
[
"sqlmodel.Relationship",
"sqlmodel.Field",
"sqlmodel.Column"
] |
[((372, 409), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (377, 409), False, 'from sqlmodel import SQLModel, Field, Relationship, Column, JSON\n'), ((494, 550), 'sqlmodel.Field', 'Field', ([], {'max_length': '(100)', 'sa_column_kwargs': "{'unique': True}"}), "(max_length=100, sa_column_kwargs={'unique': True})\n", (499, 550), False, 'from sqlmodel import SQLModel, Field, Relationship, Column, JSON\n'), ((600, 656), 'sqlmodel.Field', 'Field', ([], {'max_length': '(150)', 'sa_column_kwargs': "{'unique': True}"}), "(max_length=150, sa_column_kwargs={'unique': True})\n", (605, 656), False, 'from sqlmodel import SQLModel, Field, Relationship, Column, JSON\n'), ((725, 745), 'sqlmodel.Field', 'Field', ([], {'default': '(False)'}), '(default=False)\n', (730, 745), False, 'from sqlmodel import SQLModel, Field, Relationship, Column, JSON\n'), ((832, 853), 'sqlmodel.Field', 'Field', ([], {'max_length': '(100)'}), '(max_length=100)\n', (837, 853), False, 'from sqlmodel import SQLModel, Field, Relationship, Column, JSON\n'), ((933, 954), 'sqlmodel.Field', 'Field', ([], {'max_length': '(100)'}), '(max_length=100)\n', (938, 954), False, 'from sqlmodel import SQLModel, Field, Relationship, Column, JSON\n'), ((1728, 1747), 'sqlmodel.Field', 'Field', ([], {'default': 'None'}), '(default=None)\n', (1733, 1747), False, 'from sqlmodel import SQLModel, Field, Relationship, Column, JSON\n'), ((1833, 1852), 'sqlmodel.Field', 'Field', ([], {'default': 'None'}), '(default=None)\n', (1838, 1852), False, 'from sqlmodel import SQLModel, Field, Relationship, Column, JSON\n'), ((1945, 1964), 'sqlmodel.Field', 'Field', ([], {'default': 'None'}), '(default=None)\n', (1950, 1964), False, 'from sqlmodel import SQLModel, Field, Relationship, Column, JSON\n'), ((2073, 2092), 'sqlmodel.Field', 'Field', ([], {'default': 'None'}), '(default=None)\n', (2078, 2092), False, 'from sqlmodel import SQLModel, Field, Relationship, Column, JSON\n'), ((2176, 2192), 'sqlmodel.Field', 'Field', ([], {'default': '(0)'}), '(default=0)\n', (2181, 2192), False, 'from sqlmodel import SQLModel, Field, Relationship, Column, JSON\n'), ((2257, 2273), 'sqlmodel.Field', 'Field', ([], {'default': '(0)'}), '(default=0)\n', (2262, 2273), False, 'from sqlmodel import SQLModel, Field, Relationship, Column, JSON\n'), ((2364, 2399), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""user"""'}), "(back_populates='user')\n", (2376, 2399), False, 'from sqlmodel import SQLModel, Field, Relationship, Column, JSON\n'), ((2492, 2527), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""user"""'}), "(back_populates='user')\n", (2504, 2527), False, 'from sqlmodel import SQLModel, Field, Relationship, Column, JSON\n'), ((2643, 2680), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""author"""'}), "(back_populates='author')\n", (2655, 2680), False, 'from sqlmodel import SQLModel, Field, Relationship, Column, JSON\n'), ((2762, 2821), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""users"""', 'link_model': 'DB_TagUser'}), "(back_populates='users', link_model=DB_TagUser)\n", (2774, 2821), False, 'from sqlmodel import SQLModel, Field, Relationship, Column, JSON\n'), ((1131, 1143), 'sqlmodel.Column', 'Column', (['JSON'], {}), '(JSON)\n', (1137, 1143), False, 'from sqlmodel import SQLModel, Field, Relationship, Column, JSON\n')]
|
from datetime import datetime
from sqlmodel import Session, SQLModel, create_engine, text
import sqlite3
database_loc = "backend/database.sqlite"
con_str = f"sqlite:///{database_loc}"
engine = create_engine(con_str, echo=True)
sqlite3_engine = sqlite3.connect(f"{database_loc}")
def get_session():
session = Session(engine)
return session
def create_db():
SQLModel.metadata.create_all(engine)
def execute_sample_sql(session):
"""Read sample sql database and import it."""
with open("backend/tests/sample.sql") as f:
content = f.read()
queries = filter(None, content.split(";\n"))
queries = [text(query) for query in queries]
for query in queries:
session.exec(query)
session.commit()
session.expire_all()
session = Session(engine)
tags_metadata = [
{
"name": "user",
"description": "Operations with users",
},
{
"name": "epic",
"description": "operations with epics",
},
{
"name": "epic_area",
"description": "operations with epic areas",
},
{
"name": "team",
"description": "operations with teams",
},
{
"name": "sponsor",
"description": "operations with sponsors",
},
{
"name": "client",
"description": "operations with clients",
},
{
"name": "forecast",
"description": "operations with forecasts",
},
{
"name": "rate",
"description": "operations with rates",
},
{
"name": "timelog",
"description": "operations with timelogs",
},
]
def string_to_datetime(date_string):
date = datetime.strptime(date_string, "%Y-%m-%d %H:%M")
return date
def string_to_datetime_hm(date_string):
date = datetime.strptime(date_string, "%H:%M")
return date
def string_to_datetime_GMT(date_string):
date = datetime.strptime(date_string, "%a %b %d %Y %H:%M:%S %Z%z")
return date
def string_to_datetime_work(date_string):
date = datetime.strptime(date_string, "%Y-%m-%dT%H:%M:%S.%fZ")
return date
def datetime_to_string(date_date):
date_string = date_date.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
return date_string
def time_period(time_of_start, time_of_end):
starting_time = string_to_datetime_work(time_of_start)
ending_time = string_to_datetime_work(time_of_end)
working_time = ending_time - starting_time
return working_time
def date_str_to_date(date: str):
date_date = datetime.strptime(date, "%Y-%m-%d").date()
return date_date
far_date = date_str_to_date("9999-12-31")
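# --- Editor's usage sketch (assumed timestamps, not part of the original file) ---
# Demonstrates the datetime helpers above with the "%Y-%m-%dT%H:%M:%S.%fZ" format
# that string_to_datetime_work() and time_period() expect.
def _demo_time_helpers() -> None:
    start = "2023-01-02T09:00:00.000Z"
    end = "2023-01-02T17:30:00.000Z"
    print(time_period(start, end))  # -> 8:30:00 (a datetime.timedelta)
    print(datetime_to_string(string_to_datetime_work(start)))  # -> 2023-01-02T09:00:00.000000Z
    print(far_date)  # -> 9999-12-31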
|
[
"sqlmodel.SQLModel.metadata.create_all",
"sqlmodel.Session",
"sqlmodel.text",
"sqlmodel.create_engine"
] |
[((195, 228), 'sqlmodel.create_engine', 'create_engine', (['con_str'], {'echo': '(True)'}), '(con_str, echo=True)\n', (208, 228), False, 'from sqlmodel import Session, SQLModel, create_engine, text\n'), ((246, 280), 'sqlite3.connect', 'sqlite3.connect', (['f"""{database_loc}"""'], {}), "(f'{database_loc}')\n", (261, 280), False, 'import sqlite3\n'), ((784, 799), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (791, 799), False, 'from sqlmodel import Session, SQLModel, create_engine, text\n'), ((316, 331), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (323, 331), False, 'from sqlmodel import Session, SQLModel, create_engine, text\n'), ((374, 410), 'sqlmodel.SQLModel.metadata.create_all', 'SQLModel.metadata.create_all', (['engine'], {}), '(engine)\n', (402, 410), False, 'from sqlmodel import Session, SQLModel, create_engine, text\n'), ((1670, 1718), 'datetime.datetime.strptime', 'datetime.strptime', (['date_string', '"""%Y-%m-%d %H:%M"""'], {}), "(date_string, '%Y-%m-%d %H:%M')\n", (1687, 1718), False, 'from datetime import datetime\n'), ((1788, 1827), 'datetime.datetime.strptime', 'datetime.strptime', (['date_string', '"""%H:%M"""'], {}), "(date_string, '%H:%M')\n", (1805, 1827), False, 'from datetime import datetime\n'), ((1898, 1957), 'datetime.datetime.strptime', 'datetime.strptime', (['date_string', '"""%a %b %d %Y %H:%M:%S %Z%z"""'], {}), "(date_string, '%a %b %d %Y %H:%M:%S %Z%z')\n", (1915, 1957), False, 'from datetime import datetime\n'), ((2029, 2084), 'datetime.datetime.strptime', 'datetime.strptime', (['date_string', '"""%Y-%m-%dT%H:%M:%S.%fZ"""'], {}), "(date_string, '%Y-%m-%dT%H:%M:%S.%fZ')\n", (2046, 2084), False, 'from datetime import datetime\n'), ((636, 647), 'sqlmodel.text', 'text', (['query'], {}), '(query)\n', (640, 647), False, 'from sqlmodel import Session, SQLModel, create_engine, text\n'), ((2506, 2541), 'datetime.datetime.strptime', 'datetime.strptime', (['date', '"""%Y-%m-%d"""'], {}), "(date, '%Y-%m-%d')\n", (2523, 2541), False, 'from datetime import datetime\n')]
|
from __future__ import annotations
import inspect
from functools import wraps
from typing import Any, List, Type, TypeVar
from fastapi.encoders import jsonable_encoder
from sqlmodel import SQLModel, select
from sqlmodel.ext.asyncio.session import AsyncSession
Self = TypeVar("Self", bound="Base")
class InvalidTable(RuntimeError):
"""Raised when calling a method coupled to SQLAlchemy operations.
It should be called only by SQLModel objects that are tables.
"""
def is_table(cls: Type[Self]) -> bool:
base_is_table = False
for base in cls.__bases__:
config = getattr(base, "__config__")
if config and getattr(config, "table", False):
base_is_table = True
break
return getattr(cls.__config__, "table", False) and not base_is_table
def validate_table(func):
@wraps(func)
def wrapper(self, *args, **kwargs):
cls = self if inspect.isclass(self) else self.__class__
if not is_table(cls):
raise InvalidTable(
f'"{cls.__name__}" is not a table. '
"Add the class parameter `table=True` or don't use with this object."
)
return func(self, *args, **kwargs)
return wrapper
class Base(SQLModel):
@classmethod
@validate_table
async def get(
cls: Type[Self], session: AsyncSession, *args: Any, **kwargs: Any
) -> Self:
result = await session.execute(select(cls).filter(*args).filter_by(**kwargs))
return result.scalars().first()
@classmethod
@validate_table
async def get_multi(
cls: Type[Self],
session: AsyncSession,
*args,
offset: int = 0,
limit: int = 100,
**kwargs,
) -> List[Self]:
result = await session.execute(
select(cls).filter(*args).filter_by(**kwargs).offset(offset).limit(limit)
)
return result.scalars().all()
@classmethod
@validate_table
async def create(cls: Type[Self], session: AsyncSession, **kwargs: Any) -> Self:
db_obj = cls(**kwargs)
session.add(db_obj)
await session.commit()
return db_obj
@validate_table
async def update(self: Self, session: AsyncSession, **kwargs: Any) -> Self:
obj_data = jsonable_encoder(self)
for field in obj_data:
if field in kwargs:
setattr(self, field, kwargs[field])
session.add(self)
await session.commit()
await session.refresh(self)
return self
@classmethod
@validate_table
async def delete(
cls: Type[Self], session: AsyncSession, *args: Any, **kwargs: Any
) -> Self:
db_obj = await cls.get(session, *args, **kwargs)
await session.delete(db_obj)
await session.commit()
return db_obj
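# --- Editor's usage sketch (hypothetical model, not part of the original module) ---
# Illustrates how a concrete table model plugs into the Base helpers above; the
# Hero model and the calling coroutine are assumptions made for the demo.
from typing import Optional
from sqlmodel import Field
class Hero(Base, table=True):
    id: Optional[int] = Field(default=None, primary_key=True)
    name: str
async def _demo_crud(session: AsyncSession) -> None:
    await Hero.create(session, name="Midnight")
    fetched = await Hero.get(session, name="Midnight")
    await fetched.update(session, name="Dawn")
    await Hero.delete(session, name="Dawn")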
|
[
"sqlmodel.select"
] |
[((270, 299), 'typing.TypeVar', 'TypeVar', (['"""Self"""'], {'bound': '"""Base"""'}), "('Self', bound='Base')\n", (277, 299), False, 'from typing import Any, List, Type, TypeVar\n'), ((836, 847), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (841, 847), False, 'from functools import wraps\n'), ((2278, 2300), 'fastapi.encoders.jsonable_encoder', 'jsonable_encoder', (['self'], {}), '(self)\n', (2294, 2300), False, 'from fastapi.encoders import jsonable_encoder\n'), ((910, 931), 'inspect.isclass', 'inspect.isclass', (['self'], {}), '(self)\n', (925, 931), False, 'import inspect\n'), ((1438, 1449), 'sqlmodel.select', 'select', (['cls'], {}), '(cls)\n', (1444, 1449), False, 'from sqlmodel import SQLModel, select\n'), ((1801, 1812), 'sqlmodel.select', 'select', (['cls'], {}), '(cls)\n', (1807, 1812), False, 'from sqlmodel import SQLModel, select\n')]
|
import megengine as mge
import megengine.functional as F
from megengine.core import tensor
from layers.nms import gpu_nms
from config import config
from det_opr.bbox_opr import bbox_transform_inv_opr, clip_boxes_opr, \
filter_boxes_opr
def find_top_rpn_proposals(is_train, rpn_bbox_offsets_list, rpn_cls_prob_list,
all_anchors_list, im_info):
prev_nms_top_n = config.train_prev_nms_top_n \
if is_train else config.test_prev_nms_top_n
post_nms_top_n = config.train_post_nms_top_n \
if is_train else config.test_post_nms_top_n
batch_per_gpu = config.batch_per_gpu if is_train else 1
nms_threshold = config.rpn_nms_threshold
box_min_size = config.rpn_min_box_size
bbox_normalize_targets = config.rpn_bbox_normalize_targets
bbox_normalize_means = config.bbox_normalize_means
bbox_normalize_stds = config.bbox_normalize_stds
list_size = len(rpn_bbox_offsets_list)
return_rois = []
return_probs = []
for bid in range(batch_per_gpu):
batch_proposals_list = []
batch_probs_list = []
for l in range(list_size):
# get proposals and probs
offsets = rpn_bbox_offsets_list[l][bid] \
.dimshuffle(1, 2, 0).reshape(-1, 4)
if bbox_normalize_targets:
std_opr = tensor(config.bbox_normalize_stds[None, :])
mean_opr = tensor(config.bbox_normalize_means[None, :])
                # de-normalize the predicted offsets before decoding the boxes
                offsets = offsets * std_opr
                offsets = offsets + mean_opr
all_anchors = all_anchors_list[l]
proposals = bbox_transform_inv_opr(all_anchors, offsets)
if config.anchor_within_border:
proposals = clip_boxes_opr(proposals, im_info[bid, :])
probs = rpn_cls_prob_list[l][bid] \
.dimshuffle(1,2,0).reshape(-1, 2)
probs = F.softmax(probs)[:, 1]
# gather the proposals and probs
batch_proposals_list.append(proposals)
batch_probs_list.append(probs)
batch_proposals = F.concat(batch_proposals_list, axis=0)
batch_probs = F.concat(batch_probs_list, axis=0)
# filter the zero boxes.
batch_keep_mask = filter_boxes_opr(
batch_proposals, box_min_size * im_info[bid, 2])
batch_probs = batch_probs * batch_keep_mask
# prev_nms_top_n
num_proposals = F.minimum(prev_nms_top_n, batch_probs.shapeof()[0])
batch_probs, idx = F.argsort(batch_probs, descending=True)
batch_probs = batch_probs[:num_proposals].reshape(-1,1)
topk_idx = idx[:num_proposals].reshape(-1)
batch_proposals = batch_proposals.ai[topk_idx]
batch_rois = F.concat([batch_proposals, batch_probs], axis=1)
# For each image, run a total-level NMS, and choose topk results.
keep_inds = gpu_nms(batch_rois, nms_threshold, post_nms_top_n)
batch_rois = batch_rois.ai[keep_inds]
batch_probs = batch_rois[:, -1]
# cons the rois
batch_inds = mge.ones((batch_rois.shapeof()[0], 1)) * bid
batch_rois = F.concat([batch_inds, batch_rois[:, :-1]], axis=1)
return_rois.append(batch_rois)
return_probs.append(batch_probs)
if batch_per_gpu == 1:
return batch_rois, batch_probs
else:
concated_rois = F.concat(return_rois, axis=0)
concated_probs = F.concat(return_probs, axis=0)
return concated_rois, concated_probs
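# A minimal usage sketch (hypothetical caller, assuming one tensor per FPN level
# and an `im_info` row of (height, width, scale) per image; names below are
# illustrative assumptions only):
#
#   rois, rpn_probs = find_top_rpn_proposals(
#       is_train=True,
#       rpn_bbox_offsets_list=level_bbox_offsets,  # each (N, 4*A, H, W)
#       rpn_cls_prob_list=level_cls_probs,         # each (N, 2*A, H, W)
#       all_anchors_list=level_anchors,            # each (H*W*A, 4)
#       im_info=im_info,
#   )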
|
[
"megengine.functional.argsort",
"megengine.functional.concat",
"megengine.functional.softmax",
"megengine.core.tensor"
] |
[((2080, 2118), 'megengine.functional.concat', 'F.concat', (['batch_proposals_list'], {'axis': '(0)'}), '(batch_proposals_list, axis=0)\n', (2088, 2118), True, 'import megengine.functional as F\n'), ((2141, 2175), 'megengine.functional.concat', 'F.concat', (['batch_probs_list'], {'axis': '(0)'}), '(batch_probs_list, axis=0)\n', (2149, 2175), True, 'import megengine.functional as F\n'), ((2235, 2300), 'det_opr.bbox_opr.filter_boxes_opr', 'filter_boxes_opr', (['batch_proposals', '(box_min_size * im_info[bid, 2])'], {}), '(batch_proposals, box_min_size * im_info[bid, 2])\n', (2251, 2300), False, 'from det_opr.bbox_opr import bbox_transform_inv_opr, clip_boxes_opr, filter_boxes_opr\n'), ((2498, 2537), 'megengine.functional.argsort', 'F.argsort', (['batch_probs'], {'descending': '(True)'}), '(batch_probs, descending=True)\n', (2507, 2537), True, 'import megengine.functional as F\n'), ((2729, 2777), 'megengine.functional.concat', 'F.concat', (['[batch_proposals, batch_probs]'], {'axis': '(1)'}), '([batch_proposals, batch_probs], axis=1)\n', (2737, 2777), True, 'import megengine.functional as F\n'), ((2872, 2922), 'layers.nms.gpu_nms', 'gpu_nms', (['batch_rois', 'nms_threshold', 'post_nms_top_n'], {}), '(batch_rois, nms_threshold, post_nms_top_n)\n', (2879, 2922), False, 'from layers.nms import gpu_nms\n'), ((3120, 3170), 'megengine.functional.concat', 'F.concat', (['[batch_inds, batch_rois[:, :-1]]'], {'axis': '(1)'}), '([batch_inds, batch_rois[:, :-1]], axis=1)\n', (3128, 3170), True, 'import megengine.functional as F\n'), ((3352, 3381), 'megengine.functional.concat', 'F.concat', (['return_rois'], {'axis': '(0)'}), '(return_rois, axis=0)\n', (3360, 3381), True, 'import megengine.functional as F\n'), ((3407, 3437), 'megengine.functional.concat', 'F.concat', (['return_probs'], {'axis': '(0)'}), '(return_probs, axis=0)\n', (3415, 3437), True, 'import megengine.functional as F\n'), ((1610, 1654), 'det_opr.bbox_opr.bbox_transform_inv_opr', 'bbox_transform_inv_opr', (['all_anchors', 'offsets'], {}), '(all_anchors, offsets)\n', (1632, 1654), False, 'from det_opr.bbox_opr import bbox_transform_inv_opr, clip_boxes_opr, filter_boxes_opr\n'), ((1315, 1358), 'megengine.core.tensor', 'tensor', (['config.bbox_normalize_stds[None, :]'], {}), '(config.bbox_normalize_stds[None, :])\n', (1321, 1358), False, 'from megengine.core import tensor\n'), ((1386, 1430), 'megengine.core.tensor', 'tensor', (['config.bbox_normalize_means[None, :]'], {}), '(config.bbox_normalize_means[None, :])\n', (1392, 1430), False, 'from megengine.core import tensor\n'), ((1727, 1769), 'det_opr.bbox_opr.clip_boxes_opr', 'clip_boxes_opr', (['proposals', 'im_info[bid, :]'], {}), '(proposals, im_info[bid, :])\n', (1741, 1769), False, 'from det_opr.bbox_opr import bbox_transform_inv_opr, clip_boxes_opr, filter_boxes_opr\n'), ((1892, 1908), 'megengine.functional.softmax', 'F.softmax', (['probs'], {}), '(probs)\n', (1901, 1908), True, 'import megengine.functional as F\n')]
|
"""
Functions to visualize the geometry elements and numbering and orientation of
their facets (edges and faces).
The standard geometry elements can be plotted by running::
$ python sfepy/postprocess/plot_facets.py
"""
import numpy as nm
import matplotlib.pyplot as plt
from sfepy.linalg import (get_perpendiculars, normalize_vectors,
make_axis_rotation_matrix)
from sfepy.postprocess.plot_dofs import _get_axes, plot_mesh, plot_global_dofs
def plot_geometry(ax, gel, show=False):
"""
Plot a geometry element as a wireframe.
"""
ax = plot_mesh(ax, gel.coors, [gel.conn], gel.edges, show=False)
ax = plot_global_dofs(ax, gel.coors, [gel.conn], show=show)
return ax
def plot_edges(ax, gel, length, show=False):
"""
Plot edges of a geometry element as numbered arrows.
"""
dim = gel.dim
ax = _get_axes(ax, dim)
l2 = 0.5 * length
for ii, edge in enumerate(gel.edges):
cc = gel.coors[edge]
centre = 0.5 * cc.sum(axis=0)
vdir = (cc - centre)
normalize_vectors(vdir)
cc = l2 * vdir + centre
draw_arrow(ax, cc, length=0.3*length, linewidth=3, color='b')
if dim == 3:
cx, cy, cz = centre
ax.text(cx, cy, cz, ii,
color='b', fontsize=10, weight='light')
else:
cx, cy = centre
ax.text(cx, cy, ii,
color='b', fontsize=10, weight='light')
return ax
def plot_faces(ax, gel, radius, n_point, show=False):
"""
Plot faces of a 3D geometry element as numbered oriented arcs. An arc
centre corresponds to the first node of a face. It points from the first
edge towards the last edge of the face.
"""
dim = gel.dim
ax = _get_axes(ax, dim)
if dim < 3: return ax
for ii, face in enumerate(gel.faces):
cc = gel.coors[face]
t1 = cc[1, :] - cc[0, :]
t2 = cc[-1, :] - cc[0, :]
n = nm.cross(t1, t2)
nt1 = nm.linalg.norm(t1)
nt2 = nm.linalg.norm(t2)
angle = nm.arccos(nm.dot(t1, t2) / (nt1 * nt2))
da = angle / (n_point - 1)
mtx = make_axis_rotation_matrix(n, da)
rt = cc[0] + radius * t1 / nt1
coors = [rt]
for ip in range(n_point - 1):
rt = nm.dot(mtx.T, (rt - cc[0])) + cc[0]
coors.append(rt)
coors = nm.array(coors, dtype=nm.float64)
centre = coors.sum(axis=0) / coors.shape[0]
draw_arrow(ax, coors, length=0.3*radius, linewidth=3, color='r')
if dim == 3:
cx, cy, cz = centre
ax.text(cx, cy, cz, ii,
color='r', fontsize=10, weight='light')
else:
cx, cy = centre
ax.text(cx, cy, ii,
color='r', fontsize=10, weight='light')
return ax
def draw_arrow(ax, coors, angle=20.0, length=0.3, **kwargs):
"""
Draw a line ended with an arrow head, in 2D or 3D.
"""
color = kwargs.get('color', 'b')
c0 = coors[-2]
c1 = coors[-1]
vd = c1 - c0
nvd = nm.linalg.norm(vd)
vd /= nvd
c0 = c1 - length * vd
ps = get_perpendiculars(vd)
rangle = nm.deg2rad(min(angle, 60.0))
plength = length * nm.arctan(rangle)
if coors.shape[1] == 2:
from matplotlib.patches import Polygon
cx, cy = coors[:, 0], coors[:, 1]
ax.plot(cx, cy, **kwargs)
p0 = c0 + plength * ps
p1 = c0 - plength * ps
pol = Polygon([p0, p1, c1], color=color)
ax.add_artist(pol)
else:
import mpl_toolkits.mplot3d as plt3
cx, cy, cz = coors[:, 0], coors[:, 1], coors[:, 2]
ax.plot(cx, cy, cz, **kwargs)
p00 = c0 + plength * ps[0]
p01 = c0 - plength * ps[0]
p10 = c0 + plength * ps[1]
p11 = c0 - plength * ps[1]
arr = plt3.art3d.Poly3DCollection([[p00, p01, c1],
[p10, p11, c1]], color=color)
ax.add_collection3d(arr)
if __name__ == '__main__':
from sfepy.discrete.fem.geometry_element import GeometryElement, geometry_data
for key, gd in geometry_data.iteritems():
if key == '1_2' : continue
gel = GeometryElement(key)
ax = plot_geometry(None, gel)
ax = plot_edges(ax, gel, length=0.2)
ax = plot_faces(ax, gel, radius=0.3, n_point=5)
dd = 0.05
ax.set_xlim([-dd, 1.0 + dd])
ax.set_ylim([-dd, 1.0 + dd])
if gel.dim == 3:
ax.set_zlim([-dd, 1.0 + dd])
plt.show()
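# A minimal single-element sketch (hypothetical snippet reusing the helpers above)
# for inspecting one reference element instead of iterating over all of them:
#
#   gel = GeometryElement('3_4')          # a tetrahedron
#   ax = plot_geometry(None, gel)
#   ax = plot_edges(ax, gel, length=0.2)
#   ax = plot_faces(ax, gel, radius=0.3, n_point=5)
#   plt.show()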
|
[
"sfepy.linalg.normalize_vectors",
"sfepy.discrete.fem.geometry_element.geometry_data.iteritems",
"sfepy.linalg.get_perpendiculars",
"sfepy.discrete.fem.geometry_element.GeometryElement",
"sfepy.postprocess.plot_dofs.plot_global_dofs",
"sfepy.postprocess.plot_dofs.plot_mesh",
"sfepy.linalg.make_axis_rotation_matrix",
"sfepy.postprocess.plot_dofs._get_axes"
] |
[((582, 641), 'sfepy.postprocess.plot_dofs.plot_mesh', 'plot_mesh', (['ax', 'gel.coors', '[gel.conn]', 'gel.edges'], {'show': '(False)'}), '(ax, gel.coors, [gel.conn], gel.edges, show=False)\n', (591, 641), False, 'from sfepy.postprocess.plot_dofs import _get_axes, plot_mesh, plot_global_dofs\n'), ((651, 705), 'sfepy.postprocess.plot_dofs.plot_global_dofs', 'plot_global_dofs', (['ax', 'gel.coors', '[gel.conn]'], {'show': 'show'}), '(ax, gel.coors, [gel.conn], show=show)\n', (667, 705), False, 'from sfepy.postprocess.plot_dofs import _get_axes, plot_mesh, plot_global_dofs\n'), ((867, 885), 'sfepy.postprocess.plot_dofs._get_axes', '_get_axes', (['ax', 'dim'], {}), '(ax, dim)\n', (876, 885), False, 'from sfepy.postprocess.plot_dofs import _get_axes, plot_mesh, plot_global_dofs\n'), ((1776, 1794), 'sfepy.postprocess.plot_dofs._get_axes', '_get_axes', (['ax', 'dim'], {}), '(ax, dim)\n', (1785, 1794), False, 'from sfepy.postprocess.plot_dofs import _get_axes, plot_mesh, plot_global_dofs\n'), ((3093, 3111), 'numpy.linalg.norm', 'nm.linalg.norm', (['vd'], {}), '(vd)\n', (3107, 3111), True, 'import numpy as nm\n'), ((3163, 3185), 'sfepy.linalg.get_perpendiculars', 'get_perpendiculars', (['vd'], {}), '(vd)\n', (3181, 3185), False, 'from sfepy.linalg import get_perpendiculars, normalize_vectors, make_axis_rotation_matrix\n'), ((4157, 4182), 'sfepy.discrete.fem.geometry_element.geometry_data.iteritems', 'geometry_data.iteritems', ([], {}), '()\n', (4180, 4182), False, 'from sfepy.discrete.fem.geometry_element import GeometryElement, geometry_data\n'), ((1056, 1079), 'sfepy.linalg.normalize_vectors', 'normalize_vectors', (['vdir'], {}), '(vdir)\n', (1073, 1079), False, 'from sfepy.linalg import get_perpendiculars, normalize_vectors, make_axis_rotation_matrix\n'), ((1974, 1990), 'numpy.cross', 'nm.cross', (['t1', 't2'], {}), '(t1, t2)\n', (1982, 1990), True, 'import numpy as nm\n'), ((2006, 2024), 'numpy.linalg.norm', 'nm.linalg.norm', (['t1'], {}), '(t1)\n', (2020, 2024), True, 'import numpy as nm\n'), ((2039, 2057), 'numpy.linalg.norm', 'nm.linalg.norm', (['t2'], {}), '(t2)\n', (2053, 2057), True, 'import numpy as nm\n'), ((2165, 2197), 'sfepy.linalg.make_axis_rotation_matrix', 'make_axis_rotation_matrix', (['n', 'da'], {}), '(n, da)\n', (2190, 2197), False, 'from sfepy.linalg import get_perpendiculars, normalize_vectors, make_axis_rotation_matrix\n'), ((2396, 2429), 'numpy.array', 'nm.array', (['coors'], {'dtype': 'nm.float64'}), '(coors, dtype=nm.float64)\n', (2404, 2429), True, 'import numpy as nm\n'), ((3252, 3269), 'numpy.arctan', 'nm.arctan', (['rangle'], {}), '(rangle)\n', (3261, 3269), True, 'import numpy as nm\n'), ((3502, 3536), 'matplotlib.patches.Polygon', 'Polygon', (['[p0, p1, c1]'], {'color': 'color'}), '([p0, p1, c1], color=color)\n', (3509, 3536), False, 'from matplotlib.patches import Polygon\n'), ((3875, 3949), 'mpl_toolkits.mplot3d.art3d.Poly3DCollection', 'plt3.art3d.Poly3DCollection', (['[[p00, p01, c1], [p10, p11, c1]]'], {'color': 'color'}), '([[p00, p01, c1], [p10, p11, c1]], color=color)\n', (3902, 3949), True, 'import mpl_toolkits.mplot3d as plt3\n'), ((4234, 4254), 'sfepy.discrete.fem.geometry_element.GeometryElement', 'GeometryElement', (['key'], {}), '(key)\n', (4249, 4254), False, 'from sfepy.discrete.fem.geometry_element import GeometryElement, geometry_data\n'), ((4563, 4573), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4571, 4573), True, 'import matplotlib.pyplot as plt\n'), ((2084, 2098), 'numpy.dot', 'nm.dot', (['t1', 't2'], {}), '(t1, t2)\n', (2090, 
2098), True, 'import numpy as nm\n'), ((2314, 2339), 'numpy.dot', 'nm.dot', (['mtx.T', '(rt - cc[0])'], {}), '(mtx.T, rt - cc[0])\n', (2320, 2339), True, 'import numpy as nm\n')]
|
import pytest
from typing import Generator, Dict
from sqlmodel import Session, SQLModel, create_engine
from sqlmodel.pool import StaticPool
from fastapi.testclient import TestClient
from app.models import *
from app.main import app
from app.api.deps import get_db
@pytest.fixture(name="session")
def session_fixture() -> Generator:
engine = create_engine(
"sqlite://",
echo=False,
connect_args={"check_same_thread": False},
poolclass=StaticPool,
)
SQLModel.metadata.create_all(engine)
with Session(engine) as session:
yield session
@pytest.fixture(name="client")
def client_fixture(session: Session):
def get_db_override():
return session
app.dependency_overrides[get_db] = get_db_override
client = TestClient(app)
yield client
app.dependency_overrides.clear()
@pytest.fixture(scope="module")
def random_product() -> Dict[str, str]:
return {
"id": 1,
"name": "<NAME>",
"price": 80,
}
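# A minimal sketch of how these fixtures compose in a test (hypothetical endpoint
# and assertions, for illustration only):
#
#   def test_create_product(client, random_product):
#       response = client.post("/products/", json=random_product)
#       assert response.status_code == 200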
|
[
"sqlmodel.create_engine",
"sqlmodel.Session",
"sqlmodel.SQLModel.metadata.create_all"
] |
[((267, 297), 'pytest.fixture', 'pytest.fixture', ([], {'name': '"""session"""'}), "(name='session')\n", (281, 297), False, 'import pytest\n'), ((593, 622), 'pytest.fixture', 'pytest.fixture', ([], {'name': '"""client"""'}), "(name='client')\n", (607, 622), False, 'import pytest\n'), ((853, 883), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (867, 883), False, 'import pytest\n'), ((347, 455), 'sqlmodel.create_engine', 'create_engine', (['"""sqlite://"""'], {'echo': '(False)', 'connect_args': "{'check_same_thread': False}", 'poolclass': 'StaticPool'}), "('sqlite://', echo=False, connect_args={'check_same_thread': \n False}, poolclass=StaticPool)\n", (360, 455), False, 'from sqlmodel import Session, SQLModel, create_engine\n'), ((494, 530), 'sqlmodel.SQLModel.metadata.create_all', 'SQLModel.metadata.create_all', (['engine'], {}), '(engine)\n', (522, 530), False, 'from sqlmodel import Session, SQLModel, create_engine\n'), ((780, 795), 'fastapi.testclient.TestClient', 'TestClient', (['app'], {}), '(app)\n', (790, 795), False, 'from fastapi.testclient import TestClient\n'), ((817, 849), 'app.main.app.dependency_overrides.clear', 'app.dependency_overrides.clear', ([], {}), '()\n', (847, 849), False, 'from app.main import app\n'), ((540, 555), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (547, 555), False, 'from sqlmodel import Session, SQLModel, create_engine\n')]
|
from typing import Optional # (1)
from sqlmodel import Field, SQLModel, create_engine # (2)
class Hero(SQLModel, table=True): # (3)
id: Optional[int] = Field(default=None, primary_key=True) # (4)
name: str # (5)
secret_name: str # (6)
age: Optional[int] = None # (7)
sqlite_file_name = "database.db" # (8)
sqlite_url = f"sqlite:///{sqlite_file_name}" # (9)
engine = create_engine(sqlite_url, echo=True) # (10)
def create_db_and_tables(): # (11)
SQLModel.metadata.create_all(engine) # (12)
if __name__ == "__main__": # (13)
create_db_and_tables() # (14)
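# A minimal follow-up sketch (not part of the numbered tutorial steps): once the
# table exists, rows can be added through a Session.
#
#   from sqlmodel import Session
#   with Session(engine) as session:
#       session.add(Hero(name="Deadpond", secret_name="Dive Wilson"))
#       session.commit()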
|
[
"sqlmodel.create_engine",
"sqlmodel.Field",
"sqlmodel.SQLModel.metadata.create_all"
] |
[((397, 433), 'sqlmodel.create_engine', 'create_engine', (['sqlite_url'], {'echo': '(True)'}), '(sqlite_url, echo=True)\n', (410, 433), False, 'from sqlmodel import Field, SQLModel, create_engine\n'), ((162, 199), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (167, 199), False, 'from sqlmodel import Field, SQLModel, create_engine\n'), ((484, 520), 'sqlmodel.SQLModel.metadata.create_all', 'SQLModel.metadata.create_all', (['engine'], {}), '(engine)\n', (512, 520), False, 'from sqlmodel import Field, SQLModel, create_engine\n')]
|
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
import bisect
import datetime
import math
import os
import pickle
import time
from typing import Optional
import megengine as mge
import megengine.distributed as dist
import megengine.module as M
from basecore.config import ConfigDict
from basecore.engine import BaseHook, BaseTrainer
from basecore.utils import (
Checkpoint,
MeterBuffer,
cached_property,
ensure_dir,
get_last_call_deltatime,
)
from loguru import logger
from tensorboardX import SummaryWriter
from basecls.layers import compute_precise_bn_stats
from basecls.models import sync_model
from basecls.utils import default_logging, registers
from .tester import ClsTester
__all__ = [
"CheckpointHook",
"EvalHook",
"LoggerHook",
"LRSchedulerHook",
"PreciseBNHook",
"ResumeHook",
"TensorboardHook",
]
def _create_checkpoint(trainer: BaseTrainer, save_dir: str) -> Checkpoint:
"""Create a checkpoint for save and resume"""
model = trainer.model
ema = trainer.ema
ckpt_kws = {"ema": ema} if ema is not None else {}
optim = trainer.solver.optimizer
scaler = trainer.solver.grad_scaler
progress = trainer.progress
ckpt = Checkpoint(
save_dir,
model,
tag_file=None,
optimizer=optim,
scaler=scaler,
progress=progress,
**ckpt_kws,
)
return ckpt
class CheckpointHook(BaseHook):
"""Hook for managing checkpoints during training.
Effect during ``after_epoch`` and ``after_train`` procedure.
Args:
save_dir: checkpoint directory.
save_every_n_epoch: interval for saving checkpoint. Default: ``1``
"""
def __init__(self, save_dir: str = None, save_every_n_epoch: int = 1):
super().__init__()
ensure_dir(save_dir)
self.save_dir = save_dir
self.save_every_n_epoch = save_every_n_epoch
def after_epoch(self):
progress = self.trainer.progress
ckpt = _create_checkpoint(self.trainer, self.save_dir)
ckpt.save("latest.pkl")
if progress.epoch % self.save_every_n_epoch == 0:
progress_str = progress.progress_str_list()
save_name = "_".join(progress_str[:-1]) + ".pkl"
ckpt.save(save_name)
logger.info(f"Save checkpoint {save_name} to {self.save_dir}")
def after_train(self):
        # NOTE: usually the final EMA is not the best, so we don't save it
mge.save(
{"state_dict": self.trainer.model.state_dict()},
os.path.join(self.save_dir, "dumped_model.pkl"),
pickle_protocol=pickle.DEFAULT_PROTOCOL,
)
class EvalHook(BaseHook):
"""Hook for evaluating during training.
Effect during ``after_epoch`` and ``after_train`` procedure.
Args:
save_dir: checkpoint directory.
eval_every_n_epoch: interval for evaluating. Default: ``1``
"""
def __init__(self, save_dir: str = None, eval_every_n_epoch: int = 1):
super().__init__()
ensure_dir(save_dir)
self.save_dir = save_dir
self.eval_every_n_epoch = eval_every_n_epoch
self.best_acc1 = 0
self.best_ema_acc1 = 0
def after_epoch(self):
trainer = self.trainer
cfg = trainer.cfg
model = trainer.model
ema = trainer.ema
progress = trainer.progress
if progress.epoch % self.eval_every_n_epoch == 0 and progress.epoch != progress.max_epoch:
self.test(cfg, model, ema)
def after_train(self):
trainer = self.trainer
cfg = trainer.cfg
model = trainer.model
ema = trainer.ema
        # TODO: this may actually be unnecessary when precise_bn is on
sync_model(model)
if ema is not None:
sync_model(ema)
self.test(cfg, model, ema)
def test(self, cfg: ConfigDict, model: M.Module, ema: Optional[M.Module] = None):
dataloader = registers.dataloaders.get(cfg.data.name).build(cfg, False)
# FIXME: need atomic user_pop, maybe in MegEngine 1.5?
# tester = BaseTester(model, dataloader, AccEvaluator())
tester = ClsTester(cfg, model, dataloader)
acc1, _ = tester.test()
if acc1 > self.best_acc1:
self.best_acc1 = acc1
if dist.get_rank() == 0:
mge.save(
{"state_dict": model.state_dict(), "acc1": self.best_acc1},
os.path.join(self.save_dir, "best_model.pkl"),
pickle_protocol=pickle.DEFAULT_PROTOCOL,
)
logger.info(
f"Epoch: {self.trainer.progress.epoch}, Test Acc@1: {acc1:.3f}, "
f"Best Test Acc@1: {self.best_acc1:.3f}"
)
if ema is None:
return
tester_ema = ClsTester(cfg, ema, dataloader)
ema_acc1, _ = tester_ema.test()
if ema_acc1 > self.best_ema_acc1:
self.best_ema_acc1 = ema_acc1
if dist.get_rank() == 0:
mge.save(
{"state_dict": ema.state_dict(), "acc1": self.best_ema_acc1},
os.path.join(self.save_dir, "best_ema_model.pkl"),
pickle_protocol=pickle.DEFAULT_PROTOCOL,
)
logger.info(
f"Epoch: {self.trainer.progress.epoch}, EMA Acc@1: {ema_acc1:.3f}, "
f"Best EMA Acc@1: {self.best_ema_acc1:.3f}"
)
class LoggerHook(BaseHook):
"""Hook for logging during training.
Effect during ``before_train``, ``after_train``, ``before_iter`` and ``after_iter`` procedure.
Args:
log_every_n_iter: interval for logging. Default: ``20``
"""
def __init__(self, log_every_n_iter: int = 20):
super().__init__()
self.log_every_n_iter = log_every_n_iter
self.meter = MeterBuffer(self.log_every_n_iter)
def before_train(self):
trainer = self.trainer
progress = trainer.progress
default_logging(trainer.cfg, trainer.model)
logger.info(f"Starting training from epoch {progress.epoch}, iteration {progress.iter}")
self.start_training_time = time.perf_counter()
def after_train(self):
total_training_time = time.perf_counter() - self.start_training_time
total_time_str = str(datetime.timedelta(seconds=total_training_time))
logger.info(
"Total training time: {} ({:.4f} s / iter)".format(
total_time_str, self.meter["iters_time"].global_avg
)
)
def before_iter(self):
self.iter_start_time = time.perf_counter()
def after_iter(self):
single_iter_time = time.perf_counter() - self.iter_start_time
delta_time = get_last_call_deltatime()
if delta_time is None:
delta_time = single_iter_time
self.meter.update(
{
"iters_time": single_iter_time, # to get global average iter time
"eta_iter_time": delta_time, # to get ETA time
"extra_time": delta_time - single_iter_time, # to get extra time
}
)
trainer = self.trainer
progress = trainer.progress
epoch_id, iter_id = progress.epoch, progress.iter
max_epoch, max_iter = progress.max_epoch, progress.max_iter
if iter_id % self.log_every_n_iter == 0 or (iter_id == 1 and epoch_id == 1):
log_str_list = []
# step info string
log_str_list.append(str(progress))
# loss string
log_str_list.append(self.get_loss_str(trainer.meter))
# stat string
log_str_list.append(self.get_stat_str(trainer.meter))
# other training info like learning rate.
log_str_list.append(self.get_train_info_str())
            # memory usage.
log_str_list.append(self.get_memory_str(trainer.meter))
# time string
left_iters = max_iter - iter_id + (max_epoch - epoch_id) * max_iter
time_str = self.get_time_str(left_iters)
log_str_list.append(time_str)
# filter empty strings
log_str_list = [s for s in log_str_list if len(s) > 0]
log_str = ", ".join(log_str_list)
logger.info(log_str)
# reset meters in trainer
trainer.meter.reset()
def get_loss_str(self, meter):
"""Get loss information during trainging process."""
loss_dict = meter.get_filtered_meter(filter_key="loss")
loss_str = ", ".join(
[f"{name}:{value.latest:.3f}({value.avg:.3f})" for name, value in loss_dict.items()]
)
return loss_str
def get_stat_str(self, meter):
"""Get stat information during trainging process."""
stat_dict = meter.get_filtered_meter(filter_key="stat")
stat_str = ", ".join(
[f"{name}:{value.latest:.3f}({value.avg:.3f})" for name, value in stat_dict.items()]
)
return stat_str
def get_memory_str(self, meter):
"""Get memory information during trainging process."""
def mem_in_Mb(mem_value):
return math.ceil(mem_value / 1024 / 1024)
mem_dict = meter.get_filtered_meter(filter_key="memory")
mem_str = ", ".join(
[
f"{name}:{mem_in_Mb(value.latest)}({mem_in_Mb(value.avg)})Mb"
for name, value in mem_dict.items()
]
)
return mem_str
def get_train_info_str(self):
"""Get training process related information such as learning rate."""
# extra info to display, such as learning rate
trainer = self.trainer
lr = trainer.solver.optimizer.param_groups[0]["lr"]
lr_str = f"lr:{lr:.3e}"
loss_scale = trainer.solver.grad_scaler.scale_factor
loss_scale_str = f", amp_loss_scale:{loss_scale:.1f}" if trainer.cfg.amp.enabled else ""
return lr_str + loss_scale_str
def get_time_str(self, left_iters: int) -> str:
"""Get time related information sucn as data_time, train_time, ETA and so on."""
# time string
trainer = self.trainer
time_dict = trainer.meter.get_filtered_meter(filter_key="time")
train_time_str = ", ".join(
[f"{name}:{value.avg:.3f}s" for name, value in time_dict.items()]
)
train_time_str += ", extra_time:{:.3f}s, ".format(self.meter["extra_time"].avg)
eta_seconds = self.meter["eta_iter_time"].global_avg * left_iters
eta_string = "ETA:{}".format(datetime.timedelta(seconds=int(eta_seconds)))
time_str = train_time_str + eta_string
return time_str
class LRSchedulerHook(BaseHook):
"""Hook for learning rate scheduling during training.
Effect during ``before_epoch`` procedure.
"""
def before_epoch(self):
trainer = self.trainer
epoch_id = trainer.progress.epoch
cfg = trainer.cfg.solver
lr_factor = self.get_lr_factor(cfg, epoch_id)
if epoch_id <= cfg.warmup_epochs:
alpha = (epoch_id - 1) / cfg.warmup_epochs
lr_factor *= cfg.warmup_factor * (1 - alpha) + alpha
scaled_lr = self.total_lr * lr_factor
for param_group in trainer.solver.optimizer.param_groups:
param_group["lr"] = scaled_lr
def get_lr_factor(self, cfg: ConfigDict, epoch_id: int) -> float:
"""Calculate learning rate factor.
It supports ``"step"``, ``"linear"``, ``"cosine"``, ``"exp"``, and ``"rel_exp"`` schedule.
Args:
cfg: config for training.
epoch_id: current epoch.
Returns:
Learning rate factor.
"""
if cfg.lr_schedule == "step":
return cfg.lr_decay_factor ** bisect.bisect_left(cfg.lr_decay_steps, epoch_id)
elif cfg.lr_schedule == "linear":
alpha = 1 - (epoch_id - 1) / cfg.max_epoch
return (1 - cfg.lr_min_factor) * alpha + cfg.lr_min_factor
elif cfg.lr_schedule == "cosine":
alpha = 0.5 * (1 + math.cos(math.pi * (epoch_id - 1) / cfg.max_epoch))
return (1 - cfg.lr_min_factor) * alpha + cfg.lr_min_factor
elif cfg.lr_schedule == "exp":
return cfg.lr_decay_factor ** (epoch_id - 1)
elif cfg.lr_schedule == "rel_exp":
if cfg.lr_min_factor <= 0:
raise ValueError(
"Exponential lr schedule requires lr_min_factor to be greater than 0"
)
return cfg.lr_min_factor ** ((epoch_id - 1) / cfg.max_epoch)
else:
raise NotImplementedError(f"Learning rate schedule '{cfg.lr_schedule}' not supported")
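    # Worked example for the "cosine" branch above (illustrative numbers): with
    # max_epoch=100 and lr_min_factor=0, epoch_id=1 gives alpha = 0.5*(1+cos(0)) = 1.0
    # (full lr), while epoch_id=100 gives alpha = 0.5*(1+cos(0.99*pi)) ~= 0.0002, so
    # the factor decays smoothly from 1 towards lr_min_factor.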
@cached_property
def total_lr(self) -> float:
"""Total learning rate."""
cfg = self.trainer.cfg.solver
total_lr = cfg.basic_lr * dist.get_world_size() # linear scaling rule
return total_lr
class PreciseBNHook(BaseHook):
"""Hook for precising BN during training.
Effect during ``after_epoch`` procedure.
Args:
precise_every_n_epoch: interval for precising BN. Default: ``1``
"""
def __init__(self, precise_every_n_epoch: int = 1):
super().__init__()
self.precise_every_n_epoch = precise_every_n_epoch
def before_train(self):
if self.precise_every_n_epoch == -1:
self.precise_every_n_epoch = self.trainer.progress.max_epoch
def after_epoch(self):
trainer = self.trainer
if (
trainer.progress.epoch % self.precise_every_n_epoch == 0
and trainer.cfg.bn.num_samples_precise > 0
):
logger.info(f"Apply Precising BN at epoch{trainer.progress.epoch}")
compute_precise_bn_stats(trainer.cfg, trainer.model, trainer.dataloader)
if trainer.ema is not None:
logger.info(f"Apply Precising BN for EMA at epoch{trainer.progress.epoch}")
compute_precise_bn_stats(trainer.cfg, trainer.ema, trainer.dataloader)
class ResumeHook(BaseHook):
"""Hook for resuming training process.
Effect during ``before_train`` procedure.
Args:
save_dir: checkpoint directory.
resume: enable resume or not. Default: ``False``
"""
def __init__(self, save_dir: int = None, resume: bool = False):
super().__init__()
ensure_dir(save_dir)
self.save_dir = save_dir
self.resume = resume
def before_train(self):
trainer = self.trainer
if self.resume:
progress = trainer.progress
ckpt = _create_checkpoint(self.trainer, self.save_dir)
filename = ckpt.get_checkpoint_file("latest.pkl")
logger.info(f"Load checkpoint from {filename}")
ckpt.resume(filename)
# since ckpt is dumped after every epoch,
# resume training requires epoch + 1 and set iter to 1
progress.epoch += 1
progress.iter = 1
class TensorboardHook(BaseHook):
"""Hook for tensorboard during training.
Effect during ``before_train``, ``after_train`` and ``after_iter`` procedure.
Args:
log_dir: tensorboard directory.
log_every_n_iter: interval for logging. Default: ``20``
scalar_type: statistic to record, supports ``"latest"``, ``"avg"``, ``"global_avg"`` and
``"median"``. Default: ``"latest"``
"""
def __init__(self, log_dir: str, log_every_n_iter: int = 20, scalar_type: str = "latest"):
super().__init__()
if scalar_type not in ("latest", "avg", "global_avg", "median"):
raise ValueError(f"Tensorboard scalar type '{scalar_type}' not supported")
ensure_dir(log_dir)
self.log_dir = log_dir
self.log_every_n_iter = log_every_n_iter
self.scalar_type = scalar_type
def before_train(self):
self.writer = SummaryWriter(self.log_dir)
def after_train(self):
self.writer.close()
def after_iter(self):
trainer = self.trainer
epoch_id, iter_id = trainer.progress.epoch, trainer.progress.iter
if iter_id % self.log_every_n_iter == 0 or (iter_id == 1 and epoch_id == 1):
self.write(context=trainer)
def write(self, context):
cur_iter = self.calc_iter(context.progress)
for key, meter in context.meter.items():
value = getattr(meter, self.scalar_type, meter.latest)
for prefix in ("loss", "stat", "time", "memory"):
if prefix in key:
key = f"{prefix}/{key}"
break
self.writer.add_scalar(key, value, cur_iter)
# write lr into tensorboard
lr = context.solver.optimizer.param_groups[0]["lr"]
self.writer.add_scalar("lr", lr, cur_iter)
# write loss_scale into tensorboard
if context.cfg.amp.enabled:
loss_scale = context.solver.grad_scaler.scale_factor
self.writer.add_scalar("amp_loss_scale", loss_scale, cur_iter)
@classmethod
def calc_iter(cls, progress):
return (progress.epoch - 1) * progress.max_iter + progress.iter - 1
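# Worked example for calc_iter (illustrative numbers): with max_iter=500 per epoch,
# epoch=3 and iter=20 map to the global step (3 - 1) * 500 + 20 - 1 = 1019, which is
# the x-axis value TensorboardHook uses when writing scalars.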
|
[
"megengine.distributed.get_rank",
"megengine.distributed.get_world_size"
] |
[((1244, 1353), 'basecore.utils.Checkpoint', 'Checkpoint', (['save_dir', 'model'], {'tag_file': 'None', 'optimizer': 'optim', 'scaler': 'scaler', 'progress': 'progress'}), '(save_dir, model, tag_file=None, optimizer=optim, scaler=scaler,\n progress=progress, **ckpt_kws)\n', (1254, 1353), False, 'from basecore.utils import Checkpoint, MeterBuffer, cached_property, ensure_dir, get_last_call_deltatime\n'), ((1828, 1848), 'basecore.utils.ensure_dir', 'ensure_dir', (['save_dir'], {}), '(save_dir)\n', (1838, 1848), False, 'from basecore.utils import Checkpoint, MeterBuffer, cached_property, ensure_dir, get_last_call_deltatime\n'), ((3059, 3079), 'basecore.utils.ensure_dir', 'ensure_dir', (['save_dir'], {}), '(save_dir)\n', (3069, 3079), False, 'from basecore.utils import Checkpoint, MeterBuffer, cached_property, ensure_dir, get_last_call_deltatime\n'), ((3749, 3766), 'basecls.models.sync_model', 'sync_model', (['model'], {}), '(model)\n', (3759, 3766), False, 'from basecls.models import sync_model\n'), ((4603, 4728), 'loguru.logger.info', 'logger.info', (['f"""Epoch: {self.trainer.progress.epoch}, Test Acc@1: {acc1:.3f}, Best Test Acc@1: {self.best_acc1:.3f}"""'], {}), "(\n f'Epoch: {self.trainer.progress.epoch}, Test Acc@1: {acc1:.3f}, Best Test Acc@1: {self.best_acc1:.3f}'\n )\n", (4614, 4728), False, 'from loguru import logger\n'), ((5284, 5415), 'loguru.logger.info', 'logger.info', (['f"""Epoch: {self.trainer.progress.epoch}, EMA Acc@1: {ema_acc1:.3f}, Best EMA Acc@1: {self.best_ema_acc1:.3f}"""'], {}), "(\n f'Epoch: {self.trainer.progress.epoch}, EMA Acc@1: {ema_acc1:.3f}, Best EMA Acc@1: {self.best_ema_acc1:.3f}'\n )\n", (5295, 5415), False, 'from loguru import logger\n'), ((5848, 5882), 'basecore.utils.MeterBuffer', 'MeterBuffer', (['self.log_every_n_iter'], {}), '(self.log_every_n_iter)\n', (5859, 5882), False, 'from basecore.utils import Checkpoint, MeterBuffer, cached_property, ensure_dir, get_last_call_deltatime\n'), ((5988, 6031), 'basecls.utils.default_logging', 'default_logging', (['trainer.cfg', 'trainer.model'], {}), '(trainer.cfg, trainer.model)\n', (6003, 6031), False, 'from basecls.utils import default_logging, registers\n'), ((6041, 6139), 'loguru.logger.info', 'logger.info', (['f"""Starting training from epoch {progress.epoch}, iteration {progress.iter}"""'], {}), "(\n f'Starting training from epoch {progress.epoch}, iteration {progress.iter}'\n )\n", (6052, 6139), False, 'from loguru import logger\n'), ((6166, 6185), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (6183, 6185), False, 'import time\n'), ((6605, 6624), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (6622, 6624), False, 'import time\n'), ((6744, 6769), 'basecore.utils.get_last_call_deltatime', 'get_last_call_deltatime', ([], {}), '()\n', (6767, 6769), False, 'from basecore.utils import Checkpoint, MeterBuffer, cached_property, ensure_dir, get_last_call_deltatime\n'), ((14390, 14410), 'basecore.utils.ensure_dir', 'ensure_dir', (['save_dir'], {}), '(save_dir)\n', (14400, 14410), False, 'from basecore.utils import Checkpoint, MeterBuffer, cached_property, ensure_dir, get_last_call_deltatime\n'), ((15725, 15744), 'basecore.utils.ensure_dir', 'ensure_dir', (['log_dir'], {}), '(log_dir)\n', (15735, 15744), False, 'from basecore.utils import Checkpoint, MeterBuffer, cached_property, ensure_dir, get_last_call_deltatime\n'), ((15915, 15942), 'tensorboardX.SummaryWriter', 'SummaryWriter', (['self.log_dir'], {}), '(self.log_dir)\n', (15928, 15942), False, 'from tensorboardX import 
SummaryWriter\n'), ((2320, 2382), 'loguru.logger.info', 'logger.info', (['f"""Save checkpoint {save_name} to {self.save_dir}"""'], {}), "(f'Save checkpoint {save_name} to {self.save_dir}')\n", (2331, 2382), False, 'from loguru import logger\n'), ((2571, 2618), 'os.path.join', 'os.path.join', (['self.save_dir', '"""dumped_model.pkl"""'], {}), "(self.save_dir, 'dumped_model.pkl')\n", (2583, 2618), False, 'import os\n'), ((3807, 3822), 'basecls.models.sync_model', 'sync_model', (['ema'], {}), '(ema)\n', (3817, 3822), False, 'from basecls.models import sync_model\n'), ((6244, 6263), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (6261, 6263), False, 'import time\n'), ((6320, 6367), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': 'total_training_time'}), '(seconds=total_training_time)\n', (6338, 6367), False, 'import datetime\n'), ((6679, 6698), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (6696, 6698), False, 'import time\n'), ((8288, 8308), 'loguru.logger.info', 'logger.info', (['log_str'], {}), '(log_str)\n', (8299, 8308), False, 'from loguru import logger\n'), ((9181, 9215), 'math.ceil', 'math.ceil', (['(mem_value / 1024 / 1024)'], {}), '(mem_value / 1024 / 1024)\n', (9190, 9215), False, 'import math\n'), ((12883, 12904), 'megengine.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (12902, 12904), True, 'import megengine.distributed as dist\n'), ((13678, 13745), 'loguru.logger.info', 'logger.info', (['f"""Apply Precising BN at epoch{trainer.progress.epoch}"""'], {}), "(f'Apply Precising BN at epoch{trainer.progress.epoch}')\n", (13689, 13745), False, 'from loguru import logger\n'), ((13758, 13830), 'basecls.layers.compute_precise_bn_stats', 'compute_precise_bn_stats', (['trainer.cfg', 'trainer.model', 'trainer.dataloader'], {}), '(trainer.cfg, trainer.model, trainer.dataloader)\n', (13782, 13830), False, 'from basecls.layers import compute_precise_bn_stats\n'), ((14738, 14785), 'loguru.logger.info', 'logger.info', (['f"""Load checkpoint from {filename}"""'], {}), "(f'Load checkpoint from {filename}')\n", (14749, 14785), False, 'from loguru import logger\n'), ((3966, 4006), 'basecls.utils.registers.dataloaders.get', 'registers.dataloaders.get', (['cfg.data.name'], {}), '(cfg.data.name)\n', (3991, 4006), False, 'from basecls.utils import default_logging, registers\n'), ((4320, 4335), 'megengine.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (4333, 4335), True, 'import megengine.distributed as dist\n'), ((4995, 5010), 'megengine.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (5008, 5010), True, 'import megengine.distributed as dist\n'), ((11802, 11850), 'bisect.bisect_left', 'bisect.bisect_left', (['cfg.lr_decay_steps', 'epoch_id'], {}), '(cfg.lr_decay_steps, epoch_id)\n', (11820, 11850), False, 'import bisect\n'), ((13887, 13962), 'loguru.logger.info', 'logger.info', (['f"""Apply Precising BN for EMA at epoch{trainer.progress.epoch}"""'], {}), "(f'Apply Precising BN for EMA at epoch{trainer.progress.epoch}')\n", (13898, 13962), False, 'from loguru import logger\n'), ((13979, 14049), 'basecls.layers.compute_precise_bn_stats', 'compute_precise_bn_stats', (['trainer.cfg', 'trainer.ema', 'trainer.dataloader'], {}), '(trainer.cfg, trainer.ema, trainer.dataloader)\n', (14003, 14049), False, 'from basecls.layers import compute_precise_bn_stats\n'), ((4468, 4513), 'os.path.join', 'os.path.join', (['self.save_dir', '"""best_model.pkl"""'], {}), "(self.save_dir, 'best_model.pkl')\n", (4480, 4513), False, 'import os\n'), 
((5145, 5194), 'os.path.join', 'os.path.join', (['self.save_dir', '"""best_ema_model.pkl"""'], {}), "(self.save_dir, 'best_ema_model.pkl')\n", (5157, 5194), False, 'import os\n'), ((12092, 12142), 'math.cos', 'math.cos', (['(math.pi * (epoch_id - 1) / cfg.max_epoch)'], {}), '(math.pi * (epoch_id - 1) / cfg.max_epoch)\n', (12100, 12142), False, 'import math\n')]
|
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
import megengine as mge
import megengine.module as M
import numpy as np
import pytest
from basecls.layers import ClsHead, MBV3Head, VGGHead, build_head
@pytest.mark.parametrize("w_in", [4])
@pytest.mark.parametrize(
"head_args",
[
None,
dict(name=None),
dict(name="ClsHead", w_out=8),
dict(name="ClsHead", w_out=8, width=8, norm_name="BN", act_name="relu"),
dict(name="ClsHead", w_out=8, width=8, norm_name="BN", act_name="relu", bias=False),
dict(name="VGGHead", w_out=8, width=8),
dict(name="VGGHead", w_out=8, width=8, dropout_prob=0.5, act_name="relu"),
dict(name="MBV3Head", w_out=8, width=8, w_h=16, norm_name="BN", act_name="relu"),
dict(name="MBV3Head", w_out=8, width=8, w_h=16, norm_name="BN", act_name="relu", se_r=0.25),
dict(
name="MBV3Head",
w_out=8,
width=8,
w_h=16,
norm_name="BN",
act_name="relu",
se_r=0.25,
bias=False,
),
],
)
@pytest.mark.parametrize("norm_name", ["BN"])
@pytest.mark.parametrize("act_name", ["relu"])
def test_build_head(w_in, head_args, norm_name, act_name):
m = build_head(w_in, head_args, norm_name, act_name)
if head_args is None or head_args.get("name") is None:
assert m is None
else:
assert isinstance(m, M.Module)
def test_cls_head():
C = 4
K = 8
x = np.random.rand(2, C, 8, 8).astype("float32")
y = ClsHead(C, K)(mge.Tensor(x)).numpy()
assert len(y.shape) == 2 and y.shape[1] == K
def test_mbv3_head():
C = 4
K = 8
x = np.random.rand(2, C, 8, 8).astype("float32")
y = MBV3Head(C, K, 8, 16)(mge.Tensor(x)).numpy()
assert len(y.shape) == 2 and y.shape[1] == K
def test_vgg_head():
C = 4
K = 8
x = np.random.rand(2, C, 8, 8).astype("float32")
y = VGGHead(C, K, 8)(mge.Tensor(x)).numpy()
assert len(y.shape) == 2 and y.shape[1] == K
|
[
"megengine.Tensor"
] |
[((238, 274), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""w_in"""', '[4]'], {}), "('w_in', [4])\n", (261, 274), False, 'import pytest\n'), ((1128, 1172), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""norm_name"""', "['BN']"], {}), "('norm_name', ['BN'])\n", (1151, 1172), False, 'import pytest\n'), ((1174, 1219), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""act_name"""', "['relu']"], {}), "('act_name', ['relu'])\n", (1197, 1219), False, 'import pytest\n'), ((1287, 1335), 'basecls.layers.build_head', 'build_head', (['w_in', 'head_args', 'norm_name', 'act_name'], {}), '(w_in, head_args, norm_name, act_name)\n', (1297, 1335), False, 'from basecls.layers import ClsHead, MBV3Head, VGGHead, build_head\n'), ((1521, 1547), 'numpy.random.rand', 'np.random.rand', (['(2)', 'C', '(8)', '(8)'], {}), '(2, C, 8, 8)\n', (1535, 1547), True, 'import numpy as np\n'), ((1713, 1739), 'numpy.random.rand', 'np.random.rand', (['(2)', 'C', '(8)', '(8)'], {}), '(2, C, 8, 8)\n', (1727, 1739), True, 'import numpy as np\n'), ((1912, 1938), 'numpy.random.rand', 'np.random.rand', (['(2)', 'C', '(8)', '(8)'], {}), '(2, C, 8, 8)\n', (1926, 1938), True, 'import numpy as np\n'), ((1574, 1587), 'basecls.layers.ClsHead', 'ClsHead', (['C', 'K'], {}), '(C, K)\n', (1581, 1587), False, 'from basecls.layers import ClsHead, MBV3Head, VGGHead, build_head\n'), ((1588, 1601), 'megengine.Tensor', 'mge.Tensor', (['x'], {}), '(x)\n', (1598, 1601), True, 'import megengine as mge\n'), ((1766, 1787), 'basecls.layers.MBV3Head', 'MBV3Head', (['C', 'K', '(8)', '(16)'], {}), '(C, K, 8, 16)\n', (1774, 1787), False, 'from basecls.layers import ClsHead, MBV3Head, VGGHead, build_head\n'), ((1788, 1801), 'megengine.Tensor', 'mge.Tensor', (['x'], {}), '(x)\n', (1798, 1801), True, 'import megengine as mge\n'), ((1965, 1981), 'basecls.layers.VGGHead', 'VGGHead', (['C', 'K', '(8)'], {}), '(C, K, 8)\n', (1972, 1981), False, 'from basecls.layers import ClsHead, MBV3Head, VGGHead, build_head\n'), ((1982, 1995), 'megengine.Tensor', 'mge.Tensor', (['x'], {}), '(x)\n', (1992, 1995), True, 'import megengine as mge\n')]
|
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
import argparse
import importlib
import multiprocessing as mp
import os
import sys
import megengine as mge
import megengine.distributed as dist
from basecore.config import ConfigDict
from loguru import logger
from basecls.engine import ClsTester
from basecls.models import build_model, load_model
from basecls.utils import default_logging, registers, set_nccl_env, set_num_threads, setup_logger
def make_parser() -> argparse.ArgumentParser:
"""Build args parser for testing script.
Returns:
The args parser.
"""
parser = argparse.ArgumentParser()
parser.add_argument("-f", "--file", type=str, help="testing process description file")
parser.add_argument("-w", "--weight_file", default=None, type=str, help="weight file")
return parser
@logger.catch
def worker(args: argparse.Namespace):
"""Worker function for testing script.
Args:
args: args for testing script.
"""
logger.info(f"Init process group for gpu{dist.get_rank()} done")
sys.path.append(os.path.dirname(args.file))
module_name = os.path.splitext(os.path.basename(args.file))[0]
current_network = importlib.import_module(module_name)
cfg = current_network.Cfg()
if cfg.output_dir is None:
cfg.output_dir = f"./logs_{module_name}"
cfg.output_dir = os.path.abspath(cfg.output_dir)
if args.weight_file:
cfg.weights = args.weight_file
else:
cfg.weights = os.path.join(cfg.output_dir, "latest.pkl")
cfg.set_mode("freeze")
if dist.get_rank() == 0 and not os.path.exists(cfg.output_dir):
os.makedirs(cfg.output_dir)
dist.group_barrier()
setup_logger(cfg.output_dir, "test_log.txt", to_loguru=True)
logger.info(f"args: {args}")
if cfg.fastrun:
logger.info("Using fastrun mode...")
mge.functional.debug_param.set_execution_strategy("PROFILE")
tester = build(cfg)
tester.test()
def build(cfg: ConfigDict):
"""Build function for testing script.
Args:
cfg: config for testing.
Returns:
A tester.
"""
model = build_model(cfg)
load_model(model, cfg.weights)
model.eval()
default_logging(cfg, model)
dataloader = registers.dataloaders.get(cfg.data.name).build(cfg, False)
# FIXME: need atomic user_pop, maybe in MegEngine 1.5?
# tester = BaseTester(model, dataloader, AccEvaluator())
return ClsTester(cfg, model, dataloader)
def main():
"""Main function for testing script."""
parser = make_parser()
args = parser.parse_args()
mp.set_start_method("spawn")
set_nccl_env()
set_num_threads()
if not os.path.exists(args.file):
raise ValueError("Description file does not exist")
device_count = mge.device.get_device_count("gpu")
if device_count == 0:
logger.warning("No GPU was found, testing on CPU")
worker(args)
elif device_count > 1:
mp_worker = dist.launcher(worker)
mp_worker(args)
else:
worker(args)
if __name__ == "__main__":
main()
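# A minimal invocation sketch (hypothetical script and file paths, matching the
# -f/-w arguments defined in make_parser above):
#
#   python test.py -f configs/my_model.py -w ./logs_my_model/latest.pkl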
|
[
"megengine.distributed.get_rank",
"megengine.functional.debug_param.set_execution_strategy",
"megengine.device.get_device_count",
"megengine.distributed.group_barrier",
"megengine.distributed.launcher"
] |
[((632, 657), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (655, 657), False, 'import argparse\n'), ((1220, 1256), 'importlib.import_module', 'importlib.import_module', (['module_name'], {}), '(module_name)\n', (1243, 1256), False, 'import importlib\n'), ((1390, 1421), 'os.path.abspath', 'os.path.abspath', (['cfg.output_dir'], {}), '(cfg.output_dir)\n', (1405, 1421), False, 'import os\n'), ((1699, 1719), 'megengine.distributed.group_barrier', 'dist.group_barrier', ([], {}), '()\n', (1717, 1719), True, 'import megengine.distributed as dist\n'), ((1725, 1785), 'basecls.utils.setup_logger', 'setup_logger', (['cfg.output_dir', '"""test_log.txt"""'], {'to_loguru': '(True)'}), "(cfg.output_dir, 'test_log.txt', to_loguru=True)\n", (1737, 1785), False, 'from basecls.utils import default_logging, registers, set_nccl_env, set_num_threads, setup_logger\n'), ((1790, 1818), 'loguru.logger.info', 'logger.info', (['f"""args: {args}"""'], {}), "(f'args: {args}')\n", (1801, 1818), False, 'from loguru import logger\n'), ((2165, 2181), 'basecls.models.build_model', 'build_model', (['cfg'], {}), '(cfg)\n', (2176, 2181), False, 'from basecls.models import build_model, load_model\n'), ((2186, 2216), 'basecls.models.load_model', 'load_model', (['model', 'cfg.weights'], {}), '(model, cfg.weights)\n', (2196, 2216), False, 'from basecls.models import build_model, load_model\n'), ((2239, 2266), 'basecls.utils.default_logging', 'default_logging', (['cfg', 'model'], {}), '(cfg, model)\n', (2254, 2266), False, 'from basecls.utils import default_logging, registers, set_nccl_env, set_num_threads, setup_logger\n'), ((2475, 2508), 'basecls.engine.ClsTester', 'ClsTester', (['cfg', 'model', 'dataloader'], {}), '(cfg, model, dataloader)\n', (2484, 2508), False, 'from basecls.engine import ClsTester\n'), ((2630, 2658), 'multiprocessing.set_start_method', 'mp.set_start_method', (['"""spawn"""'], {}), "('spawn')\n", (2649, 2658), True, 'import multiprocessing as mp\n'), ((2664, 2678), 'basecls.utils.set_nccl_env', 'set_nccl_env', ([], {}), '()\n', (2676, 2678), False, 'from basecls.utils import default_logging, registers, set_nccl_env, set_num_threads, setup_logger\n'), ((2683, 2700), 'basecls.utils.set_num_threads', 'set_num_threads', ([], {}), '()\n', (2698, 2700), False, 'from basecls.utils import default_logging, registers, set_nccl_env, set_num_threads, setup_logger\n'), ((2820, 2854), 'megengine.device.get_device_count', 'mge.device.get_device_count', (['"""gpu"""'], {}), "('gpu')\n", (2847, 2854), True, 'import megengine as mge\n'), ((1103, 1129), 'os.path.dirname', 'os.path.dirname', (['args.file'], {}), '(args.file)\n', (1118, 1129), False, 'import os\n'), ((1519, 1561), 'os.path.join', 'os.path.join', (['cfg.output_dir', '"""latest.pkl"""'], {}), "(cfg.output_dir, 'latest.pkl')\n", (1531, 1561), False, 'import os\n'), ((1667, 1694), 'os.makedirs', 'os.makedirs', (['cfg.output_dir'], {}), '(cfg.output_dir)\n', (1678, 1694), False, 'import os\n'), ((1848, 1884), 'loguru.logger.info', 'logger.info', (['"""Using fastrun mode..."""'], {}), "('Using fastrun mode...')\n", (1859, 1884), False, 'from loguru import logger\n'), ((1893, 1953), 'megengine.functional.debug_param.set_execution_strategy', 'mge.functional.debug_param.set_execution_strategy', (['"""PROFILE"""'], {}), "('PROFILE')\n", (1942, 1953), True, 'import megengine as mge\n'), ((2713, 2738), 'os.path.exists', 'os.path.exists', (['args.file'], {}), '(args.file)\n', (2727, 2738), False, 'import os\n'), ((2890, 2940), 
'loguru.logger.warning', 'logger.warning', (['"""No GPU was found, testing on CPU"""'], {}), "('No GPU was found, testing on CPU')\n", (2904, 2940), False, 'from loguru import logger\n'), ((1166, 1193), 'os.path.basename', 'os.path.basename', (['args.file'], {}), '(args.file)\n', (1182, 1193), False, 'import os\n'), ((1598, 1613), 'megengine.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (1611, 1613), True, 'import megengine.distributed as dist\n'), ((1627, 1657), 'os.path.exists', 'os.path.exists', (['cfg.output_dir'], {}), '(cfg.output_dir)\n', (1641, 1657), False, 'import os\n'), ((2285, 2325), 'basecls.utils.registers.dataloaders.get', 'registers.dataloaders.get', (['cfg.data.name'], {}), '(cfg.data.name)\n', (2310, 2325), False, 'from basecls.utils import default_logging, registers, set_nccl_env, set_num_threads, setup_logger\n'), ((3009, 3030), 'megengine.distributed.launcher', 'dist.launcher', (['worker'], {}), '(worker)\n', (3022, 3030), True, 'import megengine.distributed as dist\n'), ((1058, 1073), 'megengine.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (1071, 1073), True, 'import megengine.distributed as dist\n')]
|
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
import argparse
import importlib
import os
import sys
import megengine as mge
import megengine.distributed as dist
from basecore.config import ConfigDict
from loguru import logger
from basecls.models import build_model, load_model, sync_model
from basecls.utils import registers, set_nccl_env, set_num_threads, setup_logger
def default_parser() -> argparse.ArgumentParser:
"""Build args parser for training script.
Returns:
The args parser.
"""
parser = argparse.ArgumentParser()
parser.add_argument("-f", "--file", type=str, help="training process description file")
parser.add_argument(
"--resume", action="store_true", help="resume training from saved checkpoint or not"
)
parser.add_argument(
"opts",
default=None,
help="Modify config options using the command-line",
nargs=argparse.REMAINDER,
)
return parser
@logger.catch
def worker(args: argparse.Namespace):
"""Worker function for training script.
Args:
args: args for training script.
"""
logger.info(f"Init process group for gpu{dist.get_rank()} done")
sys.path.append(os.path.dirname(args.file))
module_name = os.path.splitext(os.path.basename(args.file))[0]
current_network = importlib.import_module(module_name)
cfg = current_network.Cfg()
cfg.merge(args.opts)
cfg.resume = args.resume
if cfg.output_dir is None:
cfg.output_dir = f"./logs_{module_name}"
cfg.output_dir = os.path.abspath(cfg.output_dir)
cfg.set_mode("freeze")
if dist.get_rank() == 0 and not os.path.exists(cfg.output_dir):
os.makedirs(cfg.output_dir)
dist.group_barrier()
# FIXME: will hang in fork mode, however spawn mode meets other issues
# try:
# from clearml import Task
# if dist.get_rank() == 0:
# Task.current_task().connect_configuration(cfg)
# except Exception as e:
# logger.warning(e)
setup_logger(cfg.output_dir, "train_log.txt", to_loguru=True)
logger.info(f"args: {args}")
if cfg.fastrun:
logger.info("Using fastrun mode...")
mge.functional.debug_param.set_execution_strategy("PROFILE")
if cfg.dtr:
logger.info("Enabling DTR...")
mge.dtr.enable()
trainer = build(cfg)
trainer.train()
def build(cfg: ConfigDict):
"""Build function for training script.
Args:
cfg: config for training.
Returns:
A trainer.
"""
model = build_model(cfg)
if getattr(cfg, "weights", None) is not None:
load_model(model, cfg.weights, strict=False)
sync_model(model)
model.train()
logger.info(f"Using augments named {cfg.augments.name}")
augments = registers.augments.get(cfg.augments.name).build(cfg)
logger.info(f"Using dataloader named {cfg.data.name}")
dataloader = registers.dataloaders.get(cfg.data.name).build(cfg, True, augments)
logger.info(f"Using solver named {cfg.solver.name}")
solver = registers.solvers.get(cfg.solver.name).build(cfg, model)
logger.info(f"Using hooks named {cfg.hooks_name}")
hooks = registers.hooks.get(cfg.hooks_name).build(cfg)
logger.info(f"Using trainer named {cfg.trainer_name}")
TrainerClass = registers.trainers.get(cfg.trainer_name)
return TrainerClass(cfg, model, dataloader, solver, hooks=hooks)
def main():
"""Main function for training script."""
parser = default_parser()
args = parser.parse_args()
set_nccl_env()
set_num_threads()
# FIXME: will hang in fork mode, however spawn mode meets other issues
# try:
# import getpass
# from clearml import Task
# task_name = f"{getpass.getuser()}-{os.path.splitext(os.path.basename(args.file))[0]}"
# task = Task.init(project_name="basecls", task_name=task_name) # noqa: F841
# except Exception as e:
# logger.warning(e)
device_count = mge.device.get_device_count("gpu")
launcher = dist.launcher
if not os.path.exists(args.file):
raise ValueError("Description file does not exist")
if device_count == 0:
raise ValueError("Number of devices should be greater than 0")
elif device_count > 1 or os.environ.get("RLAUNCH_REPLICA_TOTAL", 0) > 1:
mp_worker = launcher(worker)
mp_worker(args)
else:
worker(args)
if __name__ == "__main__":
main()
|
[
"megengine.functional.debug_param.set_execution_strategy",
"megengine.device.get_device_count",
"megengine.distributed.group_barrier",
"megengine.dtr.enable",
"megengine.distributed.get_rank"
] |
[((565, 590), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (588, 590), False, 'import argparse\n'), ((1353, 1389), 'importlib.import_module', 'importlib.import_module', (['module_name'], {}), '(module_name)\n', (1376, 1389), False, 'import importlib\n'), ((1577, 1608), 'os.path.abspath', 'os.path.abspath', (['cfg.output_dir'], {}), '(cfg.output_dir)\n', (1592, 1608), False, 'import os\n'), ((1746, 1766), 'megengine.distributed.group_barrier', 'dist.group_barrier', ([], {}), '()\n', (1764, 1766), True, 'import megengine.distributed as dist\n'), ((2048, 2109), 'basecls.utils.setup_logger', 'setup_logger', (['cfg.output_dir', '"""train_log.txt"""'], {'to_loguru': '(True)'}), "(cfg.output_dir, 'train_log.txt', to_loguru=True)\n", (2060, 2109), False, 'from basecls.utils import registers, set_nccl_env, set_num_threads, setup_logger\n'), ((2114, 2142), 'loguru.logger.info', 'logger.info', (['f"""args: {args}"""'], {}), "(f'args: {args}')\n", (2125, 2142), False, 'from loguru import logger\n'), ((2576, 2592), 'basecls.models.build_model', 'build_model', (['cfg'], {}), '(cfg)\n', (2587, 2592), False, 'from basecls.models import build_model, load_model, sync_model\n'), ((2700, 2717), 'basecls.models.sync_model', 'sync_model', (['model'], {}), '(model)\n', (2710, 2717), False, 'from basecls.models import build_model, load_model, sync_model\n'), ((2741, 2797), 'loguru.logger.info', 'logger.info', (['f"""Using augments named {cfg.augments.name}"""'], {}), "(f'Using augments named {cfg.augments.name}')\n", (2752, 2797), False, 'from loguru import logger\n'), ((2870, 2924), 'loguru.logger.info', 'logger.info', (['f"""Using dataloader named {cfg.data.name}"""'], {}), "(f'Using dataloader named {cfg.data.name}')\n", (2881, 2924), False, 'from loguru import logger\n'), ((3014, 3066), 'loguru.logger.info', 'logger.info', (['f"""Using solver named {cfg.solver.name}"""'], {}), "(f'Using solver named {cfg.solver.name}')\n", (3025, 3066), False, 'from loguru import logger\n'), ((3141, 3191), 'loguru.logger.info', 'logger.info', (['f"""Using hooks named {cfg.hooks_name}"""'], {}), "(f'Using hooks named {cfg.hooks_name}')\n", (3152, 3191), False, 'from loguru import logger\n'), ((3256, 3310), 'loguru.logger.info', 'logger.info', (['f"""Using trainer named {cfg.trainer_name}"""'], {}), "(f'Using trainer named {cfg.trainer_name}')\n", (3267, 3310), False, 'from loguru import logger\n'), ((3330, 3370), 'basecls.utils.registers.trainers.get', 'registers.trainers.get', (['cfg.trainer_name'], {}), '(cfg.trainer_name)\n', (3352, 3370), False, 'from basecls.utils import registers, set_nccl_env, set_num_threads, setup_logger\n'), ((3565, 3579), 'basecls.utils.set_nccl_env', 'set_nccl_env', ([], {}), '()\n', (3577, 3579), False, 'from basecls.utils import registers, set_nccl_env, set_num_threads, setup_logger\n'), ((3584, 3601), 'basecls.utils.set_num_threads', 'set_num_threads', ([], {}), '()\n', (3599, 3601), False, 'from basecls.utils import registers, set_nccl_env, set_num_threads, setup_logger\n'), ((4009, 4043), 'megengine.device.get_device_count', 'mge.device.get_device_count', (['"""gpu"""'], {}), "('gpu')\n", (4036, 4043), True, 'import megengine as mge\n'), ((1236, 1262), 'os.path.dirname', 'os.path.dirname', (['args.file'], {}), '(args.file)\n', (1251, 1262), False, 'import os\n'), ((1714, 1741), 'os.makedirs', 'os.makedirs', (['cfg.output_dir'], {}), '(cfg.output_dir)\n', (1725, 1741), False, 'import os\n'), ((2172, 2208), 'loguru.logger.info', 'logger.info', (['"""Using fastrun 
mode..."""'], {}), "('Using fastrun mode...')\n", (2183, 2208), False, 'from loguru import logger\n'), ((2217, 2277), 'megengine.functional.debug_param.set_execution_strategy', 'mge.functional.debug_param.set_execution_strategy', (['"""PROFILE"""'], {}), "('PROFILE')\n", (2266, 2277), True, 'import megengine as mge\n'), ((2303, 2333), 'loguru.logger.info', 'logger.info', (['"""Enabling DTR..."""'], {}), "('Enabling DTR...')\n", (2314, 2333), False, 'from loguru import logger\n'), ((2342, 2358), 'megengine.dtr.enable', 'mge.dtr.enable', ([], {}), '()\n', (2356, 2358), True, 'import megengine as mge\n'), ((2651, 2695), 'basecls.models.load_model', 'load_model', (['model', 'cfg.weights'], {'strict': '(False)'}), '(model, cfg.weights, strict=False)\n', (2661, 2695), False, 'from basecls.models import build_model, load_model, sync_model\n'), ((4085, 4110), 'os.path.exists', 'os.path.exists', (['args.file'], {}), '(args.file)\n', (4099, 4110), False, 'import os\n'), ((1299, 1326), 'os.path.basename', 'os.path.basename', (['args.file'], {}), '(args.file)\n', (1315, 1326), False, 'import os\n'), ((1645, 1660), 'megengine.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (1658, 1660), True, 'import megengine.distributed as dist\n'), ((1674, 1704), 'os.path.exists', 'os.path.exists', (['cfg.output_dir'], {}), '(cfg.output_dir)\n', (1688, 1704), False, 'import os\n'), ((2813, 2854), 'basecls.utils.registers.augments.get', 'registers.augments.get', (['cfg.augments.name'], {}), '(cfg.augments.name)\n', (2835, 2854), False, 'from basecls.utils import registers, set_nccl_env, set_num_threads, setup_logger\n'), ((2942, 2982), 'basecls.utils.registers.dataloaders.get', 'registers.dataloaders.get', (['cfg.data.name'], {}), '(cfg.data.name)\n', (2967, 2982), False, 'from basecls.utils import registers, set_nccl_env, set_num_threads, setup_logger\n'), ((3080, 3118), 'basecls.utils.registers.solvers.get', 'registers.solvers.get', (['cfg.solver.name'], {}), '(cfg.solver.name)\n', (3101, 3118), False, 'from basecls.utils import registers, set_nccl_env, set_num_threads, setup_logger\n'), ((3204, 3239), 'basecls.utils.registers.hooks.get', 'registers.hooks.get', (['cfg.hooks_name'], {}), '(cfg.hooks_name)\n', (3223, 3239), False, 'from basecls.utils import registers, set_nccl_env, set_num_threads, setup_logger\n'), ((1191, 1206), 'megengine.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (1204, 1206), True, 'import megengine.distributed as dist\n'), ((4299, 4341), 'os.environ.get', 'os.environ.get', (['"""RLAUNCH_REPLICA_TOTAL"""', '(0)'], {}), "('RLAUNCH_REPLICA_TOTAL', 0)\n", (4313, 4341), False, 'import os\n')]
|
from typing import Optional, List
from sqlmodel import SQLModel, Field, Relationship
class SongBase(SQLModel):
name: str
artist: str
year: Optional[int] = None
class Song(SongBase, table=True):
id: int = Field(primary_key=True)
class SongRead(SongBase):
id: int
class SongCreate(SongBase):
pass
class Increment(SQLModel, table=True):
id: int = Field(primary_key=True)
# #############################################################################
class ListingBase(SQLModel):
url: str
class Listing(ListingBase, table=True):
__tablename__ = 'listings'
id: int = Field(primary_key=True)
images: List["Image"] = Relationship(back_populates="listing",
sa_relationship_kwargs={'lazy': 'selectin'})
class ListingRead(ListingBase):
    id: int
# #############################################################################
class ImageBase(SQLModel):
url: str
size_x: float
size_y: float
listing_id: Optional[int] = Field(default=None, foreign_key="listings.id")
class Image(ImageBase, table=True):
__tablename__ = 'images'
id: int = Field(primary_key=True)
listing: Optional[Listing] = Relationship(back_populates="images",
sa_relationship_kwargs={'lazy': 'selectin'})
class ImageRead(ImageBase):
id: int
class ImageReadWithListings(ImageRead):
listing: Optional[Listing] = None
class ListingReadWithImages(ListingRead):
images: List["ImageRead"] = []
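# --- Hedged usage sketch (added example, not part of the original module) ---
# Shows how the selectin-loaded Listing/Image relationship is typically used;
# the in-memory SQLite URL and sample values are assumptions.
if __name__ == "__main__":
    from sqlmodel import Session, create_engine, select

    engine = create_engine("sqlite://")
    SQLModel.metadata.create_all(engine)
    with Session(engine) as session:
        listing = Listing(url="https://example.com/listing/1")
        listing.images = [Image(url="https://example.com/img1.png", size_x=640, size_y=480)]
        session.add(listing)
        session.commit()
        loaded = session.exec(select(Listing)).one()
        # The images load eagerly thanks to lazy="selectin".
        print(ListingReadWithImages.from_orm(loaded))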
|
[
"sqlmodel.Relationship",
"sqlmodel.Field"
] |
[((224, 247), 'sqlmodel.Field', 'Field', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (229, 247), False, 'from sqlmodel import SQLModel, Field, Relationship\n'), ((379, 402), 'sqlmodel.Field', 'Field', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (384, 402), False, 'from sqlmodel import SQLModel, Field, Relationship\n'), ((617, 640), 'sqlmodel.Field', 'Field', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (622, 640), False, 'from sqlmodel import SQLModel, Field, Relationship\n'), ((669, 756), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""listing"""', 'sa_relationship_kwargs': "{'lazy': 'selectin'}"}), "(back_populates='listing', sa_relationship_kwargs={'lazy':\n 'selectin'})\n", (681, 756), False, 'from sqlmodel import SQLModel, Field, Relationship\n'), ((997, 1043), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'foreign_key': '"""listings.id"""'}), "(default=None, foreign_key='listings.id')\n", (1002, 1043), False, 'from sqlmodel import SQLModel, Field, Relationship\n'), ((1124, 1147), 'sqlmodel.Field', 'Field', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (1129, 1147), False, 'from sqlmodel import SQLModel, Field, Relationship\n'), ((1181, 1267), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""images"""', 'sa_relationship_kwargs': "{'lazy': 'selectin'}"}), "(back_populates='images', sa_relationship_kwargs={'lazy':\n 'selectin'})\n", (1193, 1267), False, 'from sqlmodel import SQLModel, Field, Relationship\n')]
|
from collections import OrderedDict
import numpy as np
import megengine.functional as F
import megengine.module as M
from megengine import Tensor
from megengine.core._imperative_rt.core2 import apply
from megengine.core.ops import builtin
from megengine.module import Module
from megengine.traced_module import TracedModule, enable_expr_checker, trace_module
from megengine.traced_module.expr import Apply, CallFunction, Constant
class MyModule1(M.Module):
def forward(self, x):
y = Tensor(x)
y += 1
x = x + 2
return x, y
class MyModule2(M.Module):
def forward(self, x):
y = Tensor([1, x, 1])
y += 1
x = x + 2
return x, y
class MyModule3(M.Module):
def __init__(self):
super().__init__()
self.modules = [
M.Elemwise("ADD"),
M.Elemwise("ADD"),
OrderedDict([("a", M.Elemwise("ADD")), ("b", M.Elemwise("ADD"))]),
M.Elemwise("RELU"),
M.Elemwise("RELU"),
]
def forward(self, a, b):
x = self.modules[0](a, b)
y = self.modules[1](a, b)
assert list(self.modules[2].keys()) == ["a", "b"]
for _, m in self.modules[2].items():
y = m(x, y)
for m in self.modules[3:]:
y = m(y)
return y
class MyModule4(M.Module):
def __init__(self):
super().__init__()
self.add = F.add
def forward(self, x, y):
return self.add(x, y)
def test_trace_module():
enable_expr_checker()
x = Tensor(1)
m1 = MyModule1()
tm1 = trace_module(m1, x)
m2 = MyModule2()
tm2 = trace_module(m2, x)
inp = Tensor(2)
gt = m1(inp)
output = tm1(inp)
for a, b in zip(output, gt):
np.testing.assert_equal(a.numpy(), b.numpy())
gt1 = m2(inp)
output1 = tm2(inp)
for a, b in zip(output1, gt1):
np.testing.assert_equal(a.numpy(), b.numpy())
a, b = Tensor(1), Tensor(2)
m3 = MyModule3()
gt = m3(a, b)
tm3 = trace_module(m3, a, b)
out = tm3(a, b)
np.testing.assert_equal(out.numpy(), gt.numpy())
assert isinstance(tm3.modules.__dict__["0"], M.Elemwise)
assert isinstance(tm3.modules.__dict__["2"], TracedModule)
assert isinstance(tm3.modules.__dict__["2"].a, M.Elemwise)
assert isinstance(tm3.modules.__dict__["3"], M.Elemwise)
m4 = MyModule4()
tm4 = trace_module(m4, a, b)
np.testing.assert_equal(tm4(a, b).numpy(), 3)
np.testing.assert_equal(tm4(a, y=b).numpy(), 3)
np.testing.assert_equal(tm4(x=a, y=b).numpy(), 3)
tm4 = trace_module(m4, a, y=b)
np.testing.assert_equal(tm4(a, b).numpy(), 3)
np.testing.assert_equal(tm4(a, y=b).numpy(), 3)
np.testing.assert_equal(tm4(x=a, y=b).numpy(), 3)
tm4 = trace_module(m4, x=a, y=b)
np.testing.assert_equal(tm4(a, b).numpy(), 3)
np.testing.assert_equal(tm4(a, y=b).numpy(), 3)
np.testing.assert_equal(tm4(x=a, y=b).numpy(), 3)
tm5 = trace_module(tm4, a, b)
np.testing.assert_equal(tm5(a, b).numpy(), 3)
np.testing.assert_equal(tm5(a, y=b).numpy(), 3)
np.testing.assert_equal(tm5(x=a, y=b).numpy(), 3)
tm5 = trace_module(tm4, a, y=b)
np.testing.assert_equal(tm5(a, b).numpy(), 3)
np.testing.assert_equal(tm5(a, y=b).numpy(), 3)
np.testing.assert_equal(tm5(x=a, y=b).numpy(), 3)
tm5 = trace_module(tm4, x=a, y=b)
np.testing.assert_equal(tm5(a, b).numpy(), 3)
np.testing.assert_equal(tm5(a, y=b).numpy(), 3)
np.testing.assert_equal(tm5(x=a, y=b).numpy(), 3)
assert len(tm4.graph._exprs) == 1
assert isinstance(tm4.graph._exprs[0], CallFunction)
class MyModule5(Module):
def __init__(self):
super().__init__()
self.m1 = tm4
def forward(self, x, y):
return self.m1(x, y)
tm6 = trace_module(MyModule5(), a, b)
assert tm6.m1.argspec is None
assert tm6.m1._is_top is False
def test_trace_module_2():
class Model(M.Module):
def __init__(self):
super().__init__()
def forward(self, x):
out = x.shape
out = apply(builtin.Elemwise(mode="ADD"), out, Tensor(1))
return out
traced_model = trace_module(Model(), Tensor(([1,])))
assert isinstance(traced_model.graph._exprs[0], Apply) and isinstance(
traced_model.graph._exprs[0].opdef, builtin.GetVarShape
)
assert isinstance(traced_model.graph._exprs[1], Constant)
assert isinstance(traced_model.graph._exprs[2], Apply) and isinstance(
traced_model.graph._exprs[2].opdef, builtin.Elemwise
)
assert int(traced_model(Tensor([1, 2]))[0]) == 3
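# --- Hedged usage sketch (added example, not part of the original tests) ---
# A traced module prints its IR and can be serialized with megengine.save; the
# file name below is an assumption.
#
#     import megengine as mge
#     m = MyModule4()
#     tm = trace_module(m, Tensor(1), Tensor(2))
#     print(tm.graph)                       # the traced expressions
#     mge.save(tm, "traced_mymodule4.pkl")
#     tm_loaded = mge.load("traced_mymodule4.pkl")
#     np.testing.assert_equal(tm_loaded(Tensor(1), Tensor(2)).numpy(), 3)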
|
[
"megengine.traced_module.enable_expr_checker",
"megengine.Tensor",
"megengine.module.Elemwise",
"megengine.core.ops.builtin.Elemwise",
"megengine.traced_module.trace_module"
] |
[((1514, 1535), 'megengine.traced_module.enable_expr_checker', 'enable_expr_checker', ([], {}), '()\n', (1533, 1535), False, 'from megengine.traced_module import TracedModule, enable_expr_checker, trace_module\n'), ((1544, 1553), 'megengine.Tensor', 'Tensor', (['(1)'], {}), '(1)\n', (1550, 1553), False, 'from megengine import Tensor\n'), ((1585, 1604), 'megengine.traced_module.trace_module', 'trace_module', (['m1', 'x'], {}), '(m1, x)\n', (1597, 1604), False, 'from megengine.traced_module import TracedModule, enable_expr_checker, trace_module\n'), ((1637, 1656), 'megengine.traced_module.trace_module', 'trace_module', (['m2', 'x'], {}), '(m2, x)\n', (1649, 1656), False, 'from megengine.traced_module import TracedModule, enable_expr_checker, trace_module\n'), ((1667, 1676), 'megengine.Tensor', 'Tensor', (['(2)'], {}), '(2)\n', (1673, 1676), False, 'from megengine import Tensor\n'), ((2017, 2039), 'megengine.traced_module.trace_module', 'trace_module', (['m3', 'a', 'b'], {}), '(m3, a, b)\n', (2029, 2039), False, 'from megengine.traced_module import TracedModule, enable_expr_checker, trace_module\n'), ((2393, 2415), 'megengine.traced_module.trace_module', 'trace_module', (['m4', 'a', 'b'], {}), '(m4, a, b)\n', (2405, 2415), False, 'from megengine.traced_module import TracedModule, enable_expr_checker, trace_module\n'), ((2583, 2607), 'megengine.traced_module.trace_module', 'trace_module', (['m4', 'a'], {'y': 'b'}), '(m4, a, y=b)\n', (2595, 2607), False, 'from megengine.traced_module import TracedModule, enable_expr_checker, trace_module\n'), ((2775, 2801), 'megengine.traced_module.trace_module', 'trace_module', (['m4'], {'x': 'a', 'y': 'b'}), '(m4, x=a, y=b)\n', (2787, 2801), False, 'from megengine.traced_module import TracedModule, enable_expr_checker, trace_module\n'), ((2969, 2992), 'megengine.traced_module.trace_module', 'trace_module', (['tm4', 'a', 'b'], {}), '(tm4, a, b)\n', (2981, 2992), False, 'from megengine.traced_module import TracedModule, enable_expr_checker, trace_module\n'), ((3160, 3185), 'megengine.traced_module.trace_module', 'trace_module', (['tm4', 'a'], {'y': 'b'}), '(tm4, a, y=b)\n', (3172, 3185), False, 'from megengine.traced_module import TracedModule, enable_expr_checker, trace_module\n'), ((3353, 3380), 'megengine.traced_module.trace_module', 'trace_module', (['tm4'], {'x': 'a', 'y': 'b'}), '(tm4, x=a, y=b)\n', (3365, 3380), False, 'from megengine.traced_module import TracedModule, enable_expr_checker, trace_module\n'), ((499, 508), 'megengine.Tensor', 'Tensor', (['x'], {}), '(x)\n', (505, 508), False, 'from megengine import Tensor\n'), ((629, 646), 'megengine.Tensor', 'Tensor', (['[1, x, 1]'], {}), '([1, x, 1])\n', (635, 646), False, 'from megengine import Tensor\n'), ((1947, 1956), 'megengine.Tensor', 'Tensor', (['(1)'], {}), '(1)\n', (1953, 1956), False, 'from megengine import Tensor\n'), ((1958, 1967), 'megengine.Tensor', 'Tensor', (['(2)'], {}), '(2)\n', (1964, 1967), False, 'from megengine import Tensor\n'), ((4234, 4245), 'megengine.Tensor', 'Tensor', (['[1]'], {}), '([1])\n', (4240, 4245), False, 'from megengine import Tensor\n'), ((817, 834), 'megengine.module.Elemwise', 'M.Elemwise', (['"""ADD"""'], {}), "('ADD')\n", (827, 834), True, 'import megengine.module as M\n'), ((848, 865), 'megengine.module.Elemwise', 'M.Elemwise', (['"""ADD"""'], {}), "('ADD')\n", (858, 865), True, 'import megengine.module as M\n'), ((958, 976), 'megengine.module.Elemwise', 'M.Elemwise', (['"""RELU"""'], {}), "('RELU')\n", (968, 976), True, 'import megengine.module as M\n'), 
((990, 1008), 'megengine.module.Elemwise', 'M.Elemwise', (['"""RELU"""'], {}), "('RELU')\n", (1000, 1008), True, 'import megengine.module as M\n'), ((4123, 4151), 'megengine.core.ops.builtin.Elemwise', 'builtin.Elemwise', ([], {'mode': '"""ADD"""'}), "(mode='ADD')\n", (4139, 4151), False, 'from megengine.core.ops import builtin\n'), ((4158, 4167), 'megengine.Tensor', 'Tensor', (['(1)'], {}), '(1)\n', (4164, 4167), False, 'from megengine import Tensor\n'), ((4628, 4642), 'megengine.Tensor', 'Tensor', (['[1, 2]'], {}), '([1, 2])\n', (4634, 4642), False, 'from megengine import Tensor\n'), ((898, 915), 'megengine.module.Elemwise', 'M.Elemwise', (['"""ADD"""'], {}), "('ADD')\n", (908, 915), True, 'import megengine.module as M\n'), ((924, 941), 'megengine.module.Elemwise', 'M.Elemwise', (['"""ADD"""'], {}), "('ADD')\n", (934, 941), True, 'import megengine.module as M\n')]
|
"""
Utility functions based on igakit.
"""
import numpy as nm
from sfepy.base.base import Struct
from sfepy.discrete.fem import Mesh
from sfepy.mesh.mesh_generators import get_tensor_product_conn
def create_linear_fe_mesh(nurbs, pars=None):
"""
    Convert a NURBS object into an nD-linear tensor product FE mesh.
Parameters
----------
nurbs : igakit.nurbs.NURBS instance
The NURBS object.
pars : sequence of array, optional
The values of parameters in each parametric dimension. If not given,
the values are set so that the resulting mesh has the same number of
vertices as the number of control points/basis functions of the NURBS
object.
Returns
-------
coors : array
The coordinates of mesh vertices.
conn : array
The vertex connectivity array.
desc : str
The cell kind.
"""
knots = nurbs.knots
shape = nurbs.weights.shape
if pars is None:
pars = []
for ii, kv in enumerate(knots):
par = nm.linspace(kv[0], kv[-1], shape[ii])
pars.append(par)
coors = nurbs(*pars)
coors.shape = (-1, coors.shape[-1])
conn, desc = get_tensor_product_conn([len(ii) for ii in pars])
if (coors[:, -1] == 0.0).all():
coors = coors[:, :-1]
return coors, conn, desc
def create_mesh_and_output(nurbs, pars=None, **kwargs):
"""
    Create an nD-linear tensor product FE mesh using
:func:`create_linear_fe_mesh()`, evaluate field variables given as keyword
arguments in the mesh vertices and create a dictionary of output data
usable by Mesh.write().
Parameters
----------
nurbs : igakit.nurbs.NURBS instance
The NURBS object.
pars : sequence of array, optional
The values of parameters in each parametric dimension. If not given,
the values are set so that the resulting mesh has the same number of
vertices as the number of control points/basis functions of the NURBS
object.
**kwargs : kwargs
The field variables as keyword arguments. Their names serve as keys in
the output dictionary.
Returns
-------
mesh : Mesh instance
The finite element mesh.
out : dict
The output dictionary.
"""
coors, conn, desc = create_linear_fe_mesh(nurbs, pars)
mat_id = nm.zeros(conn.shape[0], dtype=nm.int32)
mesh = Mesh.from_data('nurbs', coors, None, [conn], [mat_id], [desc])
out = {}
    for key, variable in kwargs.items():
if variable.ndim == 2:
nc = variable.shape[1]
field = variable.reshape(nurbs.weights.shape + (nc,))
else:
field = variable.reshape(nurbs.weights.shape)
nc = 1
vals = nurbs.evaluate(field, *pars)
out[key] = Struct(name='output_data', mode='vertex',
data=vals.reshape((-1, nc)))
return mesh, out
def save_basis(nurbs, pars):
"""
Save a NURBS object basis on a FE mesh corresponding to the given
parametrization in VTK files.
Parameters
----------
nurbs : igakit.nurbs.NURBS instance
The NURBS object.
    pars : sequence of array
The values of parameters in each parametric dimension.
"""
coors, conn, desc = create_linear_fe_mesh(nurbs, pars)
mat_id = nm.zeros(conn.shape[0], dtype=nm.int32)
mesh = Mesh.from_data('nurbs', coors, None, [conn], [mat_id], [desc])
n_dof = nurbs.weights.ravel().shape[0]
variable = nm.zeros(n_dof, dtype=nm.float64)
field = variable.reshape(nurbs.weights.shape)
    for ic in range(n_dof):
variable[ic - 1] = 0.0
variable[ic] = 1.0
vals = nurbs.evaluate(field, *pars).reshape((-1))
out = {}
out['bf'] = Struct(name='output_data', mode='vertex',
data=vals[:, None])
mesh.write('iga_basis_%03d.vtk' % ic, io='auto', out=out)
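# --- Hedged usage sketch (added example, not part of the original module) ---
# Builds a simple bilinear NURBS patch and converts it to a linear FE mesh; the
# igakit NURBS construction below is illustrative and assumes igakit is installed.
#
#     from igakit.nurbs import NURBS
#     knots = [[0, 0, 1, 1], [0, 0, 1, 1]]          # open knot vectors, degree 1
#     control = nm.zeros((2, 2, 4))
#     control[..., 0] = [[0., 0.], [1., 1.]]        # x
#     control[..., 1] = [[0., 1.], [0., 1.]]        # y
#     control[..., 3] = 1.0                         # homogeneous weights
#     nurbs = NURBS(knots, control)
#     coors, conn, desc = create_linear_fe_mesh(nurbs)
#     mat_id = nm.zeros(conn.shape[0], dtype=nm.int32)
#     Mesh.from_data('patch', coors, None, [conn], [mat_id], [desc]).write('patch.vtk', io='auto')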
|
[
"sfepy.base.base.Struct",
"sfepy.discrete.fem.Mesh.from_data"
] |
[((2365, 2404), 'numpy.zeros', 'nm.zeros', (['conn.shape[0]'], {'dtype': 'nm.int32'}), '(conn.shape[0], dtype=nm.int32)\n', (2373, 2404), True, 'import numpy as nm\n'), ((2416, 2478), 'sfepy.discrete.fem.Mesh.from_data', 'Mesh.from_data', (['"""nurbs"""', 'coors', 'None', '[conn]', '[mat_id]', '[desc]'], {}), "('nurbs', coors, None, [conn], [mat_id], [desc])\n", (2430, 2478), False, 'from sfepy.discrete.fem import Mesh\n'), ((3366, 3405), 'numpy.zeros', 'nm.zeros', (['conn.shape[0]'], {'dtype': 'nm.int32'}), '(conn.shape[0], dtype=nm.int32)\n', (3374, 3405), True, 'import numpy as nm\n'), ((3417, 3479), 'sfepy.discrete.fem.Mesh.from_data', 'Mesh.from_data', (['"""nurbs"""', 'coors', 'None', '[conn]', '[mat_id]', '[desc]'], {}), "('nurbs', coors, None, [conn], [mat_id], [desc])\n", (3431, 3479), False, 'from sfepy.discrete.fem import Mesh\n'), ((3539, 3572), 'numpy.zeros', 'nm.zeros', (['n_dof'], {'dtype': 'nm.float64'}), '(n_dof, dtype=nm.float64)\n', (3547, 3572), True, 'import numpy as nm\n'), ((3806, 3867), 'sfepy.base.base.Struct', 'Struct', ([], {'name': '"""output_data"""', 'mode': '"""vertex"""', 'data': 'vals[:, None]'}), "(name='output_data', mode='vertex', data=vals[:, None])\n", (3812, 3867), False, 'from sfepy.base.base import Struct\n'), ((1044, 1081), 'numpy.linspace', 'nm.linspace', (['kv[0]', 'kv[-1]', 'shape[ii]'], {}), '(kv[0], kv[-1], shape[ii])\n', (1055, 1081), True, 'import numpy as nm\n')]
|
from fastapi.encoders import jsonable_encoder
from fastapi.testclient import TestClient
from sqlmodel import Session, select
from model.item import Item
TEST_ITEM = {
"name": "aasif126",
"sku": "AASI126",
"metric": "m",
"cost": 2200
}
def test_get_items_returns_no_item_when_table_is_empty(client: TestClient):
response = client.get("/item/get_all/")
assert response.status_code == 200
assert len(response.json()) == 0
def test_get_items_returns_existing_items(client: TestClient, session: Session, item: Item):
item_2 = Item(**TEST_ITEM)
session.add(item_2)
session.commit()
session.refresh(item_2)
item_ids = [item.id, item_2.id]
response = client.get("/item/get_all/")
response_ids = list(map(lambda _item: _item['id'], response.json()))
assert response.status_code == 200
assert item_ids == response_ids
def test_get_items_csv_returns_csv(client: TestClient, session: Session, item: Item):
response = client.get("/item/get_all.csv")
assert response.status_code == 200
def test_get_items_csv_raises_error_for_when_no_items_present(
client: TestClient, session: Session):
response = client.get("/item/get_all.csv")
assert response.status_code == 404
def test_add_item_returns_inserted_row(client: TestClient, session: Session):
response = client.post("/item/", json=TEST_ITEM)
assert response.status_code == 200
item = Item(**response.json())
row = session.exec(
select(Item).where(Item.id == item.id)).one()
assert row == item
def test_add_item_returns_error_for_invalid_body(
client: TestClient, session: Session
):
data = {
"name": "aasif125",
"sku": "AASI125",
}
response = client.post("/item/", json=data)
assert response.status_code == 422
def test_add_item_returns_error_existing_item(
client: TestClient,
session: Session,
item: Item
):
response = client.post("/item/", json=jsonable_encoder(item))
assert response.status_code == 412
def test_edit_item_returns_error_for_invalid_item_id(
client: TestClient, session: Session
):
response = client.put("/item/1", json=TEST_ITEM)
assert response.status_code == 404
def test_edit_item_returns_updated_item(
client: TestClient, session: Session,
item: Item
):
item.cost = 2200
response = client.put("/item/1", data=item.json())
assert response.status_code == 200
assert response.json()['cost'] == item.cost
def test_delete_item_returns_error_for_invalid_item_id(
client: TestClient, session: Session
):
response = client.delete("/item/1")
assert response.status_code == 404
def test_delete_item_returns_success(
client: TestClient, session: Session,
item: Item
):
response = client.delete("/item/1")
assert response.status_code == 200
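# --- Hedged sketch of the fixtures these tests rely on (not part of this file) ---
# A typical conftest.py wires an in-memory SQLModel engine into the app under
# test; the app import path and session dependency name are assumptions.
#
#     import pytest
#     from fastapi.testclient import TestClient
#     from sqlmodel import SQLModel, Session, create_engine
#     from sqlmodel.pool import StaticPool
#     from main import app, get_session          # assumed application module
#
#     @pytest.fixture
#     def session():
#         engine = create_engine("sqlite://", poolclass=StaticPool,
#                                connect_args={"check_same_thread": False})
#         SQLModel.metadata.create_all(engine)
#         with Session(engine) as session:
#             yield session
#
#     @pytest.fixture
#     def client(session):
#         app.dependency_overrides[get_session] = lambda: session
#         yield TestClient(app)
#         app.dependency_overrides.clear()
#
#     @pytest.fixture
#     def item(session):
#         item = Item(name="aasif125", sku="AASI125", metric="m", cost=1100)
#         session.add(item)
#         session.commit()
#         session.refresh(item)
#         return item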
|
[
"sqlmodel.select"
] |
[((559, 576), 'model.item.Item', 'Item', ([], {}), '(**TEST_ITEM)\n', (563, 576), False, 'from model.item import Item\n'), ((1989, 2011), 'fastapi.encoders.jsonable_encoder', 'jsonable_encoder', (['item'], {}), '(item)\n', (2005, 2011), False, 'from fastapi.encoders import jsonable_encoder\n'), ((1491, 1503), 'sqlmodel.select', 'select', (['Item'], {}), '(Item)\n', (1497, 1503), False, 'from sqlmodel import Session, select\n')]
|
import uuid
from datetime import datetime, timedelta, timezone
from typing import AsyncGenerator
import pytest
from pydantic import UUID4
from sqlalchemy import exc
from sqlalchemy.ext.asyncio import AsyncSession, create_async_engine
from sqlalchemy.orm import sessionmaker
from sqlmodel import Session, SQLModel, create_engine
from fastapi_users_db_sqlmodel import SQLModelUserDatabase, SQLModelUserDatabaseAsync
from fastapi_users_db_sqlmodel.access_token import (
SQLModelAccessTokenDatabase,
SQLModelAccessTokenDatabaseAsync,
SQLModelBaseAccessToken,
)
from tests.conftest import UserDB
class AccessToken(SQLModelBaseAccessToken, table=True):
pass
@pytest.fixture
def user_id() -> UUID4:
return uuid.UUID("a9089e5d-2642-406d-a7c0-cbc641aca0ec")
async def init_sync_session(url: str) -> AsyncGenerator[Session, None]:
engine = create_engine(url, connect_args={"check_same_thread": False})
SQLModel.metadata.create_all(engine)
with Session(engine) as session:
yield session
SQLModel.metadata.drop_all(engine)
async def init_async_session(url: str) -> AsyncGenerator[AsyncSession, None]:
engine = create_async_engine(url, connect_args={"check_same_thread": False})
make_session = sessionmaker(engine, class_=AsyncSession, expire_on_commit=False)
async with engine.begin() as conn:
await conn.run_sync(SQLModel.metadata.create_all)
async with make_session() as session:
yield session
await conn.run_sync(SQLModel.metadata.drop_all)
@pytest.fixture(
params=[
(
init_sync_session,
"sqlite:///./test-sqlmodel-access-token.db",
SQLModelAccessTokenDatabase,
SQLModelUserDatabase,
),
(
init_async_session,
"sqlite+aiosqlite:///./test-sqlmodel-access-token.db",
SQLModelAccessTokenDatabaseAsync,
SQLModelUserDatabaseAsync,
),
],
ids=["sync", "async"],
)
async def sqlmodel_access_token_db(
request, user_id: UUID4
) -> AsyncGenerator[SQLModelAccessTokenDatabase, None]:
create_session = request.param[0]
database_url = request.param[1]
access_token_database_class = request.param[2]
user_database_class = request.param[3]
async for session in create_session(database_url):
user = UserDB(
id=user_id, email="<EMAIL>", hashed_password="<PASSWORD>"
)
user_db = user_database_class(UserDB, session)
await user_db.create(user)
yield access_token_database_class(AccessToken, session)
@pytest.mark.asyncio
@pytest.mark.db
async def test_queries(
sqlmodel_access_token_db: SQLModelAccessTokenDatabase[AccessToken],
user_id: UUID4,
):
access_token = AccessToken(token="TOKEN", user_id=user_id)
# Create
access_token_db = await sqlmodel_access_token_db.create(access_token)
assert access_token_db.token == "TOKEN"
assert access_token_db.user_id == user_id
# Update
access_token_db.created_at = datetime.now(timezone.utc)
await sqlmodel_access_token_db.update(access_token_db)
# Get by token
access_token_by_token = await sqlmodel_access_token_db.get_by_token(
access_token_db.token
)
assert access_token_by_token is not None
# Get by token expired
access_token_by_token = await sqlmodel_access_token_db.get_by_token(
access_token_db.token, max_age=datetime.now(timezone.utc) + timedelta(hours=1)
)
assert access_token_by_token is None
# Get by token not expired
access_token_by_token = await sqlmodel_access_token_db.get_by_token(
access_token_db.token, max_age=datetime.now(timezone.utc) - timedelta(hours=1)
)
assert access_token_by_token is not None
# Get by token unknown
access_token_by_token = await sqlmodel_access_token_db.get_by_token(
"NOT_EXISTING_TOKEN"
)
assert access_token_by_token is None
# Delete token
await sqlmodel_access_token_db.delete(access_token_db)
deleted_access_token = await sqlmodel_access_token_db.get_by_token(
access_token_db.token
)
assert deleted_access_token is None
@pytest.mark.asyncio
@pytest.mark.db
async def test_insert_existing_token(
sqlmodel_access_token_db: SQLModelAccessTokenDatabase[AccessToken], user_id: UUID4
):
access_token = AccessToken(token="TOKEN", user_id=user_id)
await sqlmodel_access_token_db.create(access_token)
with pytest.raises(exc.IntegrityError):
await sqlmodel_access_token_db.create(
AccessToken(token="TOKEN", user_id=user_id)
)
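# --- Hedged usage sketch (added example, not part of the original tests) ---
# In an application the token database is usually exposed as a FastAPI
# dependency; ``get_async_session`` is an assumed session dependency.
#
#     from fastapi import Depends
#
#     async def get_access_token_db(session=Depends(get_async_session)):
#         yield SQLModelAccessTokenDatabaseAsync(AccessToken, session)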
|
[
"sqlmodel.SQLModel.metadata.create_all",
"sqlmodel.SQLModel.metadata.drop_all",
"sqlmodel.Session",
"sqlmodel.create_engine"
] |
[((1537, 1859), 'pytest.fixture', 'pytest.fixture', ([], {'params': "[(init_sync_session, 'sqlite:///./test-sqlmodel-access-token.db',\n SQLModelAccessTokenDatabase, SQLModelUserDatabase), (init_async_session,\n 'sqlite+aiosqlite:///./test-sqlmodel-access-token.db',\n SQLModelAccessTokenDatabaseAsync, SQLModelUserDatabaseAsync)]", 'ids': "['sync', 'async']"}), "(params=[(init_sync_session,\n 'sqlite:///./test-sqlmodel-access-token.db',\n SQLModelAccessTokenDatabase, SQLModelUserDatabase), (init_async_session,\n 'sqlite+aiosqlite:///./test-sqlmodel-access-token.db',\n SQLModelAccessTokenDatabaseAsync, SQLModelUserDatabaseAsync)], ids=[\n 'sync', 'async'])\n", (1551, 1859), False, 'import pytest\n'), ((725, 774), 'uuid.UUID', 'uuid.UUID', (['"""a9089e5d-2642-406d-a7c0-cbc641aca0ec"""'], {}), "('a9089e5d-2642-406d-a7c0-cbc641aca0ec')\n", (734, 774), False, 'import uuid\n'), ((862, 923), 'sqlmodel.create_engine', 'create_engine', (['url'], {'connect_args': "{'check_same_thread': False}"}), "(url, connect_args={'check_same_thread': False})\n", (875, 923), False, 'from sqlmodel import Session, SQLModel, create_engine\n'), ((928, 964), 'sqlmodel.SQLModel.metadata.create_all', 'SQLModel.metadata.create_all', (['engine'], {}), '(engine)\n', (956, 964), False, 'from sqlmodel import Session, SQLModel, create_engine\n'), ((1028, 1062), 'sqlmodel.SQLModel.metadata.drop_all', 'SQLModel.metadata.drop_all', (['engine'], {}), '(engine)\n', (1054, 1062), False, 'from sqlmodel import Session, SQLModel, create_engine\n'), ((1156, 1223), 'sqlalchemy.ext.asyncio.create_async_engine', 'create_async_engine', (['url'], {'connect_args': "{'check_same_thread': False}"}), "(url, connect_args={'check_same_thread': False})\n", (1175, 1223), False, 'from sqlalchemy.ext.asyncio import AsyncSession, create_async_engine\n'), ((1243, 1308), 'sqlalchemy.orm.sessionmaker', 'sessionmaker', (['engine'], {'class_': 'AsyncSession', 'expire_on_commit': '(False)'}), '(engine, class_=AsyncSession, expire_on_commit=False)\n', (1255, 1308), False, 'from sqlalchemy.orm import sessionmaker\n'), ((3037, 3063), 'datetime.datetime.now', 'datetime.now', (['timezone.utc'], {}), '(timezone.utc)\n', (3049, 3063), False, 'from datetime import datetime, timedelta, timezone\n'), ((974, 989), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (981, 989), False, 'from sqlmodel import Session, SQLModel, create_engine\n'), ((2349, 2414), 'tests.conftest.UserDB', 'UserDB', ([], {'id': 'user_id', 'email': '"""<EMAIL>"""', 'hashed_password': '"""<PASSWORD>"""'}), "(id=user_id, email='<EMAIL>', hashed_password='<PASSWORD>')\n", (2355, 2414), False, 'from tests.conftest import UserDB\n'), ((4475, 4508), 'pytest.raises', 'pytest.raises', (['exc.IntegrityError'], {}), '(exc.IntegrityError)\n', (4488, 4508), False, 'import pytest\n'), ((3437, 3463), 'datetime.datetime.now', 'datetime.now', (['timezone.utc'], {}), '(timezone.utc)\n', (3449, 3463), False, 'from datetime import datetime, timedelta, timezone\n'), ((3466, 3484), 'datetime.timedelta', 'timedelta', ([], {'hours': '(1)'}), '(hours=1)\n', (3475, 3484), False, 'from datetime import datetime, timedelta, timezone\n'), ((3676, 3702), 'datetime.datetime.now', 'datetime.now', (['timezone.utc'], {}), '(timezone.utc)\n', (3688, 3702), False, 'from datetime import datetime, timedelta, timezone\n'), ((3705, 3723), 'datetime.timedelta', 'timedelta', ([], {'hours': '(1)'}), '(hours=1)\n', (3714, 3723), False, 'from datetime import datetime, timedelta, timezone\n')]
|
"""
DAG related functions.
"""
import operator
from collections import defaultdict
from io import StringIO
from typing import Any, DefaultDict, Dict, List, Optional, Set
import asciidag.graph
import asciidag.node
from sqlmodel import Session, select
from sqloxide import parse_sql
from datajunction.constants import DJ_DATABASE_ID
from datajunction.models.database import Database
from datajunction.models.node import Node
from datajunction.sql.parse import find_nodes_by_key
from datajunction.typing import ParseTree
def render_dag(dependencies: Dict[str, Set[str]], **kwargs: Any) -> str:
"""
Render the DAG of dependencies.
"""
out = StringIO()
graph = asciidag.graph.Graph(out, **kwargs)
asciidag_nodes: Dict[str, asciidag.node.Node] = {}
tips = sorted(
[build_asciidag(name, dependencies, asciidag_nodes) for name in dependencies],
key=lambda n: n.item,
)
graph.show_nodes(tips)
out.seek(0)
return out.getvalue()
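# Hedged example (node names are illustrative): render_dag({"core.comments": set(),
# "core.num_comments": {"core.comments"}}) returns an ASCII graph with the
# dependent node drawn above its parent.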
def build_asciidag(
name: str,
dependencies: Dict[str, Set[str]],
asciidag_nodes: Dict[str, asciidag.node.Node],
) -> asciidag.node.Node:
"""
Build the nodes for ``asciidag``.
"""
if name in asciidag_nodes:
asciidag_node = asciidag_nodes[name]
else:
asciidag_node = asciidag.node.Node(name)
asciidag_nodes[name] = asciidag_node
asciidag_node.parents = sorted(
[
build_asciidag(child, dependencies, asciidag_nodes)
for child in dependencies[name]
],
key=lambda n: n.item,
)
return asciidag_node
def get_computable_databases(
node: Node,
columns: Optional[Set[str]] = None,
) -> Set[Database]:
"""
Return all the databases where a given node can be computed.
This takes into consideration the node expression, since some of the columns might
not be present in all databases.
"""
if columns is None:
columns = {column.name for column in node.columns}
# add all the databases where the node is explicitly materialized
tables = [
table
for table in node.tables
if columns <= {column.name for column in table.columns}
]
databases = {table.database for table in tables}
# add all the databases that are common between the parents and match all the columns
parent_columns = get_referenced_columns_from_sql(node.expression, node.parents)
if node.parents:
parent_databases = [
get_computable_databases(parent, parent_columns[parent.name])
for parent in node.parents
]
databases |= set.intersection(*parent_databases)
return databases
def get_database_for_nodes(
session: Session,
nodes: List[Node],
node_columns: Dict[str, Set[str]],
database_id: Optional[int] = None,
) -> Database:
"""
    Given a list of nodes, return the best database in which to compute them.
When no nodes are passed, the database with the lowest cost is returned.
"""
if nodes:
databases = set.intersection(
*[get_computable_databases(node, node_columns[node.name]) for node in nodes]
)
else:
databases = session.exec(
select(Database).where(Database.id != DJ_DATABASE_ID),
).all()
if not databases:
raise Exception("No valid database was found")
if database_id is not None:
for database in databases:
if database.id == database_id:
return database
raise Exception(f"Database ID {database_id} is not valid")
return sorted(databases, key=operator.attrgetter("cost"))[0]
def get_referenced_columns_from_sql(
sql: Optional[str],
parents: List[Node],
) -> DefaultDict[str, Set[str]]:
"""
Given a SQL expression, return the referenced columns.
Referenced columns are a dictionary mapping parent name to column name(s).
"""
if not sql:
return defaultdict(set)
tree = parse_sql(sql, dialect="ansi")
return get_referenced_columns_from_tree(tree, parents)
def get_referenced_columns_from_tree(
tree: ParseTree,
parents: List[Node],
) -> DefaultDict[str, Set[str]]:
"""
Return the columns referenced in parents given a parse tree.
"""
referenced_columns: DefaultDict[str, Set[str]] = defaultdict(set)
parent_columns = {
parent.name: {column.name for column in parent.columns} for parent in parents
}
# compound identifiers are fully qualified
for compound_identifier in find_nodes_by_key(tree, "CompoundIdentifier"):
parent = ".".join(part["value"] for part in compound_identifier[:-1])
column = compound_identifier[-1]["value"]
referenced_columns[parent].add(column)
    # for regular identifiers we need to figure out which parent the column belongs to
for identifier in find_nodes_by_key(tree, "Identifier"):
column = identifier["value"]
candidates = [
parent for parent, columns in parent_columns.items() if column in columns
]
if not candidates:
raise Exception(f"Column {column} not found in any parent")
if len(candidates) > 1:
raise Exception(f"Column {column} is ambiguous")
parent = candidates[0]
referenced_columns[parent].add(column)
return referenced_columns
def get_dimensions(node: Node) -> List[str]:
"""
Return the available dimensions in a given node.
"""
dimensions = []
for parent in node.parents:
for column in parent.columns:
dimensions.append(f"{parent.name}.{column.name}")
if column.dimension:
for dimension_column in column.dimension.columns:
dimensions.append(
f"{column.dimension.name}.{dimension_column.name}",
)
return sorted(dimensions)
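# Hedged example (node and column names are illustrative): if ``parents`` holds a
# single Node named "core.comments" with columns {"id", "user_id"}, then
#     get_referenced_columns_from_sql("SELECT COUNT(id) FROM core.comments", parents)
# maps each referenced column to the parent that defines it, i.e.
# {"core.comments": {"id"}}.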
|
[
"sqlmodel.select"
] |
[((658, 668), 'io.StringIO', 'StringIO', ([], {}), '()\n', (666, 668), False, 'from io import StringIO\n'), ((3980, 4010), 'sqloxide.parse_sql', 'parse_sql', (['sql'], {'dialect': '"""ansi"""'}), "(sql, dialect='ansi')\n", (3989, 4010), False, 'from sqloxide import parse_sql\n'), ((4324, 4340), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (4335, 4340), False, 'from collections import defaultdict\n'), ((4536, 4581), 'datajunction.sql.parse.find_nodes_by_key', 'find_nodes_by_key', (['tree', '"""CompoundIdentifier"""'], {}), "(tree, 'CompoundIdentifier')\n", (4553, 4581), False, 'from datajunction.sql.parse import find_nodes_by_key\n'), ((4869, 4906), 'datajunction.sql.parse.find_nodes_by_key', 'find_nodes_by_key', (['tree', '"""Identifier"""'], {}), "(tree, 'Identifier')\n", (4886, 4906), False, 'from datajunction.sql.parse import find_nodes_by_key\n'), ((3951, 3967), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (3962, 3967), False, 'from collections import defaultdict\n'), ((3612, 3639), 'operator.attrgetter', 'operator.attrgetter', (['"""cost"""'], {}), "('cost')\n", (3631, 3639), False, 'import operator\n'), ((3219, 3235), 'sqlmodel.select', 'select', (['Database'], {}), '(Database)\n', (3225, 3235), False, 'from sqlmodel import Session, select\n')]
|
"""
Notes
-----
Important attributes of continuous (order > 0) :class:`Field` and
:class:`SurfaceField` instances:
- `vertex_remap` : `econn[:, :n_vertex] = vertex_remap[conn]`
- `vertex_remap_i` : `conn = vertex_remap_i[econn[:, :n_vertex]]`
where `conn` is the mesh vertex connectivity, `econn` is the
region-local field connectivity.
"""
from __future__ import absolute_import
import numpy as nm
from sfepy.base.base import output, get_default, assert_
from sfepy.base.base import Struct
from sfepy.discrete.common.fields import parse_shape, Field
from sfepy.discrete.fem.mesh import Mesh
from sfepy.discrete.fem.meshio import convert_complex_output
from sfepy.discrete.fem.utils import (extend_cell_data, prepare_remap,
invert_remap, get_min_value)
from sfepy.discrete.fem.mappings import VolumeMapping, SurfaceMapping
from sfepy.discrete.fem.poly_spaces import PolySpace
from sfepy.discrete.fem.fe_surface import FESurface
from sfepy.discrete.integrals import Integral
from sfepy.discrete.fem.linearizer import (get_eval_dofs, get_eval_coors,
create_output)
import six
def set_mesh_coors(domain, fields, coors, update_fields=False, actual=False,
clear_all=True, extra_dofs=False):
if actual:
if not hasattr(domain.mesh, 'coors_act'):
domain.mesh.coors_act = nm.zeros_like(domain.mesh.coors)
domain.mesh.coors_act[:] = coors[:domain.mesh.n_nod]
else:
domain.cmesh.coors[:] = coors[:domain.mesh.n_nod]
if update_fields:
for field in six.itervalues(fields):
field.set_coors(coors, extra_dofs=extra_dofs)
field.clear_mappings(clear_all=clear_all)
def eval_nodal_coors(coors, mesh_coors, region, poly_space, geom_poly_space,
econn, only_extra=True):
"""
Compute coordinates of nodes corresponding to `poly_space`, given
mesh coordinates and `geom_poly_space`.
"""
if only_extra:
iex = (poly_space.nts[:,0] > 0).nonzero()[0]
if iex.shape[0] == 0: return
qp_coors = poly_space.node_coors[iex, :]
econn = econn[:, iex].copy()
else:
qp_coors = poly_space.node_coors
##
# Evaluate geometry interpolation base functions in (extra) nodes.
bf = geom_poly_space.eval_base(qp_coors)
bf = bf[:,0,:].copy()
##
# Evaluate extra coordinates with 'bf'.
cmesh = region.domain.cmesh
conn = cmesh.get_incident(0, region.cells, region.tdim)
conn.shape = (econn.shape[0], -1)
ecoors = nm.dot(bf, mesh_coors[conn])
coors[econn] = nm.swapaxes(ecoors, 0, 1)
def _interp_to_faces(vertex_vals, bfs, faces):
dim = vertex_vals.shape[1]
n_face = faces.shape[0]
n_qp = bfs.shape[0]
faces_vals = nm.zeros((n_face, n_qp, dim), nm.float64)
for ii, face in enumerate(faces):
vals = vertex_vals[face,:dim]
faces_vals[ii,:,:] = nm.dot(bfs[:,0,:], vals)
return(faces_vals)
def get_eval_expression(expression,
fields, materials, variables,
functions=None, mode='eval', term_mode=None,
extra_args=None, verbose=True, kwargs=None):
"""
Get the function for evaluating an expression given a list of elements,
and reference element coordinates.
"""
from sfepy.discrete.evaluate import eval_in_els_and_qp
def _eval(iels, coors):
val = eval_in_els_and_qp(expression, iels, coors,
fields, materials, variables,
functions=functions, mode=mode,
term_mode=term_mode,
extra_args=extra_args, verbose=verbose,
kwargs=kwargs)
return val[..., 0]
return _eval
def create_expression_output(expression, name, primary_field_name,
fields, materials, variables,
functions=None, mode='eval', term_mode=None,
extra_args=None, verbose=True, kwargs=None,
min_level=0, max_level=1, eps=1e-4):
"""
Create output mesh and data for the expression using the adaptive
linearizer.
Parameters
----------
expression : str
The expression to evaluate.
name : str
The name of the data.
primary_field_name : str
The name of field that defines the element groups and polynomial
spaces.
fields : dict
The dictionary of fields used in `variables`.
materials : Materials instance
The materials used in the expression.
variables : Variables instance
The variables used in the expression.
functions : Functions instance, optional
The user functions for materials etc.
mode : one of 'eval', 'el_avg', 'qp'
The evaluation mode - 'qp' requests the values in quadrature points,
'el_avg' element averages and 'eval' means integration over
each term region.
term_mode : str
The term call mode - some terms support different call modes
and depending on the call mode different values are
returned.
extra_args : dict, optional
Extra arguments to be passed to terms in the expression.
verbose : bool
If False, reduce verbosity.
kwargs : dict, optional
The variables (dictionary of (variable name) : (Variable
instance)) to be used in the expression.
min_level : int
The minimum required level of mesh refinement.
max_level : int
The maximum level of mesh refinement.
eps : float
The relative tolerance parameter of mesh adaptivity.
Returns
-------
out : dict
The output dictionary.
"""
field = fields[primary_field_name]
vertex_coors = field.coors[:field.n_vertex_dof, :]
ps = field.poly_space
gps = field.gel.poly_space
vertex_conn = field.econn[:, :field.gel.n_vertex]
eval_dofs = get_eval_expression(expression,
fields, materials, variables,
functions=functions,
mode=mode, extra_args=extra_args,
verbose=verbose, kwargs=kwargs)
eval_coors = get_eval_coors(vertex_coors, vertex_conn, gps)
(level, coors, conn,
vdofs, mat_ids) = create_output(eval_dofs, eval_coors,
vertex_conn.shape[0], ps,
min_level=min_level,
max_level=max_level, eps=eps)
mesh = Mesh.from_data('linearized_mesh', coors, None, [conn], [mat_ids],
field.domain.mesh.descs)
out = {}
out[name] = Struct(name='output_data', mode='vertex',
data=vdofs, var_name=name, dofs=None,
mesh=mesh, level=level)
out = convert_complex_output(out)
return out
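# Hedged usage sketch (``pb`` and the term expression are assumptions standing
# for a fully set-up sfepy Problem instance and a valid term):
#
#     out = create_expression_output('ev_grad.2.Omega(t)', 'grad_t', 'temperature',
#                                    pb.fields, pb.get_materials(),
#                                    pb.get_variables(), mode='qp')
#     out['grad_t'].mesh.write('grad_t.vtk', io='auto', out=out)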
class FEField(Field):
"""
Base class for finite element fields.
Notes
-----
- interps and hence node_descs are per region (must have single
geometry!)
Field shape information:
- ``shape`` - the shape of the base functions in a point
- ``n_components`` - the number of DOFs per FE node
- ``val_shape`` - the shape of field value (the product of DOFs and
base functions) in a point
"""
def __init__(self, name, dtype, shape, region, approx_order=1):
"""
Create a finite element field.
Parameters
----------
name : str
The field name.
dtype : numpy.dtype
The field data type: float64 or complex128.
shape : int/tuple/str
The field shape: 1 or (1,) or 'scalar', space dimension (2, or (2,)
or 3 or (3,)) or 'vector', or a tuple. The field shape determines
the shape of the FE base functions and is related to the number of
components of variables and to the DOF per node count, depending
on the field kind.
region : Region
The region where the field is defined.
approx_order : int or tuple
The FE approximation order. The tuple form is (order, has_bubble),
e.g. (1, True) means order 1 with a bubble function.
Notes
-----
Assumes one cell type for the whole region!
"""
shape = parse_shape(shape, region.domain.shape.dim)
if not self._check_region(region):
raise ValueError('unsuitable region for field %s! (%s)' %
(name, region.name))
Struct.__init__(self, name=name, dtype=dtype, shape=shape,
region=region)
self.domain = self.region.domain
self._set_approx_order(approx_order)
self._setup_geometry()
self._setup_kind()
self._setup_shape()
self.surface_data = {}
self.point_data = {}
self.ori = None
self._create_interpolant()
self._setup_global_base()
self.setup_coors()
self.clear_mappings(clear_all=True)
self.clear_qp_base()
self.basis_transform = None
self.econn0 = None
self.unused_dofs = None
self.stored_subs = None
def _set_approx_order(self, approx_order):
"""
Set a uniform approximation order.
"""
if isinstance(approx_order, tuple):
self.approx_order = approx_order[0]
self.force_bubble = approx_order[1]
else:
self.approx_order = approx_order
self.force_bubble = False
def get_true_order(self):
"""
Get the true approximation order depending on the reference
element geometry.
For example, for P1 (linear) approximation the true order is 1,
while for Q1 (bilinear) approximation in 2D the true order is 2.
"""
gel = self.gel
if (gel.dim + 1) == gel.n_vertex:
order = self.approx_order
else:
order = gel.dim * self.approx_order
if self.force_bubble:
bubble_order = gel.dim + 1
order = max(order, bubble_order)
return order
def is_higher_order(self):
"""
Return True, if the field's approximation order is greater than one.
"""
return self.force_bubble or (self.approx_order > 1)
def _setup_global_base(self):
"""
Setup global DOF/base functions, their indices and connectivity of the
        field. The called methods are implemented in subclasses.
"""
self._setup_facet_orientations()
self._init_econn()
self.n_vertex_dof, self.vertex_remap = self._setup_vertex_dofs()
self.vertex_remap_i = invert_remap(self.vertex_remap)
aux = self._setup_edge_dofs()
self.n_edge_dof, self.edge_dofs, self.edge_remap = aux
aux = self._setup_face_dofs()
self.n_face_dof, self.face_dofs, self.face_remap = aux
aux = self._setup_bubble_dofs()
self.n_bubble_dof, self.bubble_dofs, self.bubble_remap = aux
self.n_nod = self.n_vertex_dof + self.n_edge_dof \
+ self.n_face_dof + self.n_bubble_dof
self._setup_esurface()
def _setup_esurface(self):
"""
Setup extended surface entities (edges in 2D, faces in 3D),
i.e. indices of surface entities into the extended connectivity.
"""
node_desc = self.node_desc
gel = self.gel
self.efaces = gel.get_surface_entities().copy()
nd = node_desc.edge
if nd is not None:
efs = []
for eof in gel.get_edges_per_face():
efs.append(nm.concatenate([nd[ie] for ie in eof]))
efs = nm.array(efs).squeeze()
if efs.ndim < 2:
efs = efs[:,nm.newaxis]
self.efaces = nm.hstack((self.efaces, efs))
efs = node_desc.face
if efs is not None:
efs = nm.array(efs).squeeze()
if efs.ndim < 2:
efs = efs[:,nm.newaxis]
self.efaces = nm.hstack((self.efaces, efs))
if gel.dim == 3:
self.eedges = gel.edges.copy()
efs = node_desc.edge
if efs is not None:
efs = nm.array(efs).squeeze()
if efs.ndim < 2:
efs = efs[:,nm.newaxis]
self.eedges = nm.hstack((self.eedges, efs))
def set_coors(self, coors, extra_dofs=False):
"""
Set coordinates of field nodes.
"""
# Mesh vertex nodes.
if self.n_vertex_dof:
indx = self.vertex_remap_i
self.coors[:self.n_vertex_dof] = nm.take(coors,
indx.astype(nm.int32),
axis=0)
n_ex_dof = self.n_bubble_dof + self.n_edge_dof + self.n_face_dof
# extra nodes
if n_ex_dof:
if extra_dofs:
if self.n_nod != coors.shape[0]:
raise NotImplementedError
self.coors[:] = coors
else:
gps = self.gel.poly_space
ps = self.poly_space
eval_nodal_coors(self.coors, coors, self.region,
ps, gps, self.econn)
def setup_coors(self):
"""
Setup coordinates of field nodes.
"""
mesh = self.domain.mesh
self.coors = nm.empty((self.n_nod, mesh.dim), nm.float64)
self.set_coors(mesh.coors)
def get_vertices(self):
"""
Return indices of vertices belonging to the field region.
"""
return self.vertex_remap_i
def _get_facet_dofs(self, rfacets, remap, dofs):
facets = remap[rfacets]
return dofs[facets[facets >= 0]].ravel()
def get_data_shape(self, integral, integration='volume', region_name=None):
"""
Get element data dimensions.
Parameters
----------
integral : Integral instance
The integral describing used numerical quadrature.
integration : 'volume', 'surface', 'surface_extra', 'point' or 'custom'
The term integration type.
region_name : str
The name of the region of the integral.
Returns
-------
data_shape : 4 ints
The `(n_el, n_qp, dim, n_en)` for volume shape kind,
`(n_fa, n_qp, dim, n_fn)` for surface shape kind and
`(n_nod, 0, 0, 1)` for point shape kind.
Notes
-----
- `n_el`, `n_fa` = number of elements/facets
- `n_qp` = number of quadrature points per element/facet
- `dim` = spatial dimension
- `n_en`, `n_fn` = number of element/facet nodes
- `n_nod` = number of element nodes
"""
region = self.domain.regions[region_name]
shape = region.shape
dim = region.dim
if integration in ('surface', 'surface_extra'):
sd = self.surface_data[region_name]
# This works also for surface fields.
key = sd.face_type
weights = self.get_qp(key, integral).weights
n_qp = weights.shape[0]
if integration == 'surface':
data_shape = (sd.n_fa, n_qp, dim, sd.n_fp)
else:
data_shape = (sd.n_fa, n_qp, dim, self.econn.shape[1])
elif integration in ('volume', 'custom'):
_, weights = integral.get_qp(self.gel.name)
n_qp = weights.shape[0]
data_shape = (shape.n_cell, n_qp, dim, self.econn.shape[1])
elif integration == 'point':
dofs = self.get_dofs_in_region(region, merge=True)
data_shape = (dofs.shape[0], 0, 0, 1)
else:
raise NotImplementedError('unsupported integration! (%s)'
% integration)
return data_shape
def get_dofs_in_region(self, region, merge=True):
"""
Return indices of DOFs that belong to the given region and group.
"""
node_desc = self.node_desc
dofs = []
vdofs = nm.empty((0,), dtype=nm.int32)
if node_desc.vertex is not None:
vdofs = self.vertex_remap[region.vertices]
vdofs = vdofs[vdofs >= 0]
dofs.append(vdofs)
edofs = nm.empty((0,), dtype=nm.int32)
if node_desc.edge is not None:
edofs = self._get_facet_dofs(region.edges,
self.edge_remap,
self.edge_dofs)
dofs.append(edofs)
fdofs = nm.empty((0,), dtype=nm.int32)
if node_desc.face is not None:
fdofs = self._get_facet_dofs(region.faces,
self.face_remap,
self.face_dofs)
dofs.append(fdofs)
bdofs = nm.empty((0,), dtype=nm.int32)
if (node_desc.bubble is not None) and region.has_cells():
els = self.bubble_remap[region.cells]
bdofs = self.bubble_dofs[els[els >= 0]].ravel()
dofs.append(bdofs)
if merge:
dofs = nm.concatenate(dofs)
return dofs
def clear_qp_base(self):
"""
Remove cached quadrature points and base functions.
"""
self.qp_coors = {}
self.bf = {}
def get_qp(self, key, integral):
"""
Get quadrature points and weights corresponding to the given key
and integral. The key is 'v' or 's#', where # is the number of
face vertices.
"""
qpkey = (integral.order, key)
if qpkey not in self.qp_coors:
if (key[0] == 's') and not self.is_surface:
dim = self.gel.dim - 1
n_fp = self.gel.surface_facet.n_vertex
geometry = '%d_%d' % (dim, n_fp)
else:
geometry = self.gel.name
vals, weights = integral.get_qp(geometry)
self.qp_coors[qpkey] = Struct(vals=vals, weights=weights)
return self.qp_coors[qpkey]
def substitute_dofs(self, subs, restore=False):
"""
Perform facet DOF substitutions according to `subs`.
Modifies `self.econn` in-place and sets `self.econn0`,
`self.unused_dofs` and `self.basis_transform`.
"""
if restore and (self.stored_subs is not None):
self.econn0 = self.econn
self.econn, self.unused_dofs, basis_transform = self.stored_subs
else:
if subs is None:
self.econn0 = self.econn
return
else:
self.econn0 = self.econn.copy()
self._substitute_dofs(subs)
self.unused_dofs = nm.setdiff1d(self.econn0, self.econn)
basis_transform = self._eval_basis_transform(subs)
self.set_basis_transform(basis_transform)
def restore_dofs(self, store=False):
"""
Undoes the effect of :func:`FEField.substitute_dofs()`.
"""
if self.econn0 is None:
raise ValueError('no original DOFs to restore!')
if store:
self.stored_subs = (self.econn,
self.unused_dofs,
self.basis_transform)
else:
self.stored_subs = None
self.econn = self.econn0
self.econn0 = None
self.unused_dofs = None
self.basis_transform = None
def set_basis_transform(self, transform):
"""
Set local element basis transformation.
The basis transformation is applied in :func:`FEField.get_base()` and
:func:`FEField.create_mapping()`.
Parameters
----------
transform : array, shape `(n_cell, n_ep, n_ep)`
The array with `(n_ep, n_ep)` transformation matrices for each cell
in the field's region, where `n_ep` is the number of element DOFs.
"""
self.basis_transform = transform
def restore_substituted(self, vec):
"""
Restore values of the unused DOFs using the transpose of the applied
basis transformation.
"""
if (self.econn0 is None) or (self.basis_transform is None):
raise ValueError('no original DOF values to restore!!')
vec = vec.reshape((self.n_nod, self.n_components)).copy()
evec = vec[self.econn]
vec[self.econn0] = nm.einsum('cji,cjk->cik', self.basis_transform, evec)
return vec.ravel()
def get_base(self, key, derivative, integral, iels=None,
from_geometry=False, base_only=True):
qp = self.get_qp(key, integral)
if from_geometry:
ps = self.gel.poly_space
else:
ps = self.poly_space
_key = key if not from_geometry else 'g' + key
bf_key = (integral.order, _key, derivative)
if bf_key not in self.bf:
if (iels is not None) and (self.ori is not None):
ori = self.ori[iels]
else:
ori = self.ori
self.bf[bf_key] = ps.eval_base(qp.vals, diff=derivative, ori=ori,
transform=self.basis_transform)
if base_only:
return self.bf[bf_key]
else:
return self.bf[bf_key], qp.weights
def create_bqp(self, region_name, integral):
gel = self.gel
sd = self.surface_data[region_name]
bqpkey = (integral.order, sd.bkey)
if not bqpkey in self.qp_coors:
qp = self.get_qp(sd.face_type, integral)
ps_s = self.gel.surface_facet.poly_space
bf_s = ps_s.eval_base(qp.vals)
coors, faces = gel.coors, gel.get_surface_entities()
vals = _interp_to_faces(coors, bf_s, faces)
self.qp_coors[bqpkey] = Struct(name='BQP_%s' % sd.bkey,
vals=vals, weights=qp.weights)
def extend_dofs(self, dofs, fill_value=None):
"""
Extend DOFs to the whole domain using the `fill_value`, or the
smallest value in `dofs` if `fill_value` is None.
"""
if fill_value is None:
if nm.isrealobj(dofs):
fill_value = get_min_value(dofs)
else:
# Complex values - treat real and imaginary parts separately.
fill_value = get_min_value(dofs.real)
fill_value += 1j * get_min_value(dofs.imag)
if self.approx_order != 0:
indx = self.get_vertices()
n_nod = self.domain.shape.n_nod
new_dofs = nm.empty((n_nod, dofs.shape[1]), dtype=self.dtype)
new_dofs.fill(fill_value)
new_dofs[indx] = dofs[:indx.size]
else:
new_dofs = extend_cell_data(dofs, self.domain, self.region,
val=fill_value)
return new_dofs
def remove_extra_dofs(self, dofs):
"""
Remove DOFs defined in higher order nodes (order > 1).
"""
if self.approx_order != 0:
new_dofs = dofs[:self.n_vertex_dof]
else:
new_dofs = dofs
return new_dofs
def linearize(self, dofs, min_level=0, max_level=1, eps=1e-4):
"""
Linearize the solution for post-processing.
Parameters
----------
dofs : array, shape (n_nod, n_component)
The array of DOFs reshaped so that each column corresponds
to one component.
min_level : int
The minimum required level of mesh refinement.
max_level : int
The maximum level of mesh refinement.
eps : float
The relative tolerance parameter of mesh adaptivity.
Returns
-------
mesh : Mesh instance
The adapted, nonconforming, mesh.
vdofs : array
The DOFs defined in vertices of `mesh`.
levels : array of ints
The refinement level used for each element group.
"""
assert_(dofs.ndim == 2)
n_nod, dpn = dofs.shape
assert_(n_nod == self.n_nod)
assert_(dpn == self.shape[0])
vertex_coors = self.coors[:self.n_vertex_dof, :]
ps = self.poly_space
gps = self.gel.poly_space
vertex_conn = self.econn[:, :self.gel.n_vertex]
eval_dofs = get_eval_dofs(dofs, self.econn, ps, ori=self.ori)
eval_coors = get_eval_coors(vertex_coors, vertex_conn, gps)
(level, coors, conn,
vdofs, mat_ids) = create_output(eval_dofs, eval_coors,
vertex_conn.shape[0], ps,
min_level=min_level,
max_level=max_level, eps=eps)
mesh = Mesh.from_data('linearized_mesh', coors, None, [conn], [mat_ids],
self.domain.mesh.descs)
return mesh, vdofs, level
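    # Hedged usage sketch (``field`` and ``dofs`` are assumptions standing for an
    # existing FEField instance and its DOF array of shape (n_nod, n_component)):
    #
    #     mesh, vdofs, level = field.linearize(dofs, min_level=0, max_level=3)
    #     out = {'u': Struct(name='output_data', mode='vertex', data=vdofs)}
    #     mesh.write('linearized.vtk', io='auto', out=out)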
def get_output_approx_order(self):
"""
Get the approximation order used in the output file.
"""
return min(self.approx_order, 1)
def create_output(self, dofs, var_name, dof_names=None,
key=None, extend=True, fill_value=None,
linearization=None):
"""
Convert the DOFs corresponding to the field to a dictionary of
output data usable by Mesh.write().
Parameters
----------
dofs : array, shape (n_nod, n_component)
The array of DOFs reshaped so that each column corresponds
to one component.
var_name : str
The variable name corresponding to `dofs`.
dof_names : tuple of str
The names of DOF components.
key : str, optional
The key to be used in the output dictionary instead of the
variable name.
extend : bool
Extend the DOF values to cover the whole domain.
fill_value : float or complex
The value used to fill the missing DOF values if `extend` is True.
linearization : Struct or None
The linearization configuration for higher order approximations.
Returns
-------
out : dict
The output dictionary.
"""
linearization = get_default(linearization, Struct(kind='strip'))
out = {}
if linearization.kind is None:
out[key] = Struct(name='output_data', mode='full',
data=dofs, var_name=var_name,
dofs=dof_names, field_name=self.name)
elif linearization.kind == 'strip':
if extend:
ext = self.extend_dofs(dofs, fill_value)
else:
ext = self.remove_extra_dofs(dofs)
if ext is not None:
approx_order = self.get_output_approx_order()
if approx_order != 0:
# Has vertex data.
out[key] = Struct(name='output_data', mode='vertex',
data=ext, var_name=var_name,
dofs=dof_names)
else:
ext.shape = (ext.shape[0], 1, ext.shape[1], 1)
out[key] = Struct(name='output_data', mode='cell',
data=ext, var_name=var_name,
dofs=dof_names)
else:
mesh, vdofs, levels = self.linearize(dofs,
linearization.min_level,
linearization.max_level,
linearization.eps)
out[key] = Struct(name='output_data', mode='vertex',
data=vdofs, var_name=var_name, dofs=dof_names,
mesh=mesh, levels=levels)
out = convert_complex_output(out)
return out
def create_mesh(self, extra_nodes=True):
"""
Create a mesh from the field region, optionally including the field
extra nodes.
"""
mesh = self.domain.mesh
if self.approx_order != 0:
if extra_nodes:
conn = self.econn
else:
conn = self.econn[:, :self.gel.n_vertex]
conns = [conn]
mat_ids = [mesh.cmesh.cell_groups]
descs = mesh.descs[:1]
if extra_nodes:
coors = self.coors
else:
coors = self.coors[:self.n_vertex_dof]
mesh = Mesh.from_data(self.name, coors, None, conns,
mat_ids, descs)
return mesh
def get_evaluate_cache(self, cache=None, share_geometry=False,
verbose=False):
"""
Get the evaluate cache for :func:`Variable.evaluate_at()
<sfepy.discrete.variables.Variable.evaluate_at()>`.
Parameters
----------
cache : Struct instance, optional
Optionally, use the provided instance to store the cache data.
share_geometry : bool
Set to True to indicate that all the evaluations will work on the
same region. Certain data are then computed only for the first
probe and cached.
verbose : bool
If False, reduce verbosity.
Returns
-------
cache : Struct instance
The evaluate cache.
"""
import time
try:
from scipy.spatial import cKDTree as KDTree
except ImportError:
from scipy.spatial import KDTree
from sfepy.discrete.fem.geometry_element import create_geometry_elements
if cache is None:
cache = Struct(name='evaluate_cache')
tt = time.clock()
if (cache.get('cmesh', None) is None) or not share_geometry:
mesh = self.create_mesh(extra_nodes=False)
cache.cmesh = cmesh = mesh.cmesh
gels = create_geometry_elements()
cmesh.set_local_entities(gels)
cmesh.setup_entities()
cache.centroids = cmesh.get_centroids(cmesh.tdim)
if self.gel.name != '3_8':
cache.normals0 = cmesh.get_facet_normals()
cache.normals1 = None
else:
cache.normals0 = cmesh.get_facet_normals(0)
cache.normals1 = cmesh.get_facet_normals(1)
output('cmesh setup: %f s' % (time.clock()-tt), verbose=verbose)
tt = time.clock()
if (cache.get('kdtree', None) is None) or not share_geometry:
cache.kdtree = KDTree(cmesh.coors)
output('kdtree: %f s' % (time.clock()-tt), verbose=verbose)
return cache
def interp_to_qp(self, dofs):
"""
Interpolate DOFs into quadrature points.
The quadrature order is given by the field approximation order.
Parameters
----------
dofs : array
The array of DOF values of shape `(n_nod, n_component)`.
Returns
-------
data_qp : array
The values interpolated into the quadrature points.
integral : Integral
The corresponding integral defining the quadrature points.
"""
integral = Integral('i', order=self.approx_order)
bf = self.get_base('v', False, integral)
bf = bf[:,0,:].copy()
data_qp = nm.dot(bf, dofs[self.econn])
data_qp = nm.swapaxes(data_qp, 0, 1)
data_qp.shape = data_qp.shape + (1,)
return data_qp, integral
def get_coor(self, nods=None):
"""
Get coordinates of the field nodes.
Parameters
----------
nods : array, optional
The indices of the required nodes. If not given, the
coordinates of all the nodes are returned.
"""
if nods is None:
return self.coors
else:
return self.coors[nods]
def get_connectivity(self, region, integration, is_trace=False):
"""
Convenience alias to `Field.get_econn()`, that is used in some terms.
"""
return self.get_econn(integration, region, is_trace=is_trace)
def create_mapping(self, region, integral, integration,
return_mapping=True):
"""
Create a new reference mapping.
Compute jacobians, element volumes and base function derivatives
for Volume-type geometries (volume mappings), and jacobians,
normals and base function derivatives for Surface-type
geometries (surface mappings).
Notes
-----
- surface mappings are defined on the surface region
- surface mappings require field order to be > 0
"""
domain = self.domain
coors = domain.get_mesh_coors(actual=True)
dconn = domain.get_conn()
if integration == 'volume':
qp = self.get_qp('v', integral)
iels = region.get_cells()
geo_ps = self.gel.poly_space
ps = self.poly_space
bf = self.get_base('v', 0, integral, iels=iels)
conn = nm.take(dconn, iels.astype(nm.int32), axis=0)
mapping = VolumeMapping(coors, conn, poly_space=geo_ps)
vg = mapping.get_mapping(qp.vals, qp.weights, poly_space=ps,
ori=self.ori,
transform=self.basis_transform)
out = vg
elif (integration == 'surface') or (integration == 'surface_extra'):
assert_(self.approx_order > 0)
if self.ori is not None:
msg = 'surface integrals do not work yet with the' \
' hierarchical basis!'
raise ValueError(msg)
sd = domain.surface_groups[region.name]
esd = self.surface_data[region.name]
geo_ps = self.gel.poly_space
ps = self.poly_space
conn = sd.get_connectivity()
mapping = SurfaceMapping(coors, conn, poly_space=geo_ps)
if not self.is_surface:
self.create_bqp(region.name, integral)
qp = self.qp_coors[(integral.order, esd.bkey)]
abf = ps.eval_base(qp.vals[0], transform=self.basis_transform)
bf = abf[..., self.efaces[0]]
indx = self.gel.get_surface_entities()[0]
# Fix geometry element's 1st facet orientation for gradients.
indx = nm.roll(indx, -1)[::-1]
mapping.set_basis_indices(indx)
sg = mapping.get_mapping(qp.vals[0], qp.weights,
poly_space=Struct(n_nod=bf.shape[-1]),
mode=integration)
if integration == 'surface_extra':
sg.alloc_extra_data(self.econn.shape[1])
bf_bg = geo_ps.eval_base(qp.vals, diff=True)
ebf_bg = self.get_base(esd.bkey, 1, integral)
sg.evaluate_bfbgm(bf_bg, ebf_bg, coors, sd.fis, dconn)
else:
# Do not use BQP for surface fields.
qp = self.get_qp(sd.face_type, integral)
bf = ps.eval_base(qp.vals, transform=self.basis_transform)
sg = mapping.get_mapping(qp.vals, qp.weights,
poly_space=Struct(n_nod=bf.shape[-1]),
mode=integration)
out = sg
elif integration == 'point':
out = mapping = None
elif integration == 'custom':
raise ValueError('cannot create custom mapping!')
else:
raise ValueError('unknown integration geometry type: %s'
% integration)
if out is not None:
# Store the integral used.
out.integral = integral
out.qp = qp
out.ps = ps
# Update base.
out.bf[:] = bf
if return_mapping:
out = (out, mapping)
return out
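# Illustrative sketch (plain numpy via the module's `nm` import, not sfepy
# API) of the contraction used by FEField.interp_to_qp() above: basis values
# `bf` of shape (n_qp, n_ep) applied to per-cell DOFs give quadrature-point
# values of shape (n_cell, n_qp, n_c).
def _interp_to_qp_sketch(bf, econn, dofs):
    # dofs[econn] has shape (n_cell, n_ep, n_c); nm.dot() sums over n_ep and
    # yields (n_qp, n_cell, n_c), swapped to (n_cell, n_qp, n_c) as above.
    data_qp = nm.dot(bf, dofs[econn])
    return nm.swapaxes(data_qp, 0, 1)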
class VolumeField(FEField):
"""
Finite element field base class over volume elements (element dimension
equals space dimension).
"""
def _check_region(self, region):
"""
Check whether the `region` can be used for the
field.
Returns
-------
ok : bool
True if the region is usable for the field.
"""
ok = True
domain = region.domain
if region.kind != 'cell':
output("bad region kind! (is: %r, should be: 'cell')"
% region.kind)
ok = False
elif (region.kind_tdim != domain.shape.tdim):
output('cells with a bad topological dimension! (%d == %d)'
% (region.kind_tdim, domain.shape.tdim))
ok = False
return ok
def _setup_geometry(self):
"""
Setup the field region geometry.
"""
cmesh = self.domain.cmesh
for key, gel in six.iteritems(self.domain.geom_els):
ct = cmesh.cell_types
if (ct[self.region.cells] == cmesh.key_to_index[gel.name]).all():
self.gel = gel
break
else:
raise ValueError('region %s of field %s contains multiple'
' reference geometries!'
% (self.region.name, self.name))
self.is_surface = False
def _create_interpolant(self):
name = '%s_%s_%s_%d%s' % (self.gel.name, self.space,
self.poly_space_base, self.approx_order,
'B' * self.force_bubble)
ps = PolySpace.any_from_args(name, self.gel, self.approx_order,
base=self.poly_space_base,
force_bubble=self.force_bubble)
self.poly_space = ps
def _init_econn(self):
"""
Initialize the extended DOF connectivity.
"""
n_ep = self.poly_space.n_nod
n_cell = self.region.get_n_cells()
self.econn = nm.zeros((n_cell, n_ep), nm.int32)
def _setup_vertex_dofs(self):
"""
Setup vertex DOF connectivity.
"""
if self.node_desc.vertex is None:
return 0, None
region = self.region
cmesh = self.domain.cmesh
conn, offsets = cmesh.get_incident(0, region.cells, region.tdim,
ret_offsets=True)
vertices = nm.unique(conn)
remap = prepare_remap(vertices, region.n_v_max)
n_dof = vertices.shape[0]
aux = nm.unique(nm.diff(offsets))
assert_(len(aux) == 1, 'region with multiple reference geometries!')
offset = aux[0]
# Remap vertex node connectivity to field-local numbering.
aux = conn.reshape((-1, offset)).astype(nm.int32)
self.econn[:, :offset] = nm.take(remap, aux)
return n_dof, remap
def setup_extra_data(self, geometry, info, is_trace):
dct = info.dc_type.type
if geometry != None:
geometry_flag = 'surface' in geometry
else:
geometry_flag = False
if (dct == 'surface') or (geometry_flag):
reg = info.get_region()
mreg_name = info.get_region_name(can_trace=False)
self.domain.create_surface_group(reg)
self.setup_surface_data(reg, is_trace, mreg_name)
elif dct == 'edge':
raise NotImplementedError('dof connectivity type %s' % dct)
elif dct == 'point':
self.setup_point_data(self, info.region)
elif dct not in ('volume', 'scalar', 'custom'):
raise ValueError('unknown dof connectivity type! (%s)' % dct)
def setup_point_data(self, field, region):
if region.name not in self.point_data:
conn = field.get_dofs_in_region(region, merge=True)
conn.shape += (1,)
self.point_data[region.name] = conn
def setup_surface_data(self, region, is_trace=False, trace_region=None):
"""nodes[leconn] == econn"""
"""nodes are sorted by node number -> same order as region.vertices"""
if region.name not in self.surface_data:
sd = FESurface('surface_data_%s' % region.name, region,
self.efaces, self.econn, self.region)
self.surface_data[region.name] = sd
if region.name in self.surface_data and is_trace:
sd = self.surface_data[region.name]
sd.setup_mirror_connectivity(region, trace_region)
return self.surface_data[region.name]
def get_econn(self, conn_type, region, is_trace=False, integration=None):
"""
Get extended connectivity of the given type in the given region.
"""
ct = conn_type.type if isinstance(conn_type, Struct) else conn_type
if ct in ('volume', 'custom'):
if region.name == self.region.name:
conn = self.econn
else:
tco = integration in ('volume', 'custom')
cells = region.get_cells(true_cells_only=tco)
ii = self.region.get_cell_indices(cells, true_cells_only=tco)
conn = nm.take(self.econn, ii, axis=0)
elif ct == 'surface':
sd = self.surface_data[region.name]
conn = sd.get_connectivity(is_trace=is_trace)
elif ct == 'edge':
raise NotImplementedError('connectivity type %s' % ct)
elif ct == 'point':
conn = self.point_data[region.name]
else:
raise ValueError('unknown connectivity type! (%s)' % ct)
return conn
def average_qp_to_vertices(self, data_qp, integral):
"""
Average data given in quadrature points in region elements into
region vertices.
.. math::
u_n = \sum_e (u_{e,avg} * volume_e) / \sum_e volume_e
= \sum_e \int_{volume_e} u / \sum volume_e
"""
region = self.region
n_cells = region.get_n_cells()
if n_cells != data_qp.shape[0]:
            msg = 'incompatible shape! (%d == %d)' % (n_cells,
data_qp.shape[0])
raise ValueError(msg)
n_vertex = self.n_vertex_dof
nc = data_qp.shape[2]
nod_vol = nm.zeros((n_vertex,), dtype=nm.float64)
data_vertex = nm.zeros((n_vertex, nc), dtype=nm.float64)
vg = self.get_mapping(self.region, integral, 'volume')[0]
volume = nm.squeeze(vg.volume)
iels = self.region.get_cells()
data_e = nm.zeros((volume.shape[0], 1, nc, 1), dtype=nm.float64)
vg.integrate(data_e, data_qp[iels])
ir = nm.arange(nc, dtype=nm.int32)
conn = self.econn[:, :self.gel.n_vertex]
for ii, cc in enumerate(conn):
# Assumes unique nodes in cc!
ind2, ind1 = nm.meshgrid(ir, cc)
data_vertex[ind1,ind2] += data_e[iels[ii],0,:,0]
nod_vol[cc] += volume[ii]
data_vertex /= nod_vol[:,nm.newaxis]
return data_vertex
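# Illustrative sketch (plain numpy, hypothetical helper) of the
# volume-weighted averaging in VolumeField.average_qp_to_vertices() above:
# u_n = sum_e(u_{e,avg} * volume_e) / sum_e(volume_e) over elements e
# incident to vertex n.
def _vertex_average_sketch(u_e, vol, conn, n_vertex):
    nc = u_e.shape[1]
    num = nm.zeros((n_vertex, nc), dtype=nm.float64)
    den = nm.zeros((n_vertex,), dtype=nm.float64)
    for ii, cc in enumerate(conn):  # cc: vertex indices of element ii (unique)
        num[cc] += u_e[ii] * vol[ii]
        den[cc] += vol[ii]
    return num / den[:, nm.newaxis]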
class SurfaceField(FEField):
"""
Finite element field base class over surface (element dimension is one
less than space dimension).
"""
def _check_region(self, region):
"""
Check whether the `region` can be used for the
field.
Returns
-------
ok : bool
True if the region is usable for the field.
"""
ok1 = ((region.kind_tdim == (region.tdim - 1))
and (region.get_n_cells(True) > 0))
if not ok1:
output('bad region topological dimension and kind! (%d, %s)'
% (region.tdim, region.kind))
n_ns = region.get_facet_indices().shape[0] - region.get_n_cells(True)
ok2 = n_ns == 0
if not ok2:
output('%d region facets are not on the domain surface!' % n_ns)
return ok1 and ok2
def _setup_geometry(self):
"""
Setup the field region geometry.
"""
for key, vgel in six.iteritems(self.domain.geom_els):
self.gel = vgel.surface_facet
break
if self.gel is None:
raise ValueError('cells with no surface!')
self.is_surface = True
def _create_interpolant(self):
name = '%s_%s_%s_%d%s' % (self.gel.name, self.space,
self.poly_space_base, self.approx_order,
'B' * self.force_bubble)
ps = PolySpace.any_from_args(name, self.gel, self.approx_order,
base=self.poly_space_base,
force_bubble=self.force_bubble)
self.poly_space = ps
def setup_extra_data(self, geometry, info, is_trace):
dct = info.dc_type.type
if dct != 'surface':
msg = "dof connectivity type must be 'surface'! (%s)" % dct
raise ValueError(msg)
reg = info.get_region()
if reg.name not in self.surface_data:
# Defined in setup_vertex_dofs()
msg = 'no surface data of surface field! (%s)' % reg.name
raise ValueError(msg)
if reg.name in self.surface_data and is_trace:
sd = self.surface_data[reg.name]
mreg_name = info.get_region_name(can_trace=False)
sd.setup_mirror_connectivity(reg, mreg_name)
def _init_econn(self):
"""
Initialize the extended DOF connectivity.
"""
n_ep = self.poly_space.n_nod
n_cell = self.region.get_n_cells(is_surface=self.is_surface)
self.econn = nm.zeros((n_cell, n_ep), nm.int32)
def _setup_vertex_dofs(self):
"""
Setup vertex DOF connectivity.
"""
if self.node_desc.vertex is None:
return 0, None
region = self.region
remap = prepare_remap(region.vertices, region.n_v_max)
n_dof = region.vertices.shape[0]
# Remap vertex node connectivity to field-local numbering.
conn, gel = self.domain.get_conn(ret_gel=True)
faces = gel.get_surface_entities()
aux = FESurface('aux', region, faces, conn)
self.econn[:, :aux.n_fp] = aux.leconn
self.surface_data[region.name] = aux
return n_dof, remap
def _setup_bubble_dofs(self):
"""
Setup bubble DOF connectivity.
"""
return 0, None, None
def get_econn(self, conn_type, region, is_trace=False,
integration=None):
"""
Get extended connectivity of the given type in the given region.
"""
ct = conn_type.type if isinstance(conn_type, Struct) else conn_type
if ct != 'surface':
msg = 'connectivity type must be "surface"! (%s)' % ct
raise ValueError(msg)
sd = self.surface_data[region.name]
conn = sd.get_connectivity(local=True, is_trace=is_trace)
return conn
def average_qp_to_vertices(self, data_qp, integral):
"""
Average data given in quadrature points in region elements into
region vertices.
.. math::
u_n = \sum_e (u_{e,avg} * area_e) / \sum_e area_e
= \sum_e \int_{area_e} u / \sum area_e
"""
region = self.region
n_cells = region.get_n_cells(True)
if n_cells != data_qp.shape[0]:
            msg = 'incompatible shape! (%d == %d)' % (n_cells,
data_qp.shape[0])
raise ValueError(msg)
n_vertex = len(region.vertices)
nc = data_qp.shape[2]
nod_vol = nm.zeros((n_vertex,), dtype=nm.float64)
data_vertex = nm.zeros((n_vertex, nc), dtype=nm.float64)
sg = self.get_mapping(self.region, integral, 'surface')[0]
area = nm.squeeze(sg.volume)
n_cells = region.get_n_cells(True)
iels = nm.arange(n_cells, dtype=nm.int32)
data_e = nm.zeros((area.shape[0], 1, nc, 1), dtype=nm.float64)
sg.integrate(data_e, data_qp[iels])
ir = nm.arange(nc, dtype=nm.int32)
sd = self.domain.surface_groups[region.name]
# Should be vertex connectivity!
conn = sd.get_connectivity(local=True)
for ii, cc in enumerate(conn):
# Assumes unique nodes in cc!
ind2, ind1 = nm.meshgrid(ir, cc)
data_vertex[ind1,ind2] += data_e[iels[ii],0,:,0]
nod_vol[cc] += area[ii]
data_vertex /= nod_vol[:,nm.newaxis]
return data_vertex
class H1Mixin(Struct):
"""
Methods of fields specific to H1 space.
"""
def _setup_shape(self):
"""
Setup the field's shape-related attributes, see :class:`Field`.
"""
self.n_components = nm.prod(self.shape)
self.val_shape = self.shape
|
[
"sfepy.discrete.fem.mesh.Mesh.from_data",
"sfepy.discrete.integrals.Integral",
"sfepy.base.base.Struct",
"sfepy.discrete.fem.linearizer.get_eval_coors",
"sfepy.discrete.fem.meshio.convert_complex_output",
"sfepy.discrete.fem.linearizer.create_output",
"sfepy.discrete.fem.poly_spaces.PolySpace.any_from_args",
"sfepy.discrete.fem.linearizer.get_eval_dofs",
"sfepy.discrete.fem.utils.invert_remap",
"sfepy.discrete.fem.geometry_element.create_geometry_elements",
"sfepy.base.base.Struct.__init__",
"sfepy.base.base.output",
"sfepy.discrete.common.fields.parse_shape",
"sfepy.base.base.assert_",
"sfepy.discrete.fem.utils.get_min_value",
"sfepy.discrete.fem.utils.extend_cell_data",
"sfepy.discrete.fem.mappings.VolumeMapping",
"sfepy.discrete.fem.fe_surface.FESurface",
"sfepy.discrete.fem.mappings.SurfaceMapping",
"sfepy.discrete.fem.utils.prepare_remap",
"sfepy.discrete.evaluate.eval_in_els_and_qp"
] |
[((2582, 2610), 'numpy.dot', 'nm.dot', (['bf', 'mesh_coors[conn]'], {}), '(bf, mesh_coors[conn])\n', (2588, 2610), True, 'import numpy as nm\n'), ((2630, 2655), 'numpy.swapaxes', 'nm.swapaxes', (['ecoors', '(0)', '(1)'], {}), '(ecoors, 0, 1)\n', (2641, 2655), True, 'import numpy as nm\n'), ((2805, 2846), 'numpy.zeros', 'nm.zeros', (['(n_face, n_qp, dim)', 'nm.float64'], {}), '((n_face, n_qp, dim), nm.float64)\n', (2813, 2846), True, 'import numpy as nm\n'), ((6368, 6414), 'sfepy.discrete.fem.linearizer.get_eval_coors', 'get_eval_coors', (['vertex_coors', 'vertex_conn', 'gps'], {}), '(vertex_coors, vertex_conn, gps)\n', (6382, 6414), False, 'from sfepy.discrete.fem.linearizer import get_eval_dofs, get_eval_coors, create_output\n'), ((6464, 6582), 'sfepy.discrete.fem.linearizer.create_output', 'create_output', (['eval_dofs', 'eval_coors', 'vertex_conn.shape[0]', 'ps'], {'min_level': 'min_level', 'max_level': 'max_level', 'eps': 'eps'}), '(eval_dofs, eval_coors, vertex_conn.shape[0], ps, min_level=\n min_level, max_level=max_level, eps=eps)\n', (6477, 6582), False, 'from sfepy.discrete.fem.linearizer import get_eval_dofs, get_eval_coors, create_output\n'), ((6701, 6796), 'sfepy.discrete.fem.mesh.Mesh.from_data', 'Mesh.from_data', (['"""linearized_mesh"""', 'coors', 'None', '[conn]', '[mat_ids]', 'field.domain.mesh.descs'], {}), "('linearized_mesh', coors, None, [conn], [mat_ids], field.\n domain.mesh.descs)\n", (6715, 6796), False, 'from sfepy.discrete.fem.mesh import Mesh\n'), ((6848, 6956), 'sfepy.base.base.Struct', 'Struct', ([], {'name': '"""output_data"""', 'mode': '"""vertex"""', 'data': 'vdofs', 'var_name': 'name', 'dofs': 'None', 'mesh': 'mesh', 'level': 'level'}), "(name='output_data', mode='vertex', data=vdofs, var_name=name, dofs=\n None, mesh=mesh, level=level)\n", (6854, 6956), False, 'from sfepy.base.base import Struct\n'), ((7009, 7036), 'sfepy.discrete.fem.meshio.convert_complex_output', 'convert_complex_output', (['out'], {}), '(out)\n', (7031, 7036), False, 'from sfepy.discrete.fem.meshio import convert_complex_output\n'), ((1598, 1620), 'six.itervalues', 'six.itervalues', (['fields'], {}), '(fields)\n', (1612, 1620), False, 'import six\n'), ((2952, 2978), 'numpy.dot', 'nm.dot', (['bfs[:, 0, :]', 'vals'], {}), '(bfs[:, 0, :], vals)\n', (2958, 2978), True, 'import numpy as nm\n'), ((3463, 3653), 'sfepy.discrete.evaluate.eval_in_els_and_qp', 'eval_in_els_and_qp', (['expression', 'iels', 'coors', 'fields', 'materials', 'variables'], {'functions': 'functions', 'mode': 'mode', 'term_mode': 'term_mode', 'extra_args': 'extra_args', 'verbose': 'verbose', 'kwargs': 'kwargs'}), '(expression, iels, coors, fields, materials, variables,\n functions=functions, mode=mode, term_mode=term_mode, extra_args=\n extra_args, verbose=verbose, kwargs=kwargs)\n', (3481, 3653), False, 'from sfepy.discrete.evaluate import eval_in_els_and_qp\n'), ((8522, 8565), 'sfepy.discrete.common.fields.parse_shape', 'parse_shape', (['shape', 'region.domain.shape.dim'], {}), '(shape, region.domain.shape.dim)\n', (8533, 8565), False, 'from sfepy.discrete.common.fields import parse_shape, Field\n'), ((8738, 8811), 'sfepy.base.base.Struct.__init__', 'Struct.__init__', (['self'], {'name': 'name', 'dtype': 'dtype', 'shape': 'shape', 'region': 'region'}), '(self, name=name, dtype=dtype, shape=shape, region=region)\n', (8753, 8811), False, 'from sfepy.base.base import Struct\n'), ((10902, 10933), 'sfepy.discrete.fem.utils.invert_remap', 'invert_remap', (['self.vertex_remap'], {}), '(self.vertex_remap)\n', (10914, 10933), 
False, 'from sfepy.discrete.fem.utils import extend_cell_data, prepare_remap, invert_remap, get_min_value\n'), ((13667, 13711), 'numpy.empty', 'nm.empty', (['(self.n_nod, mesh.dim)', 'nm.float64'], {}), '((self.n_nod, mesh.dim), nm.float64)\n', (13675, 13711), True, 'import numpy as nm\n'), ((16374, 16404), 'numpy.empty', 'nm.empty', (['(0,)'], {'dtype': 'nm.int32'}), '((0,), dtype=nm.int32)\n', (16382, 16404), True, 'import numpy as nm\n'), ((16583, 16613), 'numpy.empty', 'nm.empty', (['(0,)'], {'dtype': 'nm.int32'}), '((0,), dtype=nm.int32)\n', (16591, 16613), True, 'import numpy as nm\n'), ((16867, 16897), 'numpy.empty', 'nm.empty', (['(0,)'], {'dtype': 'nm.int32'}), '((0,), dtype=nm.int32)\n', (16875, 16897), True, 'import numpy as nm\n'), ((17151, 17181), 'numpy.empty', 'nm.empty', (['(0,)'], {'dtype': 'nm.int32'}), '((0,), dtype=nm.int32)\n', (17159, 17181), True, 'import numpy as nm\n'), ((20721, 20774), 'numpy.einsum', 'nm.einsum', (['"""cji,cjk->cik"""', 'self.basis_transform', 'evec'], {}), "('cji,cjk->cik', self.basis_transform, evec)\n", (20730, 20774), True, 'import numpy as nm\n'), ((24365, 24388), 'sfepy.base.base.assert_', 'assert_', (['(dofs.ndim == 2)'], {}), '(dofs.ndim == 2)\n', (24372, 24388), False, 'from sfepy.base.base import output, get_default, assert_\n'), ((24431, 24459), 'sfepy.base.base.assert_', 'assert_', (['(n_nod == self.n_nod)'], {}), '(n_nod == self.n_nod)\n', (24438, 24459), False, 'from sfepy.base.base import output, get_default, assert_\n'), ((24468, 24497), 'sfepy.base.base.assert_', 'assert_', (['(dpn == self.shape[0])'], {}), '(dpn == self.shape[0])\n', (24475, 24497), False, 'from sfepy.base.base import output, get_default, assert_\n'), ((24698, 24747), 'sfepy.discrete.fem.linearizer.get_eval_dofs', 'get_eval_dofs', (['dofs', 'self.econn', 'ps'], {'ori': 'self.ori'}), '(dofs, self.econn, ps, ori=self.ori)\n', (24711, 24747), False, 'from sfepy.discrete.fem.linearizer import get_eval_dofs, get_eval_coors, create_output\n'), ((24769, 24815), 'sfepy.discrete.fem.linearizer.get_eval_coors', 'get_eval_coors', (['vertex_coors', 'vertex_conn', 'gps'], {}), '(vertex_coors, vertex_conn, gps)\n', (24783, 24815), False, 'from sfepy.discrete.fem.linearizer import get_eval_dofs, get_eval_coors, create_output\n'), ((24873, 24991), 'sfepy.discrete.fem.linearizer.create_output', 'create_output', (['eval_dofs', 'eval_coors', 'vertex_conn.shape[0]', 'ps'], {'min_level': 'min_level', 'max_level': 'max_level', 'eps': 'eps'}), '(eval_dofs, eval_coors, vertex_conn.shape[0], ps, min_level=\n min_level, max_level=max_level, eps=eps)\n', (24886, 24991), False, 'from sfepy.discrete.fem.linearizer import get_eval_dofs, get_eval_coors, create_output\n'), ((25126, 25220), 'sfepy.discrete.fem.mesh.Mesh.from_data', 'Mesh.from_data', (['"""linearized_mesh"""', 'coors', 'None', '[conn]', '[mat_ids]', 'self.domain.mesh.descs'], {}), "('linearized_mesh', coors, None, [conn], [mat_ids], self.\n domain.mesh.descs)\n", (25140, 25220), False, 'from sfepy.discrete.fem.mesh import Mesh\n'), ((28285, 28312), 'sfepy.discrete.fem.meshio.convert_complex_output', 'convert_complex_output', (['out'], {}), '(out)\n', (28307, 28312), False, 'from sfepy.discrete.fem.meshio import convert_complex_output\n'), ((30216, 30228), 'time.clock', 'time.clock', ([], {}), '()\n', (30226, 30228), False, 'import time\n'), ((30951, 30963), 'time.clock', 'time.clock', ([], {}), '()\n', (30961, 30963), False, 'import time\n'), ((31721, 31759), 'sfepy.discrete.integrals.Integral', 'Integral', (['"""i"""'], 
{'order': 'self.approx_order'}), "('i', order=self.approx_order)\n", (31729, 31759), False, 'from sfepy.discrete.integrals import Integral\n'), ((31859, 31887), 'numpy.dot', 'nm.dot', (['bf', 'dofs[self.econn]'], {}), '(bf, dofs[self.econn])\n', (31865, 31887), True, 'import numpy as nm\n'), ((31906, 31932), 'numpy.swapaxes', 'nm.swapaxes', (['data_qp', '(0)', '(1)'], {}), '(data_qp, 0, 1)\n', (31917, 31932), True, 'import numpy as nm\n'), ((37564, 37599), 'six.iteritems', 'six.iteritems', (['self.domain.geom_els'], {}), '(self.domain.geom_els)\n', (37577, 37599), False, 'import six\n'), ((38245, 38367), 'sfepy.discrete.fem.poly_spaces.PolySpace.any_from_args', 'PolySpace.any_from_args', (['name', 'self.gel', 'self.approx_order'], {'base': 'self.poly_space_base', 'force_bubble': 'self.force_bubble'}), '(name, self.gel, self.approx_order, base=self.\n poly_space_base, force_bubble=self.force_bubble)\n', (38268, 38367), False, 'from sfepy.discrete.fem.poly_spaces import PolySpace\n'), ((38669, 38703), 'numpy.zeros', 'nm.zeros', (['(n_cell, n_ep)', 'nm.int32'], {}), '((n_cell, n_ep), nm.int32)\n', (38677, 38703), True, 'import numpy as nm\n'), ((39090, 39105), 'numpy.unique', 'nm.unique', (['conn'], {}), '(conn)\n', (39099, 39105), True, 'import numpy as nm\n'), ((39122, 39161), 'sfepy.discrete.fem.utils.prepare_remap', 'prepare_remap', (['vertices', 'region.n_v_max'], {}), '(vertices, region.n_v_max)\n', (39135, 39161), False, 'from sfepy.discrete.fem.utils import extend_cell_data, prepare_remap, invert_remap, get_min_value\n'), ((39500, 39519), 'numpy.take', 'nm.take', (['remap', 'aux'], {}), '(remap, aux)\n', (39507, 39519), True, 'import numpy as nm\n'), ((42967, 43006), 'numpy.zeros', 'nm.zeros', (['(n_vertex,)'], {'dtype': 'nm.float64'}), '((n_vertex,), dtype=nm.float64)\n', (42975, 43006), True, 'import numpy as nm\n'), ((43029, 43071), 'numpy.zeros', 'nm.zeros', (['(n_vertex, nc)'], {'dtype': 'nm.float64'}), '((n_vertex, nc), dtype=nm.float64)\n', (43037, 43071), True, 'import numpy as nm\n'), ((43157, 43178), 'numpy.squeeze', 'nm.squeeze', (['vg.volume'], {}), '(vg.volume)\n', (43167, 43178), True, 'import numpy as nm\n'), ((43236, 43291), 'numpy.zeros', 'nm.zeros', (['(volume.shape[0], 1, nc, 1)'], {'dtype': 'nm.float64'}), '((volume.shape[0], 1, nc, 1), dtype=nm.float64)\n', (43244, 43291), True, 'import numpy as nm\n'), ((43350, 43379), 'numpy.arange', 'nm.arange', (['nc'], {'dtype': 'nm.int32'}), '(nc, dtype=nm.int32)\n', (43359, 43379), True, 'import numpy as nm\n'), ((44719, 44754), 'six.iteritems', 'six.iteritems', (['self.domain.geom_els'], {}), '(self.domain.geom_els)\n', (44732, 44754), False, 'import six\n'), ((45177, 45299), 'sfepy.discrete.fem.poly_spaces.PolySpace.any_from_args', 'PolySpace.any_from_args', (['name', 'self.gel', 'self.approx_order'], {'base': 'self.poly_space_base', 'force_bubble': 'self.force_bubble'}), '(name, self.gel, self.approx_order, base=self.\n poly_space_base, force_bubble=self.force_bubble)\n', (45200, 45299), False, 'from sfepy.discrete.fem.poly_spaces import PolySpace\n'), ((46303, 46337), 'numpy.zeros', 'nm.zeros', (['(n_cell, n_ep)', 'nm.int32'], {}), '((n_cell, n_ep), nm.int32)\n', (46311, 46337), True, 'import numpy as nm\n'), ((46552, 46598), 'sfepy.discrete.fem.utils.prepare_remap', 'prepare_remap', (['region.vertices', 'region.n_v_max'], {}), '(region.vertices, region.n_v_max)\n', (46565, 46598), False, 'from sfepy.discrete.fem.utils import extend_cell_data, prepare_remap, invert_remap, get_min_value\n'), ((46820, 46857), 
'sfepy.discrete.fem.fe_surface.FESurface', 'FESurface', (['"""aux"""', 'region', 'faces', 'conn'], {}), "('aux', region, faces, conn)\n", (46829, 46857), False, 'from sfepy.discrete.fem.fe_surface import FESurface\n'), ((48320, 48359), 'numpy.zeros', 'nm.zeros', (['(n_vertex,)'], {'dtype': 'nm.float64'}), '((n_vertex,), dtype=nm.float64)\n', (48328, 48359), True, 'import numpy as nm\n'), ((48382, 48424), 'numpy.zeros', 'nm.zeros', (['(n_vertex, nc)'], {'dtype': 'nm.float64'}), '((n_vertex, nc), dtype=nm.float64)\n', (48390, 48424), True, 'import numpy as nm\n'), ((48509, 48530), 'numpy.squeeze', 'nm.squeeze', (['sg.volume'], {}), '(sg.volume)\n', (48519, 48530), True, 'import numpy as nm\n'), ((48589, 48623), 'numpy.arange', 'nm.arange', (['n_cells'], {'dtype': 'nm.int32'}), '(n_cells, dtype=nm.int32)\n', (48598, 48623), True, 'import numpy as nm\n'), ((48642, 48695), 'numpy.zeros', 'nm.zeros', (['(area.shape[0], 1, nc, 1)'], {'dtype': 'nm.float64'}), '((area.shape[0], 1, nc, 1), dtype=nm.float64)\n', (48650, 48695), True, 'import numpy as nm\n'), ((48754, 48783), 'numpy.arange', 'nm.arange', (['nc'], {'dtype': 'nm.int32'}), '(nc, dtype=nm.int32)\n', (48763, 48783), True, 'import numpy as nm\n'), ((49460, 49479), 'numpy.prod', 'nm.prod', (['self.shape'], {}), '(self.shape)\n', (49467, 49479), True, 'import numpy as nm\n'), ((1392, 1424), 'numpy.zeros_like', 'nm.zeros_like', (['domain.mesh.coors'], {}), '(domain.mesh.coors)\n', (1405, 1424), True, 'import numpy as nm\n'), ((12042, 12071), 'numpy.hstack', 'nm.hstack', (['(self.efaces, efs)'], {}), '((self.efaces, efs))\n', (12051, 12071), True, 'import numpy as nm\n'), ((12268, 12297), 'numpy.hstack', 'nm.hstack', (['(self.efaces, efs)'], {}), '((self.efaces, efs))\n', (12277, 12297), True, 'import numpy as nm\n'), ((17423, 17443), 'numpy.concatenate', 'nm.concatenate', (['dofs'], {}), '(dofs)\n', (17437, 17443), True, 'import numpy as nm\n'), ((18283, 18317), 'sfepy.base.base.Struct', 'Struct', ([], {'vals': 'vals', 'weights': 'weights'}), '(vals=vals, weights=weights)\n', (18289, 18317), False, 'from sfepy.base.base import Struct\n'), ((19029, 19066), 'numpy.setdiff1d', 'nm.setdiff1d', (['self.econn0', 'self.econn'], {}), '(self.econn0, self.econn)\n', (19041, 19066), True, 'import numpy as nm\n'), ((22147, 22209), 'sfepy.base.base.Struct', 'Struct', ([], {'name': "('BQP_%s' % sd.bkey)", 'vals': 'vals', 'weights': 'qp.weights'}), "(name='BQP_%s' % sd.bkey, vals=vals, weights=qp.weights)\n", (22153, 22209), False, 'from sfepy.base.base import Struct\n'), ((22503, 22521), 'numpy.isrealobj', 'nm.isrealobj', (['dofs'], {}), '(dofs)\n', (22515, 22521), True, 'import numpy as nm\n'), ((22926, 22976), 'numpy.empty', 'nm.empty', (['(n_nod, dofs.shape[1])'], {'dtype': 'self.dtype'}), '((n_nod, dofs.shape[1]), dtype=self.dtype)\n', (22934, 22976), True, 'import numpy as nm\n'), ((23099, 23163), 'sfepy.discrete.fem.utils.extend_cell_data', 'extend_cell_data', (['dofs', 'self.domain', 'self.region'], {'val': 'fill_value'}), '(dofs, self.domain, self.region, val=fill_value)\n', (23115, 23163), False, 'from sfepy.discrete.fem.utils import extend_cell_data, prepare_remap, invert_remap, get_min_value\n'), ((26672, 26692), 'sfepy.base.base.Struct', 'Struct', ([], {'kind': '"""strip"""'}), "(kind='strip')\n", (26678, 26692), False, 'from sfepy.base.base import Struct\n'), ((26774, 26886), 'sfepy.base.base.Struct', 'Struct', ([], {'name': '"""output_data"""', 'mode': '"""full"""', 'data': 'dofs', 'var_name': 'var_name', 'dofs': 'dof_names', 'field_name': 
'self.name'}), "(name='output_data', mode='full', data=dofs, var_name=var_name, dofs=\n dof_names, field_name=self.name)\n", (26780, 26886), False, 'from sfepy.base.base import Struct\n'), ((28974, 29035), 'sfepy.discrete.fem.mesh.Mesh.from_data', 'Mesh.from_data', (['self.name', 'coors', 'None', 'conns', 'mat_ids', 'descs'], {}), '(self.name, coors, None, conns, mat_ids, descs)\n', (28988, 29035), False, 'from sfepy.discrete.fem.mesh import Mesh\n'), ((30172, 30201), 'sfepy.base.base.Struct', 'Struct', ([], {'name': '"""evaluate_cache"""'}), "(name='evaluate_cache')\n", (30178, 30201), False, 'from sfepy.base.base import Struct\n'), ((30418, 30444), 'sfepy.discrete.fem.geometry_element.create_geometry_elements', 'create_geometry_elements', ([], {}), '()\n', (30442, 30444), False, 'from sfepy.discrete.fem.geometry_element import create_geometry_elements\n'), ((31061, 31080), 'scipy.spatial.KDTree', 'KDTree', (['cmesh.coors'], {}), '(cmesh.coors)\n', (31067, 31080), False, 'from scipy.spatial import KDTree\n'), ((33670, 33715), 'sfepy.discrete.fem.mappings.VolumeMapping', 'VolumeMapping', (['coors', 'conn'], {'poly_space': 'geo_ps'}), '(coors, conn, poly_space=geo_ps)\n', (33683, 33715), False, 'from sfepy.discrete.fem.mappings import VolumeMapping, SurfaceMapping\n'), ((37069, 37137), 'sfepy.base.base.output', 'output', (['("bad region kind! (is: %r, should be: \'cell\')" % region.kind)'], {}), '("bad region kind! (is: %r, should be: \'cell\')" % region.kind)\n', (37075, 37137), False, 'from sfepy.base.base import output, get_default, assert_\n'), ((39221, 39237), 'numpy.diff', 'nm.diff', (['offsets'], {}), '(offsets)\n', (39228, 39237), True, 'import numpy as nm\n'), ((40842, 40934), 'sfepy.discrete.fem.fe_surface.FESurface', 'FESurface', (["('surface_data_%s' % region.name)", 'region', 'self.efaces', 'self.econn', 'self.region'], {}), "('surface_data_%s' % region.name, region, self.efaces, self.econn,\n self.region)\n", (40851, 40934), False, 'from sfepy.discrete.fem.fe_surface import FESurface\n'), ((43536, 43555), 'numpy.meshgrid', 'nm.meshgrid', (['ir', 'cc'], {}), '(ir, cc)\n', (43547, 43555), True, 'import numpy as nm\n'), ((44259, 44353), 'sfepy.base.base.output', 'output', (["('bad region topological dimension and kind! (%d, %s)' % (region.tdim,\n region.kind))"], {}), "('bad region topological dimension and kind! (%d, %s)' % (region.tdim,\n region.kind))\n", (44265, 44353), False, 'from sfepy.base.base import output, get_default, assert_\n'), ((44504, 44568), 'sfepy.base.base.output', 'output', (["('%d region facets are not on the domain surface!' % n_ns)"], {}), "('%d region facets are not on the domain surface!' 
% n_ns)\n", (44510, 44568), False, 'from sfepy.base.base import output, get_default, assert_\n'), ((49032, 49051), 'numpy.meshgrid', 'nm.meshgrid', (['ir', 'cc'], {}), '(ir, cc)\n', (49043, 49051), True, 'import numpy as nm\n'), ((12586, 12615), 'numpy.hstack', 'nm.hstack', (['(self.eedges, efs)'], {}), '((self.eedges, efs))\n', (12595, 12615), True, 'import numpy as nm\n'), ((22552, 22571), 'sfepy.discrete.fem.utils.get_min_value', 'get_min_value', (['dofs'], {}), '(dofs)\n', (22565, 22571), False, 'from sfepy.discrete.fem.utils import extend_cell_data, prepare_remap, invert_remap, get_min_value\n'), ((22698, 22722), 'sfepy.discrete.fem.utils.get_min_value', 'get_min_value', (['dofs.real'], {}), '(dofs.real)\n', (22711, 22722), False, 'from sfepy.discrete.fem.utils import extend_cell_data, prepare_remap, invert_remap, get_min_value\n'), ((28095, 28213), 'sfepy.base.base.Struct', 'Struct', ([], {'name': '"""output_data"""', 'mode': '"""vertex"""', 'data': 'vdofs', 'var_name': 'var_name', 'dofs': 'dof_names', 'mesh': 'mesh', 'levels': 'levels'}), "(name='output_data', mode='vertex', data=vdofs, var_name=var_name,\n dofs=dof_names, mesh=mesh, levels=levels)\n", (28101, 28213), False, 'from sfepy.base.base import Struct\n'), ((34021, 34051), 'sfepy.base.base.assert_', 'assert_', (['(self.approx_order > 0)'], {}), '(self.approx_order > 0)\n', (34028, 34051), False, 'from sfepy.base.base import output, get_default, assert_\n'), ((34483, 34529), 'sfepy.discrete.fem.mappings.SurfaceMapping', 'SurfaceMapping', (['coors', 'conn'], {'poly_space': 'geo_ps'}), '(coors, conn, poly_space=geo_ps)\n', (34497, 34529), False, 'from sfepy.discrete.fem.mappings import VolumeMapping, SurfaceMapping\n'), ((37247, 37352), 'sfepy.base.base.output', 'output', (["('cells with a bad topological dimension! (%d == %d)' % (region.kind_tdim,\n domain.shape.tdim))"], {}), "('cells with a bad topological dimension! 
(%d == %d)' % (region.\n kind_tdim, domain.shape.tdim))\n", (37253, 37352), False, 'from sfepy.base.base import output, get_default, assert_\n'), ((41837, 41868), 'numpy.take', 'nm.take', (['self.econn', 'ii'], {'axis': '(0)'}), '(self.econn, ii, axis=0)\n', (41844, 41868), True, 'import numpy as nm\n'), ((11864, 11902), 'numpy.concatenate', 'nm.concatenate', (['[nd[ie] for ie in eof]'], {}), '([nd[ie] for ie in eof])\n', (11878, 11902), True, 'import numpy as nm\n'), ((11922, 11935), 'numpy.array', 'nm.array', (['efs'], {}), '(efs)\n', (11930, 11935), True, 'import numpy as nm\n'), ((12148, 12161), 'numpy.array', 'nm.array', (['efs'], {}), '(efs)\n', (12156, 12161), True, 'import numpy as nm\n'), ((22758, 22782), 'sfepy.discrete.fem.utils.get_min_value', 'get_min_value', (['dofs.imag'], {}), '(dofs.imag)\n', (22771, 22782), False, 'from sfepy.discrete.fem.utils import extend_cell_data, prepare_remap, invert_remap, get_min_value\n'), ((30902, 30914), 'time.clock', 'time.clock', ([], {}), '()\n', (30912, 30914), False, 'import time\n'), ((31115, 31127), 'time.clock', 'time.clock', ([], {}), '()\n', (31125, 31127), False, 'import time\n'), ((12454, 12467), 'numpy.array', 'nm.array', (['efs'], {}), '(efs)\n', (12462, 12467), True, 'import numpy as nm\n'), ((27341, 27432), 'sfepy.base.base.Struct', 'Struct', ([], {'name': '"""output_data"""', 'mode': '"""vertex"""', 'data': 'ext', 'var_name': 'var_name', 'dofs': 'dof_names'}), "(name='output_data', mode='vertex', data=ext, var_name=var_name, dofs\n =dof_names)\n", (27347, 27432), False, 'from sfepy.base.base import Struct\n'), ((27625, 27714), 'sfepy.base.base.Struct', 'Struct', ([], {'name': '"""output_data"""', 'mode': '"""cell"""', 'data': 'ext', 'var_name': 'var_name', 'dofs': 'dof_names'}), "(name='output_data', mode='cell', data=ext, var_name=var_name, dofs=\n dof_names)\n", (27631, 27714), False, 'from sfepy.base.base import Struct\n'), ((34971, 34988), 'numpy.roll', 'nm.roll', (['indx', '(-1)'], {}), '(indx, -1)\n', (34978, 34988), True, 'import numpy as nm\n'), ((35161, 35187), 'sfepy.base.base.Struct', 'Struct', ([], {'n_nod': 'bf.shape[-1]'}), '(n_nod=bf.shape[-1])\n', (35167, 35187), False, 'from sfepy.base.base import Struct\n'), ((35888, 35914), 'sfepy.base.base.Struct', 'Struct', ([], {'n_nod': 'bf.shape[-1]'}), '(n_nod=bf.shape[-1])\n', (35894, 35914), False, 'from sfepy.base.base import Struct\n')]
|
from typing import Optional
from sqlmodel import Field, SQLModel
from pydantic import validator
from datetime import datetime
import numpy as np
class Forecast(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
user_id: int = Field(foreign_key="app_db.appuser.id")
epic_id: int = Field(foreign_key="app_db.epic.id")
days: float
month: int
year: int
created_at: datetime
updated_at: datetime
is_locked: bool
__table_args__ = {"schema": "app_db"}
@validator("days")
def valid_days(cls, days_input):
assert days_input in np.arange(
0, 24, 0.1
), "Work days cannot be greater than 24"
return days_input
@validator("year")
def valid_year(cls, year_input):
assert year_input >= datetime.now().year
return year_input
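# Hedged aside (illustrative, not part of the model): the `days` validator
# tests membership in the float grid np.arange(0, 24, 0.1), so a value that
# is not exactly representable on that grid may fail the assert even if it
# lies in [0, 24). A tolerance-based check, assuming only numpy, would be:
#   allowed = np.arange(0, 24, 0.1)
#   np.isclose(allowed, days_input).any()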
|
[
"sqlmodel.Field"
] |
[((210, 247), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (215, 247), False, 'from sqlmodel import Field, SQLModel\n'), ((267, 305), 'sqlmodel.Field', 'Field', ([], {'foreign_key': '"""app_db.appuser.id"""'}), "(foreign_key='app_db.appuser.id')\n", (272, 305), False, 'from sqlmodel import Field, SQLModel\n'), ((325, 360), 'sqlmodel.Field', 'Field', ([], {'foreign_key': '"""app_db.epic.id"""'}), "(foreign_key='app_db.epic.id')\n", (330, 360), False, 'from sqlmodel import Field, SQLModel\n'), ((525, 542), 'pydantic.validator', 'validator', (['"""days"""'], {}), "('days')\n", (534, 542), False, 'from pydantic import validator\n'), ((724, 741), 'pydantic.validator', 'validator', (['"""year"""'], {}), "('year')\n", (733, 741), False, 'from pydantic import validator\n'), ((609, 630), 'numpy.arange', 'np.arange', (['(0)', '(24)', '(0.1)'], {}), '(0, 24, 0.1)\n', (618, 630), True, 'import numpy as np\n'), ((808, 822), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (820, 822), False, 'from datetime import datetime\n')]
|
from typing import Optional
import strawberry
from sqlmodel import (
SQLModel,
Field,
create_engine,
select,
Session
)
engine = create_engine('sqlite:///database.db')
class Person(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
nome: str
idade: int
SQLModel.metadata.create_all(engine)
def create_app(nome: str, idade:int):
person = Person(nome=nome, idade=idade)
with Session(engine) as session:
session.add(person)
session.commit()
session.refresh(person)
return person
@strawberry.type
class Pessoa:
id: Optional[int]
nome: str
idade: int
@strawberry.type
class Query:
@strawberry.field
def all_pessoa(self) -> list[Pessoa]:
query = select(Person)
with Session(engine) as session:
result = session.execute(query).scalars().all()
return result
@strawberry.type
class Mutation:
create_pessoa: Pessoa = strawberry.field(resolver=create_app)
schema = strawberry.Schema(query=Query, mutation=Mutation)
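# Hedged usage sketch (assumes the SQLite file above is present): a strawberry
# Schema can be exercised without an HTTP server, e.g.
#   result = schema.execute_sync("{ allPessoa { id nome idade } }")
#   print(result.data, result.errors)
# or mounted in an ASGI app via strawberry.asgi.GraphQL(schema).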
|
[
"sqlmodel.create_engine",
"sqlmodel.Session",
"sqlmodel.SQLModel.metadata.create_all",
"sqlmodel.Field",
"sqlmodel.select"
] |
[((150, 188), 'sqlmodel.create_engine', 'create_engine', (['"""sqlite:///database.db"""'], {}), "('sqlite:///database.db')\n", (163, 188), False, 'from sqlmodel import SQLModel, Field, create_engine, select, Session\n'), ((320, 356), 'sqlmodel.SQLModel.metadata.create_all', 'SQLModel.metadata.create_all', (['engine'], {}), '(engine)\n', (348, 356), False, 'from sqlmodel import SQLModel, Field, create_engine, select, Session\n'), ((1026, 1075), 'strawberry.Schema', 'strawberry.Schema', ([], {'query': 'Query', 'mutation': 'Mutation'}), '(query=Query, mutation=Mutation)\n', (1043, 1075), False, 'import strawberry\n'), ((251, 288), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (256, 288), False, 'from sqlmodel import SQLModel, Field, create_engine, select, Session\n'), ((978, 1015), 'strawberry.field', 'strawberry.field', ([], {'resolver': 'create_app'}), '(resolver=create_app)\n', (994, 1015), False, 'import strawberry\n'), ((449, 464), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (456, 464), False, 'from sqlmodel import SQLModel, Field, create_engine, select, Session\n'), ((777, 791), 'sqlmodel.select', 'select', (['Person'], {}), '(Person)\n', (783, 791), False, 'from sqlmodel import SQLModel, Field, create_engine, select, Session\n'), ((805, 820), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (812, 820), False, 'from sqlmodel import SQLModel, Field, create_engine, select, Session\n')]
|
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from test.utils import (
ActiveOpr,
AdaptiveAvgPool2dOpr,
BnOpr,
BroadcastOpr,
ConvBn2dOpr,
ConvBnRelu2dOpr,
ConvOpr,
ConvRelu2dOpr,
DropoutOpr,
ElemwiseOpr,
FConcatOpr,
FlattenOpr,
LinearBnOpr,
LinearOpr,
MatrixMulBnOpr,
PoolOpr,
ReduceOpr,
RepeatOpr,
ReshapeOpr,
SqueezeOpr,
SubtensorOpr,
TransposeOpr,
XORNet,
XORNet_LeakyRelu,
)
import caffe # pylint: disable=import-error
import megengine as mge
import megengine.hub
import numpy as np
import pytest
from mgeconvert.converters.tm_to_caffe import tracedmodule_to_caffe
from .tm_utils import get_traced_module
max_error = 1e-6
tmp_file = "test_module"
def _test_convert_result(
inputs,
trace_module,
mge_results,
max_err,
input_data_type=None,
input_scales=None,
input_zero_points=None,
require_quantize=False,
param_fake_quant=False,
split_conv_relu=False,
fuse_bn=False,
input_name="x",
convert_backend=1,
):
tracedmodule_to_caffe(
trace_module,
prototxt=tmp_file + ".txt",
caffemodel=tmp_file + ".caffemodel",
input_data_type=input_data_type,
input_scales=input_scales,
input_zero_points=input_zero_points,
require_quantize=require_quantize,
param_fake_quant=param_fake_quant,
split_conv_relu=split_conv_relu,
fuse_bn=fuse_bn,
convert_backend=convert_backend,
)
caffe_net = caffe.Net(tmp_file + ".txt", tmp_file + ".caffemodel", caffe.TEST)
for i in caffe_net.blobs.keys():
if isinstance(input_name, list):
for idx, name in enumerate(input_name):
if name.strip() == i.strip():
caffe_net.blobs[i].data[...] = inputs[idx]
break
else:
if input_name in i:
caffe_net.blobs[i].data[...] = inputs
break
out_dict = caffe_net.forward()
if isinstance(mge_results, dict):
assert len(list(out_dict.keys())) == len(list(mge_results.keys()))
for name in mge_results.keys():
assert name._name in out_dict.keys()
assert out_dict[name._name].shape == mge_results[name].shape
np.testing.assert_allclose(
out_dict[name._name], mge_results[name], atol=max_err
)
else:
caffe_results = list(out_dict.values())[0]
assert caffe_results.shape == mge_results.shape
np.testing.assert_allclose(
caffe_results, mge_results, rtol=max_err, atol=max_err
)
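# Descriptive summary of the helper above: tracedmodule_to_caffe dumps a
# prototxt/caffemodel pair under `tmp_file`, the pair is reloaded with
# caffe.Net, inputs are copied into the matching input blobs by name, and the
# Caffe forward results are compared to the MegEngine reference outputs with
# np.testing.assert_allclose at tolerance `max_err`.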
@pytest.mark.parametrize("mode", ["normal", "group", "transpose"])
def test_conv2d(mode):
net = ConvOpr(mode)
tm_module, mge_result = get_traced_module(net, mge.tensor(net.data))
_test_convert_result(net.data, tm_module, mge_result, max_error)
def test_convrelu():
net = ConvRelu2dOpr()
traced_module, tm_result = get_traced_module(net, mge.tensor(net.data))
_test_convert_result(net.data, traced_module, tm_result, max_error)
def test_convbn():
net = ConvBn2dOpr()
net.eval()
traced_module, tm_result = get_traced_module(net, mge.tensor(net.data))
_test_convert_result(net.data, traced_module, tm_result, max_error)
def test_convbnrelu():
net = ConvBnRelu2dOpr()
net.eval()
traced_module, tm_result = get_traced_module(net, mge.tensor(net.data))
_test_convert_result(net.data, traced_module, tm_result, max_error)
def test_linear():
net = LinearOpr()
tm_module, mge_result = get_traced_module(net, mge.tensor(net.data))
_test_convert_result(net.data, tm_module, mge_result, max_error)
def test_flatten_linear():
net = LinearOpr("flatten")
tm_module, mge_result = get_traced_module(net, mge.tensor(net.data1))
_test_convert_result(net.data1, tm_module, mge_result, max_error, convert_backend=4)
def test_linear_bn():
net = LinearBnOpr()
for _ in range(10):
net(mge.tensor(net.data)).numpy()
net.eval()
tm_module, mge_result = get_traced_module(net, mge.tensor(net.data))
_test_convert_result(net.data, tm_module, mge_result, 1e-4, fuse_bn=True)
@pytest.mark.parametrize("mode", [True, False])
def test_matmul_bn(mode):
net = MatrixMulBnOpr(mode)
for _ in range(10):
net(mge.tensor(net.data)).numpy()
net.eval()
tm_module, mge_result = get_traced_module(net, mge.tensor(net.data))
_test_convert_result(net.data, tm_module, mge_result, 1e-4, fuse_bn=True)
def test_squeeze():
net = SqueezeOpr()
tm_module, mge_result = get_traced_module(net, mge.tensor(net.data))
_test_convert_result(net.data, tm_module, mge_result, max_error, input_name="a")
@pytest.mark.parametrize("mode", ["max", "avg"])
def test_pooling(mode):
if megengine.__version__ > "0.6.0" and mode == "avg":
return
net = PoolOpr(mode)
tm_module, mge_result = get_traced_module(net, mge.tensor(net.data))
_test_convert_result(net.data, tm_module, mge_result, max_error)
@pytest.mark.parametrize("mode", ["bn1d", "bn2d"])
def test_batchnorm(mode):
net = BnOpr(mode)
net.eval()
data = net.data1 if mode == "bn1d" else net.data2
tm_module, mge_result = get_traced_module(net, mge.tensor(data))
_test_convert_result(data, tm_module, mge_result, max_error)
def test_subtensor():
net = SubtensorOpr()
tm_module, mge_result = get_traced_module(net, mge.tensor(net.data))
_test_convert_result(net.data, tm_module, mge_result, max_error)
def test_transpose():
net = TransposeOpr()
tm_module, mge_result = get_traced_module(net, mge.tensor(net.data))
_test_convert_result(net.data, tm_module, mge_result, max_error)
def test_concat():
net = FConcatOpr()
data = np.random.random((1, 2, 4, 5)).astype(np.float32)
list_data = [mge.tensor(data), mge.tensor(data)]
tm_module, mge_result = get_traced_module(net, list_data)
_test_convert_result(
[data, data], tm_module, mge_result, max_error, input_name=["inps_0", "inps_1"]
)
def test_reshape():
net = ReshapeOpr()
tm_module, mge_result = get_traced_module(net, mge.tensor(net.data))
_test_convert_result(net.data, tm_module, mge_result, max_error)
@pytest.mark.parametrize(
"mode", ["add", "sub", "mul", "div", "abs", "exp", "log", "max", "pow"]
)
def test_elemwise(mode):
net = ElemwiseOpr(mode)
tm_module, mge_result = get_traced_module(net, mge.tensor(net.data))
_test_convert_result(net.data, tm_module, mge_result, max_error, input_name="a")
@pytest.mark.parametrize(
"mode", ["add", "sub", "mul", "div", "abs", "exp", "log", "pow"]
)
def test_elemwise_broadcast(mode):
net = ElemwiseOpr(mode)
tm_module, mge_result = get_traced_module(
net, mge.tensor(np.array([2.0]).astype("float32"))
)
_test_convert_result(
np.array([2.0]), tm_module, mge_result, max_error, input_name="a"
)
@pytest.mark.parametrize(
"mode",
[
"relu",
"sigmoid",
"tanh",
"leaky_relu",
"softmax",
"silu",
"relu6",
"hsigmoid",
"hswish",
],
)
def test_active(mode):
if megengine.__version__ < "1.5.0" and mode == "silu":
return
net = ActiveOpr(mode)
tm_module, mge_result = get_traced_module(net, mge.tensor(net.data))
_test_convert_result(net.data, tm_module, mge_result, max_error)
@pytest.mark.parametrize("mode", ["relu",])
def test_active_inplace(mode):
net = ActiveOpr(mode)
tm_module, mge_result = get_traced_module(net, mge.tensor(net.data))
_test_convert_result(net.data, tm_module, mge_result, max_error, convert_backend=4)
@pytest.mark.parametrize("mode", ["max", "sum", "mean"])
def test_reduce(mode):
net = ReduceOpr(mode)
tm_module, mge_result = get_traced_module(net, mge.tensor(net.data))
_test_convert_result(net.data, tm_module, mge_result, max_error, input_name="a")
def test_broadcast():
net = BroadcastOpr()
tm_module, mge_result = get_traced_module(net, mge.tensor(net.data))
_test_convert_result(net.data, tm_module, mge_result, max_error)
def test_repeat():
net = RepeatOpr()
tm_module, mge_result = get_traced_module(net, mge.tensor(net.data))
_test_convert_result(net.data, tm_module, mge_result, max_error)
def test_flatten():
net = FlattenOpr()
tm_module, mge_result = get_traced_module(net, mge.tensor(net.data))
_test_convert_result(net.data, tm_module, mge_result, max_error, input_name="inps")
def test_dropout():
net = DropoutOpr()
tm_module, mge_result = get_traced_module(net, mge.tensor(net.data))
_test_convert_result(net.data, tm_module, mge_result, max_error, input_name="inps")
def test_adapetive_avg_pool():
net = AdaptiveAvgPool2dOpr()
tm_module, mge_result = get_traced_module(net, mge.tensor(net.data))
_test_convert_result(net.data, tm_module, mge_result, max_error, input_name="inps")
@pytest.mark.parametrize(
"model",
[
"shufflenet_v2_x0_5",
"shufflenet_v2_x1_0",
"resnet18",
"resnet50",
"resnet101",
"resnext50_32x4d",
],
)
def test_model(model):
data = (
np.random.randint(0, 255, 3 * 224 * 224)
.reshape((1, 3, 224, 224))
.astype(np.float32)
)
if megengine.__version__ < "1.1.0":
commit_id = "dc2f2cfb228a135747d083517b98aea56e7aab92"
else:
commit_id = None
net = megengine.hub.load(
"megengine/models", model, use_cache=False, commit=commit_id, pretrained=True
)
net.eval()
tm_module, mge_result = get_traced_module(net, mge.tensor(data))
_test_convert_result(data, tm_module, mge_result, 1e-2)
def test_xornet():
if megengine.__version__ < "1.1.0":
return
net = XORNet()
net.eval()
tm_module, mge_result = get_traced_module(net, mge.tensor(net.data))
_test_convert_result(net.data, tm_module, mge_result, max_error)
def test_leakyrelu_model():
if megengine.__version__ < "1.1.0":
return
net = XORNet_LeakyRelu()
net.eval()
tm_module, mge_result = get_traced_module(net, mge.tensor(net.data))
_test_convert_result(net.data, tm_module, mge_result, max_error)
|
[
"megengine.tensor"
] |
[((2960, 3025), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""mode"""', "['normal', 'group', 'transpose']"], {}), "('mode', ['normal', 'group', 'transpose'])\n", (2983, 3025), False, 'import pytest\n'), ((4527, 4573), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""mode"""', '[True, False]'], {}), "('mode', [True, False])\n", (4550, 4573), False, 'import pytest\n'), ((5069, 5116), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""mode"""', "['max', 'avg']"], {}), "('mode', ['max', 'avg'])\n", (5092, 5116), False, 'import pytest\n'), ((5383, 5432), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""mode"""', "['bn1d', 'bn2d']"], {}), "('mode', ['bn1d', 'bn2d'])\n", (5406, 5432), False, 'import pytest\n'), ((6596, 6696), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""mode"""', "['add', 'sub', 'mul', 'div', 'abs', 'exp', 'log', 'max', 'pow']"], {}), "('mode', ['add', 'sub', 'mul', 'div', 'abs', 'exp',\n 'log', 'max', 'pow'])\n", (6619, 6696), False, 'import pytest\n'), ((6913, 7006), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""mode"""', "['add', 'sub', 'mul', 'div', 'abs', 'exp', 'log', 'pow']"], {}), "('mode', ['add', 'sub', 'mul', 'div', 'abs', 'exp',\n 'log', 'pow'])\n", (6936, 7006), False, 'import pytest\n'), ((7293, 7421), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""mode"""', "['relu', 'sigmoid', 'tanh', 'leaky_relu', 'softmax', 'silu', 'relu6',\n 'hsigmoid', 'hswish']"], {}), "('mode', ['relu', 'sigmoid', 'tanh', 'leaky_relu',\n 'softmax', 'silu', 'relu6', 'hsigmoid', 'hswish'])\n", (7316, 7421), False, 'import pytest\n'), ((7776, 7817), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""mode"""', "['relu']"], {}), "('mode', ['relu'])\n", (7799, 7817), False, 'import pytest\n'), ((8040, 8095), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""mode"""', "['max', 'sum', 'mean']"], {}), "('mode', ['max', 'sum', 'mean'])\n", (8063, 8095), False, 'import pytest\n'), ((9321, 9463), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""model"""', "['shufflenet_v2_x0_5', 'shufflenet_v2_x1_0', 'resnet18', 'resnet50',\n 'resnet101', 'resnext50_32x4d']"], {}), "('model', ['shufflenet_v2_x0_5',\n 'shufflenet_v2_x1_0', 'resnet18', 'resnet50', 'resnet101',\n 'resnext50_32x4d'])\n", (9344, 9463), False, 'import pytest\n'), ((1375, 1745), 'mgeconvert.converters.tm_to_caffe.tracedmodule_to_caffe', 'tracedmodule_to_caffe', (['trace_module'], {'prototxt': "(tmp_file + '.txt')", 'caffemodel': "(tmp_file + '.caffemodel')", 'input_data_type': 'input_data_type', 'input_scales': 'input_scales', 'input_zero_points': 'input_zero_points', 'require_quantize': 'require_quantize', 'param_fake_quant': 'param_fake_quant', 'split_conv_relu': 'split_conv_relu', 'fuse_bn': 'fuse_bn', 'convert_backend': 'convert_backend'}), "(trace_module, prototxt=tmp_file + '.txt', caffemodel=\n tmp_file + '.caffemodel', input_data_type=input_data_type, input_scales\n =input_scales, input_zero_points=input_zero_points, require_quantize=\n require_quantize, param_fake_quant=param_fake_quant, split_conv_relu=\n split_conv_relu, fuse_bn=fuse_bn, convert_backend=convert_backend)\n", (1396, 1745), False, 'from mgeconvert.converters.tm_to_caffe import tracedmodule_to_caffe\n'), ((1838, 1904), 'caffe.Net', 'caffe.Net', (["(tmp_file + '.txt')", "(tmp_file + '.caffemodel')", 'caffe.TEST'], {}), "(tmp_file + '.txt', tmp_file + '.caffemodel', caffe.TEST)\n", (1847, 1904), False, 'import caffe\n'), ((3059, 3072), 'test.utils.ConvOpr', 
'ConvOpr', (['mode'], {}), '(mode)\n', (3066, 3072), False, 'from test.utils import ActiveOpr, AdaptiveAvgPool2dOpr, BnOpr, BroadcastOpr, ConvBn2dOpr, ConvBnRelu2dOpr, ConvOpr, ConvRelu2dOpr, DropoutOpr, ElemwiseOpr, FConcatOpr, FlattenOpr, LinearBnOpr, LinearOpr, MatrixMulBnOpr, PoolOpr, ReduceOpr, RepeatOpr, ReshapeOpr, SqueezeOpr, SubtensorOpr, TransposeOpr, XORNet, XORNet_LeakyRelu\n'), ((3248, 3263), 'test.utils.ConvRelu2dOpr', 'ConvRelu2dOpr', ([], {}), '()\n', (3261, 3263), False, 'from test.utils import ActiveOpr, AdaptiveAvgPool2dOpr, BnOpr, BroadcastOpr, ConvBn2dOpr, ConvBnRelu2dOpr, ConvOpr, ConvRelu2dOpr, DropoutOpr, ElemwiseOpr, FConcatOpr, FlattenOpr, LinearBnOpr, LinearOpr, MatrixMulBnOpr, PoolOpr, ReduceOpr, RepeatOpr, ReshapeOpr, SqueezeOpr, SubtensorOpr, TransposeOpr, XORNet, XORNet_LeakyRelu\n'), ((3443, 3456), 'test.utils.ConvBn2dOpr', 'ConvBn2dOpr', ([], {}), '()\n', (3454, 3456), False, 'from test.utils import ActiveOpr, AdaptiveAvgPool2dOpr, BnOpr, BroadcastOpr, ConvBn2dOpr, ConvBnRelu2dOpr, ConvOpr, ConvRelu2dOpr, DropoutOpr, ElemwiseOpr, FConcatOpr, FlattenOpr, LinearBnOpr, LinearOpr, MatrixMulBnOpr, PoolOpr, ReduceOpr, RepeatOpr, ReshapeOpr, SqueezeOpr, SubtensorOpr, TransposeOpr, XORNet, XORNet_LeakyRelu\n'), ((3655, 3672), 'test.utils.ConvBnRelu2dOpr', 'ConvBnRelu2dOpr', ([], {}), '()\n', (3670, 3672), False, 'from test.utils import ActiveOpr, AdaptiveAvgPool2dOpr, BnOpr, BroadcastOpr, ConvBn2dOpr, ConvBnRelu2dOpr, ConvOpr, ConvRelu2dOpr, DropoutOpr, ElemwiseOpr, FConcatOpr, FlattenOpr, LinearBnOpr, LinearOpr, MatrixMulBnOpr, PoolOpr, ReduceOpr, RepeatOpr, ReshapeOpr, SqueezeOpr, SubtensorOpr, TransposeOpr, XORNet, XORNet_LeakyRelu\n'), ((3867, 3878), 'test.utils.LinearOpr', 'LinearOpr', ([], {}), '()\n', (3876, 3878), False, 'from test.utils import ActiveOpr, AdaptiveAvgPool2dOpr, BnOpr, BroadcastOpr, ConvBn2dOpr, ConvBnRelu2dOpr, ConvOpr, ConvRelu2dOpr, DropoutOpr, ElemwiseOpr, FConcatOpr, FlattenOpr, LinearBnOpr, LinearOpr, MatrixMulBnOpr, PoolOpr, ReduceOpr, RepeatOpr, ReshapeOpr, SqueezeOpr, SubtensorOpr, TransposeOpr, XORNet, XORNet_LeakyRelu\n'), ((4060, 4080), 'test.utils.LinearOpr', 'LinearOpr', (['"""flatten"""'], {}), "('flatten')\n", (4069, 4080), False, 'from test.utils import ActiveOpr, AdaptiveAvgPool2dOpr, BnOpr, BroadcastOpr, ConvBn2dOpr, ConvBnRelu2dOpr, ConvOpr, ConvRelu2dOpr, DropoutOpr, ElemwiseOpr, FConcatOpr, FlattenOpr, LinearBnOpr, LinearOpr, MatrixMulBnOpr, PoolOpr, ReduceOpr, RepeatOpr, ReshapeOpr, SqueezeOpr, SubtensorOpr, TransposeOpr, XORNet, XORNet_LeakyRelu\n'), ((4278, 4291), 'test.utils.LinearBnOpr', 'LinearBnOpr', ([], {}), '()\n', (4289, 4291), False, 'from test.utils import ActiveOpr, AdaptiveAvgPool2dOpr, BnOpr, BroadcastOpr, ConvBn2dOpr, ConvBnRelu2dOpr, ConvOpr, ConvRelu2dOpr, DropoutOpr, ElemwiseOpr, FConcatOpr, FlattenOpr, LinearBnOpr, LinearOpr, MatrixMulBnOpr, PoolOpr, ReduceOpr, RepeatOpr, ReshapeOpr, SqueezeOpr, SubtensorOpr, TransposeOpr, XORNet, XORNet_LeakyRelu\n'), ((4610, 4630), 'test.utils.MatrixMulBnOpr', 'MatrixMulBnOpr', (['mode'], {}), '(mode)\n', (4624, 4630), False, 'from test.utils import ActiveOpr, AdaptiveAvgPool2dOpr, BnOpr, BroadcastOpr, ConvBn2dOpr, ConvBnRelu2dOpr, ConvOpr, ConvRelu2dOpr, DropoutOpr, ElemwiseOpr, FConcatOpr, FlattenOpr, LinearBnOpr, LinearOpr, MatrixMulBnOpr, PoolOpr, ReduceOpr, RepeatOpr, ReshapeOpr, SqueezeOpr, SubtensorOpr, TransposeOpr, XORNet, XORNet_LeakyRelu\n'), ((4895, 4907), 'test.utils.SqueezeOpr', 'SqueezeOpr', ([], {}), '()\n', (4905, 4907), False, 'from test.utils 
import ActiveOpr, AdaptiveAvgPool2dOpr, BnOpr, BroadcastOpr, ConvBn2dOpr, ConvBnRelu2dOpr, ConvOpr, ConvRelu2dOpr, DropoutOpr, ElemwiseOpr, FConcatOpr, FlattenOpr, LinearBnOpr, LinearOpr, MatrixMulBnOpr, PoolOpr, ReduceOpr, RepeatOpr, ReshapeOpr, SqueezeOpr, SubtensorOpr, TransposeOpr, XORNet, XORNet_LeakyRelu\n'), ((5224, 5237), 'test.utils.PoolOpr', 'PoolOpr', (['mode'], {}), '(mode)\n', (5231, 5237), False, 'from test.utils import ActiveOpr, AdaptiveAvgPool2dOpr, BnOpr, BroadcastOpr, ConvBn2dOpr, ConvBnRelu2dOpr, ConvOpr, ConvRelu2dOpr, DropoutOpr, ElemwiseOpr, FConcatOpr, FlattenOpr, LinearBnOpr, LinearOpr, MatrixMulBnOpr, PoolOpr, ReduceOpr, RepeatOpr, ReshapeOpr, SqueezeOpr, SubtensorOpr, TransposeOpr, XORNet, XORNet_LeakyRelu\n'), ((5469, 5480), 'test.utils.BnOpr', 'BnOpr', (['mode'], {}), '(mode)\n', (5474, 5480), False, 'from test.utils import ActiveOpr, AdaptiveAvgPool2dOpr, BnOpr, BroadcastOpr, ConvBn2dOpr, ConvBnRelu2dOpr, ConvOpr, ConvRelu2dOpr, DropoutOpr, ElemwiseOpr, FConcatOpr, FlattenOpr, LinearBnOpr, LinearOpr, MatrixMulBnOpr, PoolOpr, ReduceOpr, RepeatOpr, ReshapeOpr, SqueezeOpr, SubtensorOpr, TransposeOpr, XORNet, XORNet_LeakyRelu\n'), ((5718, 5732), 'test.utils.SubtensorOpr', 'SubtensorOpr', ([], {}), '()\n', (5730, 5732), False, 'from test.utils import ActiveOpr, AdaptiveAvgPool2dOpr, BnOpr, BroadcastOpr, ConvBn2dOpr, ConvBnRelu2dOpr, ConvOpr, ConvRelu2dOpr, DropoutOpr, ElemwiseOpr, FConcatOpr, FlattenOpr, LinearBnOpr, LinearOpr, MatrixMulBnOpr, PoolOpr, ReduceOpr, RepeatOpr, ReshapeOpr, SqueezeOpr, SubtensorOpr, TransposeOpr, XORNet, XORNet_LeakyRelu\n'), ((5909, 5923), 'test.utils.TransposeOpr', 'TransposeOpr', ([], {}), '()\n', (5921, 5923), False, 'from test.utils import ActiveOpr, AdaptiveAvgPool2dOpr, BnOpr, BroadcastOpr, ConvBn2dOpr, ConvBnRelu2dOpr, ConvOpr, ConvRelu2dOpr, DropoutOpr, ElemwiseOpr, FConcatOpr, FlattenOpr, LinearBnOpr, LinearOpr, MatrixMulBnOpr, PoolOpr, ReduceOpr, RepeatOpr, ReshapeOpr, SqueezeOpr, SubtensorOpr, TransposeOpr, XORNet, XORNet_LeakyRelu\n'), ((6097, 6109), 'test.utils.FConcatOpr', 'FConcatOpr', ([], {}), '()\n', (6107, 6109), False, 'from test.utils import ActiveOpr, AdaptiveAvgPool2dOpr, BnOpr, BroadcastOpr, ConvBn2dOpr, ConvBnRelu2dOpr, ConvOpr, ConvRelu2dOpr, DropoutOpr, ElemwiseOpr, FConcatOpr, FlattenOpr, LinearBnOpr, LinearOpr, MatrixMulBnOpr, PoolOpr, ReduceOpr, RepeatOpr, ReshapeOpr, SqueezeOpr, SubtensorOpr, TransposeOpr, XORNet, XORNet_LeakyRelu\n'), ((6438, 6450), 'test.utils.ReshapeOpr', 'ReshapeOpr', ([], {}), '()\n', (6448, 6450), False, 'from test.utils import ActiveOpr, AdaptiveAvgPool2dOpr, BnOpr, BroadcastOpr, ConvBn2dOpr, ConvBnRelu2dOpr, ConvOpr, ConvRelu2dOpr, DropoutOpr, ElemwiseOpr, FConcatOpr, FlattenOpr, LinearBnOpr, LinearOpr, MatrixMulBnOpr, PoolOpr, ReduceOpr, RepeatOpr, ReshapeOpr, SqueezeOpr, SubtensorOpr, TransposeOpr, XORNet, XORNet_LeakyRelu\n'), ((6734, 6751), 'test.utils.ElemwiseOpr', 'ElemwiseOpr', (['mode'], {}), '(mode)\n', (6745, 6751), False, 'from test.utils import ActiveOpr, AdaptiveAvgPool2dOpr, BnOpr, BroadcastOpr, ConvBn2dOpr, ConvBnRelu2dOpr, ConvOpr, ConvRelu2dOpr, DropoutOpr, ElemwiseOpr, FConcatOpr, FlattenOpr, LinearBnOpr, LinearOpr, MatrixMulBnOpr, PoolOpr, ReduceOpr, RepeatOpr, ReshapeOpr, SqueezeOpr, SubtensorOpr, TransposeOpr, XORNet, XORNet_LeakyRelu\n'), ((7054, 7071), 'test.utils.ElemwiseOpr', 'ElemwiseOpr', (['mode'], {}), '(mode)\n', (7065, 7071), False, 'from test.utils import ActiveOpr, AdaptiveAvgPool2dOpr, BnOpr, BroadcastOpr, ConvBn2dOpr, ConvBnRelu2dOpr, ConvOpr, 
ConvRelu2dOpr, DropoutOpr, ElemwiseOpr, FConcatOpr, FlattenOpr, LinearBnOpr, LinearOpr, MatrixMulBnOpr, PoolOpr, ReduceOpr, RepeatOpr, ReshapeOpr, SqueezeOpr, SubtensorOpr, TransposeOpr, XORNet, XORNet_LeakyRelu\n'), ((7615, 7630), 'test.utils.ActiveOpr', 'ActiveOpr', (['mode'], {}), '(mode)\n', (7624, 7630), False, 'from test.utils import ActiveOpr, AdaptiveAvgPool2dOpr, BnOpr, BroadcastOpr, ConvBn2dOpr, ConvBnRelu2dOpr, ConvOpr, ConvRelu2dOpr, DropoutOpr, ElemwiseOpr, FConcatOpr, FlattenOpr, LinearBnOpr, LinearOpr, MatrixMulBnOpr, PoolOpr, ReduceOpr, RepeatOpr, ReshapeOpr, SqueezeOpr, SubtensorOpr, TransposeOpr, XORNet, XORNet_LeakyRelu\n'), ((7860, 7875), 'test.utils.ActiveOpr', 'ActiveOpr', (['mode'], {}), '(mode)\n', (7869, 7875), False, 'from test.utils import ActiveOpr, AdaptiveAvgPool2dOpr, BnOpr, BroadcastOpr, ConvBn2dOpr, ConvBnRelu2dOpr, ConvOpr, ConvRelu2dOpr, DropoutOpr, ElemwiseOpr, FConcatOpr, FlattenOpr, LinearBnOpr, LinearOpr, MatrixMulBnOpr, PoolOpr, ReduceOpr, RepeatOpr, ReshapeOpr, SqueezeOpr, SubtensorOpr, TransposeOpr, XORNet, XORNet_LeakyRelu\n'), ((8129, 8144), 'test.utils.ReduceOpr', 'ReduceOpr', (['mode'], {}), '(mode)\n', (8138, 8144), False, 'from test.utils import ActiveOpr, AdaptiveAvgPool2dOpr, BnOpr, BroadcastOpr, ConvBn2dOpr, ConvBnRelu2dOpr, ConvOpr, ConvRelu2dOpr, DropoutOpr, ElemwiseOpr, FConcatOpr, FlattenOpr, LinearBnOpr, LinearOpr, MatrixMulBnOpr, PoolOpr, ReduceOpr, RepeatOpr, ReshapeOpr, SqueezeOpr, SubtensorOpr, TransposeOpr, XORNet, XORNet_LeakyRelu\n'), ((8337, 8351), 'test.utils.BroadcastOpr', 'BroadcastOpr', ([], {}), '()\n', (8349, 8351), False, 'from test.utils import ActiveOpr, AdaptiveAvgPool2dOpr, BnOpr, BroadcastOpr, ConvBn2dOpr, ConvBnRelu2dOpr, ConvOpr, ConvRelu2dOpr, DropoutOpr, ElemwiseOpr, FConcatOpr, FlattenOpr, LinearBnOpr, LinearOpr, MatrixMulBnOpr, PoolOpr, ReduceOpr, RepeatOpr, ReshapeOpr, SqueezeOpr, SubtensorOpr, TransposeOpr, XORNet, XORNet_LeakyRelu\n'), ((8525, 8536), 'test.utils.RepeatOpr', 'RepeatOpr', ([], {}), '()\n', (8534, 8536), False, 'from test.utils import ActiveOpr, AdaptiveAvgPool2dOpr, BnOpr, BroadcastOpr, ConvBn2dOpr, ConvBnRelu2dOpr, ConvOpr, ConvRelu2dOpr, DropoutOpr, ElemwiseOpr, FConcatOpr, FlattenOpr, LinearBnOpr, LinearOpr, MatrixMulBnOpr, PoolOpr, ReduceOpr, RepeatOpr, ReshapeOpr, SqueezeOpr, SubtensorOpr, TransposeOpr, XORNet, XORNet_LeakyRelu\n'), ((8711, 8723), 'test.utils.FlattenOpr', 'FlattenOpr', ([], {}), '()\n', (8721, 8723), False, 'from test.utils import ActiveOpr, AdaptiveAvgPool2dOpr, BnOpr, BroadcastOpr, ConvBn2dOpr, ConvBnRelu2dOpr, ConvOpr, ConvRelu2dOpr, DropoutOpr, ElemwiseOpr, FConcatOpr, FlattenOpr, LinearBnOpr, LinearOpr, MatrixMulBnOpr, PoolOpr, ReduceOpr, RepeatOpr, ReshapeOpr, SqueezeOpr, SubtensorOpr, TransposeOpr, XORNet, XORNet_LeakyRelu\n'), ((8917, 8929), 'test.utils.DropoutOpr', 'DropoutOpr', ([], {}), '()\n', (8927, 8929), False, 'from test.utils import ActiveOpr, AdaptiveAvgPool2dOpr, BnOpr, BroadcastOpr, ConvBn2dOpr, ConvBnRelu2dOpr, ConvOpr, ConvRelu2dOpr, DropoutOpr, ElemwiseOpr, FConcatOpr, FlattenOpr, LinearBnOpr, LinearOpr, MatrixMulBnOpr, PoolOpr, ReduceOpr, RepeatOpr, ReshapeOpr, SqueezeOpr, SubtensorOpr, TransposeOpr, XORNet, XORNet_LeakyRelu\n'), ((9134, 9156), 'test.utils.AdaptiveAvgPool2dOpr', 'AdaptiveAvgPool2dOpr', ([], {}), '()\n', (9154, 9156), False, 'from test.utils import ActiveOpr, AdaptiveAvgPool2dOpr, BnOpr, BroadcastOpr, ConvBn2dOpr, ConvBnRelu2dOpr, ConvOpr, ConvRelu2dOpr, DropoutOpr, ElemwiseOpr, FConcatOpr, FlattenOpr, LinearBnOpr, LinearOpr, 
MatrixMulBnOpr, PoolOpr, ReduceOpr, RepeatOpr, ReshapeOpr, SqueezeOpr, SubtensorOpr, TransposeOpr, XORNet, XORNet_LeakyRelu\n'), ((10166, 10174), 'test.utils.XORNet', 'XORNet', ([], {}), '()\n', (10172, 10174), False, 'from test.utils import ActiveOpr, AdaptiveAvgPool2dOpr, BnOpr, BroadcastOpr, ConvBn2dOpr, ConvBnRelu2dOpr, ConvOpr, ConvRelu2dOpr, DropoutOpr, ElemwiseOpr, FConcatOpr, FlattenOpr, LinearBnOpr, LinearOpr, MatrixMulBnOpr, PoolOpr, ReduceOpr, RepeatOpr, ReshapeOpr, SqueezeOpr, SubtensorOpr, TransposeOpr, XORNet, XORNet_LeakyRelu\n'), ((10427, 10445), 'test.utils.XORNet_LeakyRelu', 'XORNet_LeakyRelu', ([], {}), '()\n', (10443, 10445), False, 'from test.utils import ActiveOpr, AdaptiveAvgPool2dOpr, BnOpr, BroadcastOpr, ConvBn2dOpr, ConvBnRelu2dOpr, ConvOpr, ConvRelu2dOpr, DropoutOpr, ElemwiseOpr, FConcatOpr, FlattenOpr, LinearBnOpr, LinearOpr, MatrixMulBnOpr, PoolOpr, ReduceOpr, RepeatOpr, ReshapeOpr, SqueezeOpr, SubtensorOpr, TransposeOpr, XORNet, XORNet_LeakyRelu\n'), ((2852, 2939), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['caffe_results', 'mge_results'], {'rtol': 'max_err', 'atol': 'max_err'}), '(caffe_results, mge_results, rtol=max_err, atol=\n max_err)\n', (2878, 2939), True, 'import numpy as np\n'), ((3124, 3144), 'megengine.tensor', 'mge.tensor', (['net.data'], {}), '(net.data)\n', (3134, 3144), True, 'import megengine as mge\n'), ((3318, 3338), 'megengine.tensor', 'mge.tensor', (['net.data'], {}), '(net.data)\n', (3328, 3338), True, 'import megengine as mge\n'), ((3526, 3546), 'megengine.tensor', 'mge.tensor', (['net.data'], {}), '(net.data)\n', (3536, 3546), True, 'import megengine as mge\n'), ((3742, 3762), 'megengine.tensor', 'mge.tensor', (['net.data'], {}), '(net.data)\n', (3752, 3762), True, 'import megengine as mge\n'), ((3930, 3950), 'megengine.tensor', 'mge.tensor', (['net.data'], {}), '(net.data)\n', (3940, 3950), True, 'import megengine as mge\n'), ((4132, 4153), 'megengine.tensor', 'mge.tensor', (['net.data1'], {}), '(net.data1)\n', (4142, 4153), True, 'import megengine as mge\n'), ((4424, 4444), 'megengine.tensor', 'mge.tensor', (['net.data'], {}), '(net.data)\n', (4434, 4444), True, 'import megengine as mge\n'), ((4763, 4783), 'megengine.tensor', 'mge.tensor', (['net.data'], {}), '(net.data)\n', (4773, 4783), True, 'import megengine as mge\n'), ((4959, 4979), 'megengine.tensor', 'mge.tensor', (['net.data'], {}), '(net.data)\n', (4969, 4979), True, 'import megengine as mge\n'), ((5289, 5309), 'megengine.tensor', 'mge.tensor', (['net.data'], {}), '(net.data)\n', (5299, 5309), True, 'import megengine as mge\n'), ((5601, 5617), 'megengine.tensor', 'mge.tensor', (['data'], {}), '(data)\n', (5611, 5617), True, 'import megengine as mge\n'), ((5784, 5804), 'megengine.tensor', 'mge.tensor', (['net.data'], {}), '(net.data)\n', (5794, 5804), True, 'import megengine as mge\n'), ((5975, 5995), 'megengine.tensor', 'mge.tensor', (['net.data'], {}), '(net.data)\n', (5985, 5995), True, 'import megengine as mge\n'), ((6188, 6204), 'megengine.tensor', 'mge.tensor', (['data'], {}), '(data)\n', (6198, 6204), True, 'import megengine as mge\n'), ((6206, 6222), 'megengine.tensor', 'mge.tensor', (['data'], {}), '(data)\n', (6216, 6222), True, 'import megengine as mge\n'), ((6502, 6522), 'megengine.tensor', 'mge.tensor', (['net.data'], {}), '(net.data)\n', (6512, 6522), True, 'import megengine as mge\n'), ((6803, 6823), 'megengine.tensor', 'mge.tensor', (['net.data'], {}), '(net.data)\n', (6813, 6823), True, 'import megengine as mge\n'), ((7218, 7233), 
'numpy.array', 'np.array', (['[2.0]'], {}), '([2.0])\n', (7226, 7233), True, 'import numpy as np\n'), ((7682, 7702), 'megengine.tensor', 'mge.tensor', (['net.data'], {}), '(net.data)\n', (7692, 7702), True, 'import megengine as mge\n'), ((7927, 7947), 'megengine.tensor', 'mge.tensor', (['net.data'], {}), '(net.data)\n', (7937, 7947), True, 'import megengine as mge\n'), ((8196, 8216), 'megengine.tensor', 'mge.tensor', (['net.data'], {}), '(net.data)\n', (8206, 8216), True, 'import megengine as mge\n'), ((8403, 8423), 'megengine.tensor', 'mge.tensor', (['net.data'], {}), '(net.data)\n', (8413, 8423), True, 'import megengine as mge\n'), ((8588, 8608), 'megengine.tensor', 'mge.tensor', (['net.data'], {}), '(net.data)\n', (8598, 8608), True, 'import megengine as mge\n'), ((8775, 8795), 'megengine.tensor', 'mge.tensor', (['net.data'], {}), '(net.data)\n', (8785, 8795), True, 'import megengine as mge\n'), ((8981, 9001), 'megengine.tensor', 'mge.tensor', (['net.data'], {}), '(net.data)\n', (8991, 9001), True, 'import megengine as mge\n'), ((9208, 9228), 'megengine.tensor', 'mge.tensor', (['net.data'], {}), '(net.data)\n', (9218, 9228), True, 'import megengine as mge\n'), ((10002, 10018), 'megengine.tensor', 'mge.tensor', (['data'], {}), '(data)\n', (10012, 10018), True, 'import megengine as mge\n'), ((10241, 10261), 'megengine.tensor', 'mge.tensor', (['net.data'], {}), '(net.data)\n', (10251, 10261), True, 'import megengine as mge\n'), ((10512, 10532), 'megengine.tensor', 'mge.tensor', (['net.data'], {}), '(net.data)\n', (10522, 10532), True, 'import megengine as mge\n'), ((2615, 2701), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['out_dict[name._name]', 'mge_results[name]'], {'atol': 'max_err'}), '(out_dict[name._name], mge_results[name], atol=\n max_err)\n', (2641, 2701), True, 'import numpy as np\n'), ((6121, 6151), 'numpy.random.random', 'np.random.random', (['(1, 2, 4, 5)'], {}), '((1, 2, 4, 5))\n', (6137, 6151), True, 'import numpy as np\n'), ((4328, 4348), 'megengine.tensor', 'mge.tensor', (['net.data'], {}), '(net.data)\n', (4338, 4348), True, 'import megengine as mge\n'), ((4667, 4687), 'megengine.tensor', 'mge.tensor', (['net.data'], {}), '(net.data)\n', (4677, 4687), True, 'import megengine as mge\n'), ((7143, 7158), 'numpy.array', 'np.array', (['[2.0]'], {}), '([2.0])\n', (7151, 7158), True, 'import numpy as np\n'), ((9566, 9606), 'numpy.random.randint', 'np.random.randint', (['(0)', '(255)', '(3 * 224 * 224)'], {}), '(0, 255, 3 * 224 * 224)\n', (9583, 9606), True, 'import numpy as np\n')]
|
import streamlit as st
import pandas as pd
import argparse
import datetime as dt
from sqlmodel import create_engine, Session
parser = argparse.ArgumentParser(description="A steamlit dashboard for visualising news analytics.")
parser.add_argument("--database", type=str, default="sqlite:///database.db")
class Dashboard:
def __init__(self, database) -> None:
self.engine = create_engine(database)
self.attr = {
"date_range": self.get_date_range(),
}
self.state = st.session_state
self.build()
def build(self):
# ~~~ Build Sidebar ~~~~~~~~~
st.sidebar.header("Newsreader")
with st.form(key="date_picker"):
st.sidebar.date_input(
"Period From",
key="period_from",
min_value=self.attr["date_range"][0],
max_value=self.attr["date_range"][1],
value=self.attr["date_range"][0]
)
st.sidebar.date_input(
"Period To",
key="period_to",
min_value=self.attr["date_range"][0],
max_value=self.attr["date_range"][1],
value=self.attr["date_range"][1]
            )
            # Streamlit requires every form to contain a submit button; without it the
            # app shows a "Missing Submit Button" error. Note that the st.sidebar.*
            # widgets above render in the sidebar, outside the form container.
            st.form_submit_button("Apply")
# ~~~ Build Main UI ~~~~~~~~~~
st.text_input("Search", key="search_text")
st.write(self.state.search_text)
st.write([self.state.period_from, self.state.period_to])
st.write(self.get_total_sentiment("00375cd420e37d4084c6668975f91648"))
# ~~~ Callbacks ~~~~~~~~~~~
# ~~~ Analytics Operations ~~~~~~
def exec(self, stmt: str, params = {}):
with Session(self.engine) as session:
return session.exec(stmt, params=params).all()
def get_date_range(self):
ans = self.exec("""
SELECT MIN(date) as first_date, MAX(date) as last_date FROM document
""")
return list(map(dt.datetime.fromisoformat, ans[0]))
def get_total_sentiment(self, document_id: str):
ans = self.exec("""
WITH t1 AS (
SELECT
document.id as document_id,
paragraph.sentiment as sent,
COUNT(*) as paragraphs
FROM document
JOIN paragraph ON document.id = paragraph.document_id
WHERE document.id = :document_id
GROUP BY document.id, paragraph.sentiment
)
SELECT sent, paragraphs FROM t1
""", params={ "document_id": document_id })
return pd.DataFrame(ans, columns=["lable", "total_paragraphs"])
if __name__ == "__main__":
args = parser.parse_args()
    dashboard = Dashboard(args.database)
|
[
"sqlmodel.create_engine",
"sqlmodel.Session"
] |
[((135, 231), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""A steamlit dashboard for visualising news analytics."""'}), "(description=\n 'A steamlit dashboard for visualising news analytics.')\n", (158, 231), False, 'import argparse\n'), ((385, 408), 'sqlmodel.create_engine', 'create_engine', (['database'], {}), '(database)\n', (398, 408), False, 'from sqlmodel import create_engine, Session\n'), ((619, 650), 'streamlit.sidebar.header', 'st.sidebar.header', (['"""Newsreader"""'], {}), "('Newsreader')\n", (636, 650), True, 'import streamlit as st\n'), ((1282, 1324), 'streamlit.text_input', 'st.text_input', (['"""Search"""'], {'key': '"""search_text"""'}), "('Search', key='search_text')\n", (1295, 1324), True, 'import streamlit as st\n'), ((1334, 1366), 'streamlit.write', 'st.write', (['self.state.search_text'], {}), '(self.state.search_text)\n', (1342, 1366), True, 'import streamlit as st\n'), ((1375, 1431), 'streamlit.write', 'st.write', (['[self.state.period_from, self.state.period_to]'], {}), '([self.state.period_from, self.state.period_to])\n', (1383, 1431), True, 'import streamlit as st\n'), ((2549, 2605), 'pandas.DataFrame', 'pd.DataFrame', (['ans'], {'columns': "['lable', 'total_paragraphs']"}), "(ans, columns=['lable', 'total_paragraphs'])\n", (2561, 2605), True, 'import pandas as pd\n'), ((665, 691), 'streamlit.form', 'st.form', ([], {'key': '"""date_picker"""'}), "(key='date_picker')\n", (672, 691), True, 'import streamlit as st\n'), ((705, 880), 'streamlit.sidebar.date_input', 'st.sidebar.date_input', (['"""Period From"""'], {'key': '"""period_from"""', 'min_value': "self.attr['date_range'][0]", 'max_value': "self.attr['date_range'][1]", 'value': "self.attr['date_range'][0]"}), "('Period From', key='period_from', min_value=self.attr\n ['date_range'][0], max_value=self.attr['date_range'][1], value=self.\n attr['date_range'][0])\n", (726, 880), True, 'import streamlit as st\n'), ((977, 1148), 'streamlit.sidebar.date_input', 'st.sidebar.date_input', (['"""Period To"""'], {'key': '"""period_to"""', 'min_value': "self.attr['date_range'][0]", 'max_value': "self.attr['date_range'][1]", 'value': "self.attr['date_range'][1]"}), "('Period To', key='period_to', min_value=self.attr[\n 'date_range'][0], max_value=self.attr['date_range'][1], value=self.attr\n ['date_range'][1])\n", (998, 1148), True, 'import streamlit as st\n'), ((1650, 1670), 'sqlmodel.Session', 'Session', (['self.engine'], {}), '(self.engine)\n', (1657, 1670), False, 'from sqlmodel import create_engine, Session\n')]
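A note on the raw-SQL helper above: recent SQLAlchemy (2.x), and therefore recent SQLModel, rejects plain strings passed to Session.exec/execute; textual SQL has to be wrapped in text(). A minimal sketch of the same query pattern under that assumption (the database path is taken from the parser default above):

from sqlalchemy import text
from sqlmodel import Session, create_engine

engine = create_engine("sqlite:///database.db")

with Session(engine) as session:
    # Named parameters still bind through the params= keyword, as in Dashboard.exec().
    stmt = text("SELECT MIN(date) AS first_date, MAX(date) AS last_date FROM document")
    first_date, last_date = session.exec(stmt).one()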
|
"""
Explicit time stepping solvers for use with DG FEM
"""
import numpy as nm
import numpy.linalg as nla
# sfepy imports
from sfepy.base.base import get_default, output
from sfepy.solvers import TimeSteppingSolver
from sfepy.solvers.solvers import SolverMeta
from sfepy.solvers.ts import TimeStepper
from sfepy.solvers.ts_solvers import standard_ts_call
class DGMultiStageTSS(TimeSteppingSolver):
"""Explicit time stepping solver with multistage solve_step method"""
__metaclass__ = SolverMeta
name = "ts.multistaged"
_parameters = [
('t0', 'float', 0.0, False,
'The initial time.'),
('t1', 'float', 1.0, False,
'The final time.'),
('dt', 'float', None, False,
'The time step. Used if `n_step` is not given.'),
('n_step', 'int', 10, False,
'The number of time steps. Has precedence over `dt`.'),
# this option is required by TimeSteppingSolver constructor
('quasistatic', 'bool', False, False,
"""If True, assume a quasistatic time-stepping. Then the non-linear
solver is invoked also for the initial time."""),
('limiters', 'dictionary', None, None,
"Limiters for DGFields, keys: field name, values: limiter class"),
]
def __init__(self, conf, nls=None, context=None, **kwargs):
TimeSteppingSolver.__init__(self, conf, nls=nls, context=context,
**kwargs)
self.ts = TimeStepper.from_conf(self.conf)
nd = self.ts.n_digit
self.stage_format = '---- ' + \
self.name + ' stage {}: linear system sol error {}'+ \
' ----'
format = '\n\n====== time %%e (step %%%dd of %%%dd) =====' % (nd, nd)
self.format = format
self.verbose = self.conf.verbose
self.post_stage_hook = lambda x: x
limiters = {}
if self.conf.limiters is not None:
limiters = self.conf.limiters
# what if we have more fields or limiters?
for field_name, limiter in limiters.items():
self.post_stage_hook = limiter(context.fields[field_name],
verbose=self.verbose)
def solve_step0(self, nls, vec0):
res = nls.fun(vec0)
err = nm.linalg.norm(res)
output('initial residual: %e' % err, verbose=self.verbose)
vec = vec0.copy()
return vec
def solve_step(self, ts, nls, vec, prestep_fun=None, poststep_fun=None,
status=None):
raise NotImplementedError("Called abstract solver, use subclass.")
def output_step_info(self, ts):
output(self.format % (ts.time, ts.step + 1, ts.n_step),
verbose=self.verbose)
@standard_ts_call
def __call__(self, vec0=None, nls=None, init_fun=None, prestep_fun=None,
poststep_fun=None, status=None):
"""
Solve the time-dependent problem.
"""
ts = self.ts
nls = get_default(nls, self.nls)
vec0 = init_fun(ts, vec0)
self.output_step_info(ts)
if ts.step == 0:
prestep_fun(ts, vec0)
vec = self.solve_step0(nls, vec0)
poststep_fun(ts, vec)
ts.advance()
else:
vec = vec0
for step, time in ts.iter_from(ts.step):
self.output_step_info(ts)
prestep_fun(ts, vec)
vect = self.solve_step(ts, nls, vec, prestep_fun, poststep_fun,
status)
poststep_fun(ts, vect)
vec = vect
return vec
class EulerStepSolver(DGMultiStageTSS):
"""Simple forward euler method"""
name = 'ts.euler'
__metaclass__ = SolverMeta
def solve_step(self, ts, nls, vec_x0, status=None,
prestep_fun=None, poststep_fun=None):
if ts is None:
raise ValueError("Provide TimeStepper to explicit Euler solver")
fun = nls.fun
fun_grad = nls.fun_grad
lin_solver = nls.lin_solver
ls_eps_a, ls_eps_r = lin_solver.get_tolerance()
eps_a = get_default(ls_eps_a, 1.0)
eps_r = get_default(ls_eps_r, 1.0)
vec_x = vec_x0.copy()
vec_r = fun(vec_x)
mtx_a = fun_grad(vec_x)
ls_status = {}
vec_dx = lin_solver(vec_r, x0=vec_x,
eps_a=eps_a, eps_r=eps_r, mtx=mtx_a,
status=ls_status)
vec_e = mtx_a * vec_dx - vec_r
lerr = nla.norm(vec_e)
if self.verbose:
output(self.name + ' linear system sol error {}'.format(lerr))
output(self.name + ' mtx max {}, min {}, trace {}'
.format(mtx_a.max(), mtx_a.min(), nm.sum(mtx_a.diagonal())))
vec_x = vec_x - ts.dt * (vec_dx - vec_x)
vec_x = self.post_stage_hook(vec_x)
return vec_x
class TVDRK3StepSolver(DGMultiStageTSS):
r"""3rd order Total Variation Diminishing Runge-Kutta method
based on [1]_
.. math::
\begin{aligned}
\mathbf{p}^{(1)} &= \mathbf{p}^n - \Delta t
\bar{\mathcal{L}}(\mathbf{p}^n),\\
\mathbf{\mathbf{p}}^{(2)} &= \frac{3}{4}\mathbf{p}^n
+\frac{1}{4}\mathbf{p}^{(1)} - \frac{1}{4}\Delta t
\bar{\mathcal{L}}(\mathbf{p}^{(1)}),\\
\mathbf{p}^{(n+1)} &= \frac{1}{3}\mathbf{p}^n
+\frac{2}{3}\mathbf{p}^{(2)} - \frac{2}{3}\Delta t
\bar{\mathcal{L}}(\mathbf{p}^{(2)}).
\end{aligned}
.. [1] <NAME>., & <NAME>. (2002). Total variation diminishing Runge-Kutta
schemes. Mathematics of Computation of the American Mathematical Society,
67(221), 73–85. https://doi.org/10.1090/s0025-5718-98-00913-2
"""
name = 'ts.tvd_runge_kutta_3'
__metaclass__ = SolverMeta
def solve_step(self, ts, nls, vec_x0, status=None,
prestep_fun=None, poststep_fun=None):
if ts is None:
raise ValueError("Provide TimeStepper to explicit Runge-Kutta solver")
fun = nls.fun
fun_grad = nls.fun_grad
lin_solver = nls.lin_solver
ls_eps_a, ls_eps_r = lin_solver.get_tolerance()
eps_a = get_default(ls_eps_a, 1.0)
eps_r = get_default(ls_eps_r, 1.0)
ls_status = {}
# ----1st stage----
vec_x = vec_x0.copy()
vec_r = fun(vec_x)
mtx_a = fun_grad(vec_x)
vec_dx = lin_solver(vec_r, x0=vec_x,
eps_a=eps_a, eps_r=eps_r, mtx=mtx_a,
status=ls_status)
vec_x1 = vec_x - ts.dt * (vec_dx - vec_x)
vec_e = mtx_a * vec_dx - vec_r
lerr = nla.norm(vec_e)
if self.verbose:
output(self.stage_format.format(1, lerr))
vec_x1 = self.post_stage_hook(vec_x1)
# ----2nd stage----
vec_r = fun(vec_x1)
mtx_a = fun_grad(vec_x1)
vec_dx = lin_solver(vec_r, x0=vec_x1,
eps_a=eps_a, eps_r=eps_r, mtx=mtx_a,
status=ls_status)
vec_x2 = (3 * vec_x + vec_x1 - ts.dt * (vec_dx - vec_x1)) / 4
vec_e = mtx_a * vec_dx - vec_r
lerr = nla.norm(vec_e)
if self.verbose:
output(self.stage_format.format(2, lerr))
vec_x2 = self.post_stage_hook(vec_x2)
# ----3rd stage-----
ts.set_substep_time(1. / 2. * ts.dt)
prestep_fun(ts, vec_x2)
vec_r = fun(vec_x2)
mtx_a = fun_grad(vec_x2)
vec_dx = lin_solver(vec_r, x0=vec_x2,
eps_a=eps_a, eps_r=eps_r, mtx=mtx_a,
status=ls_status)
vec_x3 = (vec_x + 2 * vec_x2 - 2 * ts.dt * (vec_dx - vec_x2)) / 3
vec_e = mtx_a * vec_dx - vec_r
lerr = nla.norm(vec_e)
if self.verbose:
output(self.stage_format.format(3, lerr))
vec_x3 = self.post_stage_hook(vec_x3)
return vec_x3
class RK4StepSolver(DGMultiStageTSS):
"""Classical 4th order Runge-Kutta method,
    implementation is based on [1]_
.. [1] <NAME>., & <NAME>. (2008). Nodal Discontinuous Galerkin Methods.
Journal of Physics A: Mathematical and Theoretical (Vol. 54). New York,
NY: Springer New York. http://doi.org/10.1007/978-0-387-72067-8, p. 63
"""
name = 'ts.runge_kutta_4'
__metaclass__ = SolverMeta
stage_updates = (
lambda u, k_, dt: u,
lambda u, k1, dt: u + 1. / 2. * dt * k1,
lambda u, k2, dt: u + 1. / 2. * dt * k2,
lambda u, k3, dt: u + dt * k3
)
def solve_step(self, ts, nls, vec_x0, status=None,
prestep_fun=None, poststep_fun=None):
if ts is None:
raise ValueError("Provide TimeStepper to explicit Runge-Kutta solver")
fun = nls.fun
fun_grad = nls.fun_grad
lin_solver = nls.lin_solver
ls_eps_a, ls_eps_r = lin_solver.get_tolerance()
eps_a = get_default(ls_eps_a, 1.0)
eps_r = get_default(ls_eps_r, 1.0)
ls_status = {}
dt = ts.dt
vec_x = None
vec_xs = []
for stage, stage_update in enumerate(self.stage_updates):
stage_vec = stage_update(vec_x0, vec_x, dt)
vec_r = fun(stage_vec)
mtx_a = fun_grad(stage_vec)
vec_dx = lin_solver(vec_r, # x0=stage_vec,
eps_a=eps_a, eps_r=eps_r, mtx=mtx_a,
status=ls_status)
vec_e = mtx_a * vec_dx - vec_r
lerr = nla.norm(vec_e)
if self.verbose:
output(self.stage_format.format(stage, lerr))
vec_x = - vec_dx - stage_vec
vec_x = self.post_stage_hook(vec_x)
vec_xs.append(vec_x)
vec_fin = vec_x0 + \
1. / 6. * ts.dt * (vec_xs[0] + 2 * vec_xs[1]
+ 2 * vec_xs[2] + vec_xs[3])
vec_fin = self.post_stage_hook(vec_fin)
return vec_fin
|
[
"sfepy.base.base.get_default",
"sfepy.solvers.TimeSteppingSolver.__init__",
"sfepy.base.base.output",
"sfepy.solvers.ts.TimeStepper.from_conf"
] |
[((1340, 1415), 'sfepy.solvers.TimeSteppingSolver.__init__', 'TimeSteppingSolver.__init__', (['self', 'conf'], {'nls': 'nls', 'context': 'context'}), '(self, conf, nls=nls, context=context, **kwargs)\n', (1367, 1415), False, 'from sfepy.solvers import TimeSteppingSolver\n'), ((1470, 1502), 'sfepy.solvers.ts.TimeStepper.from_conf', 'TimeStepper.from_conf', (['self.conf'], {}), '(self.conf)\n', (1491, 1502), False, 'from sfepy.solvers.ts import TimeStepper\n'), ((2300, 2319), 'numpy.linalg.norm', 'nm.linalg.norm', (['res'], {}), '(res)\n', (2314, 2319), True, 'import numpy as nm\n'), ((2328, 2386), 'sfepy.base.base.output', 'output', (["('initial residual: %e' % err)"], {'verbose': 'self.verbose'}), "('initial residual: %e' % err, verbose=self.verbose)\n", (2334, 2386), False, 'from sfepy.base.base import get_default, output\n'), ((2663, 2740), 'sfepy.base.base.output', 'output', (['(self.format % (ts.time, ts.step + 1, ts.n_step))'], {'verbose': 'self.verbose'}), '(self.format % (ts.time, ts.step + 1, ts.n_step), verbose=self.verbose)\n', (2669, 2740), False, 'from sfepy.base.base import get_default, output\n'), ((3007, 3033), 'sfepy.base.base.get_default', 'get_default', (['nls', 'self.nls'], {}), '(nls, self.nls)\n', (3018, 3033), False, 'from sfepy.base.base import get_default, output\n'), ((4140, 4166), 'sfepy.base.base.get_default', 'get_default', (['ls_eps_a', '(1.0)'], {}), '(ls_eps_a, 1.0)\n', (4151, 4166), False, 'from sfepy.base.base import get_default, output\n'), ((4183, 4209), 'sfepy.base.base.get_default', 'get_default', (['ls_eps_r', '(1.0)'], {}), '(ls_eps_r, 1.0)\n', (4194, 4209), False, 'from sfepy.base.base import get_default, output\n'), ((4537, 4552), 'numpy.linalg.norm', 'nla.norm', (['vec_e'], {}), '(vec_e)\n', (4545, 4552), True, 'import numpy.linalg as nla\n'), ((6243, 6269), 'sfepy.base.base.get_default', 'get_default', (['ls_eps_a', '(1.0)'], {}), '(ls_eps_a, 1.0)\n', (6254, 6269), False, 'from sfepy.base.base import get_default, output\n'), ((6286, 6312), 'sfepy.base.base.get_default', 'get_default', (['ls_eps_r', '(1.0)'], {}), '(ls_eps_r, 1.0)\n', (6297, 6312), False, 'from sfepy.base.base import get_default, output\n'), ((6717, 6732), 'numpy.linalg.norm', 'nla.norm', (['vec_e'], {}), '(vec_e)\n', (6725, 6732), True, 'import numpy.linalg as nla\n'), ((7232, 7247), 'numpy.linalg.norm', 'nla.norm', (['vec_e'], {}), '(vec_e)\n', (7240, 7247), True, 'import numpy.linalg as nla\n'), ((7829, 7844), 'numpy.linalg.norm', 'nla.norm', (['vec_e'], {}), '(vec_e)\n', (7837, 7844), True, 'import numpy.linalg as nla\n'), ((9001, 9027), 'sfepy.base.base.get_default', 'get_default', (['ls_eps_a', '(1.0)'], {}), '(ls_eps_a, 1.0)\n', (9012, 9027), False, 'from sfepy.base.base import get_default, output\n'), ((9044, 9070), 'sfepy.base.base.get_default', 'get_default', (['ls_eps_r', '(1.0)'], {}), '(ls_eps_r, 1.0)\n', (9055, 9070), False, 'from sfepy.base.base import get_default, output\n'), ((9591, 9606), 'numpy.linalg.norm', 'nla.norm', (['vec_e'], {}), '(vec_e)\n', (9599, 9606), True, 'import numpy.linalg as nla\n')]
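The TVDRK3StepSolver docstring above spells out the three-stage update; the solver itself expresses each stage through the nonlinear residual and a linear solve. For reference, a self-contained numpy sketch of the same scheme applied to a plain ODE du/dt = L(u), written with the conventional +dt*L(u) sign (the docstring's \bar{L} absorbs the minus):

import numpy as np

def tvd_rk3_step(L, u, dt):
    # Stage 1
    u1 = u + dt * L(u)
    # Stage 2
    u2 = (3.0 * u + u1 + dt * L(u1)) / 4.0
    # Stage 3, returns u at the next time level
    return (u + 2.0 * u2 + 2.0 * dt * L(u2)) / 3.0

# Example: exponential decay du/dt = -u, ten steps of dt = 0.1
u = np.array([1.0])
for _ in range(10):
    u = tvd_rk3_step(lambda v: -v, u, 0.1)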
|
import pytest
from fastapi.testclient import TestClient
from sqlmodel import Session, SQLModel, create_engine
from sqlmodel.pool import StaticPool
from .main import Hero, app, get_session
@pytest.fixture(name="session")
def session_fixture():
engine = create_engine(
"sqlite://", connect_args={"check_same_thread": False}, poolclass=StaticPool
)
SQLModel.metadata.create_all(engine)
with Session(engine) as session:
yield session
@pytest.fixture(name="client")
def client_fixture(session: Session):
def get_session_override():
return session
app.dependency_overrides[get_session] = get_session_override
client = TestClient(app)
yield client
app.dependency_overrides.clear()
def test_create_hero(client: TestClient):
response = client.post(
"/heroes/", json={"name": "Deadpond", "secret_name": "<NAME>"}
)
data = response.json()
assert response.status_code == 200
assert data["name"] == "Deadpond"
assert data["secret_name"] == "<NAME>"
assert data["age"] is None
assert data["id"] is not None
def test_create_hero_incomplete(client: TestClient):
# No secret_name
response = client.post("/heroes/", json={"name": "Deadpond"})
assert response.status_code == 422
def test_create_hero_invalid(client: TestClient):
# secret_name has an invalid type
response = client.post(
"/heroes/",
json={
"name": "Deadpond",
"secret_name": {"message": "Do you wanna know my secret identity?"},
},
)
assert response.status_code == 422
def test_read_heroes(session: Session, client: TestClient):
hero_1 = Hero(name="Deadpond", secret_name="<NAME>")
hero_2 = Hero(name="Rusty-Man", secret_name="<NAME>", age=48)
session.add(hero_1)
session.add(hero_2)
session.commit()
response = client.get("/heroes/")
data = response.json()
assert response.status_code == 200
assert len(data) == 2
assert data[0]["name"] == hero_1.name
assert data[0]["secret_name"] == hero_1.secret_name
assert data[0]["age"] == hero_1.age
assert data[0]["id"] == hero_1.id
assert data[1]["name"] == hero_2.name
assert data[1]["secret_name"] == hero_2.secret_name
assert data[1]["age"] == hero_2.age
assert data[1]["id"] == hero_2.id
def test_read_hero(session: Session, client: TestClient):
hero_1 = Hero(name="Deadpond", secret_name="<NAME>")
session.add(hero_1)
session.commit()
response = client.get(f"/heroes/{hero_1.id}")
data = response.json()
assert response.status_code == 200
assert data["name"] == hero_1.name
assert data["secret_name"] == hero_1.secret_name
assert data["age"] == hero_1.age
assert data["id"] == hero_1.id
def test_update_hero(session: Session, client: TestClient):
hero_1 = Hero(name="Deadpond", secret_name="<NAME>")
session.add(hero_1)
session.commit()
response = client.patch(f"/heroes/{hero_1.id}", json={"name": "Deadpuddle"})
data = response.json()
assert response.status_code == 200
assert data["name"] == "Deadpuddle"
assert data["secret_name"] == "<NAME>"
assert data["age"] is None
assert data["id"] == hero_1.id
def test_delete_hero(session: Session, client: TestClient):
hero_1 = Hero(name="Deadpond", secret_name="<NAME>")
session.add(hero_1)
session.commit()
response = client.delete(f"/heroes/{hero_1.id}")
hero_in_db = session.get(Hero, hero_1.id)
assert response.status_code == 200
assert hero_in_db is None
|
[
"sqlmodel.SQLModel.metadata.create_all",
"sqlmodel.Session",
"sqlmodel.create_engine"
] |
[((192, 222), 'pytest.fixture', 'pytest.fixture', ([], {'name': '"""session"""'}), "(name='session')\n", (206, 222), False, 'import pytest\n'), ((468, 497), 'pytest.fixture', 'pytest.fixture', ([], {'name': '"""client"""'}), "(name='client')\n", (482, 497), False, 'import pytest\n'), ((259, 354), 'sqlmodel.create_engine', 'create_engine', (['"""sqlite://"""'], {'connect_args': "{'check_same_thread': False}", 'poolclass': 'StaticPool'}), "('sqlite://', connect_args={'check_same_thread': False},\n poolclass=StaticPool)\n", (272, 354), False, 'from sqlmodel import Session, SQLModel, create_engine\n'), ((369, 405), 'sqlmodel.SQLModel.metadata.create_all', 'SQLModel.metadata.create_all', (['engine'], {}), '(engine)\n', (397, 405), False, 'from sqlmodel import Session, SQLModel, create_engine\n'), ((670, 685), 'fastapi.testclient.TestClient', 'TestClient', (['app'], {}), '(app)\n', (680, 685), False, 'from fastapi.testclient import TestClient\n'), ((415, 430), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (422, 430), False, 'from sqlmodel import Session, SQLModel, create_engine\n')]
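These tests import Hero, app and get_session from a main module that is not part of this snippet. A minimal sketch of the module they assume, mirroring the SQLModel + FastAPI tutorial layout; everything beyond the three imported names is an assumption:

# main.py -- hypothetical minimal counterpart to the tests above
from typing import Optional

from fastapi import Depends, FastAPI
from sqlmodel import Field, Session, SQLModel, create_engine

class Hero(SQLModel, table=True):
    id: Optional[int] = Field(default=None, primary_key=True)
    name: str
    secret_name: str
    age: Optional[int] = None

engine = create_engine("sqlite:///database.db")

def get_session():
    with Session(engine) as session:
        yield session

app = FastAPI()

@app.post("/heroes/", response_model=Hero)
def create_hero(hero: Hero, session: Session = Depends(get_session)):
    # The GET/PATCH/DELETE routes exercised by the other tests follow the same pattern.
    session.add(hero)
    session.commit()
    session.refresh(hero)
    return hero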
|
import types
from dataclasses import dataclass
from typing import Callable, List, Union
from fastapi import Depends, FastAPI, HTTPException, Query
from sqlmodel import Field, Session, SQLModel, select
# Model generator + container -------------------------------------------------------------
@dataclass
class MultipleModels:
path: str
base: SQLModel
response: SQLModel
def __post_init__(self):
self.creation: SQLModel = self.make_creator_cls()
self.table: SQLModel = self.make_table_cls()
self.update: SQLModel = self.make_updater_cls()
@staticmethod
def make_cls_name(base: type, rename_base_to: str) -> str:
"""For a class name of format ``"ClassBase"``, return a modified name in which
the substring ``"Base"`` is replaced with the string passed to ``rename_base_to``.
        :param base: The base model. Its name must end with the substring ``"Base"``.
:param rename_base_to: String to replace `"Base"` with.
"""
return base.__name__.replace("Base", rename_base_to)
def make_creator_cls(self) -> SQLModel:
"""From a base model, make and return a creation model. As described in
https://sqlmodel.tiangolo.com/tutorial/fastapi/multiple-models/#the-herocreate-data-model,
the creation model is simply a copy of the base model, with the substring ``"Base"`` in the
class name replaced by the substring ``"Create"``.
:param base: The base model.
"""
cls_name = self.make_cls_name(self.base, "Create")
return type(cls_name, (self.base,), {})
def make_updater_cls(self) -> SQLModel:
"""From a base model, make and return an update model. As described in
https://sqlmodel.tiangolo.com/tutorial/fastapi/update/#heroupdate-model, the update model
is the same as the base model, but with all fields annotated as ``Optional`` and all field
defaults set to ``None``.
        :param base: The base model. Note that unlike in ``make_creator_cls``, this is not the base for
inheritance (all updaters inherit directly from ``SQLModel``) but rather is used to derive
the output class name, attributes, and type annotations.
"""
cls_name = self.make_cls_name(self.base, "Update")
sig = self.base.__signature__
params = list(sig.parameters)
# Pulling type via `__signature__` rather than `__annotation__` because
# this accessor drops the `typing.Union[...]` wrapper for optional fields
annotations = {p: Union[sig.parameters[p].annotation, None] for p in params}
defaults = {p: None for p in params}
attrs = {**defaults, "__annotations__": annotations}
return type(cls_name, (SQLModel,), attrs)
def make_table_cls(self) -> SQLModel:
"""From a base model, make and return a table model. As described in
https://sqlmodel.tiangolo.com/tutorial/fastapi/multiple-models/#the-hero-table-model,
the table model is the same as the base model, with the addition of the ``table=True`` class
creation keyword and an ``id`` attribute of type ``Optional[int]`` set to a default value of
``Field(default=None, primary_key=True)``.
:param base: The base model.
"""
cls_name = self.make_cls_name(self.base, "")
attrs = dict(id=Field(default=None, primary_key=True))
annotations = dict(id=Union[int, None])
attrs.update(dict(__annotations__=annotations))
# We are using `typing.new_class` (vs. `type`) b/c it supports the `table=True` kwarg.
# https://twitter.com/simonw/status/1430255521127305216?s=20
# https://docs.python.org/3/reference/datamodel.html#customizing-class-creation
return types.new_class(
cls_name, (self.base,), dict(table=True), lambda ns: ns.update(attrs)
)
# SQLModel database interface functions ---------------------------------------------------
def create(*, session: Session, table_cls: SQLModel, model: SQLModel) -> SQLModel:
db_model = table_cls.from_orm(model)
session.add(db_model)
session.commit()
session.refresh(db_model)
return db_model
def read_range(*, session: Session, table_cls: SQLModel, offset: int, limit: int) -> List:
return session.exec(select(table_cls).offset(offset).limit(limit)).all()
def read_single(*, session: Session, table_cls: SQLModel, id: int):
db_model = session.get(table_cls, id)
if not db_model:
raise HTTPException(status_code=404, detail=f"{table_cls.__name__} not found")
return db_model
def update(*, session: Session, table_cls: SQLModel, id: int, model: SQLModel) -> SQLModel:
db_model = session.get(table_cls, id)
if not db_model:
raise HTTPException(status_code=404, detail=f"{table_cls.__name__} not found")
model_data = model.dict(exclude_unset=True)
for key, value in model_data.items():
setattr(db_model, key, value)
session.add(db_model)
session.commit()
session.refresh(db_model)
return db_model
def delete(*, session: Session, table_cls: SQLModel, id: int) -> dict:
db_model = session.get(table_cls, id)
if not db_model:
raise HTTPException(status_code=404, detail=f"{table_cls.__name__} not found")
session.delete(db_model)
session.commit()
return {"ok": True}
# Endpoint registration -------------------------------------------------------------------
@dataclass
class RegisterEndpoints:
"""From a ``MultipleModels`` object, register create, read, update, delete (CRUD) API endpoints.
:param api: The ``FastAPI`` instance.
:param get_session: A function which yields a context-managed ``sqlmodel.Session`` object.
:param models: The ``MultipleModels`` object.
    :param limit: The upper bound on the number of records returned by read requests.
"""
api: FastAPI
get_session: Callable
models: MultipleModels
limit: Query = Query(default=100, lte=100)
def __post_init__(self):
self.register_all()
def register_all(self):
self.register_create_endpoint()
self.register_read_range_endpoint()
self.register_read_single_endpoint()
self.register_update_endpoint()
self.register_delete_endpoint()
def register_create_endpoint(self):
@self.api.post(self.models.path, response_model=self.models.response)
def endpoint(*, session: Session = Depends(self.get_session), model: self.models.creation):
return create(session=session, table_cls=self.models.table, model=model)
def register_read_range_endpoint(self):
@self.api.get(self.models.path, response_model=List[self.models.response])
def endpoint(
*, session: Session = Depends(self.get_session), offset: int = 0, limit: int = self.limit,
):
return read_range(
session=session, table_cls=self.models.table, offset=offset, limit=limit,
)
def register_read_single_endpoint(self):
@self.api.get(self.models.path + "{id}", response_model=self.models.response)
def endpoint(*, session: Session = Depends(self.get_session), id: int):
return read_single(session=session, table_cls=self.models.table, id=id)
def register_update_endpoint(self):
@self.api.patch(self.models.path + "{id}", response_model=self.models.response)
def endpoint(
*, session: Session = Depends(self.get_session), id: int, model: self.models.update,
):
return update(session=session, table_cls=self.models.table, id=id, model=model)
def register_delete_endpoint(self):
@self.api.delete(self.models.path + "{id}")
def endpoint(*, session: Session = Depends(self.get_session), id: int):
return delete(session=session, table_cls=self.models.table, id=id)
def register_endpoints(
api: FastAPI,
get_session: Callable,
models: MultipleModels,
limit: Query = Query(default=100, lte=100)
):
_ = RegisterEndpoints(api, get_session, models, limit)
|
[
"sqlmodel.select",
"sqlmodel.Field"
] |
[((5955, 5982), 'fastapi.Query', 'Query', ([], {'default': '(100)', 'lte': '(100)'}), '(default=100, lte=100)\n', (5960, 5982), False, 'from fastapi import Depends, FastAPI, HTTPException, Query\n'), ((7999, 8026), 'fastapi.Query', 'Query', ([], {'default': '(100)', 'lte': '(100)'}), '(default=100, lte=100)\n', (8004, 8026), False, 'from fastapi import Depends, FastAPI, HTTPException, Query\n'), ((4522, 4594), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': '(404)', 'detail': 'f"""{table_cls.__name__} not found"""'}), "(status_code=404, detail=f'{table_cls.__name__} not found')\n", (4535, 4594), False, 'from fastapi import Depends, FastAPI, HTTPException, Query\n'), ((4786, 4858), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': '(404)', 'detail': 'f"""{table_cls.__name__} not found"""'}), "(status_code=404, detail=f'{table_cls.__name__} not found')\n", (4799, 4858), False, 'from fastapi import Depends, FastAPI, HTTPException, Query\n'), ((5234, 5306), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': '(404)', 'detail': 'f"""{table_cls.__name__} not found"""'}), "(status_code=404, detail=f'{table_cls.__name__} not found')\n", (5247, 5306), False, 'from fastapi import Depends, FastAPI, HTTPException, Query\n'), ((6441, 6466), 'fastapi.Depends', 'Depends', (['self.get_session'], {}), '(self.get_session)\n', (6448, 6466), False, 'from fastapi import Depends, FastAPI, HTTPException, Query\n'), ((6767, 6792), 'fastapi.Depends', 'Depends', (['self.get_session'], {}), '(self.get_session)\n', (6774, 6792), False, 'from fastapi import Depends, FastAPI, HTTPException, Query\n'), ((7157, 7182), 'fastapi.Depends', 'Depends', (['self.get_session'], {}), '(self.get_session)\n', (7164, 7182), False, 'from fastapi import Depends, FastAPI, HTTPException, Query\n'), ((7463, 7488), 'fastapi.Depends', 'Depends', (['self.get_session'], {}), '(self.get_session)\n', (7470, 7488), False, 'from fastapi import Depends, FastAPI, HTTPException, Query\n'), ((7765, 7790), 'fastapi.Depends', 'Depends', (['self.get_session'], {}), '(self.get_session)\n', (7772, 7790), False, 'from fastapi import Depends, FastAPI, HTTPException, Query\n'), ((3369, 3406), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (3374, 3406), False, 'from sqlmodel import Field, Session, SQLModel, select\n'), ((4322, 4339), 'sqlmodel.select', 'select', (['table_cls'], {}), '(table_cls)\n', (4328, 4339), False, 'from sqlmodel import Field, Session, SQLModel, select\n')]
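A hypothetical usage sketch of the pieces above: derive the create/update/table models from a small base class and register the CRUD routes. HeroBase/HeroRead, the database URL and the table-creation step are illustrative assumptions:

from typing import Optional

from fastapi import FastAPI
from sqlmodel import Session, SQLModel, create_engine

class HeroBase(SQLModel):
    name: str
    age: Optional[int] = None

class HeroRead(HeroBase):
    id: int

models = MultipleModels(path="/heroes/", base=HeroBase, response=HeroRead)
# models.creation is HeroCreate, models.update is HeroUpdate, models.table is Hero (table=True)

engine = create_engine("sqlite:///database.db")
SQLModel.metadata.create_all(engine)   # creates the generated Hero table

def get_session():
    with Session(engine) as session:
        yield session

api = FastAPI()
register_endpoints(api, get_session, models)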
|
import numpy as nm
from sfepy.mechanics.matcoefs import stiffness_from_youngpoisson
from sfepy.discrete.fem.meshio import UserMeshIO
from sfepy.mesh.mesh_generators import gen_block_mesh
from sfepy import data_dir
def mesh_hook(mesh, mode):
if mode == 'read':
mesh = gen_block_mesh([0.0098, 0.0011, 0.1], [5, 3, 17],
[0, 0, 0.05], name='specimen',
verbose=False)
return mesh
elif mode == 'write':
pass
def optimization_hook(pb):
cnf = pb.conf
out = []
yield pb, out
state = out[-1][1].get_parts()
coors = pb.domain.cmesh.coors
displ = state['u'].reshape((coors.shape[0],3))
# elongation
mcoors = coors[cnf.mnodes, 2]
mdispl = displ[cnf.mnodes, 2]
dl = (mdispl[1] - mdispl[0]) / (mcoors[1] - mcoors[0])
if hasattr(cnf, 'opt_data'):
# compute slope of the force-elongation curve
cnf.opt_data['k'] = cnf.F / dl
yield None
def get_mat(coors, mode, pb):
if mode == 'qp':
# get material data
if hasattr(pb.conf, 'opt_data'):
# from homogenization
D = pb.conf.opt_data['D_homog']
else:
# given values
D = stiffness_from_youngpoisson(3, 150.0e9, 0.3)
nqp = coors.shape[0]
return {'D': nm.tile(D, (nqp, 1, 1))}
def define(is_opt=False):
filename_mesh = UserMeshIO(mesh_hook)
mnodes = (107, 113) # nodes for elongation eval.
regions = {
'Omega': 'all',
'Bottom': ('vertices in (z < 0.001)', 'facet'),
'Top': ('vertices in (z > 0.099)', 'facet'),
}
functions = {
'get_mat': (lambda ts, coors, mode=None, problem=None, **kwargs:
get_mat(coors, mode, problem),),
}
S = 1.083500e-05 # specimen cross-section
F = 5.0e3 # force
materials = {
'solid': 'get_mat',
'load': ({'val': F / S},),
}
fields = {
'displacement': ('real', 'vector', 'Omega', 1),
}
variables = {
'u': ('unknown field', 'displacement', 0),
'v': ('test field', 'displacement', 'u'),
}
ebcs = {
'FixedBottom': ('Bottom', {'u.all': 0.0}),
'FixedTop': ('Top', {'u.0': 0.0, 'u.1': 0.0}),
}
equations = {
'balance_of_forces' :
"""dw_lin_elastic.5.Omega(solid.D, v, u)
= dw_surface_ltr.5.Top(load.val, v)""",
}
solvers = {
'ls': ('ls.scipy_direct', {}),
'newton': ('nls.newton', {'eps_a': 1e-6, 'eps_r': 1.e-6,
'check': 0, 'problem': 'nonlinear'}),
}
options = {
'parametric_hook': 'optimization_hook',
'output_dir' : 'output',
}
return locals()
|
[
"sfepy.discrete.fem.meshio.UserMeshIO",
"sfepy.mesh.mesh_generators.gen_block_mesh",
"sfepy.mechanics.matcoefs.stiffness_from_youngpoisson"
] |
[((1406, 1427), 'sfepy.discrete.fem.meshio.UserMeshIO', 'UserMeshIO', (['mesh_hook'], {}), '(mesh_hook)\n', (1416, 1427), False, 'from sfepy.discrete.fem.meshio import UserMeshIO\n'), ((281, 381), 'sfepy.mesh.mesh_generators.gen_block_mesh', 'gen_block_mesh', (['[0.0098, 0.0011, 0.1]', '[5, 3, 17]', '[0, 0, 0.05]'], {'name': '"""specimen"""', 'verbose': '(False)'}), "([0.0098, 0.0011, 0.1], [5, 3, 17], [0, 0, 0.05], name=\n 'specimen', verbose=False)\n", (295, 381), False, 'from sfepy.mesh.mesh_generators import gen_block_mesh\n'), ((1238, 1289), 'sfepy.mechanics.matcoefs.stiffness_from_youngpoisson', 'stiffness_from_youngpoisson', (['(3)', '(150000000000.0)', '(0.3)'], {}), '(3, 150000000000.0, 0.3)\n', (1265, 1289), False, 'from sfepy.mechanics.matcoefs import stiffness_from_youngpoisson\n'), ((1334, 1357), 'numpy.tile', 'nm.tile', (['D', '(nqp, 1, 1)'], {}), '(D, (nqp, 1, 1))\n', (1341, 1357), True, 'import numpy as nm\n')]
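In optimization_hook, dl is the relative elongation (strain) between the two monitored nodes, so opt_data['k'] = F / dl is force per unit strain. For the homogeneous reference material this should come out close to E*S; a quick arithmetic check, assuming a uniform uniaxial stress state over the gauge section:

E = 150.0e9        # Young's modulus used in get_mat [Pa]
S = 1.083500e-05   # specimen cross-section defined above [m^2]
# stress = F / S = E * strain  =>  k = F / strain = E * S
k_expected = E * S # roughly 1.63e6 N per unit strain
print(k_expected)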
|
import os
import pytest
import time
from fastapi.testclient import TestClient
from sqlmodel import Session, create_engine
from main import app
from database import get_db
from settings import Settings
from alembic.command import upgrade, downgrade
from alembic.config import Config
@pytest.fixture(autouse=True)
def slow_down_tests():
yield
time.sleep(1)
@pytest.fixture(scope="session")
def apply_migrations():
config = Config("alembic.ini")
upgrade(config, "head")
yield
downgrade(config, "base")
@pytest.fixture(name="session")
def session_fixture(apply_migrations: None):
settings = Settings()
engine = create_engine(
settings.db_uri_test, connect_args={"check_same_thread": False}
)
with Session(engine) as session:
yield session
@pytest.fixture(name="client")
def client_fixture(session: Session):
def get_session_override():
return session
app.dependency_overrides[get_db] = get_session_override
client = TestClient(app)
yield client
app.dependency_overrides.clear()
def pytest_configure(config):
"""
Allows plugins and conftest files to perform initial configuration.
This hook is called for every plugin and initial conftest
file after command line options have been parsed.
"""
pytest.access_token = None
def pytest_sessionstart(session):
"""
Called after the Session object has been created and
before performing collection and entering the run test loop.
"""
def pytest_sessionfinish(session, exitstatus):
"""
Called after whole test run finished, right before
returning the exit status to the system.
"""
def pytest_unconfigure(config):
"""
called before test process is exited.
"""
# Delete DB file
db_file = os.path.join('.', 'test.db')
if os.path.exists(db_file):
os.remove(db_file)
|
[
"sqlmodel.create_engine",
"sqlmodel.Session"
] |
[((285, 313), 'pytest.fixture', 'pytest.fixture', ([], {'autouse': '(True)'}), '(autouse=True)\n', (299, 313), False, 'import pytest\n'), ((368, 399), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (382, 399), False, 'import pytest\n'), ((530, 560), 'pytest.fixture', 'pytest.fixture', ([], {'name': '"""session"""'}), "(name='session')\n", (544, 560), False, 'import pytest\n'), ((800, 829), 'pytest.fixture', 'pytest.fixture', ([], {'name': '"""client"""'}), "(name='client')\n", (814, 829), False, 'import pytest\n'), ((351, 364), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (361, 364), False, 'import time\n'), ((437, 458), 'alembic.config.Config', 'Config', (['"""alembic.ini"""'], {}), "('alembic.ini')\n", (443, 458), False, 'from alembic.config import Config\n'), ((463, 486), 'alembic.command.upgrade', 'upgrade', (['config', '"""head"""'], {}), "(config, 'head')\n", (470, 486), False, 'from alembic.command import upgrade, downgrade\n'), ((501, 526), 'alembic.command.downgrade', 'downgrade', (['config', '"""base"""'], {}), "(config, 'base')\n", (510, 526), False, 'from alembic.command import upgrade, downgrade\n'), ((621, 631), 'settings.Settings', 'Settings', ([], {}), '()\n', (629, 631), False, 'from settings import Settings\n'), ((645, 723), 'sqlmodel.create_engine', 'create_engine', (['settings.db_uri_test'], {'connect_args': "{'check_same_thread': False}"}), "(settings.db_uri_test, connect_args={'check_same_thread': False})\n", (658, 723), False, 'from sqlmodel import Session, create_engine\n'), ((996, 1011), 'fastapi.testclient.TestClient', 'TestClient', (['app'], {}), '(app)\n', (1006, 1011), False, 'from fastapi.testclient import TestClient\n'), ((1033, 1065), 'main.app.dependency_overrides.clear', 'app.dependency_overrides.clear', ([], {}), '()\n', (1063, 1065), False, 'from main import app\n'), ((1801, 1829), 'os.path.join', 'os.path.join', (['"""."""', '"""test.db"""'], {}), "('.', 'test.db')\n", (1813, 1829), False, 'import os\n'), ((1837, 1860), 'os.path.exists', 'os.path.exists', (['db_file'], {}), '(db_file)\n', (1851, 1860), False, 'import os\n'), ((747, 762), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (754, 762), False, 'from sqlmodel import Session, create_engine\n'), ((1870, 1888), 'os.remove', 'os.remove', (['db_file'], {}), '(db_file)\n', (1879, 1888), False, 'import os\n')]
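The fixtures import a Settings object exposing db_uri_test and rely on alembic.ini pointing at the same database. A minimal sketch of such a settings module; only db_uri_test is implied by the code above (its SQLite path matches the test.db file removed in pytest_unconfigure), the rest is an assumption:

# settings.py -- hypothetical minimal version
from pydantic import BaseSettings   # with pydantic v2, BaseSettings lives in pydantic_settings

class Settings(BaseSettings):
    db_uri: str = "sqlite:///./app.db"
    db_uri_test: str = "sqlite:///./test.db"

    class Config:
        env_file = ".env"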
|
from typing import AsyncGenerator, Generator
from aiobotocore.client import AioBaseClient
from aiobotocore.session import get_session
from sqlmodel import Session
from sqlmodel.ext.asyncio.session import AsyncSession
from ..core.config import settings
from ..db.db import engine, engine_async
async def get_s3() -> AsyncGenerator[AioBaseClient, None]:
session = get_session()
async with session.create_client(
"s3",
region_name=settings.MINIO_REGION_NAME,
endpoint_url=settings.MINIO_URL,
use_ssl=False,
aws_secret_access_key=settings.MINIO_SECRET_KEY,
aws_access_key_id=settings.MINIO_ACCESS_KEY,
) as client:
yield client
async def get_db_async() -> AsyncGenerator[AsyncSession, None]:
async with AsyncSession(engine_async) as session:
yield session
def get_db() -> Generator[Session, None, None]:
with Session(engine) as session:
yield session
|
[
"sqlmodel.ext.asyncio.session.AsyncSession",
"sqlmodel.Session"
] |
[((370, 383), 'aiobotocore.session.get_session', 'get_session', ([], {}), '()\n', (381, 383), False, 'from aiobotocore.session import get_session\n'), ((777, 803), 'sqlmodel.ext.asyncio.session.AsyncSession', 'AsyncSession', (['engine_async'], {}), '(engine_async)\n', (789, 803), False, 'from sqlmodel.ext.asyncio.session import AsyncSession\n'), ((897, 912), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (904, 912), False, 'from sqlmodel import Session\n')]
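A hedged sketch of how these dependencies are typically consumed in a route; the path, bucket name and router wiring are illustrative, and get_s3/get_db_async are assumed importable from the module above:

from fastapi import APIRouter, Depends
from sqlmodel.ext.asyncio.session import AsyncSession

router = APIRouter()

@router.get("/files/{key}")
async def download_file(
    key: str,
    s3=Depends(get_s3),
    db: AsyncSession = Depends(get_db_async),
):
    # aiobotocore's S3 client exposes the usual botocore operations as coroutines;
    # db is available here for ORM queries via the async session.
    obj = await s3.get_object(Bucket="my-bucket", Key=key)
    body = await obj["Body"].read()
    return {"size": len(body)}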
|
"""
Functions to visualize quadrature points in reference elements.
"""
import numpy as nm
import matplotlib.pyplot as plt
from sfepy.base.base import output
from sfepy.postprocess.plot_dofs import _get_axes, _to2d
from sfepy.postprocess.plot_facets import plot_geometry
def _get_qp(geometry, order):
from sfepy.discrete import Integral
from sfepy.discrete.fem.geometry_element import GeometryElement
aux = Integral('aux', order=order)
coors, weights = aux.get_qp(geometry)
true_order = aux.qps[geometry].order
output('geometry:', geometry, 'order:', order, 'num. points:',
coors.shape[0], 'true_order:', true_order)
output('min. weight:', weights.min())
output('max. weight:', weights.max())
return GeometryElement(geometry), coors, weights
def _get_bqp(geometry, order):
from sfepy.discrete import Integral
from sfepy.discrete.fem.geometry_element import GeometryElement
from sfepy.discrete.fem import Mesh, FEDomain, Field
gel = GeometryElement(geometry)
mesh = Mesh.from_data('aux', gel.coors, None,
[gel.conn[None, :]], [[0]], [geometry])
domain = FEDomain('domain', mesh)
omega = domain.create_region('Omega', 'all')
surf = domain.create_region('Surf', 'vertices of surface', 'facet')
field = Field.from_args('f', nm.float64, shape=1,
region=omega, approx_order=1)
field.setup_surface_data(surf)
integral = Integral('aux', order=order)
field.create_bqp('Surf', integral)
sd = field.surface_data['Surf']
qp = field.qp_coors[(integral.order, sd.bkey)]
output('geometry:', geometry, 'order:', order, 'num. points:',
qp.vals.shape[1], 'true_order:',
integral.qps[gel.surface_facet_name].order)
output('min. weight:', qp.weights.min())
output('max. weight:', qp.weights.max())
return (gel, qp.vals.reshape((-1, mesh.dim)),
nm.tile(qp.weights, qp.vals.shape[0]))
def plot_weighted_points(ax, coors, weights, min_radius=10, max_radius=50,
show_colorbar=False):
"""
Plot points with given coordinates as circles/spheres with radii given by
weights.
"""
dim = coors.shape[1]
ax = _get_axes(ax, dim)
wmin, wmax = weights.min(), weights.max()
if (wmax - wmin) < 1e-12:
nweights = weights * max_radius / wmax
else:
nweights = ((weights - wmin) * (max_radius - min_radius)
/ (wmax - wmin) + min_radius)
coors = _to2d(coors)
sc = ax.scatter(*coors.T, s=nweights, c=weights, alpha=1)
if show_colorbar:
plt.colorbar(sc)
return ax
def label_points(ax, coors):
"""
Label points with their indices.
"""
dim = coors.shape[1]
ax = _get_axes(ax, dim)
shift = 0.02 * (coors.max(0) - coors.min(0))
ccs = coors + shift
for ic, cc in enumerate(ccs):
ax.text(*cc, s='%d' % ic, color='b')
def plot_quadrature(ax, geometry, order, boundary=False,
min_radius=10, max_radius=50,
show_colorbar=False, show_labels=False):
"""
Plot quadrature points for the given geometry and integration order.
The points are plotted as circles/spheres with radii given by quadrature
weights - the weights are mapped to [`min_radius`, `max_radius`] interval.
"""
if not boundary:
gel, coors, weights = _get_qp(geometry, order)
else:
gel, coors, weights = _get_bqp(geometry, order)
dim = coors.shape[1]
ax = _get_axes(ax, dim)
plot_geometry(ax, gel)
plot_weighted_points(ax, coors, weights,
min_radius=min_radius, max_radius=max_radius,
show_colorbar=show_colorbar)
if show_labels:
label_points(ax, coors)
return ax, coors, weights
|
[
"sfepy.postprocess.plot_dofs._to2d",
"sfepy.discrete.fem.geometry_element.GeometryElement",
"sfepy.base.base.output",
"sfepy.postprocess.plot_dofs._get_axes",
"sfepy.discrete.fem.Mesh.from_data",
"sfepy.discrete.Integral",
"sfepy.discrete.fem.Field.from_args",
"sfepy.postprocess.plot_facets.plot_geometry",
"sfepy.discrete.fem.FEDomain"
] |
[((423, 451), 'sfepy.discrete.Integral', 'Integral', (['"""aux"""'], {'order': 'order'}), "('aux', order=order)\n", (431, 451), False, 'from sfepy.discrete import Integral\n'), ((540, 650), 'sfepy.base.base.output', 'output', (['"""geometry:"""', 'geometry', '"""order:"""', 'order', '"""num. points:"""', 'coors.shape[0]', '"""true_order:"""', 'true_order'], {}), "('geometry:', geometry, 'order:', order, 'num. points:', coors.shape[\n 0], 'true_order:', true_order)\n", (546, 650), False, 'from sfepy.base.base import output\n'), ((1003, 1028), 'sfepy.discrete.fem.geometry_element.GeometryElement', 'GeometryElement', (['geometry'], {}), '(geometry)\n', (1018, 1028), False, 'from sfepy.discrete.fem.geometry_element import GeometryElement\n'), ((1041, 1119), 'sfepy.discrete.fem.Mesh.from_data', 'Mesh.from_data', (['"""aux"""', 'gel.coors', 'None', '[gel.conn[None, :]]', '[[0]]', '[geometry]'], {}), "('aux', gel.coors, None, [gel.conn[None, :]], [[0]], [geometry])\n", (1055, 1119), False, 'from sfepy.discrete.fem import Mesh, FEDomain, Field\n'), ((1159, 1183), 'sfepy.discrete.fem.FEDomain', 'FEDomain', (['"""domain"""', 'mesh'], {}), "('domain', mesh)\n", (1167, 1183), False, 'from sfepy.discrete.fem import Mesh, FEDomain, Field\n'), ((1318, 1389), 'sfepy.discrete.fem.Field.from_args', 'Field.from_args', (['"""f"""', 'nm.float64'], {'shape': '(1)', 'region': 'omega', 'approx_order': '(1)'}), "('f', nm.float64, shape=1, region=omega, approx_order=1)\n", (1333, 1389), False, 'from sfepy.discrete.fem import Mesh, FEDomain, Field\n'), ((1469, 1497), 'sfepy.discrete.Integral', 'Integral', (['"""aux"""'], {'order': 'order'}), "('aux', order=order)\n", (1477, 1497), False, 'from sfepy.discrete import Integral\n'), ((1630, 1774), 'sfepy.base.base.output', 'output', (['"""geometry:"""', 'geometry', '"""order:"""', 'order', '"""num. points:"""', 'qp.vals.shape[1]', '"""true_order:"""', 'integral.qps[gel.surface_facet_name].order'], {}), "('geometry:', geometry, 'order:', order, 'num. 
points:', qp.vals.\n shape[1], 'true_order:', integral.qps[gel.surface_facet_name].order)\n", (1636, 1774), False, 'from sfepy.base.base import output\n'), ((2248, 2266), 'sfepy.postprocess.plot_dofs._get_axes', '_get_axes', (['ax', 'dim'], {}), '(ax, dim)\n', (2257, 2266), False, 'from sfepy.postprocess.plot_dofs import _get_axes, _to2d\n'), ((2530, 2542), 'sfepy.postprocess.plot_dofs._to2d', '_to2d', (['coors'], {}), '(coors)\n', (2535, 2542), False, 'from sfepy.postprocess.plot_dofs import _get_axes, _to2d\n'), ((2785, 2803), 'sfepy.postprocess.plot_dofs._get_axes', '_get_axes', (['ax', 'dim'], {}), '(ax, dim)\n', (2794, 2803), False, 'from sfepy.postprocess.plot_dofs import _get_axes, _to2d\n'), ((3551, 3569), 'sfepy.postprocess.plot_dofs._get_axes', '_get_axes', (['ax', 'dim'], {}), '(ax, dim)\n', (3560, 3569), False, 'from sfepy.postprocess.plot_dofs import _get_axes, _to2d\n'), ((3575, 3597), 'sfepy.postprocess.plot_facets.plot_geometry', 'plot_geometry', (['ax', 'gel'], {}), '(ax, gel)\n', (3588, 3597), False, 'from sfepy.postprocess.plot_facets import plot_geometry\n'), ((753, 778), 'sfepy.discrete.fem.geometry_element.GeometryElement', 'GeometryElement', (['geometry'], {}), '(geometry)\n', (768, 778), False, 'from sfepy.discrete.fem.geometry_element import GeometryElement\n'), ((1945, 1982), 'numpy.tile', 'nm.tile', (['qp.weights', 'qp.vals.shape[0]'], {}), '(qp.weights, qp.vals.shape[0])\n', (1952, 1982), True, 'import numpy as nm\n'), ((2636, 2652), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['sc'], {}), '(sc)\n', (2648, 2652), True, 'import matplotlib.pyplot as plt\n')]
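A minimal usage sketch of plot_quadrature, assuming this module is importable as sfepy.postprocess.plot_quadrature ('2_4' is the quadrilateral reference element in sfepy's geometry naming):

import matplotlib.pyplot as plt
from sfepy.postprocess.plot_quadrature import plot_quadrature   # module path assumed

ax, coors, weights = plot_quadrature(None, '2_4', order=3,
                                    show_colorbar=True, show_labels=True)
plt.show()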
|
from fastapi import Depends, Response
from fastapi.routing import APIRouter
from pydantic import BaseModel # pylint: disable=E0611
from sqlmodel import Session, select
from starlette.responses import JSONResponse
from fastapi_server.database.database import get_session
from fastapi_server.models.user import User
login_router = APIRouter()
class LoginModel(BaseModel):
email: str
password: str
# TODO: Replace /login endpoint when Response is available in strawberry query info-context
@login_router.post('/login')
async def login(login_data: LoginModel, session: Session = Depends(get_session)) -> Response:
statement = select(User).where(User.email == login_data.email, User.password_hashed == login_data.password)
user = session.exec(statement).first()
if user is None:
raise FileNotFoundError('Email and password do not match')
# Set message and cookies in frontend
content = {'message': 'Come to the dark side, we have cookies'}
response = JSONResponse(content=content)
response.set_cookie(key='fakesession', value='fake-cookie-session-value', httponly=True, secure=True, expires=3600)
return response
|
[
"sqlmodel.select"
] |
[((332, 343), 'fastapi.routing.APIRouter', 'APIRouter', ([], {}), '()\n', (341, 343), False, 'from fastapi.routing import APIRouter\n'), ((590, 610), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (597, 610), False, 'from fastapi import Depends, Response\n'), ((994, 1023), 'starlette.responses.JSONResponse', 'JSONResponse', ([], {'content': 'content'}), '(content=content)\n', (1006, 1023), False, 'from starlette.responses import JSONResponse\n'), ((641, 653), 'sqlmodel.select', 'select', (['User'], {}), '(User)\n', (647, 653), False, 'from sqlmodel import Session, select\n')]
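Note that the statement above compares the raw login_data.password against the stored password_hashed column, and a mismatch raises FileNotFoundError, which FastAPI reports as a 500. A hedged sketch of the more common pattern: look the user up by email, verify the password with a hashing library, and answer 401 on failure (the passlib API is real; the rest mirrors the handler above):

from fastapi import HTTPException
from passlib.context import CryptContext
from sqlmodel import Session, select

pwd_context = CryptContext(schemes=["bcrypt"], deprecated="auto")

def verify_login(session: Session, email: str, password: str) -> User:
    user = session.exec(select(User).where(User.email == email)).first()
    if user is None or not pwd_context.verify(password, user.password_hashed):
        raise HTTPException(status_code=401, detail="Email and password do not match")
    return user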
|
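The login route in the row above returns a JSONResponse that sets an httponly 'fakesession' cookie; note that a failed credential lookup raises FileNotFoundError, which FastAPI surfaces as a 500 rather than a 401. A minimal sketch of exercising the endpoint with FastAPI's TestClient - the application object, the import path, the credentials and the overridden database session are illustrative assumptions, not part of the dataset row:

# Hypothetical harness; assumes the module above is importable under this path
# and that get_session could be overridden with a session bound to a test DB.
from fastapi import FastAPI
from fastapi.testclient import TestClient
from fastapi_server.routes.login import login_router  # assumed import path

app = FastAPI()
app.include_router(login_router)
client = TestClient(app)
response = client.post('/login', json={'email': 'user@example.com', 'password': 'hashed-pw'})
print(response.status_code, response.cookies.get('fakesession'))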
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 28 09:33:53 2020
@author: dhulls
"""
from __future__ import print_function
from __future__ import absolute_import
from argparse import ArgumentParser
import numpy as nm
import sys
sys.path.append('.')
from sfepy.base.base import IndexedStruct, Struct
from sfepy.discrete import (FieldVariable, Material, Integral, Function,
Equation, Equations, Problem)
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.terms import Term
from sfepy.discrete.conditions import Conditions, EssentialBC, InitialCondition
from sfepy.solvers.ls import ScipyDirect
from sfepy.solvers.nls import Newton
from sfepy.postprocess.viewer import Viewer
from sfepy.postprocess.probes_vtk import ProbeFromFile, Probe
import numpy as np
helps = {
'show' : 'show the results figure',
}
from sfepy import data_dir
parser = ArgumentParser()
parser.add_argument('--version', action='version', version='%(prog)s')
parser.add_argument('-s', '--show',
action="store_true", dest='show',
default=False, help=helps['show'])
options = parser.parse_args()
mesh = Mesh.from_file(data_dir + '/meshes/3d/fluid_mesh.inp')
domain = FEDomain('domain', mesh)
omega = domain.create_region('Omega', 'all')
field_1 = Field.from_args(name='3_velocity', dtype=nm.float64, shape=3, region=omega, approx_order=1)
field_2 = Field.from_args(name='pressure', dtype=nm.float64, shape=1, region=omega, approx_order=1)
region_0 = domain.create_region(name='Walls1', select='vertices in (y < -0.049)', kind='facet')
region_1 = domain.create_region(name='Walls2', select='vertices in (y > 0.049)', kind='facet')
region_2 = domain.create_region(name='Inlet', select='vertices in (x < -0.499)', kind='facet')
region_3 = domain.create_region(name='Outlet', select='vertices in (x > -0.499)', kind='facet')
ebc_1 = EssentialBC(name='Walls1', region=region_0, dofs={'u.[0,1,2]' : 0.0})
ebc_2 = EssentialBC(name='Walls2', region=region_1, dofs={'u.[0,1,2]' : 0.0})
ebc_3 = EssentialBC(name='Inlet', region=region_2, dofs={'u.0' : 1.0, 'u.[1,2]' : 0.0})
ebc_4 = EssentialBC(name='Outlet', region=region_3, dofs={'p':0.0, 'u.[1,2]' : 0.0})
viscosity = Material(name='viscosity', value=1.25e-3)
variable_1 = FieldVariable('u', 'unknown', field_1)
variable_2 = FieldVariable(name='v', kind='test', field=field_1, primary_var_name='u')
variable_3 = FieldVariable(name='p', kind='unknown', field=field_2)
variable_4 = FieldVariable(name='q', kind='test', field=field_2, primary_var_name='p')
integral_1 = Integral('i1', order=2)
integral_2 = Integral('i2', order=3)
t1 = Term.new(name='dw_div_grad(viscosity.value, v, u)',
integral=integral_2, region=omega, viscosity=viscosity, v=variable_2, u=variable_1)
t2 = Term.new(name='dw_convect(v, u)',
integral=integral_2, region=omega, v=variable_2, u=variable_1)
t3 = Term.new(name='dw_stokes(v, p)',
integral=integral_1, region=omega, v=variable_2, p=variable_3)
t4 = Term.new(name='dw_stokes(u, q)',
integral=integral_1, region=omega, u=variable_1, q=variable_4)
eq1 = Equation('balance', t1+t2-t3)
eq2 = Equation('incompressibility', t4)
eqs = Equations([eq1,eq2])
ls = ScipyDirect({})
nls_status = IndexedStruct()
nls = Newton({'i_max' : 20, 'eps_a' : 1e-8, 'eps_r' : 1.0, 'macheps' : 1e-16, 'lin_red' : 1e-2, 'ls_red' : 0.1, 'ls_red_warp' : 0.001, 'ls_on' : 0.99999, 'ls_min' : 1e-5, 'check' : 0, 'delta' : 1e-6}, lin_solver=ls, status=nls_status)
pb = Problem('Navier-Stokes', equations=eqs)
pb.set_bcs(ebcs=Conditions([ebc_1, ebc_2, ebc_3]))
pb.set_solver(nls)
status = IndexedStruct()
state = pb.solve(status=status, save_results=True)
out = state.create_output_dict()
pb.save_state('Navier_Stokes.vtk', out=out)
view = Viewer('Navier_Stokes.vtk')
view(rel_scaling=2,
is_scalar_bar=True, is_wireframe=True)
|
[
"sfepy.discrete.Equations",
"sfepy.discrete.conditions.EssentialBC",
"sfepy.solvers.ls.ScipyDirect",
"sfepy.discrete.Equation",
"sfepy.solvers.nls.Newton",
"sfepy.discrete.conditions.Conditions",
"sfepy.discrete.fem.Mesh.from_file",
"sfepy.discrete.Integral",
"sfepy.discrete.fem.Field.from_args",
"sfepy.terms.Term.new",
"sfepy.postprocess.viewer.Viewer",
"sfepy.discrete.FieldVariable",
"sfepy.base.base.IndexedStruct",
"sfepy.discrete.Material",
"sfepy.discrete.Problem",
"sfepy.discrete.fem.FEDomain"
] |
[((253, 273), 'sys.path.append', 'sys.path.append', (['"""."""'], {}), "('.')\n", (268, 273), False, 'import sys\n'), ((912, 928), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (926, 928), False, 'from argparse import ArgumentParser\n'), ((1183, 1237), 'sfepy.discrete.fem.Mesh.from_file', 'Mesh.from_file', (["(data_dir + '/meshes/3d/fluid_mesh.inp')"], {}), "(data_dir + '/meshes/3d/fluid_mesh.inp')\n", (1197, 1237), False, 'from sfepy.discrete.fem import Mesh, FEDomain, Field\n'), ((1247, 1271), 'sfepy.discrete.fem.FEDomain', 'FEDomain', (['"""domain"""', 'mesh'], {}), "('domain', mesh)\n", (1255, 1271), False, 'from sfepy.discrete.fem import Mesh, FEDomain, Field\n'), ((1328, 1423), 'sfepy.discrete.fem.Field.from_args', 'Field.from_args', ([], {'name': '"""3_velocity"""', 'dtype': 'nm.float64', 'shape': '(3)', 'region': 'omega', 'approx_order': '(1)'}), "(name='3_velocity', dtype=nm.float64, shape=3, region=omega,\n approx_order=1)\n", (1343, 1423), False, 'from sfepy.discrete.fem import Mesh, FEDomain, Field\n'), ((1430, 1523), 'sfepy.discrete.fem.Field.from_args', 'Field.from_args', ([], {'name': '"""pressure"""', 'dtype': 'nm.float64', 'shape': '(1)', 'region': 'omega', 'approx_order': '(1)'}), "(name='pressure', dtype=nm.float64, shape=1, region=omega,\n approx_order=1)\n", (1445, 1523), False, 'from sfepy.discrete.fem import Mesh, FEDomain, Field\n'), ((1912, 1980), 'sfepy.discrete.conditions.EssentialBC', 'EssentialBC', ([], {'name': '"""Walls1"""', 'region': 'region_0', 'dofs': "{'u.[0,1,2]': 0.0}"}), "(name='Walls1', region=region_0, dofs={'u.[0,1,2]': 0.0})\n", (1923, 1980), False, 'from sfepy.discrete.conditions import Conditions, EssentialBC, InitialCondition\n'), ((1990, 2058), 'sfepy.discrete.conditions.EssentialBC', 'EssentialBC', ([], {'name': '"""Walls2"""', 'region': 'region_1', 'dofs': "{'u.[0,1,2]': 0.0}"}), "(name='Walls2', region=region_1, dofs={'u.[0,1,2]': 0.0})\n", (2001, 2058), False, 'from sfepy.discrete.conditions import Conditions, EssentialBC, InitialCondition\n'), ((2068, 2145), 'sfepy.discrete.conditions.EssentialBC', 'EssentialBC', ([], {'name': '"""Inlet"""', 'region': 'region_2', 'dofs': "{'u.0': 1.0, 'u.[1,2]': 0.0}"}), "(name='Inlet', region=region_2, dofs={'u.0': 1.0, 'u.[1,2]': 0.0})\n", (2079, 2145), False, 'from sfepy.discrete.conditions import Conditions, EssentialBC, InitialCondition\n'), ((2156, 2232), 'sfepy.discrete.conditions.EssentialBC', 'EssentialBC', ([], {'name': '"""Outlet"""', 'region': 'region_3', 'dofs': "{'p': 0.0, 'u.[1,2]': 0.0}"}), "(name='Outlet', region=region_3, dofs={'p': 0.0, 'u.[1,2]': 0.0})\n", (2167, 2232), False, 'from sfepy.discrete.conditions import Conditions, EssentialBC, InitialCondition\n'), ((2246, 2287), 'sfepy.discrete.Material', 'Material', ([], {'name': '"""viscosity"""', 'value': '(0.00125)'}), "(name='viscosity', value=0.00125)\n", (2254, 2287), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem\n'), ((2302, 2340), 'sfepy.discrete.FieldVariable', 'FieldVariable', (['"""u"""', '"""unknown"""', 'field_1'], {}), "('u', 'unknown', field_1)\n", (2315, 2340), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem\n'), ((2354, 2427), 'sfepy.discrete.FieldVariable', 'FieldVariable', ([], {'name': '"""v"""', 'kind': '"""test"""', 'field': 'field_1', 'primary_var_name': '"""u"""'}), "(name='v', kind='test', field=field_1, primary_var_name='u')\n", (2367, 2427), False, 'from sfepy.discrete 
import FieldVariable, Material, Integral, Function, Equation, Equations, Problem\n'), ((2441, 2495), 'sfepy.discrete.FieldVariable', 'FieldVariable', ([], {'name': '"""p"""', 'kind': '"""unknown"""', 'field': 'field_2'}), "(name='p', kind='unknown', field=field_2)\n", (2454, 2495), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem\n'), ((2509, 2582), 'sfepy.discrete.FieldVariable', 'FieldVariable', ([], {'name': '"""q"""', 'kind': '"""test"""', 'field': 'field_2', 'primary_var_name': '"""p"""'}), "(name='q', kind='test', field=field_2, primary_var_name='p')\n", (2522, 2582), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem\n'), ((2597, 2620), 'sfepy.discrete.Integral', 'Integral', (['"""i1"""'], {'order': '(2)'}), "('i1', order=2)\n", (2605, 2620), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem\n'), ((2634, 2657), 'sfepy.discrete.Integral', 'Integral', (['"""i2"""'], {'order': '(3)'}), "('i2', order=3)\n", (2642, 2657), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem\n'), ((2664, 2803), 'sfepy.terms.Term.new', 'Term.new', ([], {'name': '"""dw_div_grad(viscosity.value, v, u)"""', 'integral': 'integral_2', 'region': 'omega', 'viscosity': 'viscosity', 'v': 'variable_2', 'u': 'variable_1'}), "(name='dw_div_grad(viscosity.value, v, u)', integral=integral_2,\n region=omega, viscosity=viscosity, v=variable_2, u=variable_1)\n", (2672, 2803), False, 'from sfepy.terms import Term\n'), ((2819, 2920), 'sfepy.terms.Term.new', 'Term.new', ([], {'name': '"""dw_convect(v, u)"""', 'integral': 'integral_2', 'region': 'omega', 'v': 'variable_2', 'u': 'variable_1'}), "(name='dw_convect(v, u)', integral=integral_2, region=omega, v=\n variable_2, u=variable_1)\n", (2827, 2920), False, 'from sfepy.terms import Term\n'), ((2935, 3035), 'sfepy.terms.Term.new', 'Term.new', ([], {'name': '"""dw_stokes(v, p)"""', 'integral': 'integral_1', 'region': 'omega', 'v': 'variable_2', 'p': 'variable_3'}), "(name='dw_stokes(v, p)', integral=integral_1, region=omega, v=\n variable_2, p=variable_3)\n", (2943, 3035), False, 'from sfepy.terms import Term\n'), ((3050, 3150), 'sfepy.terms.Term.new', 'Term.new', ([], {'name': '"""dw_stokes(u, q)"""', 'integral': 'integral_1', 'region': 'omega', 'u': 'variable_1', 'q': 'variable_4'}), "(name='dw_stokes(u, q)', integral=integral_1, region=omega, u=\n variable_1, q=variable_4)\n", (3058, 3150), False, 'from sfepy.terms import Term\n'), ((3166, 3199), 'sfepy.discrete.Equation', 'Equation', (['"""balance"""', '(t1 + t2 - t3)'], {}), "('balance', t1 + t2 - t3)\n", (3174, 3199), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem\n'), ((3202, 3235), 'sfepy.discrete.Equation', 'Equation', (['"""incompressibility"""', 't4'], {}), "('incompressibility', t4)\n", (3210, 3235), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem\n'), ((3242, 3263), 'sfepy.discrete.Equations', 'Equations', (['[eq1, eq2]'], {}), '([eq1, eq2])\n', (3251, 3263), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem\n'), ((3269, 3284), 'sfepy.solvers.ls.ScipyDirect', 'ScipyDirect', (['{}'], {}), '({})\n', (3280, 3284), False, 'from sfepy.solvers.ls import ScipyDirect\n'), ((3298, 3313), 
'sfepy.base.base.IndexedStruct', 'IndexedStruct', ([], {}), '()\n', (3311, 3313), False, 'from sfepy.base.base import IndexedStruct, Struct\n'), ((3320, 3553), 'sfepy.solvers.nls.Newton', 'Newton', (["{'i_max': 20, 'eps_a': 1e-08, 'eps_r': 1.0, 'macheps': 1e-16, 'lin_red': \n 0.01, 'ls_red': 0.1, 'ls_red_warp': 0.001, 'ls_on': 0.99999, 'ls_min': \n 1e-05, 'check': 0, 'delta': 1e-06}"], {'lin_solver': 'ls', 'status': 'nls_status'}), "({'i_max': 20, 'eps_a': 1e-08, 'eps_r': 1.0, 'macheps': 1e-16,\n 'lin_red': 0.01, 'ls_red': 0.1, 'ls_red_warp': 0.001, 'ls_on': 0.99999,\n 'ls_min': 1e-05, 'check': 0, 'delta': 1e-06}, lin_solver=ls, status=\n nls_status)\n", (3326, 3553), False, 'from sfepy.solvers.nls import Newton\n'), ((3554, 3593), 'sfepy.discrete.Problem', 'Problem', (['"""Navier-Stokes"""'], {'equations': 'eqs'}), "('Navier-Stokes', equations=eqs)\n", (3561, 3593), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem\n'), ((3673, 3688), 'sfepy.base.base.IndexedStruct', 'IndexedStruct', ([], {}), '()\n', (3686, 3688), False, 'from sfepy.base.base import IndexedStruct, Struct\n'), ((3826, 3853), 'sfepy.postprocess.viewer.Viewer', 'Viewer', (['"""Navier_Stokes.vtk"""'], {}), "('Navier_Stokes.vtk')\n", (3832, 3853), False, 'from sfepy.postprocess.viewer import Viewer\n'), ((3610, 3643), 'sfepy.discrete.conditions.Conditions', 'Conditions', (['[ebc_1, ebc_2, ebc_3]'], {}), '([ebc_1, ebc_2, ebc_3])\n', (3620, 3643), False, 'from sfepy.discrete.conditions import Conditions, EssentialBC, InitialCondition\n')]
|
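For orientation, the four terms assembled in the script above correspond, up to sfepy's sign conventions, to the standard weak form of the steady incompressible Navier-Stokes equations: dw_div_grad is the viscous term, dw_convect the convection term, and the two dw_stokes terms couple pressure and velocity divergence. Schematically, with test functions v and q:

% balance equation (eq1 = t1 + t2 - t3 = 0), for all test velocities v
\int_\Omega \nu \, \nabla v : \nabla u \, d\Omega
  + \int_\Omega ((u \cdot \nabla) u) \cdot v \, d\Omega
  - \int_\Omega p \, (\nabla \cdot v) \, d\Omega = 0
% incompressibility (eq2 = t4 = 0), for all test pressures q
\int_\Omega q \, (\nabla \cdot u) \, d\Omega = 0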
"""
Nonlinear solvers.
"""
import time
import numpy as nm
import numpy.linalg as nla
from sfepy.base.base import output, get_default, debug, Struct
from sfepy.base.log import Log, get_logging_conf
from sfepy.solvers.solvers import SolverMeta, NonlinearSolver
def check_tangent_matrix(conf, vec_x0, fun, fun_grad):
"""
Verify the correctness of the tangent matrix as computed by `fun_grad()` by
comparing it with its finite difference approximation evaluated by
repeatedly calling `fun()` with `vec_x0` items perturbed by a small delta.
"""
vec_x = vec_x0.copy()
delta = conf.delta
vec_r = fun(vec_x) # Update state.
mtx_a0 = fun_grad(vec_x)
mtx_a = mtx_a0.tocsc()
mtx_d = mtx_a.copy()
mtx_d.data[:] = 0.0
vec_dx = nm.zeros_like(vec_r)
for ic in range(vec_dx.shape[0]):
vec_dx[ic] = delta
xx = vec_x.copy() - vec_dx
vec_r1 = fun(xx)
vec_dx[ic] = -delta
xx = vec_x.copy() - vec_dx
vec_r2 = fun(xx)
vec_dx[ic] = 0.0;
vec = 0.5 * (vec_r2 - vec_r1) / delta
ir = mtx_a.indices[mtx_a.indptr[ic]:mtx_a.indptr[ic+1]]
mtx_d.data[mtx_a.indptr[ic]:mtx_a.indptr[ic+1]] = vec[ir]
vec_r = fun(vec_x) # Restore.
tt = time.clock()
output(mtx_a, '.. analytical')
output(mtx_d, '.. difference')
import sfepy.base.plotutils as plu
plu.plot_matrix_diff(mtx_d, mtx_a, delta, ['difference', 'analytical'],
conf.check)
return time.clock() - tt
def conv_test(conf, it, err, err0):
"""
Nonlinear solver convergence test.
Parameters
----------
conf : Struct instance
The nonlinear solver configuration.
it : int
The current iteration.
err : float
The current iteration error.
err0 : float
The initial error.
Returns
-------
status : int
The convergence status: -1 = no convergence (yet), 0 = solver converged
- tolerances were met, 1 = max. number of iterations reached.
"""
status = -1
if (abs(err0) < conf.macheps):
err_r = 0.0
else:
err_r = err / err0
output('nls: iter: %d, residual: %e (rel: %e)' % (it, err, err_r))
conv_a = err < conf.eps_a
if it > 0:
conv_r = err_r < conf.eps_r
if conv_a and conv_r:
status = 0
elif (conf.get('eps_mode', '') == 'or') and (conv_a or conv_r):
status = 0
else:
if conv_a:
status = 0
if (status == -1) and (it >= conf.i_max):
status = 1
return status
class Newton(NonlinearSolver):
r"""
Solves a nonlinear system :math:`f(x) = 0` using the Newton method with
backtracking line-search, starting with an initial guess :math:`x^0`.
"""
name = 'nls.newton'
__metaclass__ = SolverMeta
_parameters = [
('i_max', 'int', 1, False,
'The maximum number of iterations.'),
('eps_a', 'float', 1e-10, False,
'The absolute tolerance for the residual, i.e. :math:`||f(x^i)||`.'),
('eps_r', 'float', 1.0, False,
"""The relative tolerance for the residual, i.e. :math:`||f(x^i)|| /
||f(x^0)||`."""),
('eps_mode', "'and' or 'or'", 'and', False,
"""The logical operator to use for combining the absolute and relative
tolerances."""),
('macheps', 'float', nm.finfo(nm.float64).eps, False,
'The float considered to be machine "zero".'),
('lin_red', 'float', 1.0, False,
"""The linear system solution error should be smaller than (`eps_a` *
`lin_red`), otherwise a warning is printed."""),
('lin_precision', 'float or None', None, False,
"""If not None, the linear system solution tolerances are set in each
nonlinear iteration relative to the current residual norm by the
`lin_precision` factor. Ignored for direct linear solvers."""),
('ls_on', 'float', 0.99999, False,
"""Start the backtracking line-search by reducing the step, if
:math:`||f(x^i)|| / ||f(x^{i-1})||` is larger than `ls_on`."""),
('ls_red', '0.0 < float < 1.0', 0.1, False,
'The step reduction factor in case of correct residual assembling.'),
('ls_red_warp', '0.0 < float < 1.0', 0.001, False,
"""The step reduction factor in case of failed residual assembling
(e.g. the "warp violation" error caused by a negative volume
element resulting from too large deformations)."""),
('ls_min', '0.0 < float < 1.0', 1e-5, False,
'The minimum step reduction factor.'),
('give_up_warp', 'bool', False, False,
'If True, abort on the "warp violation" error.'),
('check', '0, 1 or 2', 0, False,
"""If >= 1, check the tangent matrix using finite differences. If 2,
plot the resulting sparsity patterns."""),
('delta', 'float', 1e-6, False,
r"""If `check >= 1`, the finite difference matrix is taken as
:math:`A_{ij} = \frac{f_i(x_j + \delta) - f_i(x_j - \delta)}{2
\delta}`."""),
('log', 'dict or None', None, False,
"""If not None, log the convergence according to the configuration in
the following form: ``{'text' : 'log.txt', 'plot' : 'log.pdf'}``.
Each of the dict items can be None."""),
('is_linear', 'bool', False, False,
'If True, the problem is considered to be linear.'),
]
def __init__(self, conf, **kwargs):
NonlinearSolver.__init__(self, conf, **kwargs)
conf = self.conf
log = get_logging_conf(conf)
conf.log = log = Struct(name='log_conf', **log)
conf.is_any_log = (log.text is not None) or (log.plot is not None)
if conf.is_any_log:
self.log = Log([[r'$||r||$'], ['iteration']],
xlabels=['', 'all iterations'],
ylabels=[r'$||r||$', 'iteration'],
yscales=['log', 'linear'],
is_plot=conf.log.plot is not None,
log_filename=conf.log.text,
formats=[['%.8e'], ['%d']])
else:
self.log = None
def __call__(self, vec_x0, conf=None, fun=None, fun_grad=None,
lin_solver=None, iter_hook=None, status=None):
"""
Nonlinear system solver call.
Solves a nonlinear system :math:`f(x) = 0` using the Newton method with
backtracking line-search, starting with an initial guess :math:`x^0`.
Parameters
----------
vec_x0 : array
The initial guess vector :math:`x_0`.
conf : Struct instance, optional
The solver configuration parameters,
fun : function, optional
The function :math:`f(x)` whose zero is sought - the residual.
fun_grad : function, optional
The gradient of :math:`f(x)` - the tangent matrix.
lin_solver : LinearSolver instance, optional
The linear solver for each nonlinear iteration.
iter_hook : function, optional
User-supplied function to call before each iteration.
status : dict-like, optional
The user-supplied object to hold convergence statistics.
Notes
-----
* The optional parameters except `iter_hook` and `status` need
to be given either here or upon `Newton` construction.
* Setting `conf.is_linear == True` means a pre-assembled and possibly
pre-solved matrix. This is mostly useful for linear time-dependent
problems.
"""
conf = get_default(conf, self.conf)
fun = get_default(fun, self.fun)
fun_grad = get_default(fun_grad, self.fun_grad)
lin_solver = get_default(lin_solver, self.lin_solver)
iter_hook = get_default(iter_hook, self.iter_hook)
status = get_default(status, self.status)
ls_eps_a, ls_eps_r = lin_solver.get_tolerance()
eps_a = get_default(ls_eps_a, 1.0)
eps_r = get_default(ls_eps_r, 1.0)
lin_red = conf.eps_a * conf.lin_red
time_stats = {}
vec_x = vec_x0.copy()
vec_x_last = vec_x0.copy()
vec_dx = None
if self.log is not None:
self.log.plot_vlines(color='r', linewidth=1.0)
err = err0 = -1.0
err_last = -1.0
it = 0
while 1:
if iter_hook is not None:
iter_hook(self, vec_x, it, err, err0)
ls = 1.0
vec_dx0 = vec_dx;
while 1:
tt = time.clock()
try:
vec_r = fun(vec_x)
except ValueError:
if (it == 0) or (ls < conf.ls_min):
output('giving up!')
raise
else:
ok = False
else:
ok = True
time_stats['rezidual'] = time.clock() - tt
if ok:
try:
err = nla.norm(vec_r)
except:
output('infs or nans in the residual:', vec_r)
output(nm.isfinite(vec_r).all())
debug()
if self.log is not None:
self.log(err, it)
if it == 0:
err0 = err;
break
if err < (err_last * conf.ls_on): break
red = conf.ls_red;
output('linesearch: iter %d, (%.5e < %.5e) (new ls: %e)'
% (it, err, err_last * conf.ls_on, red * ls))
else: # Failure.
if conf.give_up_warp:
output('giving up!')
break
red = conf.ls_red_warp;
output('rezidual computation failed for iter %d'
' (new ls: %e)!' % (it, red * ls))
if ls < conf.ls_min:
output('linesearch failed, continuing anyway')
break
ls *= red;
vec_dx = ls * vec_dx0;
vec_x = vec_x_last.copy() - vec_dx
# End residual loop.
if self.log is not None:
self.log.plot_vlines([1], color='g', linewidth=0.5)
err_last = err;
vec_x_last = vec_x.copy()
condition = conv_test(conf, it, err, err0)
if condition >= 0:
break
if (not ok) and conf.give_up_warp:
condition = 2
break
tt = time.clock()
if not conf.is_linear:
mtx_a = fun_grad(vec_x)
else:
mtx_a = fun_grad('linear')
time_stats['matrix'] = time.clock() - tt
if conf.check:
tt = time.clock()
wt = check_tangent_matrix(conf, vec_x, fun, fun_grad)
time_stats['check'] = time.clock() - tt - wt
if conf.lin_precision is not None:
if ls_eps_a is not None:
eps_a = max(err * conf.lin_precision, ls_eps_a)
elif ls_eps_r is not None:
eps_r = max(conf.lin_precision, ls_eps_r)
lin_red = max(eps_a, err * eps_r)
if conf.verbose:
output('solving linear system...')
tt = time.clock()
vec_dx = lin_solver(vec_r, x0=vec_x,
eps_a=eps_a, eps_r=eps_r, mtx=mtx_a)
time_stats['solve'] = time.clock() - tt
if conf.verbose:
output('...done')
for kv in time_stats.iteritems():
output('%10s: %7.2f [s]' % kv)
vec_e = mtx_a * vec_dx - vec_r
lerr = nla.norm(vec_e)
if lerr > lin_red:
output('warning: linear system solution precision is lower')
output('then the value set in solver options! (err = %e < %e)'
% (lerr, lin_red))
vec_x -= vec_dx
it += 1
if status is not None:
status['time_stats'] = time_stats
status['err0'] = err0
status['err'] = err
status['n_iter'] = it
status['condition'] = condition
if conf.log.plot is not None:
if self.log is not None:
self.log(save_figure=conf.log.plot)
return vec_x
class ScipyBroyden(NonlinearSolver):
"""
Interface to Broyden and Anderson solvers from ``scipy.optimize``.
"""
name = 'nls.scipy_broyden_like'
__metaclass__ = SolverMeta
_parameters = [
('method', 'str', 'anderson', False,
'The name of the solver in ``scipy.optimize``.'),
('i_max', 'int', 10, False,
'The maximum number of iterations.'),
('alpha', 'float', 0.9, False,
'See ``scipy.optimize``.'),
('M', 'float', 5, False,
'See ``scipy.optimize``.'),
('f_tol', 'float', 1e-6, False,
'See ``scipy.optimize``.'),
('w0', 'float', 0.1, False,
'See ``scipy.optimize``.'),
]
def __init__(self, conf, **kwargs):
NonlinearSolver.__init__(self, conf, **kwargs)
self.set_method(self.conf)
def set_method(self, conf):
import scipy.optimize as so
try:
solver = getattr(so, conf.method)
except AttributeError:
output('scipy solver %s does not exist!' % conf.method)
output('using broyden3 instead')
solver = so.broyden3
self.solver = solver
def __call__(self, vec_x0, conf=None, fun=None, fun_grad=None,
lin_solver=None, iter_hook=None, status=None):
if conf is not None:
self.set_method(conf)
else:
conf = self.conf
fun = get_default(fun, self.fun)
status = get_default(status, self.status)
tt = time.clock()
kwargs = {'iter' : conf.i_max,
'alpha' : conf.alpha,
'verbose' : conf.verbose}
if conf.method == 'broyden_generalized':
kwargs.update({'M' : conf.M})
elif conf.method in ['anderson', 'anderson2']:
kwargs.update({'M' : conf.M, 'w0' : conf.w0})
if conf.method in ['anderson', 'anderson2',
'broyden', 'broyden2' , 'newton_krylov']:
kwargs.update({'f_tol' : conf.f_tol })
vec_x = self.solver(fun, vec_x0, **kwargs)
vec_x = nm.asarray(vec_x)
if status is not None:
status['time_stats'] = time.clock() - tt
return vec_x
class PETScNonlinearSolver(NonlinearSolver):
"""
Interface to PETSc SNES (Scalable Nonlinear Equations Solvers).
The solver supports parallel use with a given MPI communicator (see `comm`
argument of :func:`PETScNonlinearSolver.__init__()`). Returns a (global)
PETSc solution vector instead of a (local) numpy array, when given a PETSc
initial guess vector.
For parallel use, the `fun` and `fun_grad` callbacks should be provided by
:class:`PETScParallelEvaluator
<sfepy.parallel.evaluate.PETScParallelEvaluator>`.
"""
name = 'nls.petsc'
__metaclass__ = SolverMeta
_parameters = [
('method', 'str', 'newtonls', False,
'The SNES type.'),
('i_max', 'int', 10, False,
'The maximum number of iterations.'),
('if_max', 'int', 100, False,
'The maximum number of function evaluations.'),
('eps_a', 'float', 1e-10, False,
'The absolute tolerance for the residual, i.e. :math:`||f(x^i)||`.'),
('eps_r', 'float', 1.0, False,
"""The relative tolerance for the residual, i.e. :math:`||f(x^i)|| /
||f(x^0)||`."""),
('eps_s', 'float', 0.0, False,
"""The convergence tolerance in terms of the norm of the change in
the solution between steps,
        i.e. :math:`||\delta x|| < \epsilon_s ||x||`."""),
]
def __init__(self, conf, pmtx=None, prhs=None, comm=None, **kwargs):
if comm is None:
try:
import petsc4py
petsc4py.init([])
except ImportError:
msg = 'cannot import petsc4py!'
raise ImportError(msg)
from petsc4py import PETSc as petsc
NonlinearSolver.__init__(self, conf, petsc=petsc,
pmtx=pmtx, prhs=prhs, comm=comm, **kwargs)
def __call__(self, vec_x0, conf=None, fun=None, fun_grad=None,
lin_solver=None, iter_hook=None, status=None,
pmtx=None, prhs=None, comm=None):
conf = self.conf
fun = get_default(fun, self.fun)
fun_grad = get_default(fun_grad, self.fun_grad)
lin_solver = get_default(lin_solver, self.lin_solver)
iter_hook = get_default(iter_hook, self.iter_hook)
status = get_default(status, self.status)
pmtx = get_default(pmtx, self.pmtx)
prhs = get_default(prhs, self.prhs)
comm = get_default(comm, self.comm)
tt = time.clock()
if isinstance(vec_x0, self.petsc.Vec):
psol = vec_x0
else:
psol = pmtx.getVecLeft()
psol[...] = vec_x0
snes = self.petsc.SNES()
snes.create(comm)
snes.setType(conf.method)
ksp = lin_solver.create_ksp()
snes.setKSP(ksp)
ls_conf = lin_solver.conf
ksp.setTolerances(atol=ls_conf.eps_a, rtol=ls_conf.eps_r,
divtol=ls_conf.eps_d, max_it=ls_conf.i_max)
snes.setFunction(fun, prhs)
snes.setJacobian(fun_grad, pmtx)
snes.setTolerances(atol=conf.eps_a, rtol=conf.eps_r,
stol=conf.eps_s, max_it=conf.i_max)
snes.setMaxFunctionEvaluations(conf.if_max)
snes.setFromOptions()
snes.solve(prhs.duplicate(), psol)
if status is not None:
status['time_stats'] = time.clock() - tt
if isinstance(vec_x0, self.petsc.Vec):
sol = psol
else:
sol = psol[...].copy()
return sol
|
[
"sfepy.base.log.get_logging_conf",
"sfepy.base.base.get_default",
"sfepy.solvers.solvers.NonlinearSolver.__init__",
"sfepy.base.base.Struct",
"sfepy.base.plotutils.plot_matrix_diff",
"sfepy.base.log.Log",
"sfepy.base.base.debug",
"sfepy.base.base.output"
] |
[((772, 792), 'numpy.zeros_like', 'nm.zeros_like', (['vec_r'], {}), '(vec_r)\n', (785, 792), True, 'import numpy as nm\n'), ((1258, 1270), 'time.clock', 'time.clock', ([], {}), '()\n', (1268, 1270), False, 'import time\n'), ((1275, 1305), 'sfepy.base.base.output', 'output', (['mtx_a', '""".. analytical"""'], {}), "(mtx_a, '.. analytical')\n", (1281, 1305), False, 'from sfepy.base.base import output, get_default, debug, Struct\n'), ((1310, 1340), 'sfepy.base.base.output', 'output', (['mtx_d', '""".. difference"""'], {}), "(mtx_d, '.. difference')\n", (1316, 1340), False, 'from sfepy.base.base import output, get_default, debug, Struct\n'), ((1384, 1471), 'sfepy.base.plotutils.plot_matrix_diff', 'plu.plot_matrix_diff', (['mtx_d', 'mtx_a', 'delta', "['difference', 'analytical']", 'conf.check'], {}), "(mtx_d, mtx_a, delta, ['difference', 'analytical'],\n conf.check)\n", (1404, 1471), True, 'import sfepy.base.plotutils as plu\n'), ((2163, 2229), 'sfepy.base.base.output', 'output', (["('nls: iter: %d, residual: %e (rel: %e)' % (it, err, err_r))"], {}), "('nls: iter: %d, residual: %e (rel: %e)' % (it, err, err_r))\n", (2169, 2229), False, 'from sfepy.base.base import output, get_default, debug, Struct\n'), ((1505, 1517), 'time.clock', 'time.clock', ([], {}), '()\n', (1515, 1517), False, 'import time\n'), ((5580, 5626), 'sfepy.solvers.solvers.NonlinearSolver.__init__', 'NonlinearSolver.__init__', (['self', 'conf'], {}), '(self, conf, **kwargs)\n', (5604, 5626), False, 'from sfepy.solvers.solvers import SolverMeta, NonlinearSolver\n'), ((5668, 5690), 'sfepy.base.log.get_logging_conf', 'get_logging_conf', (['conf'], {}), '(conf)\n', (5684, 5690), False, 'from sfepy.base.log import Log, get_logging_conf\n'), ((5716, 5746), 'sfepy.base.base.Struct', 'Struct', ([], {'name': '"""log_conf"""'}), "(name='log_conf', **log)\n", (5722, 5746), False, 'from sfepy.base.base import output, get_default, debug, Struct\n'), ((7742, 7770), 'sfepy.base.base.get_default', 'get_default', (['conf', 'self.conf'], {}), '(conf, self.conf)\n', (7753, 7770), False, 'from sfepy.base.base import output, get_default, debug, Struct\n'), ((7785, 7811), 'sfepy.base.base.get_default', 'get_default', (['fun', 'self.fun'], {}), '(fun, self.fun)\n', (7796, 7811), False, 'from sfepy.base.base import output, get_default, debug, Struct\n'), ((7831, 7867), 'sfepy.base.base.get_default', 'get_default', (['fun_grad', 'self.fun_grad'], {}), '(fun_grad, self.fun_grad)\n', (7842, 7867), False, 'from sfepy.base.base import output, get_default, debug, Struct\n'), ((7889, 7929), 'sfepy.base.base.get_default', 'get_default', (['lin_solver', 'self.lin_solver'], {}), '(lin_solver, self.lin_solver)\n', (7900, 7929), False, 'from sfepy.base.base import output, get_default, debug, Struct\n'), ((7950, 7988), 'sfepy.base.base.get_default', 'get_default', (['iter_hook', 'self.iter_hook'], {}), '(iter_hook, self.iter_hook)\n', (7961, 7988), False, 'from sfepy.base.base import output, get_default, debug, Struct\n'), ((8006, 8038), 'sfepy.base.base.get_default', 'get_default', (['status', 'self.status'], {}), '(status, self.status)\n', (8017, 8038), False, 'from sfepy.base.base import output, get_default, debug, Struct\n'), ((8112, 8138), 'sfepy.base.base.get_default', 'get_default', (['ls_eps_a', '(1.0)'], {}), '(ls_eps_a, 1.0)\n', (8123, 8138), False, 'from sfepy.base.base import output, get_default, debug, Struct\n'), ((8155, 8181), 'sfepy.base.base.get_default', 'get_default', (['ls_eps_r', '(1.0)'], {}), '(ls_eps_r, 1.0)\n', (8166, 8181), False, 'from 
sfepy.base.base import output, get_default, debug, Struct\n'), ((13476, 13522), 'sfepy.solvers.solvers.NonlinearSolver.__init__', 'NonlinearSolver.__init__', (['self', 'conf'], {}), '(self, conf, **kwargs)\n', (13500, 13522), False, 'from sfepy.solvers.solvers import SolverMeta, NonlinearSolver\n'), ((14145, 14171), 'sfepy.base.base.get_default', 'get_default', (['fun', 'self.fun'], {}), '(fun, self.fun)\n', (14156, 14171), False, 'from sfepy.base.base import output, get_default, debug, Struct\n'), ((14189, 14221), 'sfepy.base.base.get_default', 'get_default', (['status', 'self.status'], {}), '(status, self.status)\n', (14200, 14221), False, 'from sfepy.base.base import output, get_default, debug, Struct\n'), ((14236, 14248), 'time.clock', 'time.clock', ([], {}), '()\n', (14246, 14248), False, 'import time\n'), ((14820, 14837), 'numpy.asarray', 'nm.asarray', (['vec_x'], {}), '(vec_x)\n', (14830, 14837), True, 'import numpy as nm\n'), ((16672, 16768), 'sfepy.solvers.solvers.NonlinearSolver.__init__', 'NonlinearSolver.__init__', (['self', 'conf'], {'petsc': 'petsc', 'pmtx': 'pmtx', 'prhs': 'prhs', 'comm': 'comm'}), '(self, conf, petsc=petsc, pmtx=pmtx, prhs=prhs,\n comm=comm, **kwargs)\n', (16696, 16768), False, 'from sfepy.solvers.solvers import SolverMeta, NonlinearSolver\n'), ((17019, 17045), 'sfepy.base.base.get_default', 'get_default', (['fun', 'self.fun'], {}), '(fun, self.fun)\n', (17030, 17045), False, 'from sfepy.base.base import output, get_default, debug, Struct\n'), ((17065, 17101), 'sfepy.base.base.get_default', 'get_default', (['fun_grad', 'self.fun_grad'], {}), '(fun_grad, self.fun_grad)\n', (17076, 17101), False, 'from sfepy.base.base import output, get_default, debug, Struct\n'), ((17123, 17163), 'sfepy.base.base.get_default', 'get_default', (['lin_solver', 'self.lin_solver'], {}), '(lin_solver, self.lin_solver)\n', (17134, 17163), False, 'from sfepy.base.base import output, get_default, debug, Struct\n'), ((17184, 17222), 'sfepy.base.base.get_default', 'get_default', (['iter_hook', 'self.iter_hook'], {}), '(iter_hook, self.iter_hook)\n', (17195, 17222), False, 'from sfepy.base.base import output, get_default, debug, Struct\n'), ((17240, 17272), 'sfepy.base.base.get_default', 'get_default', (['status', 'self.status'], {}), '(status, self.status)\n', (17251, 17272), False, 'from sfepy.base.base import output, get_default, debug, Struct\n'), ((17288, 17316), 'sfepy.base.base.get_default', 'get_default', (['pmtx', 'self.pmtx'], {}), '(pmtx, self.pmtx)\n', (17299, 17316), False, 'from sfepy.base.base import output, get_default, debug, Struct\n'), ((17332, 17360), 'sfepy.base.base.get_default', 'get_default', (['prhs', 'self.prhs'], {}), '(prhs, self.prhs)\n', (17343, 17360), False, 'from sfepy.base.base import output, get_default, debug, Struct\n'), ((17376, 17404), 'sfepy.base.base.get_default', 'get_default', (['comm', 'self.comm'], {}), '(comm, self.comm)\n', (17387, 17404), False, 'from sfepy.base.base import output, get_default, debug, Struct\n'), ((17419, 17431), 'time.clock', 'time.clock', ([], {}), '()\n', (17429, 17431), False, 'import time\n'), ((5874, 6101), 'sfepy.base.log.Log', 'Log', (["[['$||r||$'], ['iteration']]"], {'xlabels': "['', 'all iterations']", 'ylabels': "['$||r||$', 'iteration']", 'yscales': "['log', 'linear']", 'is_plot': '(conf.log.plot is not None)', 'log_filename': 'conf.log.text', 'formats': "[['%.8e'], ['%d']]"}), "([['$||r||$'], ['iteration']], xlabels=['', 'all iterations'], ylabels=[\n '$||r||$', 'iteration'], yscales=['log', 'linear'], 
is_plot=conf.log.\n plot is not None, log_filename=conf.log.text, formats=[['%.8e'], ['%d']])\n", (5877, 6101), False, 'from sfepy.base.log import Log, get_logging_conf\n'), ((10844, 10856), 'time.clock', 'time.clock', ([], {}), '()\n', (10854, 10856), False, 'import time\n'), ((11654, 11666), 'time.clock', 'time.clock', ([], {}), '()\n', (11664, 11666), False, 'import time\n'), ((12058, 12073), 'numpy.linalg.norm', 'nla.norm', (['vec_e'], {}), '(vec_e)\n', (12066, 12073), True, 'import numpy.linalg as nla\n'), ((3415, 3435), 'numpy.finfo', 'nm.finfo', (['nm.float64'], {}), '(nm.float64)\n', (3423, 3435), True, 'import numpy as nm\n'), ((8701, 8713), 'time.clock', 'time.clock', ([], {}), '()\n', (8711, 8713), False, 'import time\n'), ((11030, 11042), 'time.clock', 'time.clock', ([], {}), '()\n', (11040, 11042), False, 'import time\n'), ((11097, 11109), 'time.clock', 'time.clock', ([], {}), '()\n', (11107, 11109), False, 'import time\n'), ((11601, 11635), 'sfepy.base.base.output', 'output', (['"""solving linear system..."""'], {}), "('solving linear system...')\n", (11607, 11635), False, 'from sfepy.base.base import output, get_default, debug, Struct\n'), ((11819, 11831), 'time.clock', 'time.clock', ([], {}), '()\n', (11829, 11831), False, 'import time\n'), ((11883, 11900), 'sfepy.base.base.output', 'output', (['"""...done"""'], {}), "('...done')\n", (11889, 11900), False, 'from sfepy.base.base import output, get_default, debug, Struct\n'), ((11964, 11994), 'sfepy.base.base.output', 'output', (["('%10s: %7.2f [s]' % kv)"], {}), "('%10s: %7.2f [s]' % kv)\n", (11970, 11994), False, 'from sfepy.base.base import output, get_default, debug, Struct\n'), ((12121, 12181), 'sfepy.base.base.output', 'output', (['"""warning: linear system solution precision is lower"""'], {}), "('warning: linear system solution precision is lower')\n", (12127, 12181), False, 'from sfepy.base.base import output, get_default, debug, Struct\n'), ((12198, 12283), 'sfepy.base.base.output', 'output', (["('then the value set in solver options! (err = %e < %e)' % (lerr, lin_red))"], {}), "('then the value set in solver options! (err = %e < %e)' % (lerr,\n lin_red))\n", (12204, 12283), False, 'from sfepy.base.base import output, get_default, debug, Struct\n'), ((13730, 13785), 'sfepy.base.base.output', 'output', (["('scipy solver %s does not exist!' % conf.method)"], {}), "('scipy solver %s does not exist!' 
% conf.method)\n", (13736, 13785), False, 'from sfepy.base.base import output, get_default, debug, Struct\n'), ((13798, 13830), 'sfepy.base.base.output', 'output', (['"""using broyden3 instead"""'], {}), "('using broyden3 instead')\n", (13804, 13830), False, 'from sfepy.base.base import output, get_default, debug, Struct\n'), ((14905, 14917), 'time.clock', 'time.clock', ([], {}), '()\n', (14915, 14917), False, 'import time\n'), ((16481, 16498), 'petsc4py.init', 'petsc4py.init', (['[]'], {}), '([])\n', (16494, 16498), False, 'import petsc4py\n'), ((18313, 18325), 'time.clock', 'time.clock', ([], {}), '()\n', (18323, 18325), False, 'import time\n'), ((9099, 9111), 'time.clock', 'time.clock', ([], {}), '()\n', (9109, 9111), False, 'import time\n'), ((9705, 9812), 'sfepy.base.base.output', 'output', (["('linesearch: iter %d, (%.5e < %.5e) (new ls: %e)' % (it, err, err_last *\n conf.ls_on, red * ls))"], {}), "('linesearch: iter %d, (%.5e < %.5e) (new ls: %e)' % (it, err, \n err_last * conf.ls_on, red * ls))\n", (9711, 9812), False, 'from sfepy.base.base import output, get_default, debug, Struct\n'), ((10050, 10135), 'sfepy.base.base.output', 'output', (["('rezidual computation failed for iter %d (new ls: %e)!' % (it, red * ls))"], {}), "('rezidual computation failed for iter %d (new ls: %e)!' % (it, red * ls)\n )\n", (10056, 10135), False, 'from sfepy.base.base import output, get_default, debug, Struct\n'), ((10219, 10265), 'sfepy.base.base.output', 'output', (['"""linesearch failed, continuing anyway"""'], {}), "('linesearch failed, continuing anyway')\n", (10225, 10265), False, 'from sfepy.base.base import output, get_default, debug, Struct\n'), ((9195, 9210), 'numpy.linalg.norm', 'nla.norm', (['vec_r'], {}), '(vec_r)\n', (9203, 9210), True, 'import numpy.linalg as nla\n'), ((9934, 9954), 'sfepy.base.base.output', 'output', (['"""giving up!"""'], {}), "('giving up!')\n", (9940, 9954), False, 'from sfepy.base.base import output, get_default, debug, Struct\n'), ((11218, 11230), 'time.clock', 'time.clock', ([], {}), '()\n', (11228, 11230), False, 'import time\n'), ((8891, 8911), 'sfepy.base.base.output', 'output', (['"""giving up!"""'], {}), "('giving up!')\n", (8897, 8911), False, 'from sfepy.base.base import output, get_default, debug, Struct\n'), ((9263, 9309), 'sfepy.base.base.output', 'output', (['"""infs or nans in the residual:"""', 'vec_r'], {}), "('infs or nans in the residual:', vec_r)\n", (9269, 9309), False, 'from sfepy.base.base import output, get_default, debug, Struct\n'), ((9391, 9398), 'sfepy.base.base.debug', 'debug', ([], {}), '()\n', (9396, 9398), False, 'from sfepy.base.base import output, get_default, debug, Struct\n'), ((9341, 9359), 'numpy.isfinite', 'nm.isfinite', (['vec_r'], {}), '(vec_r)\n', (9352, 9359), True, 'import numpy as nm\n')]
|
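The Newton class above implements a damped Newton iteration: evaluate the residual, backtrack the step when the residual does not drop (ls_on, ls_red, ls_min), test absolute and relative convergence (eps_a, eps_r) as in conv_test, then solve a linear system with the tangent matrix. A self-contained numpy sketch of the same control flow, for illustration only (it is not sfepy code and omits logging, timings and the warp-violation handling):

import numpy as np

def damped_newton(fun, fun_grad, x0, i_max=20, eps_a=1e-10, eps_r=1.0,
                  ls_on=0.99999, ls_red=0.1, ls_min=1e-5):
    x = np.asarray(x0, dtype=float).copy()
    err = err0 = np.linalg.norm(fun(x))
    for it in range(i_max):
        if (err < eps_a) and (err < eps_r * err0):
            break                              # both tolerances met
        dx = np.linalg.solve(fun_grad(x), fun(x))   # Newton correction
        ls, err_last = 1.0, err
        while True:                            # backtracking line search
            x_new = x - ls * dx
            err = np.linalg.norm(fun(x_new))
            if (err < err_last * ls_on) or (ls < ls_min):
                break
            ls *= ls_red                       # shorten the step and retry
        x = x_new
    return x

# Example: intersection of the circle x^2 + y^2 = 4 with the line y = x.
fun = lambda v: np.array([v[0]**2 + v[1]**2 - 4.0, v[0] - v[1]])
jac = lambda v: np.array([[2.0 * v[0], 2.0 * v[1]], [1.0, -1.0]])
print(damped_newton(fun, jac, np.array([1.0, 0.5])))  # ~ [1.4142, 1.4142]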
from typing import List, Dict
from fastapi import APIRouter, Depends, HTTPException, Query
from sqlmodel import Session, select
from app import models
from app.api import deps
router = APIRouter()
@router.post("/", response_model=models.TeamRead)
def create_team(
*, session: Session = Depends(deps.get_session), team: models.TeamCreate
) -> models.Team:
db_team = models.Team.from_orm(team)
session.add(db_team)
session.commit()
session.refresh(db_team)
return db_team
@router.get("/", response_model=List[models.TeamRead])
def read_teams(
*,
session: Session = Depends(deps.get_session),
offset: int = 0,
limit: int = Query(default=100, lte=100),
) -> List[models.Team]:
teams = session.exec(select(models.Team).offset(offset).limit(limit)).all()
return teams
@router.get("/{team_id}", response_model=models.TeamReadWithHeroes)
def read_team(
*, team_id: int, session: Session = Depends(deps.get_session)
) -> models.Team:
team = session.get(models.Team, team_id)
if not team:
raise HTTPException(status_code=404, detail="Team not found")
return team
@router.patch("/{team_id}", response_model=models.TeamRead)
def update_team(
*,
session: Session = Depends(deps.get_session),
team_id: int,
team: models.TeamUpdate,
) -> models.Team:
db_team = session.get(models.Team, team_id)
if not db_team:
raise HTTPException(status_code=404, detail="Team not found")
team_data = team.dict(exclude_unset=True)
for key, value in team_data.items():
setattr(db_team, key, value)
session.add(db_team)
session.commit()
session.refresh(db_team)
return db_team
@router.delete("/teams/{team_id}")
def delete_team(
*, session: Session = Depends(deps.get_session), team_id: int
) -> Dict[str, bool]:
team = session.get(models.Team, team_id)
if not team:
raise HTTPException(status_code=404, detail="Team not found")
session.delete(team)
session.commit()
return {"ok": True}
|
[
"sqlmodel.select"
] |
[((198, 209), 'fastapi.APIRouter', 'APIRouter', ([], {}), '()\n', (207, 209), False, 'from fastapi import APIRouter, Depends, HTTPException, Query\n'), ((310, 335), 'fastapi.Depends', 'Depends', (['deps.get_session'], {}), '(deps.get_session)\n', (317, 335), False, 'from fastapi import APIRouter, Depends, HTTPException, Query\n'), ((395, 421), 'app.models.Team.from_orm', 'models.Team.from_orm', (['team'], {}), '(team)\n', (415, 421), False, 'from app import models\n'), ((629, 654), 'fastapi.Depends', 'Depends', (['deps.get_session'], {}), '(deps.get_session)\n', (636, 654), False, 'from fastapi import APIRouter, Depends, HTTPException, Query\n'), ((696, 723), 'fastapi.Query', 'Query', ([], {'default': '(100)', 'lte': '(100)'}), '(default=100, lte=100)\n', (701, 723), False, 'from fastapi import APIRouter, Depends, HTTPException, Query\n'), ((979, 1004), 'fastapi.Depends', 'Depends', (['deps.get_session'], {}), '(deps.get_session)\n', (986, 1004), False, 'from fastapi import APIRouter, Depends, HTTPException, Query\n'), ((1291, 1316), 'fastapi.Depends', 'Depends', (['deps.get_session'], {}), '(deps.get_session)\n', (1298, 1316), False, 'from fastapi import APIRouter, Depends, HTTPException, Query\n'), ((1837, 1862), 'fastapi.Depends', 'Depends', (['deps.get_session'], {}), '(deps.get_session)\n', (1844, 1862), False, 'from fastapi import APIRouter, Depends, HTTPException, Query\n'), ((1103, 1158), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': '(404)', 'detail': '"""Team not found"""'}), "(status_code=404, detail='Team not found')\n", (1116, 1158), False, 'from fastapi import APIRouter, Depends, HTTPException, Query\n'), ((1471, 1526), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': '(404)', 'detail': '"""Team not found"""'}), "(status_code=404, detail='Team not found')\n", (1484, 1526), False, 'from fastapi import APIRouter, Depends, HTTPException, Query\n'), ((1979, 2034), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': '(404)', 'detail': '"""Team not found"""'}), "(status_code=404, detail='Team not found')\n", (1992, 2034), False, 'from fastapi import APIRouter, Depends, HTTPException, Query\n'), ((776, 795), 'sqlmodel.select', 'select', (['models.Team'], {}), '(models.Team)\n', (782, 795), False, 'from sqlmodel import Session, select\n')]
|
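The update_team handler in the row above relies on the usual partial-update pattern: team.dict(exclude_unset=True) returns only the fields the client actually sent, so columns omitted from the PATCH body keep their stored values. A tiny illustration - the field names of TeamUpdate are assumptions, since app.models is not part of this row:

from typing import Optional
from sqlmodel import SQLModel

class TeamUpdate(SQLModel):                 # hypothetical all-optional PATCH model
    name: Optional[str] = None
    headquarters: Optional[str] = None

patch = TeamUpdate(name='Preventers')
print(patch.dict(exclude_unset=True))       # {'name': 'Preventers'}; headquarters is left alone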
from minio import Minio
import os
from typing import Optional
from glob import glob
import pathlib
from sqlmodel import Field, Session, SQLModel, create_engine
def get_images(folder:str="../cls_labeling/public/images"):
return glob(str(pathlib.Path(folder,"**","*.jpg")), recursive=True)
class Image(SQLModel, table=True):
key: Optional[int] = Field(default=None, primary_key=True)
image_name: str
label: str
image_url: str
if __name__ == "__main__":
engine = create_engine("sqlite:///image.db")
client = Minio(
"localhost:9001",
secure=False,
access_key="<KEY>",
secret_key="<KEY>"
)
bucket_found = client.bucket_exists("image")
if not bucket_found:
client.make_bucket("image")
else:
for obj in client.list_objects("image"):
client.remove_object("image", obj.object_name)
client.remove_bucket("image")
client.make_bucket("image")
os.remove("./image.db")
SQLModel.metadata.create_all(engine)
images = []
for i, image in enumerate(get_images()):
print(pathlib.Path(image).stem, image)
image_name = pathlib.Path(image).stem+'.jpg'
client.fput_object(
"image", image_name, image
)
image_url = f"http://localhost:9001/image/{image_name}"
images.append(
Image(key=i, image_name=pathlib.Path(image).stem, label="", image_url=image_url)
)
with Session(engine) as session:
for data in images: session.add(data)
session.commit()
|
[
"sqlmodel.SQLModel.metadata.create_all",
"sqlmodel.Session",
"sqlmodel.Field",
"sqlmodel.create_engine"
] |
[((355, 392), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (360, 392), False, 'from sqlmodel import Field, Session, SQLModel, create_engine\n'), ((489, 524), 'sqlmodel.create_engine', 'create_engine', (['"""sqlite:///image.db"""'], {}), "('sqlite:///image.db')\n", (502, 524), False, 'from sqlmodel import Field, Session, SQLModel, create_engine\n'), ((538, 615), 'minio.Minio', 'Minio', (['"""localhost:9001"""'], {'secure': '(False)', 'access_key': '"""<KEY>"""', 'secret_key': '"""<KEY>"""'}), "('localhost:9001', secure=False, access_key='<KEY>', secret_key='<KEY>')\n", (543, 615), False, 'from minio import Minio\n'), ((994, 1030), 'sqlmodel.SQLModel.metadata.create_all', 'SQLModel.metadata.create_all', (['engine'], {}), '(engine)\n', (1022, 1030), False, 'from sqlmodel import Field, Session, SQLModel, create_engine\n'), ((965, 988), 'os.remove', 'os.remove', (['"""./image.db"""'], {}), "('./image.db')\n", (974, 988), False, 'import os\n'), ((1473, 1488), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (1480, 1488), False, 'from sqlmodel import Field, Session, SQLModel, create_engine\n'), ((241, 276), 'pathlib.Path', 'pathlib.Path', (['folder', '"""**"""', '"""*.jpg"""'], {}), "(folder, '**', '*.jpg')\n", (253, 276), False, 'import pathlib\n'), ((1108, 1127), 'pathlib.Path', 'pathlib.Path', (['image'], {}), '(image)\n', (1120, 1127), False, 'import pathlib\n'), ((1162, 1181), 'pathlib.Path', 'pathlib.Path', (['image'], {}), '(image)\n', (1174, 1181), False, 'import pathlib\n'), ((1396, 1415), 'pathlib.Path', 'pathlib.Path', (['image'], {}), '(image)\n', (1408, 1415), False, 'import pathlib\n')]
|
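Once the loop above has committed, the labelled rows can be read back with a plain select; a short sketch reusing the engine and Image model defined in the same snippet:

from sqlmodel import Session, select

with Session(engine) as session:             # engine and Image as defined above
    for img in session.exec(select(Image)).all():
        print(img.key, img.image_name, img.image_url)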
#!/usr/bin/env mdl
# This file will seal the nms opr within a better way than lib_nms
import ctypes
import os
import struct
import numpy as np
import megengine as mge
import megengine.functional as F
from megengine._internal.craniotome import CraniotomeBase
from megengine.core.tensor import wrap_io_tensor
_current_path = os.path.dirname(os.path.abspath(__file__))
_so_path = os.path.join(_current_path, "lib_nms.so")
try:
_so_lib = ctypes.CDLL(_so_path)
except Exception:
import subprocess
mge_path = os.path.join(os.path.dirname(mge.__file__), "_internal", "include")
assert os.path.exists(mge_path), "{} file not found".format(mge_path)
src_file = os.path.join(_current_path, "gpu_nms", "nms.cu")
assert os.path.exists(src_file), "{} file not found".format(src_file)
cmd = (
"nvcc -I {} -shared -o {} -Xcompiler '-fno-strict-aliasing -fPIC' {}".format(
mge_path, _so_path, src_file
)
)
subprocess.check_call(cmd, shell=True)
_so_lib = ctypes.CDLL(_so_path)
_TYPE_POINTER = ctypes.c_void_p
_TYPE_INT = ctypes.c_int32
_TYPE_FLOAT = ctypes.c_float
_so_lib.NMSForwardGpu.argtypes = [
_TYPE_POINTER,
_TYPE_POINTER,
_TYPE_POINTER,
_TYPE_POINTER,
_TYPE_FLOAT,
_TYPE_INT,
_TYPE_POINTER,
]
_so_lib.NMSForwardGpu.restype = _TYPE_INT
_so_lib.CreateHostDevice.restype = _TYPE_POINTER
class NMSCran(CraniotomeBase):
__nr_inputs__ = 1
__nr_outputs__ = 3
def setup(self, iou_threshold, max_output):
self._iou_threshold = iou_threshold
self._max_output = max_output
# Load the necessary host device
self._host_device = _so_lib.CreateHostDevice()
def execute(self, inputs, outputs):
box_tensor_ptr = inputs[0].pubapi_dev_tensor_ptr
output_tensor_ptr = outputs[0].pubapi_dev_tensor_ptr
output_num_tensor_ptr = outputs[1].pubapi_dev_tensor_ptr
mask_tensor_ptr = outputs[2].pubapi_dev_tensor_ptr
_so_lib.NMSForwardGpu(
box_tensor_ptr,
mask_tensor_ptr,
output_tensor_ptr,
output_num_tensor_ptr,
self._iou_threshold,
self._max_output,
self._host_device,
)
def grad(self, wrt_idx, inputs, outputs, out_grad):
return 0
def init_output_dtype(self, input_dtypes):
return [np.int32, np.int32, np.int32]
def get_serialize_params(self):
return ("nms", struct.pack("fi", self._iou_threshold, self._max_output))
def infer_shape(self, inp_shapes):
nr_box = inp_shapes[0][0]
threadsPerBlock = 64
output_size = nr_box
# here we compute the number of int32 used in mask_outputs.
# In original version, we compute the bytes only.
mask_size = int(
nr_box
* (nr_box // threadsPerBlock + int((nr_box % threadsPerBlock) > 0))
* 8
/ 4
)
return [[output_size], [1], [mask_size]]
@wrap_io_tensor
def gpu_nms(box, iou_threshold, max_output):
keep, num, _ = NMSCran.make(box, iou_threshold=iou_threshold, max_output=max_output)
return keep[:num]
def batched_nms(boxes, scores, idxs, iou_threshold, num_keep, use_offset=False):
if use_offset:
boxes_offset = (
mge.tensor([0, 0, 1, 1], device=boxes.device)
.reshape(1, 4)
.broadcast(boxes.shapeof(0), 4)
)
boxes = boxes - boxes_offset
max_coordinate = boxes.max()
offsets = idxs * (max_coordinate + 1)
boxes_for_nms = boxes + offsets.reshape(-1, 1).broadcast(boxes.shapeof(0), 4)
boxes_with_scores = F.concat([boxes_for_nms, scores.reshape(-1, 1)], axis=1)
keep_inds = gpu_nms(boxes_with_scores, iou_threshold, num_keep)
return keep_inds
|
[
"megengine.tensor"
] |
[((380, 421), 'os.path.join', 'os.path.join', (['_current_path', '"""lib_nms.so"""'], {}), "(_current_path, 'lib_nms.so')\n", (392, 421), False, 'import os\n'), ((342, 367), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (357, 367), False, 'import os\n'), ((441, 462), 'ctypes.CDLL', 'ctypes.CDLL', (['_so_path'], {}), '(_so_path)\n', (452, 462), False, 'import ctypes\n'), ((597, 621), 'os.path.exists', 'os.path.exists', (['mge_path'], {}), '(mge_path)\n', (611, 621), False, 'import os\n'), ((675, 723), 'os.path.join', 'os.path.join', (['_current_path', '"""gpu_nms"""', '"""nms.cu"""'], {}), "(_current_path, 'gpu_nms', 'nms.cu')\n", (687, 723), False, 'import os\n'), ((735, 759), 'os.path.exists', 'os.path.exists', (['src_file'], {}), '(src_file)\n', (749, 759), False, 'import os\n'), ((958, 996), 'subprocess.check_call', 'subprocess.check_call', (['cmd'], {'shell': '(True)'}), '(cmd, shell=True)\n', (979, 996), False, 'import subprocess\n'), ((1011, 1032), 'ctypes.CDLL', 'ctypes.CDLL', (['_so_path'], {}), '(_so_path)\n', (1022, 1032), False, 'import ctypes\n'), ((531, 560), 'os.path.dirname', 'os.path.dirname', (['mge.__file__'], {}), '(mge.__file__)\n', (546, 560), False, 'import os\n'), ((2486, 2542), 'struct.pack', 'struct.pack', (['"""fi"""', 'self._iou_threshold', 'self._max_output'], {}), "('fi', self._iou_threshold, self._max_output)\n", (2497, 2542), False, 'import struct\n'), ((3330, 3375), 'megengine.tensor', 'mge.tensor', (['[0, 0, 1, 1]'], {'device': 'boxes.device'}), '([0, 0, 1, 1], device=boxes.device)\n', (3340, 3375), True, 'import megengine as mge\n')]
|
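In batched_nms above, adding idxs * (max_coordinate + 1) to every box shifts each class into its own coordinate range, so boxes of different classes can never overlap and a single NMS call effectively runs per class. For sanity-checking gpu_nms on small inputs, a pure-numpy version of the greedy IoU suppression that such a kernel implements (the coordinate convention, with no +1 on widths, is an assumption and may differ from the CUDA code):

import numpy as np

def nms_reference(boxes_with_scores, iou_threshold):
    boxes = boxes_with_scores[:, :4]
    order = boxes_with_scores[:, 4].argsort()[::-1]          # best score first
    areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(i)                                       # keep the best remaining box
        xx1 = np.maximum(boxes[i, 0], boxes[order[1:], 0])
        yy1 = np.maximum(boxes[i, 1], boxes[order[1:], 1])
        xx2 = np.minimum(boxes[i, 2], boxes[order[1:], 2])
        yy2 = np.minimum(boxes[i, 3], boxes[order[1:], 3])
        inter = np.clip(xx2 - xx1, 0, None) * np.clip(yy2 - yy1, 0, None)
        iou = inter / (areas[i] + areas[order[1:]] - inter)
        order = order[1:][iou <= iou_threshold]              # drop strong overlaps
    return np.array(keep)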
from __future__ import absolute_import
import numpy as nm
from sfepy.base.base import assert_
from sfepy.linalg import norm_l2_along_axis as norm
from sfepy.linalg import dot_sequences, insert_strided_axis
from sfepy.discrete.fem.poly_spaces import PolySpace
from sfepy.discrete.fem.mappings import VolumeMapping
from sfepy.mechanics.tensors import dim2sym
from six.moves import range
def create_transformation_matrix(coors):
"""
Create a transposed coordinate transformation matrix, that
transforms 3D coordinates of element face nodes so that the
transformed nodes are in the `x-y` plane. The rotation is performed
w.r.t. the first node of each face.
Parameters
----------
coors : array
The coordinates of element nodes, shape `(n_el, n_ep, dim)`.
Returns
-------
mtx_t : array
The transposed transformation matrix :math:`T`, i.e.
:math:`X_{inplane} = T^T X_{3D}`.
Notes
-----
:math:`T = [t_1, t_2, n]`, where :math:`t_1`, :math:`t_2`, are unit
in-plane (column) vectors and :math:`n` is the unit normal vector,
all mutually orthonormal.
"""
# Local coordinate system.
t1 = coors[:, 1, :] - coors[:, 0, :]
t2 = coors[:, -1, :] - coors[:, 0, :]
n = nm.cross(t1, t2)
t2 = nm.cross(n, t1)
t1 = t1 / norm(t1)[:, None]
t2 = t2 / norm(t2)[:, None]
n = n / norm(n)[:, None]
# Coordinate transformation matrix (transposed!).
mtx_t = nm.concatenate((t1[:, :, None],
t2[:, :, None],
n[:, :, None]), axis=2)
return mtx_t
def transform_asm_vectors(out, mtx_t):
"""
Transform vector assembling contributions to global coordinate system, one
node at a time.
Parameters
----------
out : array
The array of vectors, transformed in-place.
mtx_t : array
The transposed transformation matrix :math:`T`, see
:func:`create_transformation_matrix`.
"""
n_ep = out.shape[2] // mtx_t.shape[2]
for iep in range(n_ep):
ir = slice(iep, None, n_ep)
fn = out[:, 0, ir, 0]
fn[:] = dot_sequences(mtx_t, fn, 'AB')
def transform_asm_matrices(out, mtx_t):
"""
Transform matrix assembling contributions to global coordinate system, one
node at a time.
Parameters
----------
out : array
The array of matrices, transformed in-place.
mtx_t : array
The transposed transformation matrix :math:`T`, see
:func:`create_transformation_matrix`.
"""
n_ep = out.shape[-1] // mtx_t.shape[-1]
for iepr in range(n_ep):
ir = slice(iepr, None, n_ep)
for iepc in range(n_ep):
ic = slice(iepc, None, n_ep)
fn = out[:, 0, ir, ic]
fn[:] = dot_sequences(dot_sequences(mtx_t, fn, 'AB'), mtx_t, 'ABT')
def create_mapping(coors, gel, order):
"""
Create mapping from transformed (in `x-y` plane) element faces to
reference element faces.
Parameters
----------
coors : array
The transformed coordinates of element nodes, shape `(n_el,
n_ep, dim)`. The function verifies that the all `z` components
are zero.
gel : GeometryElement instance
The geometry element corresponding to the faces.
order : int
The polynomial order of the mapping.
Returns
-------
mapping : VolumeMapping instance
The reference element face mapping.
"""
# Strip 'z' component (should be 0 now...).
assert_(nm.allclose(coors[:, :, -1], 0.0, rtol=1e-12, atol=1e-12))
coors = coors[:, :, :-1].copy()
# Mapping from transformed element to reference element.
sh = coors.shape
seq_coors = coors.reshape((sh[0] * sh[1], sh[2]))
seq_conn = nm.arange(seq_coors.shape[0], dtype=nm.int32)
seq_conn.shape = sh[:2]
mapping = VolumeMapping(seq_coors, seq_conn, gel=gel, order=1)
return mapping
def describe_geometry(field, region, integral):
"""
Describe membrane geometry in a given region.
Parameters
----------
field : Field instance
The field defining the FE approximation.
region : Region instance
The surface region to describe.
integral : Integral instance
The integral defining the quadrature points.
Returns
-------
mtx_t : array
The transposed transformation matrix :math:`T`, see
:func:`create_transformation_matrix`.
membrane_geo : CMapping instance
The mapping from transformed elements to a reference elements.
"""
# Coordinates of element vertices.
sg, _ = field.get_mapping(region, integral, 'surface')
sd = field.surface_data[region.name]
coors = field.coors[sd.econn[:, :sg.n_ep]]
# Coordinate transformation matrix (transposed!).
mtx_t = create_transformation_matrix(coors)
# Transform coordinates to the local coordinate system.
coors_loc = dot_sequences((coors - coors[:, 0:1, :]), mtx_t)
# Mapping from transformed elements to reference elements.
gel = field.gel.surface_facet
vm = create_mapping(coors_loc, gel, 1)
qp = integral.get_qp(gel.name)
ps = PolySpace.any_from_args(None, gel, field.approx_order)
membrane_geo = vm.get_mapping(qp[0], qp[1], poly_space=ps)
membrane_geo.bf[:] = ps.eval_base(qp[0])
return mtx_t, membrane_geo
def describe_deformation(el_disps, bfg):
"""
Describe deformation of a thin incompressible 2D membrane in 3D
space, composed of flat finite element faces.
The coordinate system of each element (face), i.e. the membrane
mid-surface, should coincide with the `x`, `y` axes of the `x-y`
plane.
Parameters
----------
el_disps : array
The displacements of element nodes, shape `(n_el, n_ep, dim)`.
bfg : array
The in-plane base function gradients, shape `(n_el, n_qp, dim-1,
n_ep)`.
Returns
-------
    mtx_c : array
The in-plane right Cauchy-Green deformation tensor
:math:`C_{ij}`, :math:`i, j = 1, 2`.
c33 : array
The component :math:`C_{33}` computed from the incompressibility
condition.
mtx_b : array
The discrete Green strain variation operator.
"""
sh = bfg.shape
n_ep = sh[3]
dim = el_disps.shape[2]
sym2 = dim2sym(dim-1)
# Repeat el_disps by number of quadrature points.
el_disps_qp = insert_strided_axis(el_disps, 1, bfg.shape[1])
# Transformed (in-plane) displacement gradient with
# shape (n_el, n_qp, 2 (-> a), 3 (-> i)), du_i/dX_a.
du = dot_sequences(bfg, el_disps_qp)
# Deformation gradient F w.r.t. in plane coordinates.
# F_{ia} = dx_i / dX_a,
# a \in {1, 2} (rows), i \in {1, 2, 3} (columns).
mtx_f = du + nm.eye(dim - 1, dim, dtype=du.dtype)
# Right Cauchy-Green deformation tensor C.
# C_{ab} = F_{ka} F_{kb}, a, b \in {1, 2}.
mtx_c = dot_sequences(mtx_f, mtx_f, 'ABT')
# C_33 from incompressibility.
c33 = 1.0 / (mtx_c[..., 0, 0] * mtx_c[..., 1, 1]
- mtx_c[..., 0, 1]**2)
# Discrete Green strain variation operator.
mtx_b = nm.empty((sh[0], sh[1], sym2, dim * n_ep), dtype=nm.float64)
mtx_b[..., 0, 0*n_ep:1*n_ep] = bfg[..., 0, :] * mtx_f[..., 0, 0:1]
mtx_b[..., 0, 1*n_ep:2*n_ep] = bfg[..., 0, :] * mtx_f[..., 0, 1:2]
mtx_b[..., 0, 2*n_ep:3*n_ep] = bfg[..., 0, :] * mtx_f[..., 0, 2:3]
mtx_b[..., 1, 0*n_ep:1*n_ep] = bfg[..., 1, :] * mtx_f[..., 1, 0:1]
mtx_b[..., 1, 1*n_ep:2*n_ep] = bfg[..., 1, :] * mtx_f[..., 1, 1:2]
mtx_b[..., 1, 2*n_ep:3*n_ep] = bfg[..., 1, :] * mtx_f[..., 1, 2:3]
mtx_b[..., 2, 0*n_ep:1*n_ep] = bfg[..., 1, :] * mtx_f[..., 0, 0:1] \
+ bfg[..., 0, :] * mtx_f[..., 1, 0:1]
mtx_b[..., 2, 1*n_ep:2*n_ep] = bfg[..., 0, :] * mtx_f[..., 1, 1:2] \
+ bfg[..., 1, :] * mtx_f[..., 0, 1:2]
mtx_b[..., 2, 2*n_ep:3*n_ep] = bfg[..., 0, :] * mtx_f[..., 1, 2:3] \
+ bfg[..., 1, :] * mtx_f[..., 0, 2:3]
return mtx_c, c33, mtx_b
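# --- Illustrative usage sketch (added for clarity; not part of the original
# module). It only exercises the shape conventions documented above and
# assumes the module-level imports used in this file (numpy as nm and the
# sfepy helpers): with zero base function gradients the deformation gradient
# stays the identity, so C = I and C_33 = 1.
def _example_describe_deformation_shapes():
    n_el, n_qp, n_ep, dim = 4, 2, 3, 3
    el_disps = nm.zeros((n_el, n_ep, dim), dtype=nm.float64)
    bfg = nm.zeros((n_el, n_qp, dim - 1, n_ep), dtype=nm.float64)
    mtx_c, c33, mtx_b = describe_deformation(el_disps, bfg)
    # shapes: (n_el, n_qp, 2, 2), (n_el, n_qp), (n_el, n_qp, 3, dim * n_ep)
    return mtx_c.shape, c33.shape, mtx_b.shape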
def get_tangent_stress_matrix(stress, bfg):
"""
Get the tangent stress matrix of a thin incompressible 2D membrane
in 3D space, given a stress.
Parameters
----------
stress : array
The components `11, 22, 12` of the second Piola-Kirchhoff stress
tensor, shape `(n_el, n_qp, 3, 1)`.
bfg : array
The in-plane base function gradients, shape `(n_el, n_qp, dim-1,
n_ep)`.
Returns
-------
mtx : array
The tangent stress matrix, shape `(n_el, n_qp, dim*n_ep, dim*n_ep)`.
"""
n_el, n_qp, dim, n_ep = bfg.shape
dim += 1
mtx = nm.zeros((n_el, n_qp, dim * n_ep, dim * n_ep), dtype=nm.float64)
g1tg1 = dot_sequences(bfg[..., 0:1, :], bfg[..., 0:1, :], 'ATB')
g1tg2 = dot_sequences(bfg[..., 0:1, :], bfg[..., 1:2, :], 'ATB')
g2tg1 = dot_sequences(bfg[..., 1:2, :], bfg[..., 0:1, :], 'ATB')
g2tg2 = dot_sequences(bfg[..., 1:2, :], bfg[..., 1:2, :], 'ATB')
aux = stress[..., 0:1, :] * g1tg1 + stress[..., 2:3, :] * g1tg2 \
+ stress[..., 2:3, :] * g2tg1 + stress[..., 1:2, :] * g2tg2
mtx[..., 0 * n_ep : 1 * n_ep, 0 * n_ep : 1 * n_ep] = aux
mtx[..., 1 * n_ep : 2 * n_ep, 1 * n_ep : 2 * n_ep] = aux
mtx[..., 2 * n_ep : 3 * n_ep, 2 * n_ep : 3 * n_ep] = aux
return mtx
def get_invariants(mtx_c, c33):
"""
Get the first and second invariants of the right Cauchy-Green
deformation tensor describing deformation of an incompressible
membrane.
Parameters
----------
    mtx_c : array
The in-plane right Cauchy-Green deformation tensor
:math:`C_{ij}`, :math:`i, j = 1, 2`, shape `(n_el, n_qp, dim-1,
dim-1)`.
c33 : array
The component :math:`C_{33}` computed from the incompressibility
condition, shape `(n_el, n_qp)`.
Returns
-------
i1 : array
The first invariant of :math:`C_{ij}`.
i2 : array
The second invariant of :math:`C_{ij}`.
"""
i1 = mtx_c[..., 0, 0] + mtx_c[..., 1, 1] + c33
i2 = mtx_c[..., 0, 0] * mtx_c[..., 1,1] \
+ mtx_c[..., 1, 1] * c33 \
+ mtx_c[..., 0, 0] * c33 \
- mtx_c[..., 0, 1]**2
return i1, i2
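# --- Illustrative check (added for clarity; not part of the original module),
# assuming the module-level `import numpy as nm`: for the undeformed state
# C = I (and C_33 = 1 from incompressibility), both invariants equal 3.
def _example_invariants_undeformed():
    mtx_c = nm.tile(nm.eye(2), (1, 1, 1, 1))  # shape (n_el, n_qp, 2, 2)
    c33 = nm.ones(mtx_c.shape[:2])
    i1, i2 = get_invariants(mtx_c, c33)
    assert nm.allclose(i1, 3.0) and nm.allclose(i2, 3.0)
    return i1, i2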
def get_green_strain_sym3d(mtx_c, c33):
r"""
Get the 3D Green strain tensor in symmetric storage.
Parameters
----------
    mtx_c : array
The in-plane right Cauchy-Green deformation tensor
:math:`C_{ij}`, :math:`i, j = 1, 2`, shape `(n_el, n_qp, dim-1,
dim-1)`.
c33 : array
The component :math:`C_{33}` computed from the incompressibility
condition, shape `(n_el, n_qp)`.
Returns
-------
mtx_e : array
        The membrane Green strain :math:`E_{ij} = \frac{1}{2} (C_{ij} -
        \delta_{ij})`, symmetric storage: items (11, 22, 33, 12, 13, 23),
shape `(n_el, n_qp, sym, 1)`.
"""
n_el, n_qp, dm, _ = mtx_c.shape
dim = dm + 1
sym = dim2sym(dim)
mtx_e = nm.empty((n_el, n_qp, sym, 1), dtype=mtx_c.dtype)
mtx_e[..., 0, 0] = 0.5 * (mtx_c[..., 0, 0] - 1.0)
mtx_e[..., 1, 0] = 0.5 * (mtx_c[..., 1, 1] - 1.0)
mtx_e[..., 2, 0] = 0.5 * (c33 - 1.0)
mtx_e[..., 3, 0] = 0.5 * mtx_c[..., 0, 1]
mtx_e[..., 4:, 0] = 0.0
return mtx_e
|
[
"sfepy.linalg.norm_l2_along_axis",
"sfepy.discrete.fem.poly_spaces.PolySpace.any_from_args",
"sfepy.mechanics.tensors.dim2sym",
"sfepy.linalg.insert_strided_axis",
"sfepy.linalg.dot_sequences",
"sfepy.discrete.fem.mappings.VolumeMapping"
] |
[((1263, 1279), 'numpy.cross', 'nm.cross', (['t1', 't2'], {}), '(t1, t2)\n', (1271, 1279), True, 'import numpy as nm\n'), ((1289, 1304), 'numpy.cross', 'nm.cross', (['n', 't1'], {}), '(n, t1)\n', (1297, 1304), True, 'import numpy as nm\n'), ((1466, 1537), 'numpy.concatenate', 'nm.concatenate', (['(t1[:, :, None], t2[:, :, None], n[:, :, None])'], {'axis': '(2)'}), '((t1[:, :, None], t2[:, :, None], n[:, :, None]), axis=2)\n', (1480, 1537), True, 'import numpy as nm\n'), ((2047, 2058), 'six.moves.range', 'range', (['n_ep'], {}), '(n_ep)\n', (2052, 2058), False, 'from six.moves import range\n'), ((2613, 2624), 'six.moves.range', 'range', (['n_ep'], {}), '(n_ep)\n', (2618, 2624), False, 'from six.moves import range\n'), ((3779, 3824), 'numpy.arange', 'nm.arange', (['seq_coors.shape[0]'], {'dtype': 'nm.int32'}), '(seq_coors.shape[0], dtype=nm.int32)\n', (3788, 3824), True, 'import numpy as nm\n'), ((3868, 3920), 'sfepy.discrete.fem.mappings.VolumeMapping', 'VolumeMapping', (['seq_coors', 'seq_conn'], {'gel': 'gel', 'order': '(1)'}), '(seq_coors, seq_conn, gel=gel, order=1)\n', (3881, 3920), False, 'from sfepy.discrete.fem.mappings import VolumeMapping\n'), ((4941, 4987), 'sfepy.linalg.dot_sequences', 'dot_sequences', (['(coors - coors[:, 0:1, :])', 'mtx_t'], {}), '(coors - coors[:, 0:1, :], mtx_t)\n', (4954, 4987), False, 'from sfepy.linalg import dot_sequences, insert_strided_axis\n'), ((5176, 5230), 'sfepy.discrete.fem.poly_spaces.PolySpace.any_from_args', 'PolySpace.any_from_args', (['None', 'gel', 'field.approx_order'], {}), '(None, gel, field.approx_order)\n', (5199, 5230), False, 'from sfepy.discrete.fem.poly_spaces import PolySpace\n'), ((6327, 6343), 'sfepy.mechanics.tensors.dim2sym', 'dim2sym', (['(dim - 1)'], {}), '(dim - 1)\n', (6334, 6343), False, 'from sfepy.mechanics.tensors import dim2sym\n'), ((6415, 6461), 'sfepy.linalg.insert_strided_axis', 'insert_strided_axis', (['el_disps', '(1)', 'bfg.shape[1]'], {}), '(el_disps, 1, bfg.shape[1])\n', (6434, 6461), False, 'from sfepy.linalg import dot_sequences, insert_strided_axis\n'), ((6585, 6616), 'sfepy.linalg.dot_sequences', 'dot_sequences', (['bfg', 'el_disps_qp'], {}), '(bfg, el_disps_qp)\n', (6598, 6616), False, 'from sfepy.linalg import dot_sequences, insert_strided_axis\n'), ((6919, 6953), 'sfepy.linalg.dot_sequences', 'dot_sequences', (['mtx_f', 'mtx_f', '"""ABT"""'], {}), "(mtx_f, mtx_f, 'ABT')\n", (6932, 6953), False, 'from sfepy.linalg import dot_sequences, insert_strided_axis\n'), ((7144, 7204), 'numpy.empty', 'nm.empty', (['(sh[0], sh[1], sym2, dim * n_ep)'], {'dtype': 'nm.float64'}), '((sh[0], sh[1], sym2, dim * n_ep), dtype=nm.float64)\n', (7152, 7204), True, 'import numpy as nm\n'), ((8716, 8780), 'numpy.zeros', 'nm.zeros', (['(n_el, n_qp, dim * n_ep, dim * n_ep)'], {'dtype': 'nm.float64'}), '((n_el, n_qp, dim * n_ep, dim * n_ep), dtype=nm.float64)\n', (8724, 8780), True, 'import numpy as nm\n'), ((8794, 8850), 'sfepy.linalg.dot_sequences', 'dot_sequences', (['bfg[..., 0:1, :]', 'bfg[..., 0:1, :]', '"""ATB"""'], {}), "(bfg[..., 0:1, :], bfg[..., 0:1, :], 'ATB')\n", (8807, 8850), False, 'from sfepy.linalg import dot_sequences, insert_strided_axis\n'), ((8863, 8919), 'sfepy.linalg.dot_sequences', 'dot_sequences', (['bfg[..., 0:1, :]', 'bfg[..., 1:2, :]', '"""ATB"""'], {}), "(bfg[..., 0:1, :], bfg[..., 1:2, :], 'ATB')\n", (8876, 8919), False, 'from sfepy.linalg import dot_sequences, insert_strided_axis\n'), ((8932, 8988), 'sfepy.linalg.dot_sequences', 'dot_sequences', (['bfg[..., 1:2, :]', 'bfg[..., 0:1, :]', '"""ATB"""'], 
{}), "(bfg[..., 1:2, :], bfg[..., 0:1, :], 'ATB')\n", (8945, 8988), False, 'from sfepy.linalg import dot_sequences, insert_strided_axis\n'), ((9001, 9057), 'sfepy.linalg.dot_sequences', 'dot_sequences', (['bfg[..., 1:2, :]', 'bfg[..., 1:2, :]', '"""ATB"""'], {}), "(bfg[..., 1:2, :], bfg[..., 1:2, :], 'ATB')\n", (9014, 9057), False, 'from sfepy.linalg import dot_sequences, insert_strided_axis\n'), ((11024, 11036), 'sfepy.mechanics.tensors.dim2sym', 'dim2sym', (['dim'], {}), '(dim)\n', (11031, 11036), False, 'from sfepy.mechanics.tensors import dim2sym\n'), ((11050, 11099), 'numpy.empty', 'nm.empty', (['(n_el, n_qp, sym, 1)'], {'dtype': 'mtx_c.dtype'}), '((n_el, n_qp, sym, 1), dtype=mtx_c.dtype)\n', (11058, 11099), True, 'import numpy as nm\n'), ((2142, 2172), 'sfepy.linalg.dot_sequences', 'dot_sequences', (['mtx_t', 'fn', '"""AB"""'], {}), "(mtx_t, fn, 'AB')\n", (2155, 2172), False, 'from sfepy.linalg import dot_sequences, insert_strided_axis\n'), ((2683, 2694), 'six.moves.range', 'range', (['n_ep'], {}), '(n_ep)\n', (2688, 2694), False, 'from six.moves import range\n'), ((3532, 3589), 'numpy.allclose', 'nm.allclose', (['coors[:, :, -1]', '(0.0)'], {'rtol': '(1e-12)', 'atol': '(1e-12)'}), '(coors[:, :, -1], 0.0, rtol=1e-12, atol=1e-12)\n', (3543, 3589), True, 'import numpy as nm\n'), ((6775, 6811), 'numpy.eye', 'nm.eye', (['(dim - 1)', 'dim'], {'dtype': 'du.dtype'}), '(dim - 1, dim, dtype=du.dtype)\n', (6781, 6811), True, 'import numpy as nm\n'), ((1320, 1328), 'sfepy.linalg.norm_l2_along_axis', 'norm', (['t1'], {}), '(t1)\n', (1324, 1328), True, 'from sfepy.linalg import norm_l2_along_axis as norm\n'), ((1352, 1360), 'sfepy.linalg.norm_l2_along_axis', 'norm', (['t2'], {}), '(t2)\n', (1356, 1360), True, 'from sfepy.linalg import norm_l2_along_axis as norm\n'), ((1382, 1389), 'sfepy.linalg.norm_l2_along_axis', 'norm', (['n'], {}), '(n)\n', (1386, 1389), True, 'from sfepy.linalg import norm_l2_along_axis as norm\n'), ((2806, 2836), 'sfepy.linalg.dot_sequences', 'dot_sequences', (['mtx_t', 'fn', '"""AB"""'], {}), "(mtx_t, fn, 'AB')\n", (2819, 2836), False, 'from sfepy.linalg import dot_sequences, insert_strided_axis\n')]
|
# -*- coding:utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine as mge
import megengine.functional as F
import megengine.module as M
from official.vision.detection import layers
class RCNN(M.Module):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
self.box_coder = layers.BoxCoder(cfg.rcnn_reg_mean, cfg.rcnn_reg_std)
# roi head
self.in_features = cfg.rcnn_in_features
self.stride = cfg.rcnn_stride
self.pooling_method = cfg.pooling_method
self.pooling_size = cfg.pooling_size
self.fc1 = M.Linear(256 * self.pooling_size[0] * self.pooling_size[1], 1024)
self.fc2 = M.Linear(1024, 1024)
for l in [self.fc1, self.fc2]:
M.init.normal_(l.weight, std=0.01)
M.init.fill_(l.bias, 0)
# box predictor
self.pred_cls = M.Linear(1024, cfg.num_classes + 1)
self.pred_delta = M.Linear(1024, (cfg.num_classes + 1) * 4)
M.init.normal_(self.pred_cls.weight, std=0.01)
M.init.normal_(self.pred_delta.weight, std=0.001)
for l in [self.pred_cls, self.pred_delta]:
M.init.fill_(l.bias, 0)
def forward(self, fpn_fms, rcnn_rois, im_info=None, gt_boxes=None):
rcnn_rois, labels, bbox_targets = self.get_ground_truth(
rcnn_rois, im_info, gt_boxes
)
fpn_fms = [fpn_fms[x] for x in self.in_features]
pool_features = layers.roi_pool(
fpn_fms, rcnn_rois, self.stride, self.pooling_size, self.pooling_method,
)
flatten_feature = F.flatten(pool_features, start_axis=1)
roi_feature = F.relu(self.fc1(flatten_feature))
roi_feature = F.relu(self.fc2(roi_feature))
pred_logits = self.pred_cls(roi_feature)
pred_offsets = self.pred_delta(roi_feature)
if self.training:
# loss for classification
loss_rcnn_cls = layers.softmax_loss(pred_logits, labels)
# loss for regression
pred_offsets = pred_offsets.reshape(-1, self.cfg.num_classes + 1, 4)
vlabels = labels.reshape(-1, 1).broadcast((labels.shapeof(0), 4))
pred_offsets = F.indexing_one_hot(pred_offsets, vlabels, axis=1)
loss_rcnn_loc = layers.get_smooth_l1_loss(
pred_offsets,
bbox_targets,
labels,
self.cfg.rcnn_smooth_l1_beta,
norm_type="all",
)
loss_dict = {"loss_rcnn_cls": loss_rcnn_cls, "loss_rcnn_loc": loss_rcnn_loc}
return loss_dict
else:
# slice 1 for removing background
pred_scores = F.softmax(pred_logits, axis=1)[:, 1:]
pred_offsets = pred_offsets[:, 4:].reshape(-1, 4)
target_shape = (rcnn_rois.shapeof(0), self.cfg.num_classes, 4)
# rois (N, 4) -> (N, 1, 4) -> (N, 80, 4) -> (N * 80, 4)
base_rois = (
F.add_axis(rcnn_rois[:, 1:5], 1).broadcast(target_shape).reshape(-1, 4)
)
pred_bbox = self.box_coder.decode(base_rois, pred_offsets)
return pred_bbox, pred_scores
def get_ground_truth(self, rpn_rois, im_info, gt_boxes):
if not self.training:
return rpn_rois, None, None
return_rois = []
return_labels = []
return_bbox_targets = []
# get per image proposals and gt_boxes
for bid in range(self.cfg.batch_per_gpu):
num_valid_boxes = im_info[bid, 4]
gt_boxes_per_img = gt_boxes[bid, :num_valid_boxes, :]
batch_inds = mge.ones((gt_boxes_per_img.shapeof(0), 1)) * bid
# if config.proposal_append_gt:
gt_rois = F.concat([batch_inds, gt_boxes_per_img[:, :4]], axis=1)
batch_roi_mask = rpn_rois[:, 0] == bid
_, batch_roi_inds = F.cond_take(batch_roi_mask == 1, batch_roi_mask)
# all_rois : [batch_id, x1, y1, x2, y2]
all_rois = F.concat([rpn_rois.ai[batch_roi_inds], gt_rois])
overlaps_normal, overlaps_ignore = layers.get_iou(
all_rois[:, 1:5], gt_boxes_per_img, return_ignore=True,
)
max_overlaps_normal = overlaps_normal.max(axis=1)
gt_assignment_normal = F.argmax(overlaps_normal, axis=1)
max_overlaps_ignore = overlaps_ignore.max(axis=1)
gt_assignment_ignore = F.argmax(overlaps_ignore, axis=1)
ignore_assign_mask = (max_overlaps_normal < self.cfg.fg_threshold) * (
max_overlaps_ignore > max_overlaps_normal
)
max_overlaps = (
max_overlaps_normal * (1 - ignore_assign_mask)
+ max_overlaps_ignore * ignore_assign_mask
)
gt_assignment = (
gt_assignment_normal * (1 - ignore_assign_mask)
+ gt_assignment_ignore * ignore_assign_mask
)
gt_assignment = gt_assignment.astype("int32")
labels = gt_boxes_per_img.ai[gt_assignment, 4]
# ---------------- get the fg/bg labels for each roi ---------------#
fg_mask = (max_overlaps >= self.cfg.fg_threshold) * (
labels != self.cfg.ignore_label
)
bg_mask = (max_overlaps < self.cfg.bg_threshold_high) * (
max_overlaps >= self.cfg.bg_threshold_low
)
num_fg_rois = self.cfg.num_rois * self.cfg.fg_ratio
fg_inds_mask = self._bernoulli_sample_masks(fg_mask, num_fg_rois, 1)
num_bg_rois = self.cfg.num_rois - fg_inds_mask.sum()
bg_inds_mask = self._bernoulli_sample_masks(bg_mask, num_bg_rois, 1)
labels = labels * fg_inds_mask
keep_mask = fg_inds_mask + bg_inds_mask
_, keep_inds = F.cond_take(keep_mask == 1, keep_mask)
# Add next line to avoid memory exceed
keep_inds = keep_inds[: F.minimum(self.cfg.num_rois, keep_inds.shapeof(0))]
# labels
labels = labels.ai[keep_inds].astype("int32")
rois = all_rois.ai[keep_inds]
target_boxes = gt_boxes_per_img.ai[gt_assignment.ai[keep_inds], :4]
bbox_targets = self.box_coder.encode(rois[:, 1:5], target_boxes)
bbox_targets = bbox_targets.reshape(-1, 4)
return_rois.append(rois)
return_labels.append(labels)
return_bbox_targets.append(bbox_targets)
return (
F.zero_grad(F.concat(return_rois, axis=0)),
F.zero_grad(F.concat(return_labels, axis=0)),
F.zero_grad(F.concat(return_bbox_targets, axis=0)),
)
def _bernoulli_sample_masks(self, masks, num_samples, sample_value):
""" Using the bernoulli sampling method"""
sample_mask = masks == sample_value
num_mask = sample_mask.sum()
num_final_samples = F.minimum(num_mask, num_samples)
# here, we use the bernoulli probability to sample the anchors
sample_prob = num_final_samples / num_mask
uniform_rng = mge.random.uniform(sample_mask.shapeof(0))
after_sampled_mask = (uniform_rng <= sample_prob) * sample_mask
return after_sampled_mask
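# --- Illustrative sketch (added for clarity; not part of the original class):
# the same Bernoulli sub-sampling idea written with plain numpy, to show what
# _bernoulli_sample_masks computes. Shapes and names are hypothetical.
def _bernoulli_sample_masks_numpy(masks, num_samples, sample_value):
    import numpy as np
    sample_mask = masks == sample_value
    num_mask = sample_mask.sum()
    # keep each positive entry independently with probability
    # min(num_mask, num_samples) / num_mask, so the expected number of kept
    # entries is at most num_samples (the max() guard avoids division by zero)
    sample_prob = min(num_mask, num_samples) / max(num_mask, 1)
    return (np.random.uniform(size=masks.shape) <= sample_prob) * sample_mask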
|
[
"megengine.functional.argmax",
"megengine.functional.flatten",
"megengine.module.init.fill_",
"megengine.functional.indexing_one_hot",
"megengine.functional.softmax",
"megengine.functional.minimum",
"megengine.module.init.normal_",
"megengine.functional.add_axis",
"megengine.module.Linear",
"megengine.functional.concat",
"megengine.functional.cond_take"
] |
[((636, 688), 'official.vision.detection.layers.BoxCoder', 'layers.BoxCoder', (['cfg.rcnn_reg_mean', 'cfg.rcnn_reg_std'], {}), '(cfg.rcnn_reg_mean, cfg.rcnn_reg_std)\n', (651, 688), False, 'from official.vision.detection import layers\n'), ((909, 974), 'megengine.module.Linear', 'M.Linear', (['(256 * self.pooling_size[0] * self.pooling_size[1])', '(1024)'], {}), '(256 * self.pooling_size[0] * self.pooling_size[1], 1024)\n', (917, 974), True, 'import megengine.module as M\n'), ((994, 1014), 'megengine.module.Linear', 'M.Linear', (['(1024)', '(1024)'], {}), '(1024, 1024)\n', (1002, 1014), True, 'import megengine.module as M\n'), ((1186, 1221), 'megengine.module.Linear', 'M.Linear', (['(1024)', '(cfg.num_classes + 1)'], {}), '(1024, cfg.num_classes + 1)\n', (1194, 1221), True, 'import megengine.module as M\n'), ((1248, 1289), 'megengine.module.Linear', 'M.Linear', (['(1024)', '((cfg.num_classes + 1) * 4)'], {}), '(1024, (cfg.num_classes + 1) * 4)\n', (1256, 1289), True, 'import megengine.module as M\n'), ((1298, 1344), 'megengine.module.init.normal_', 'M.init.normal_', (['self.pred_cls.weight'], {'std': '(0.01)'}), '(self.pred_cls.weight, std=0.01)\n', (1312, 1344), True, 'import megengine.module as M\n'), ((1353, 1402), 'megengine.module.init.normal_', 'M.init.normal_', (['self.pred_delta.weight'], {'std': '(0.001)'}), '(self.pred_delta.weight, std=0.001)\n', (1367, 1402), True, 'import megengine.module as M\n'), ((1761, 1854), 'official.vision.detection.layers.roi_pool', 'layers.roi_pool', (['fpn_fms', 'rcnn_rois', 'self.stride', 'self.pooling_size', 'self.pooling_method'], {}), '(fpn_fms, rcnn_rois, self.stride, self.pooling_size, self.\n pooling_method)\n', (1776, 1854), False, 'from official.vision.detection import layers\n'), ((1899, 1937), 'megengine.functional.flatten', 'F.flatten', (['pool_features'], {'start_axis': '(1)'}), '(pool_features, start_axis=1)\n', (1908, 1937), True, 'import megengine.functional as F\n'), ((7226, 7258), 'megengine.functional.minimum', 'F.minimum', (['num_mask', 'num_samples'], {}), '(num_mask, num_samples)\n', (7235, 7258), True, 'import megengine.functional as F\n'), ((1066, 1100), 'megengine.module.init.normal_', 'M.init.normal_', (['l.weight'], {'std': '(0.01)'}), '(l.weight, std=0.01)\n', (1080, 1100), True, 'import megengine.module as M\n'), ((1113, 1136), 'megengine.module.init.fill_', 'M.init.fill_', (['l.bias', '(0)'], {}), '(l.bias, 0)\n', (1125, 1136), True, 'import megengine.module as M\n'), ((1466, 1489), 'megengine.module.init.fill_', 'M.init.fill_', (['l.bias', '(0)'], {}), '(l.bias, 0)\n', (1478, 1489), True, 'import megengine.module as M\n'), ((2240, 2280), 'official.vision.detection.layers.softmax_loss', 'layers.softmax_loss', (['pred_logits', 'labels'], {}), '(pred_logits, labels)\n', (2259, 2280), False, 'from official.vision.detection import layers\n'), ((2502, 2551), 'megengine.functional.indexing_one_hot', 'F.indexing_one_hot', (['pred_offsets', 'vlabels'], {'axis': '(1)'}), '(pred_offsets, vlabels, axis=1)\n', (2520, 2551), True, 'import megengine.functional as F\n'), ((2581, 2694), 'official.vision.detection.layers.get_smooth_l1_loss', 'layers.get_smooth_l1_loss', (['pred_offsets', 'bbox_targets', 'labels', 'self.cfg.rcnn_smooth_l1_beta'], {'norm_type': '"""all"""'}), "(pred_offsets, bbox_targets, labels, self.cfg.\n rcnn_smooth_l1_beta, norm_type='all')\n", (2606, 2694), False, 'from official.vision.detection import layers\n'), ((4041, 4096), 'megengine.functional.concat', 'F.concat', (['[batch_inds, gt_boxes_per_img[:, :4]]'], 
{'axis': '(1)'}), '([batch_inds, gt_boxes_per_img[:, :4]], axis=1)\n', (4049, 4096), True, 'import megengine.functional as F\n'), ((4180, 4228), 'megengine.functional.cond_take', 'F.cond_take', (['(batch_roi_mask == 1)', 'batch_roi_mask'], {}), '(batch_roi_mask == 1, batch_roi_mask)\n', (4191, 4228), True, 'import megengine.functional as F\n'), ((4304, 4352), 'megengine.functional.concat', 'F.concat', (['[rpn_rois.ai[batch_roi_inds], gt_rois]'], {}), '([rpn_rois.ai[batch_roi_inds], gt_rois])\n', (4312, 4352), True, 'import megengine.functional as F\n'), ((4401, 4471), 'official.vision.detection.layers.get_iou', 'layers.get_iou', (['all_rois[:, 1:5]', 'gt_boxes_per_img'], {'return_ignore': '(True)'}), '(all_rois[:, 1:5], gt_boxes_per_img, return_ignore=True)\n', (4415, 4471), False, 'from official.vision.detection import layers\n'), ((4601, 4634), 'megengine.functional.argmax', 'F.argmax', (['overlaps_normal'], {'axis': '(1)'}), '(overlaps_normal, axis=1)\n', (4609, 4634), True, 'import megengine.functional as F\n'), ((4733, 4766), 'megengine.functional.argmax', 'F.argmax', (['overlaps_ignore'], {'axis': '(1)'}), '(overlaps_ignore, axis=1)\n', (4741, 4766), True, 'import megengine.functional as F\n'), ((6143, 6181), 'megengine.functional.cond_take', 'F.cond_take', (['(keep_mask == 1)', 'keep_mask'], {}), '(keep_mask == 1, keep_mask)\n', (6154, 6181), True, 'import megengine.functional as F\n'), ((2989, 3019), 'megengine.functional.softmax', 'F.softmax', (['pred_logits'], {'axis': '(1)'}), '(pred_logits, axis=1)\n', (2998, 3019), True, 'import megengine.functional as F\n'), ((6828, 6857), 'megengine.functional.concat', 'F.concat', (['return_rois'], {'axis': '(0)'}), '(return_rois, axis=0)\n', (6836, 6857), True, 'import megengine.functional as F\n'), ((6884, 6915), 'megengine.functional.concat', 'F.concat', (['return_labels'], {'axis': '(0)'}), '(return_labels, axis=0)\n', (6892, 6915), True, 'import megengine.functional as F\n'), ((6942, 6979), 'megengine.functional.concat', 'F.concat', (['return_bbox_targets'], {'axis': '(0)'}), '(return_bbox_targets, axis=0)\n', (6950, 6979), True, 'import megengine.functional as F\n'), ((3274, 3306), 'megengine.functional.add_axis', 'F.add_axis', (['rcnn_rois[:, 1:5]', '(1)'], {}), '(rcnn_rois[:, 1:5], 1)\n', (3284, 3306), True, 'import megengine.functional as F\n')]
|
# -*- coding: utf-8 -*-
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import pickle
import boto3
import megengine as mge
from collections import defaultdict
from termcolor import colored
from common import utils
class Manager():
def __init__(self, model, optimizer, params, dataloaders, writer, logger, scheduler):
# params status
self.model = model
self.writer = writer
self.logger = logger
self.params = params
self.optimizer = optimizer
self.scheduler = scheduler
self.dataloaders = dataloaders
# metric_rule should be either Descende or Ascende
self.metric_rule = params.metric_rule
self.epoch = 0
self.step = 0
        # lower is better
if self.metric_rule == "Descende":
self.best_val_score = 100
self.best_test_score = 100
        # higher is better
elif self.metric_rule == "Ascende":
self.best_val_score = 0
self.best_test_score = 0
self.cur_val_score = 0
self.cur_test_score = 0
# train status
self.train_status = defaultdict(utils.AverageMeter)
# val status
self.val_status = defaultdict(utils.AverageMeter)
# test status
self.test_status = defaultdict(utils.AverageMeter)
# model status
self.loss_status = defaultdict(utils.AverageMeter)
# client init
self.s3_client = boto3.client('s3', endpoint_url='http://oss.i.brainpp.cn')
self.bucket_name = params.bucket_name
def update_step(self):
self.step += 1
def update_epoch(self):
self.epoch += 1
def update_loss_status(self, loss, split, bs=None):
if split == "train":
for k, v in loss.items():
bs = self.params.train_batch_size
self.loss_status[k].update(val=v.item(), num=bs)
elif split == "val":
for k, v in loss.items():
self.loss_status[k].update(val=v.item(), num=bs)
elif split == "test":
for k, v in loss.items():
self.loss_status[k].update(val=v.item(), num=bs)
else:
raise ValueError("Wrong eval type: {}".format(split))
def update_metric_status(self, metrics, split, bs):
if split == "val":
for k, v in metrics.items():
self.val_status[k].update(val=v.item(), num=bs)
self.cur_val_score = self.val_status[self.params.major_metric].avg
elif split == "test":
for k, v in metrics.items():
self.test_status[k].update(val=v.item(), num=bs)
self.cur_test_score = self.test_status[self.params.major_metric].avg
else:
raise ValueError("Wrong eval type: {}".format(split))
def reset_loss_status(self):
for k, v in self.loss_status.items():
self.loss_status[k].reset()
def reset_metric_status(self, split):
if split == "val":
for k, v in self.val_status.items():
self.val_status[k].reset()
elif split == "test":
for k, v in self.test_status.items():
self.test_status[k].reset()
else:
raise ValueError("Wrong eval type: {}".format(split))
def print_train_info(self):
exp_name = self.params.model_dir.split('/')[-1]
print_str = "{} Epoch: {:4d}, lr={:.6f} ".format(exp_name, self.epoch, self.scheduler.get_lr()[0])
print_str += "total loss: %.4f(%.4f)" % (self.loss_status['total'].val, self.loss_status['total'].avg)
return print_str
def print_metrics(self, split, title="Eval", color="red"):
if split == "val":
metric_status = self.val_status
elif split == "test":
metric_status = self.test_status
else:
raise ValueError("Wrong eval type: {}".format(split))
if split == "val":
print_str = " | ".join("{}: {:.4f} - avg: {:.4f}".format(k, v.val, v.avg) for k, v in metric_status.items())
print_str += " | lastest: {:.4f} | pre_best: {:.4f}".format(self.cur_val_score, self.best_val_score)
elif split == "test":
print_str = " | ".join("{}: {:.4f} - avg: {:.4f}".format(k, v.val, v.avg) for k, v in metric_status.items())
self.logger.info(colored("{} Results: {}".format(title, print_str), color, attrs=["bold"]))
def check_best_save_last_checkpoints(self, latest_freq=5):
state = {
"state_dict": self.model.state_dict(),
"optimizer": self.optimizer.state_dict(),
"scheduler": self.scheduler.state_dict(),
"step": self.step,
"epoch": self.epoch,
}
if "val" in self.dataloaders:
state["best_val_score"] = self.best_val_score
if "test" in self.dataloaders:
state["best_test_score"] = self.best_test_score
# save latest checkpoint
if self.epoch % latest_freq == 0:
latest_ckpt_name = os.path.join(self.params.model_dir, "model_latest.pth")
if self.params.save_mode == "local":
# print(state)
mge.save(state, latest_ckpt_name)
elif self.params.save_mode == "oss":
save_dict = pickle.dumps(state)
resp = self.s3_client.put_object(Bucket=self.bucket_name, Key=latest_ckpt_name, Body=save_dict[0:])
else:
raise NotImplementedError
self.logger.info("Saved latest checkpoint to: {}".format(latest_ckpt_name))
# save val latest metrics, and check if val is best checkpoints
if "val" in self.dataloaders:
val_latest_metrics_name = os.path.join(self.params.model_dir, "val_metrics_latest.json")
utils.save_dict_to_json(self.val_status, val_latest_metrics_name)
            # lower is better
if self.metric_rule == "Descende":
is_best = self.cur_val_score < self.best_val_score
            # higher is better
elif self.metric_rule == "Ascende":
is_best = self.cur_val_score > self.best_val_score
else:
raise Exception("metric_rule should be either Descende or Ascende")
if is_best:
# save metrics
self.best_val_score = self.cur_val_score
best_metrics_name = os.path.join(self.params.model_dir, "val_metrics_best.json")
utils.save_dict_to_json(self.val_status, best_metrics_name)
self.logger.info("Current is val best, score={:.4f}".format(self.best_val_score))
# save checkpoint
best_ckpt_name = os.path.join(self.params.model_dir, "val_model_best.pth")
if self.params.save_mode == "local":
mge.save(state, best_ckpt_name)
elif self.params.save_mode == "oss":
save_dict = pickle.dumps(state)
resp = self.s3_client.put_object(Bucket=self.bucket_name, Key=best_ckpt_name, Body=save_dict[0:])
else:
raise NotImplementedError
self.logger.info("Saved val best checkpoint to: {}".format(best_ckpt_name))
# save test latest metrics, and check if test is best checkpoints
# if self.dataloaders["test"] is not None:
if "test" in self.dataloaders:
test_latest_metrics_name = os.path.join(self.params.model_dir, "test_metrics_latest.json")
utils.save_dict_to_json(self.test_status, test_latest_metrics_name)
            # lower is better
if self.metric_rule == "Descende":
is_best = self.cur_test_score < self.best_test_score
            # higher is better
elif self.metric_rule == "Ascende":
is_best = self.cur_test_score > self.best_test_score
else:
raise Exception("metric_rule should be either Descende or Ascende")
if is_best:
# save metrics
self.best_test_score = self.cur_test_score
best_metrics_name = os.path.join(self.params.model_dir, "test_metrics_best.json")
utils.save_dict_to_json(self.test_status, best_metrics_name)
self.logger.info("Current is test best, score={:.4f}".format(self.best_test_score))
# save checkpoint
best_ckpt_name = os.path.join(self.params.model_dir, "test_model_best.pth")
if self.params.save_mode == "local":
mge.save(state, best_ckpt_name)
elif self.params.save_mode == "oss":
save_dict = pickle.dumps(state)
resp = self.s3_client.put_object(Bucket=self.bucket_name, Key=best_ckpt_name, Body=save_dict[0:])
else:
raise NotImplementedError
self.logger.info("Saved test best checkpoint to: {}".format(best_ckpt_name))
def load_checkpoints(self):
if self.params.save_mode == "local":
state = mge.load(self.params.restore_file)
elif self.params.save_mode == "oss":
resp = self.s3_client.get_object(Bucket=self.bucket_name, Key=self.params.restore_file[0:])
state = resp["Body"].read()
state = pickle.loads(state)
else:
raise NotImplementedError
ckpt_component = []
if "state_dict" in state and self.model is not None:
try:
self.model.load_state_dict(state["state_dict"])
except Warning("Using custom loading net"):
net_dict = self.model.state_dict()
if "module" not in list(state["state_dict"].keys())[0]:
state_dict = {"module." + k: v for k, v in state["state_dict"].items() if "module." + k in net_dict.keys()}
else:
state_dict = {k: v for k, v in state["state_dict"].items() if k in net_dict.keys()}
net_dict.update(state_dict)
self.model.load_state_dict(net_dict, strict=False)
ckpt_component.append("net")
if not self.params.only_weights:
print("reload states from checkpoints!")
if "optimizer" in state and self.optimizer is not None:
try:
self.optimizer.load_state_dict(state["optimizer"])
except Warning("Using custom loading optimizer"):
optimizer_dict = self.optimizer.state_dict()
state_dict = {k: v for k, v in state["optimizer"].items() if k in optimizer_dict.keys()}
optimizer_dict.update(state_dict)
self.optimizer.load_state_dict(optimizer_dict)
ckpt_component.append("opt")
if "scheduler" in state and self.train_status["scheduler"] is not None:
try:
self.scheduler.load_state_dict(state["scheduler"])
except Warning("Using custom loading scheduler"):
scheduler_dict = self.scheduler.state_dict()
state_dict = {k: v for k, v in state["scheduler"].items() if k in scheduler_dict.keys()}
scheduler_dict.update(state_dict)
self.scheduler.load_state_dict(scheduler_dict)
ckpt_component.append("sch")
if "step" in state:
self.step = state["step"] + 1
self.train_status["step"] = state["step"] + 1
ckpt_component.append("step: {}".format(self.train_status["step"]))
if "epoch" in state:
self.epoch = state["epoch"] + 1
self.train_status["epoch"] = state["epoch"] + 1
ckpt_component.append("epoch: {}".format(self.train_status["epoch"]))
if "best_val_score" in state:
self.best_val_score = state["best_val_score"]
ckpt_component.append("best val score: {:.3g}".format(self.best_val_score))
if "best_test_score" in state:
self.best_test_score = state["best_test_score"]
ckpt_component.append("best test score: {:.3g}".format(self.best_test_score))
ckpt_component = ", ".join(i for i in ckpt_component)
self.logger.info("Loaded models from: {}".format(self.params.restore_file))
self.logger.info("Ckpt load: {}".format(ckpt_component))
|
[
"megengine.save",
"megengine.load"
] |
[((1472, 1503), 'collections.defaultdict', 'defaultdict', (['utils.AverageMeter'], {}), '(utils.AverageMeter)\n', (1483, 1503), False, 'from collections import defaultdict\n'), ((1555, 1586), 'collections.defaultdict', 'defaultdict', (['utils.AverageMeter'], {}), '(utils.AverageMeter)\n', (1566, 1586), False, 'from collections import defaultdict\n'), ((1640, 1671), 'collections.defaultdict', 'defaultdict', (['utils.AverageMeter'], {}), '(utils.AverageMeter)\n', (1651, 1671), False, 'from collections import defaultdict\n'), ((1726, 1757), 'collections.defaultdict', 'defaultdict', (['utils.AverageMeter'], {}), '(utils.AverageMeter)\n', (1737, 1757), False, 'from collections import defaultdict\n'), ((1809, 1867), 'boto3.client', 'boto3.client', (['"""s3"""'], {'endpoint_url': '"""http://oss.i.brainpp.cn"""'}), "('s3', endpoint_url='http://oss.i.brainpp.cn')\n", (1821, 1867), False, 'import boto3\n'), ((5498, 5553), 'os.path.join', 'os.path.join', (['self.params.model_dir', '"""model_latest.pth"""'], {}), "(self.params.model_dir, 'model_latest.pth')\n", (5510, 5553), False, 'import os\n'), ((6207, 6269), 'os.path.join', 'os.path.join', (['self.params.model_dir', '"""val_metrics_latest.json"""'], {}), "(self.params.model_dir, 'val_metrics_latest.json')\n", (6219, 6269), False, 'import os\n'), ((6283, 6348), 'common.utils.save_dict_to_json', 'utils.save_dict_to_json', (['self.val_status', 'val_latest_metrics_name'], {}), '(self.val_status, val_latest_metrics_name)\n', (6306, 6348), False, 'from common import utils\n'), ((7951, 8014), 'os.path.join', 'os.path.join', (['self.params.model_dir', '"""test_metrics_latest.json"""'], {}), "(self.params.model_dir, 'test_metrics_latest.json')\n", (7963, 8014), False, 'import os\n'), ((8028, 8095), 'common.utils.save_dict_to_json', 'utils.save_dict_to_json', (['self.test_status', 'test_latest_metrics_name'], {}), '(self.test_status, test_latest_metrics_name)\n', (8051, 8095), False, 'from common import utils\n'), ((9599, 9633), 'megengine.load', 'mge.load', (['self.params.restore_file'], {}), '(self.params.restore_file)\n', (9607, 9633), True, 'import megengine as mge\n'), ((5653, 5686), 'megengine.save', 'mge.save', (['state', 'latest_ckpt_name'], {}), '(state, latest_ckpt_name)\n', (5661, 5686), True, 'import megengine as mge\n'), ((6882, 6942), 'os.path.join', 'os.path.join', (['self.params.model_dir', '"""val_metrics_best.json"""'], {}), "(self.params.model_dir, 'val_metrics_best.json')\n", (6894, 6942), False, 'import os\n'), ((6960, 7019), 'common.utils.save_dict_to_json', 'utils.save_dict_to_json', (['self.val_status', 'best_metrics_name'], {}), '(self.val_status, best_metrics_name)\n', (6983, 7019), False, 'from common import utils\n'), ((7188, 7245), 'os.path.join', 'os.path.join', (['self.params.model_dir', '"""val_model_best.pth"""'], {}), "(self.params.model_dir, 'val_model_best.pth')\n", (7200, 7245), False, 'import os\n'), ((8631, 8692), 'os.path.join', 'os.path.join', (['self.params.model_dir', '"""test_metrics_best.json"""'], {}), "(self.params.model_dir, 'test_metrics_best.json')\n", (8643, 8692), False, 'import os\n'), ((8710, 8770), 'common.utils.save_dict_to_json', 'utils.save_dict_to_json', (['self.test_status', 'best_metrics_name'], {}), '(self.test_status, best_metrics_name)\n', (8733, 8770), False, 'from common import utils\n'), ((8941, 8999), 'os.path.join', 'os.path.join', (['self.params.model_dir', '"""test_model_best.pth"""'], {}), "(self.params.model_dir, 'test_model_best.pth')\n", (8953, 8999), False, 'import os\n'), ((9849, 
9868), 'pickle.loads', 'pickle.loads', (['state'], {}), '(state)\n', (9861, 9868), False, 'import pickle\n'), ((5766, 5785), 'pickle.dumps', 'pickle.dumps', (['state'], {}), '(state)\n', (5778, 5785), False, 'import pickle\n'), ((7321, 7352), 'megengine.save', 'mge.save', (['state', 'best_ckpt_name'], {}), '(state, best_ckpt_name)\n', (7329, 7352), True, 'import megengine as mge\n'), ((9075, 9106), 'megengine.save', 'mge.save', (['state', 'best_ckpt_name'], {}), '(state, best_ckpt_name)\n', (9083, 9106), True, 'import megengine as mge\n'), ((7440, 7459), 'pickle.dumps', 'pickle.dumps', (['state'], {}), '(state)\n', (7452, 7459), False, 'import pickle\n'), ((9194, 9213), 'pickle.dumps', 'pickle.dumps', (['state'], {}), '(state)\n', (9206, 9213), False, 'import pickle\n')]
|
import asyncio
import logging
import os
import time
from datetime import datetime
from sqlmodel import Session, SQLModel, select
from starlette.concurrency import run_in_threadpool
from ..datatypes import ArtmuseumTimeLabel
from ..scraping.artmuseum import scrap_artmuseum
from ..scraping.philharmonia import scrap_philharmonia
from .models import ArtmuseumExhibition, PhilharmoniaConcert
def refresh_data(engine):
"""
Scrap all the data sources for up-to-date info. Drop local values and replace them with the new data.
We are trying to be an exact mirror of our data sources.
The easiest way to achieve this is to regularly throw out all the data we have and scrap up-to-date info.
    The cost of this approach in performance/resources is negligible and is much preferred over complications
brought by trying to maintain a local copy by continuously patching it up with UPDATEs.
(there can be edits in the source info, urls can change, etc. - it's not worth it to consider all such corner cases)
"""
logging.info("Started scraping up-to-date info.")
known_addrs = {}
with Session(engine) as session:
stmt = select(ArtmuseumExhibition.url, ArtmuseumExhibition.address).where(
ArtmuseumExhibition.address != None
)
known_addrs = dict(session.exec(stmt).all())
exhibitions = scrap_artmuseum(known_addrs)
concerts = scrap_philharmonia()
logging.info("Finished scraping up-to-date info.")
logging.info("Started updating the database.")
with Session(engine) as session:
session.query(PhilharmoniaConcert).delete()
session.query(ArtmuseumExhibition).delete()
session.bulk_save_objects(concerts)
session.bulk_save_objects(exhibitions)
session.commit()
logging.info("Finished updating the database.")
async def loop_refreshing_data(engine, update_interval, initial_sleep_time: int = 0):
if initial_sleep_time > 0:
await asyncio.sleep(initial_sleep_time)
while True:
await run_in_threadpool(refresh_data, engine)
await asyncio.sleep(update_interval)
def init_db(engine):
update_interval = 60 * 60 * 8 # 8 hours
initial_sleep_time = 0
if os.path.isfile(engine.url.database):
last_modified = os.path.getmtime(engine.url.database)
dt = time.time() - last_modified
if dt <= update_interval:
initial_sleep_time = update_interval - dt
last_update = datetime.fromtimestamp(last_modified).replace(microsecond=0)
logging.info(
f"Last database update - {last_update}, the next one is scheduled in ...[N]h [N]m.... (at h:m)"
)
SQLModel.metadata.create_all(engine)
asyncio.create_task(
loop_refreshing_data(engine, update_interval, initial_sleep_time)
)
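# --- Illustrative check (added for clarity; not part of the original module):
# how the initial sleep is derived from the database file's age. The numbers
# are hypothetical.
def _example_initial_sleep(update_interval: int = 60 * 60 * 8,
                           age_seconds: float = 60 * 60 * 3) -> float:
    # a 3 h old database with an 8 h interval waits the remaining 5 h
    return update_interval - age_seconds if age_seconds <= update_interval else 0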
|
[
"sqlmodel.SQLModel.metadata.create_all",
"sqlmodel.Session",
"sqlmodel.select"
] |
[((1043, 1092), 'logging.info', 'logging.info', (['"""Started scraping up-to-date info."""'], {}), "('Started scraping up-to-date info.')\n", (1055, 1092), False, 'import logging\n'), ((1432, 1482), 'logging.info', 'logging.info', (['"""Finished scraping up-to-date info."""'], {}), "('Finished scraping up-to-date info.')\n", (1444, 1482), False, 'import logging\n'), ((1488, 1534), 'logging.info', 'logging.info', (['"""Started updating the database."""'], {}), "('Started updating the database.')\n", (1500, 1534), False, 'import logging\n'), ((1798, 1845), 'logging.info', 'logging.info', (['"""Finished updating the database."""'], {}), "('Finished updating the database.')\n", (1810, 1845), False, 'import logging\n'), ((2231, 2266), 'os.path.isfile', 'os.path.isfile', (['engine.url.database'], {}), '(engine.url.database)\n', (2245, 2266), False, 'import os\n'), ((2705, 2741), 'sqlmodel.SQLModel.metadata.create_all', 'SQLModel.metadata.create_all', (['engine'], {}), '(engine)\n', (2733, 2741), False, 'from sqlmodel import Session, SQLModel, select\n'), ((1123, 1138), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (1130, 1138), False, 'from sqlmodel import Session, SQLModel, select\n'), ((1544, 1559), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (1551, 1559), False, 'from sqlmodel import Session, SQLModel, select\n'), ((2292, 2329), 'os.path.getmtime', 'os.path.getmtime', (['engine.url.database'], {}), '(engine.url.database)\n', (2308, 2329), False, 'import os\n'), ((1979, 2012), 'asyncio.sleep', 'asyncio.sleep', (['initial_sleep_time'], {}), '(initial_sleep_time)\n', (1992, 2012), False, 'import asyncio\n'), ((2043, 2082), 'starlette.concurrency.run_in_threadpool', 'run_in_threadpool', (['refresh_data', 'engine'], {}), '(refresh_data, engine)\n', (2060, 2082), False, 'from starlette.concurrency import run_in_threadpool\n'), ((2097, 2127), 'asyncio.sleep', 'asyncio.sleep', (['update_interval'], {}), '(update_interval)\n', (2110, 2127), False, 'import asyncio\n'), ((2343, 2354), 'time.time', 'time.time', ([], {}), '()\n', (2352, 2354), False, 'import time\n'), ((2560, 2679), 'logging.info', 'logging.info', (['f"""Last database update - {last_update}, the next one is scheduled in ...[N]h [N]m.... (at h:m)"""'], {}), "(\n f'Last database update - {last_update}, the next one is scheduled in ...[N]h [N]m.... (at h:m)'\n )\n", (2572, 2679), False, 'import logging\n'), ((1166, 1226), 'sqlmodel.select', 'select', (['ArtmuseumExhibition.url', 'ArtmuseumExhibition.address'], {}), '(ArtmuseumExhibition.url, ArtmuseumExhibition.address)\n', (1172, 1226), False, 'from sqlmodel import Session, SQLModel, select\n'), ((2487, 2524), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['last_modified'], {}), '(last_modified)\n', (2509, 2524), False, 'from datetime import datetime\n')]
|
from typing import Union
from fastapi import FastAPI
from pydantic import BaseSettings
from ...utils import get_settings
try:
from sqlalchemy.engine import Engine
from sqlalchemy.ext.asyncio import AsyncEngine, create_async_engine
from sqlmodel import Session, SQLModel, create_engine
from sqlmodel.ext.asyncio.session import AsyncSession
except ImportError:
raise RuntimeError(
"SQLModel is not installed. Please install it with `pip install sqlmodel pyhumps`"
)
class Database:
"""
A class to wrap the sqlalchemy engine and open a connection session to the db.
"""
def __init__(self, engine: Union[Engine, AsyncEngine], is_async: bool = False):
self.engine = engine
self.is_async = is_async
def open(self) -> Union[Session, AsyncSession]:
if self.is_async:
return AsyncSession(self.engine)
else:
return Session(self.engine)
def setup(app: FastAPI, settings: BaseSettings = None) -> Database:
"""
Install the sqlmodel plugin to the app.
    This will attach 1 attribute to `app.state`, i.e.:
* `db` - `popol.sqlmodel.Database` instance object to open db connection.
Args:
app: FastAPI app.
settings: The settings (can be pydantic.BaseSettings).
Returns:
Database: The database.
"""
settings = get_settings(app, settings)
prefix = "SQLALCHEMY_"
db_uri = getattr(settings, f"{prefix}DATABASE_URI", None)
if not db_uri:
raise RuntimeError(f"{prefix}DATABASE_URI is not set")
async_mode = getattr(settings, f"{prefix}ASYNC_MODE", False)
options = getattr(settings, f"{prefix}OPTIONS", {})
if async_mode:
engine = create_async_engine(db_uri, **options)
else:
engine = create_engine(db_uri, **options)
db = Database(engine, async_mode)
app.state.db = db
async def startup():
# reference: https://github.com/tiangolo/sqlmodel/issues/54#issue-981884262
if async_mode:
async with engine.begin() as conn:
await conn.run_sync(SQLModel.metadata.create_all)
else:
SQLModel.metadata.create_all(engine)
app.add_event_handler("startup", startup)
return db
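# --- Illustrative usage sketch (added for clarity; not part of the original
# module): using the Database wrapper directly with a synchronous in-memory
# engine. The sqlite URL is hypothetical.
def _example_sync_session() -> None:
    engine = create_engine("sqlite://")
    db = Database(engine, is_async=False)
    with db.open() as session:
        assert isinstance(session, Session)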
|
[
"sqlmodel.SQLModel.metadata.create_all",
"sqlmodel.Session",
"sqlmodel.create_engine",
"sqlmodel.ext.asyncio.session.AsyncSession"
] |
[((1723, 1761), 'sqlalchemy.ext.asyncio.create_async_engine', 'create_async_engine', (['db_uri'], {}), '(db_uri, **options)\n', (1742, 1761), False, 'from sqlalchemy.ext.asyncio import AsyncEngine, create_async_engine\n'), ((1789, 1821), 'sqlmodel.create_engine', 'create_engine', (['db_uri'], {}), '(db_uri, **options)\n', (1802, 1821), False, 'from sqlmodel import Session, SQLModel, create_engine\n'), ((862, 887), 'sqlmodel.ext.asyncio.session.AsyncSession', 'AsyncSession', (['self.engine'], {}), '(self.engine)\n', (874, 887), False, 'from sqlmodel.ext.asyncio.session import AsyncSession\n'), ((921, 941), 'sqlmodel.Session', 'Session', (['self.engine'], {}), '(self.engine)\n', (928, 941), False, 'from sqlmodel import Session, SQLModel, create_engine\n'), ((2155, 2191), 'sqlmodel.SQLModel.metadata.create_all', 'SQLModel.metadata.create_all', (['engine'], {}), '(engine)\n', (2183, 2191), False, 'from sqlmodel import Session, SQLModel, create_engine\n')]
|
from http import HTTPStatus
from typing import List
from fastapi import APIRouter, Depends
from sqlalchemy.ext.asyncio import AsyncSession
from sqlmodel import select
from starlette.responses import Response
from icon_governance.db import get_session
from icon_governance.models.preps import Prep
router = APIRouter()
@router.get("/preps")
async def get_preps(
session: AsyncSession = Depends(get_session),
) -> List[Prep]:
"""Return list of preps which is limitted to 150 records so no skip."""
result = await session.execute(select(Prep).order_by(Prep.delegated.desc()))
preps = result.scalars().all()
return preps
@router.get("/preps/{address}")
async def get_prep(
address: str,
session: AsyncSession = Depends(get_session),
) -> List[Prep]:
"""Return a single prep."""
result = await session.execute(select(Prep).where(Prep.address == address))
preps = result.scalars().all()
if len(preps) == 0:
return Response(status_code=HTTPStatus.NO_CONTENT.value)
return preps
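# --- Illustrative sketch (added for clarity; not part of the original module):
# mounting this router on an application. The prefix is hypothetical.
def _example_include_router():
    from fastapi import FastAPI
    app = FastAPI()
    app.include_router(router, prefix="/governance")
    return app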
|
[
"sqlmodel.select"
] |
[((346, 357), 'fastapi.APIRouter', 'APIRouter', ([], {}), '()\n', (355, 357), False, 'from fastapi import APIRouter, Depends\n'), ((431, 451), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (438, 451), False, 'from fastapi import APIRouter, Depends\n'), ((780, 800), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (787, 800), False, 'from fastapi import APIRouter, Depends\n'), ((1006, 1055), 'starlette.responses.Response', 'Response', ([], {'status_code': 'HTTPStatus.NO_CONTENT.value'}), '(status_code=HTTPStatus.NO_CONTENT.value)\n', (1014, 1055), False, 'from starlette.responses import Response\n'), ((603, 624), 'icon_governance.models.preps.Prep.delegated.desc', 'Prep.delegated.desc', ([], {}), '()\n', (622, 624), False, 'from icon_governance.models.preps import Prep\n'), ((581, 593), 'sqlmodel.select', 'select', (['Prep'], {}), '(Prep)\n', (587, 593), False, 'from sqlmodel import select\n'), ((886, 898), 'sqlmodel.select', 'select', (['Prep'], {}), '(Prep)\n', (892, 898), False, 'from sqlmodel import select\n')]
|
"""
Utility functions.
"""
import logging
import os
from functools import lru_cache
from pathlib import Path
from typing import Iterator, List, Optional
from dotenv import load_dotenv
from rich.logging import RichHandler
from sqlalchemy.engine import Engine
from sqlmodel import Session, SQLModel, create_engine
from yarl import URL
from datajunction.config import Settings
from datajunction.typing import ColumnType
def setup_logging(loglevel: str) -> None:
"""
Setup basic logging.
"""
level = getattr(logging, loglevel.upper(), None)
if not isinstance(level, int):
raise ValueError(f"Invalid log level: {loglevel}")
logformat = "[%(asctime)s] %(levelname)s: %(name)s: %(message)s"
logging.basicConfig(
level=level,
format=logformat,
datefmt="[%X]",
handlers=[RichHandler(rich_tracebacks=True)],
force=True,
)
def get_project_repository() -> Path:
"""
Return the project repository.
This is used for unit tests.
"""
return Path(__file__).parent.parent.parent
@lru_cache
def get_settings() -> Settings:
"""
Return a cached settings object.
"""
dotenv_file = os.environ.get("DOTENV_FILE", ".env")
load_dotenv(dotenv_file)
return Settings()
def get_engine() -> Engine:
"""
Create the metadata engine.
"""
settings = get_settings()
engine = create_engine(settings.index)
return engine
def create_db_and_tables() -> None:
"""
Create the database and tables.
"""
engine = get_engine()
SQLModel.metadata.create_all(engine)
def get_session() -> Iterator[Session]:
"""
Per-request session.
"""
engine = get_engine()
with Session(engine, autoflush=False) as session: # pragma: no cover
yield session
def get_name_from_path(repository: Path, path: Path) -> str:
"""
Compute the name of a node given its path and the repository path.
"""
# strip anything before the repository
relative_path = path.relative_to(repository)
if len(relative_path.parts) < 2 or relative_path.parts[0] not in {
"nodes",
"databases",
}:
raise Exception(f"Invalid path: {path}")
# remove the "nodes" directory from the path
relative_path = relative_path.relative_to(relative_path.parts[0])
# remove extension
relative_path = relative_path.with_suffix("")
# encode percent symbols and periods
encoded = (
str(relative_path)
.replace("%", "%25")
.replace(".", "%2E")
.replace(os.path.sep, ".")
)
return encoded
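# --- Illustrative check (added for clarity; not part of the original module):
# how a repository path maps to a node name. The paths are hypothetical and
# assume a POSIX path separator.
def _example_get_name_from_path() -> str:
    repository = Path("/projects/repo")
    path = Path("/projects/repo/nodes/core/user.metrics.yaml")
    # the suffix is dropped, "." becomes "%2E" and the separator becomes "."
    return get_name_from_path(repository, path)  # -> "core.user%2Emetrics"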
def get_more_specific_type(
current_type: Optional[ColumnType],
new_type: ColumnType,
) -> ColumnType:
"""
Given two types, return the most specific one.
Different databases might store the same column as different types. For example, Hive
might store timestamps as strings, while Postgres would store the same data as a
datetime.
>>> get_more_specific_type(ColumnType.STR, ColumnType.DATETIME)
<ColumnType.DATETIME: 'DATETIME'>
>>> get_more_specific_type(ColumnType.STR, ColumnType.INT)
<ColumnType.INT: 'INT'>
"""
if current_type is None:
return new_type
hierarchy = [
ColumnType.BYTES,
ColumnType.STR,
ColumnType.FLOAT,
ColumnType.INT,
ColumnType.DECIMAL,
ColumnType.BOOL,
ColumnType.DATETIME,
ColumnType.DATE,
ColumnType.TIME,
ColumnType.TIMEDELTA,
ColumnType.LIST,
ColumnType.DICT,
]
return sorted([current_type, new_type], key=hierarchy.index)[1]
def get_issue_url(
baseurl: URL = URL("https://github.com/DataJunction/datajunction/issues/new"),
title: Optional[str] = None,
body: Optional[str] = None,
labels: Optional[List[str]] = None,
) -> URL:
"""
Return the URL to file an issue on GitHub.
https://docs.github.com/en/issues/tracking-your-work-with-issues/creating-an-issue#creating-an-issue-from-a-url-query
"""
query_arguments = {
"title": title,
"body": body,
"labels": ",".join(label.strip() for label in labels) if labels else None,
}
query_arguments = {k: v for k, v in query_arguments.items() if v is not None}
return baseurl % query_arguments
|
[
"sqlmodel.create_engine",
"sqlmodel.Session",
"sqlmodel.SQLModel.metadata.create_all"
] |
[((1186, 1223), 'os.environ.get', 'os.environ.get', (['"""DOTENV_FILE"""', '""".env"""'], {}), "('DOTENV_FILE', '.env')\n", (1200, 1223), False, 'import os\n'), ((1228, 1252), 'dotenv.load_dotenv', 'load_dotenv', (['dotenv_file'], {}), '(dotenv_file)\n', (1239, 1252), False, 'from dotenv import load_dotenv\n'), ((1264, 1274), 'datajunction.config.Settings', 'Settings', ([], {}), '()\n', (1272, 1274), False, 'from datajunction.config import Settings\n'), ((1396, 1425), 'sqlmodel.create_engine', 'create_engine', (['settings.index'], {}), '(settings.index)\n', (1409, 1425), False, 'from sqlmodel import Session, SQLModel, create_engine\n'), ((1565, 1601), 'sqlmodel.SQLModel.metadata.create_all', 'SQLModel.metadata.create_all', (['engine'], {}), '(engine)\n', (1593, 1601), False, 'from sqlmodel import Session, SQLModel, create_engine\n'), ((3698, 3760), 'yarl.URL', 'URL', (['"""https://github.com/DataJunction/datajunction/issues/new"""'], {}), "('https://github.com/DataJunction/datajunction/issues/new')\n", (3701, 3760), False, 'from yarl import URL\n'), ((1721, 1753), 'sqlmodel.Session', 'Session', (['engine'], {'autoflush': '(False)'}), '(engine, autoflush=False)\n', (1728, 1753), False, 'from sqlmodel import Session, SQLModel, create_engine\n'), ((836, 869), 'rich.logging.RichHandler', 'RichHandler', ([], {'rich_tracebacks': '(True)'}), '(rich_tracebacks=True)\n', (847, 869), False, 'from rich.logging import RichHandler\n'), ((1034, 1048), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (1038, 1048), False, 'from pathlib import Path\n')]
|
from typing import Any, Dict, List
from uuid import UUID
from sqlalchemy.orm import selectinload
from sqlmodel import select
from sqlmodel.ext.asyncio.session import AsyncSession
from service.crud.base import CRUDBase, ModelType
from service.models.models import Topic, TopicBase, TopicCreate, TopicModel, Word
class CRUDTopic(CRUDBase[Topic, TopicCreate, TopicBase]):
async def get_model_topics(
self, db: AsyncSession, *, model_id: UUID, version: int, with_words: bool = False
) -> List[ModelType]:
statement = (
select(self.model)
.join(TopicModel)
.filter(TopicModel.model_id == model_id, TopicModel.version == version)
.order_by(-self.model.count)
)
if with_words:
statement = statement.options(selectinload(self.model.top_words))
return (await db.execute(statement)).scalars().all()
async def save_topics(
self, db: AsyncSession, *, topics: List[Dict[str, Any]], model: TopicModel
) -> None:
for topic in topics:
db_obj = self.model.from_orm(
TopicCreate.parse_obj({**topic, "topic_model_id": model.id})
)
db.add(db_obj)
for word in topic["top_words"]:
db.add(Word.parse_obj({**word, "topic": db_obj}))
await db.commit()
topic = CRUDTopic(Topic)
|
[
"sqlmodel.select"
] |
[((805, 839), 'sqlalchemy.orm.selectinload', 'selectinload', (['self.model.top_words'], {}), '(self.model.top_words)\n', (817, 839), False, 'from sqlalchemy.orm import selectinload\n'), ((1115, 1175), 'service.models.models.TopicCreate.parse_obj', 'TopicCreate.parse_obj', (["{**topic, 'topic_model_id': model.id}"], {}), "({**topic, 'topic_model_id': model.id})\n", (1136, 1175), False, 'from service.models.models import Topic, TopicBase, TopicCreate, TopicModel, Word\n'), ((1284, 1325), 'service.models.models.Word.parse_obj', 'Word.parse_obj', (["{**word, 'topic': db_obj}"], {}), "({**word, 'topic': db_obj})\n", (1298, 1325), False, 'from service.models.models import Topic, TopicBase, TopicCreate, TopicModel, Word\n'), ((556, 574), 'sqlmodel.select', 'select', (['self.model'], {}), '(self.model)\n', (562, 574), False, 'from sqlmodel import select\n')]
|
from datetime import date
from typing import List
from sqlmodel import select
from config.config_utils import get_managed_teams_config
from src.api.fixtures_client import FixturesClient
from src.db.db_manager import NotifierDBManager
from src.db.notif_sql_models import Fixture as DBFixture
from src.db.notif_sql_models import League as DBLeague
from src.db.notif_sql_models import Team as DBTeam
from src.entities import Championship, Team
from src.utils.fixtures_utils import convert_fixture_response_to_db
NOTIFIER_DB_MANAGER = NotifierDBManager()
def insert_league(fixture_league: Championship) -> DBLeague:
league_statement = select(DBLeague).where(DBLeague.id == fixture_league.league_id)
retrieved_league = NOTIFIER_DB_MANAGER.select_records(league_statement)
if not len(retrieved_league):
league = DBLeague(
id=fixture_league.league_id,
name=fixture_league.name,
logo=fixture_league.logo,
country=fixture_league.country,
)
NOTIFIER_DB_MANAGER.insert_record(league)
retrieved_league = NOTIFIER_DB_MANAGER.select_records(league_statement)
return retrieved_league
def insert_team(fixture_team: Team) -> DBTeam:
team_statement = select(DBTeam).where(DBTeam.id == fixture_team.id)
retrieved_team = NOTIFIER_DB_MANAGER.select_records(team_statement)
if not len(retrieved_team):
team = DBTeam(
id=fixture_team.id,
name=fixture_team.name,
picture=fixture_team.picture,
aliases=fixture_team.aliases,
)
NOTIFIER_DB_MANAGER.insert_record(team)
retrieved_team = NOTIFIER_DB_MANAGER.select_records(team_statement)
return retrieved_team
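# --- Illustrative sketch (added for clarity; not part of the original module):
# the "select, insert if missing, select again" pattern shared by
# insert_league() and insert_team(), written generically. `build_record` is a
# hypothetical callable that builds the row to insert.
def get_or_create(model, record_id, build_record):
    statement = select(model).where(model.id == record_id)
    retrieved = NOTIFIER_DB_MANAGER.select_records(statement)
    if not len(retrieved):
        NOTIFIER_DB_MANAGER.insert_record(build_record())
        retrieved = NOTIFIER_DB_MANAGER.select_records(statement)
    return retrieved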
def save_fixtures(team_fixtures: List[dict]) -> None:
converted_fixtures = []
fix_nr = 1
for fixture in team_fixtures:
print(f"Converting & populating fixture {fix_nr}/{len(team_fixtures)}")
converted_fixtures.append(convert_fixture_response_to_db(fixture))
fix_nr += 1
db_fixtures = []
for conv_fix in converted_fixtures:
retrieved_league = insert_league(conv_fix.championship)
retrieved_home_team = insert_team(conv_fix.home_team)
retrieved_away_team = insert_team(conv_fix.away_team)
fixture_statement = select(DBFixture).where(DBFixture.id == conv_fix.id)
retrieved_fixture = NOTIFIER_DB_MANAGER.select_records(fixture_statement)
if not len(retrieved_fixture):
db_fixture = DBFixture(
id=conv_fix.id,
utc_date=conv_fix.utc_date,
league=retrieved_league.pop().id,
round=conv_fix.round,
home_team=retrieved_home_team.pop().id,
away_team=retrieved_away_team.pop().id,
home_score=conv_fix.match_score.home_score,
away_score=conv_fix.match_score.away_score,
)
else:
db_fixture = retrieved_fixture.pop()
db_fixture.id = conv_fix.id
db_fixture.utc_date = conv_fix.utc_date
db_fixture.league = retrieved_league.pop().id
db_fixture.round = conv_fix.round
db_fixture.home_team = retrieved_home_team.pop().id
db_fixture.away_team = retrieved_away_team.pop().id
db_fixture.home_score = conv_fix.match_score.home_score
db_fixture.away_score = conv_fix.match_score.away_score
db_fixtures.append(db_fixture)
NOTIFIER_DB_MANAGER.insert_records(db_fixtures)
def populate_data(is_initial: bool = False) -> None:
managed_teams = get_managed_teams_config()
fixtures_client = FixturesClient()
current_year = date.today().year
last_year = current_year - 1
for team in managed_teams:
if is_initial:
team_fixtures = fixtures_client.get_fixtures_by(str(last_year), team.id)
if "response" in team_fixtures.as_dict:
save_fixtures(team_fixtures.as_dict["response"])
team_fixtures = fixtures_client.get_fixtures_by(str(current_year), team.id)
if "response" in team_fixtures.as_dict:
save_fixtures(team_fixtures.as_dict["response"])
if __name__ == "__main__":
fixtures = NOTIFIER_DB_MANAGER.select_records(select(DBFixture))
is_initial = True if not len(fixtures) else False
populate_data(is_initial)
|
[
"sqlmodel.select"
] |
[((534, 553), 'src.db.db_manager.NotifierDBManager', 'NotifierDBManager', ([], {}), '()\n', (551, 553), False, 'from src.db.db_manager import NotifierDBManager\n'), ((3634, 3660), 'config.config_utils.get_managed_teams_config', 'get_managed_teams_config', ([], {}), '()\n', (3658, 3660), False, 'from config.config_utils import get_managed_teams_config\n'), ((3683, 3699), 'src.api.fixtures_client.FixturesClient', 'FixturesClient', ([], {}), '()\n', (3697, 3699), False, 'from src.api.fixtures_client import FixturesClient\n'), ((832, 958), 'src.db.notif_sql_models.League', 'DBLeague', ([], {'id': 'fixture_league.league_id', 'name': 'fixture_league.name', 'logo': 'fixture_league.logo', 'country': 'fixture_league.country'}), '(id=fixture_league.league_id, name=fixture_league.name, logo=\n fixture_league.logo, country=fixture_league.country)\n', (840, 958), True, 'from src.db.notif_sql_models import League as DBLeague\n'), ((1413, 1528), 'src.db.notif_sql_models.Team', 'DBTeam', ([], {'id': 'fixture_team.id', 'name': 'fixture_team.name', 'picture': 'fixture_team.picture', 'aliases': 'fixture_team.aliases'}), '(id=fixture_team.id, name=fixture_team.name, picture=fixture_team.\n picture, aliases=fixture_team.aliases)\n', (1419, 1528), True, 'from src.db.notif_sql_models import Team as DBTeam\n'), ((3719, 3731), 'datetime.date.today', 'date.today', ([], {}), '()\n', (3729, 3731), False, 'from datetime import date\n'), ((4300, 4317), 'sqlmodel.select', 'select', (['DBFixture'], {}), '(DBFixture)\n', (4306, 4317), False, 'from sqlmodel import select\n'), ((640, 656), 'sqlmodel.select', 'select', (['DBLeague'], {}), '(DBLeague)\n', (646, 656), False, 'from sqlmodel import select\n'), ((1242, 1256), 'sqlmodel.select', 'select', (['DBTeam'], {}), '(DBTeam)\n', (1248, 1256), False, 'from sqlmodel import select\n'), ((1981, 2020), 'src.utils.fixtures_utils.convert_fixture_response_to_db', 'convert_fixture_response_to_db', (['fixture'], {}), '(fixture)\n', (2011, 2020), False, 'from src.utils.fixtures_utils import convert_fixture_response_to_db\n'), ((2322, 2339), 'sqlmodel.select', 'select', (['DBFixture'], {}), '(DBFixture)\n', (2328, 2339), False, 'from sqlmodel import select\n')]
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import bisect
import os
import time
import pickle
import numpy as np
import megengine.amp as amp
import megengine as mge
import megengine.distributed as dist
from megengine.autodiff import GradManager
from megengine.data import DataLoader, Infinite, RandomSampler
from megengine.data import transform as T
from megengine.optimizer import SGD
import megengine.jit as jit
from tools.utils import (
AverageMeter,
DetectionPadCollator,
GroupedRandomSampler,
PseudoDetectionDataset,
get_config_info,
import_from_file
)
logger = mge.get_logger(__name__)
logger.setLevel("INFO")
mge.device.set_prealloc_config(1024, 1024, 512 * 1024 * 1024, 2.0)
def make_parser():
parser = argparse.ArgumentParser()
parser.add_argument(
"-a", "--arch", type=str, help="model architecture",
)
parser.add_argument(
"-n", "--ngpus", default=1, type=int, help="total number of gpus for training",
)
parser.add_argument(
"-b", "--batch-size", default=2, type=int, help="batchsize for training",
)
parser.add_argument(
"-s", "--steps", default=100, type=int, help="number of train steps (default: 100)",
)
parser.add_argument(
"--trace",
action='store_true',
default=False,
help="whether use trace or not (default: False)",
)
parser.add_argument(
"--preloader",
action='store_true',
default=False,
help="whether use preloader or not (default: False)",
)
parser.add_argument(
"--symbolic",
action='store_true',
default=False,
help="whether use symbolic trace or not (default: False)",
)
parser.add_argument(
"-d", "--loader", default=False, action="store_true", help="use pseudo detection dataset loader",
)
parser.add_argument(
"-p", "--print-freq", default=1, type=int, help="print frequency (default: 1)",
)
parser.add_argument(
"-m",
"--mode",
default="normal",
type=str,
choices=["normal", "mp"],
help="Quantization Mode\n"
"normal: no quantization, using float32\n"
"mp: input type is fp16",
)
parser.add_argument("--preload", default=False, action="store_true", help="whether use preload")
return parser
def main():
parser = make_parser()
args = parser.parse_args()
# ------------------------ begin training -------------------------- #
if args.ngpus > 1:
train_func = dist.launcher(worker, n_gpus=args.ngpus)
train_func(args)
else:
worker(args)
def worker(args):
config_file = {
"faster_rcnn": "configs/faster_rcnn_res50_coco_1x_800size.py",
"atss": "configs/atss_res50_coco_1x_800size.py",
"retinanet": "configs/retinanet_res50_coco_1x_800size.py",
}[args.arch]
current_network = import_from_file(config_file)
model = current_network.Net(current_network.Cfg())
model.train()
# if dist.get_rank() == 0:
# logger.info(get_config_info(model.cfg))
# logger.info(repr(model))
params_with_grad = []
for name, param in model.named_parameters():
if "bottom_up.conv1" in name and model.cfg.backbone_freeze_at >= 1:
continue
if "bottom_up.layer1" in name and model.cfg.backbone_freeze_at >= 2:
continue
params_with_grad.append(param)
opt = SGD(
params_with_grad,
lr=model.cfg.basic_lr * args.batch_size,
momentum=model.cfg.momentum,
weight_decay=model.cfg.weight_decay * dist.get_world_size(),
)
gm = GradManager()
if dist.get_world_size() > 1:
gm.attach(
params_with_grad,
callbacks=[dist.make_allreduce_cb("SUM", dist.WORLD)]
)
else:
gm.attach(params_with_grad)
if dist.get_world_size() > 1:
dist.bcast_list_(model.parameters(), dist.WORLD) # sync parameters
train_loader = None
for epoch in range(1):
train_one_epoch(model, train_loader, opt, gm, epoch, args)
def train_one_epoch(model, data_queue, opt, gm, epoch, args):
@amp.autocast(enabled=args.mode == "mp")
def train_func(image, im_info, gt_boxes):
with gm:
loss_dict = model(image=image, im_info=im_info, gt_boxes=gt_boxes)
gm.backward(loss_dict["total_loss"])
loss_list = list(loss_dict.values())
opt.step().clear_grad()
return loss_list
if args.trace:
if args.symbolic:
train_func = jit.trace(train_func, symbolic=True, sublinear_memory_config=jit.SublinearMemoryConfig(genetic_nr_iter=50), symbolic_shape=True)
else:
train_func = jit.trace(train_func, symbolic=False, symbolic_shape=False)
else:
        assert args.symbolic == False, "invalid arguments: trace=False, symbolic=True"
loss_meter = AverageMeter(record_len=model.cfg.num_losses)
time_meter = AverageMeter(record_len=2)
log_interval = model.cfg.log_interval
tot_step = model.cfg.nr_images_epoch // (args.batch_size * dist.get_world_size())
file_dir = os.path.dirname(os.path.abspath(__file__))
data_path = os.path.join(file_dir, 'batch.pkl') # batch_size for batch.pkl is 2
mini_batch = pickle.load(open(data_path, "rb"))
if args.batch_size != 2:
repeats = (args.batch_size+1) // 2
mini_batch['data'] = np.concatenate([mini_batch['data'],]*repeats)[:args.batch_size]
mini_batch['im_info'] = np.concatenate([mini_batch['im_info'],]*repeats)[:args.batch_size]
mini_batch['gt_boxes'] = np.concatenate([mini_batch['gt_boxes'],]*repeats)[:args.batch_size]
# warm up
for step in range(10):
if data_queue:
mini_batch = next(data_queue)
loss_list = train_func(
image=mge.tensor(mini_batch["data"]),
im_info=mge.tensor(mini_batch["im_info"]),
gt_boxes=mge.tensor(mini_batch["gt_boxes"])
)
_ = [loss.numpy() for loss in loss_list]
for step in range(args.steps):
tik = time.time()
if data_queue:
mini_batch = next(data_queue)
data_tok = time.time()
loss_list = train_func(
image=mge.tensor(mini_batch["data"]),
im_info=mge.tensor(mini_batch["im_info"]),
gt_boxes=mge.tensor(mini_batch["gt_boxes"])
)
loss_meter.update([loss.numpy().item() for loss in loss_list])
tok = time.time()
time_meter.update([tok - tik, data_tok - tik])
if step % args.print_freq == 0 and dist.get_rank() == 0:
print(
"Step {}, Loss ({}), Time (tot:{:.3f}, data:{:.3f})".format(
step,
"".join(["{:.3f} ".format(t) for t in loss_meter.average()]),
*time_meter.average(),
))
loss_meter.reset()
if dist.get_rank() == 0:
print("="*20, "summary", "="*20)
print(" benchmark: detection")
if args.trace:
print(" mode: trace(symbolic={})".format("True, sublinear=True" if args.symbolic else "False"))
else:
print(" mode: imperative")
print(" loader: {}".format("" if not args.loader else "--loader"))
if args.loader:
print(" preload: {}".format("" if not args.preload else "--preload"))
print(" arch: {}".format(args.arch))
print("train_mode: {}".format(args.mode))
print(" batchsize: {}".format(args.batch_size))
print(" #GPU: {}".format(args.ngpus))
print(" avg time: {:.3f} seconds".format(time_meter.average()[0]))
# pylint: disable=unused-argument
def build_dataset(dataset_dir, cfg):
return PseudoDetectionDataset(order=["image", "boxes", "boxes_category", "info"])
# pylint: disable=dangerous-default-value
def build_sampler(train_dataset, batch_size, aspect_grouping=[1]):
def _compute_aspect_ratios(dataset):
aspect_ratios = []
for i in range(len(dataset)):
info = dataset.get_img_info(i)
aspect_ratios.append(info["height"] / info["width"])
return aspect_ratios
def _quantize(x, bins):
return list(map(lambda y: bisect.bisect_right(sorted(bins), y), x))
if len(aspect_grouping) == 0:
return Infinite(RandomSampler(train_dataset, batch_size, drop_last=True))
aspect_ratios = _compute_aspect_ratios(train_dataset)
group_ids = _quantize(aspect_ratios, aspect_grouping)
return Infinite(GroupedRandomSampler(train_dataset, batch_size, group_ids))
def build_dataloader(batch_size, dataset_dir, cfg, preloader= False):
train_dataset = build_dataset(dataset_dir, cfg)
train_sampler = build_sampler(train_dataset, batch_size)
train_dataloader = DataLoader(
train_dataset,
sampler=train_sampler,
transform=T.Compose(
transforms=[
T.ShortestEdgeResize(
cfg.train_image_short_size,
cfg.train_image_max_size,
sample_style="choice",
),
T.RandomHorizontalFlip(),
T.ToMode(),
],
order=["image", "boxes", "boxes_category"],
),
collator=DetectionPadCollator(),
num_workers=8,
preload= preloader,
)
return train_dataloader
if __name__ == "__main__":
main()
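# Illustrative invocation sketch (the script file name is hypothetical; the flags
# are the ones defined by make_parser() above):
#
#     python train_detection_benchmark.py -a retinanet -n 1 -b 2 -s 100 --trace --symbolic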
|
[
"megengine.data.transform.ToMode",
"megengine.amp.autocast",
"megengine.device.set_prealloc_config",
"megengine.distributed.get_rank",
"megengine.distributed.get_world_size",
"megengine.get_logger",
"megengine.distributed.make_allreduce_cb",
"megengine.data.transform.RandomHorizontalFlip",
"megengine.jit.trace",
"megengine.jit.SublinearMemoryConfig",
"megengine.tensor",
"megengine.data.RandomSampler",
"megengine.data.transform.ShortestEdgeResize",
"megengine.autodiff.GradManager",
"megengine.distributed.launcher"
] |
[((942, 966), 'megengine.get_logger', 'mge.get_logger', (['__name__'], {}), '(__name__)\n', (956, 966), True, 'import megengine as mge\n'), ((991, 1057), 'megengine.device.set_prealloc_config', 'mge.device.set_prealloc_config', (['(1024)', '(1024)', '(512 * 1024 * 1024)', '(2.0)'], {}), '(1024, 1024, 512 * 1024 * 1024, 2.0)\n', (1021, 1057), True, 'import megengine as mge\n'), ((1092, 1117), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1115, 1117), False, 'import argparse\n'), ((3265, 3294), 'tools.utils.import_from_file', 'import_from_file', (['config_file'], {}), '(config_file)\n', (3281, 3294), False, 'from tools.utils import AverageMeter, DetectionPadCollator, GroupedRandomSampler, PseudoDetectionDataset, get_config_info, import_from_file\n'), ((4010, 4023), 'megengine.autodiff.GradManager', 'GradManager', ([], {}), '()\n', (4021, 4023), False, 'from megengine.autodiff import GradManager\n'), ((4528, 4567), 'megengine.amp.autocast', 'amp.autocast', ([], {'enabled': "(args.mode == 'mp')"}), "(enabled=args.mode == 'mp')\n", (4540, 4567), True, 'import megengine.amp as amp\n'), ((5277, 5322), 'tools.utils.AverageMeter', 'AverageMeter', ([], {'record_len': 'model.cfg.num_losses'}), '(record_len=model.cfg.num_losses)\n', (5289, 5322), False, 'from tools.utils import AverageMeter, DetectionPadCollator, GroupedRandomSampler, PseudoDetectionDataset, get_config_info, import_from_file\n'), ((5340, 5366), 'tools.utils.AverageMeter', 'AverageMeter', ([], {'record_len': '(2)'}), '(record_len=2)\n', (5352, 5366), False, 'from tools.utils import AverageMeter, DetectionPadCollator, GroupedRandomSampler, PseudoDetectionDataset, get_config_info, import_from_file\n'), ((5571, 5606), 'os.path.join', 'os.path.join', (['file_dir', '"""batch.pkl"""'], {}), "(file_dir, 'batch.pkl')\n", (5583, 5606), False, 'import os\n'), ((8136, 8210), 'tools.utils.PseudoDetectionDataset', 'PseudoDetectionDataset', ([], {'order': "['image', 'boxes', 'boxes_category', 'info']"}), "(order=['image', 'boxes', 'boxes_category', 'info'])\n", (8158, 8210), False, 'from tools.utils import AverageMeter, DetectionPadCollator, GroupedRandomSampler, PseudoDetectionDataset, get_config_info, import_from_file\n'), ((2893, 2933), 'megengine.distributed.launcher', 'dist.launcher', (['worker'], {'n_gpus': 'args.ngpus'}), '(worker, n_gpus=args.ngpus)\n', (2906, 2933), True, 'import megengine.distributed as dist\n'), ((4031, 4052), 'megengine.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (4050, 4052), True, 'import megengine.distributed as dist\n'), ((4237, 4258), 'megengine.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (4256, 4258), True, 'import megengine.distributed as dist\n'), ((5528, 5553), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (5543, 5553), False, 'import os\n'), ((6466, 6477), 'time.time', 'time.time', ([], {}), '()\n', (6475, 6477), False, 'import time\n'), ((6562, 6573), 'time.time', 'time.time', ([], {}), '()\n', (6571, 6573), False, 'import time\n'), ((6864, 6875), 'time.time', 'time.time', ([], {}), '()\n', (6873, 6875), False, 'import time\n'), ((7287, 7302), 'megengine.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (7300, 7302), True, 'import megengine.distributed as dist\n'), ((8924, 8982), 'tools.utils.GroupedRandomSampler', 'GroupedRandomSampler', (['train_dataset', 'batch_size', 'group_ids'], {}), '(train_dataset, batch_size, group_ids)\n', (8944, 8982), False, 'from tools.utils import AverageMeter, 
DetectionPadCollator, GroupedRandomSampler, PseudoDetectionDataset, get_config_info, import_from_file\n'), ((5104, 5163), 'megengine.jit.trace', 'jit.trace', (['train_func'], {'symbolic': '(False)', 'symbolic_shape': '(False)'}), '(train_func, symbolic=False, symbolic_shape=False)\n', (5113, 5163), True, 'import megengine.jit as jit\n'), ((5473, 5494), 'megengine.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (5492, 5494), True, 'import megengine.distributed as dist\n'), ((5793, 5839), 'numpy.concatenate', 'np.concatenate', (["([mini_batch['data']] * repeats)"], {}), "([mini_batch['data']] * repeats)\n", (5807, 5839), True, 'import numpy as np\n'), ((5889, 5938), 'numpy.concatenate', 'np.concatenate', (["([mini_batch['im_info']] * repeats)"], {}), "([mini_batch['im_info']] * repeats)\n", (5903, 5938), True, 'import numpy as np\n'), ((5989, 6039), 'numpy.concatenate', 'np.concatenate', (["([mini_batch['gt_boxes']] * repeats)"], {}), "([mini_batch['gt_boxes']] * repeats)\n", (6003, 6039), True, 'import numpy as np\n'), ((8729, 8785), 'megengine.data.RandomSampler', 'RandomSampler', (['train_dataset', 'batch_size'], {'drop_last': '(True)'}), '(train_dataset, batch_size, drop_last=True)\n', (8742, 8785), False, 'from megengine.data import DataLoader, Infinite, RandomSampler\n'), ((9675, 9697), 'tools.utils.DetectionPadCollator', 'DetectionPadCollator', ([], {}), '()\n', (9695, 9697), False, 'from tools.utils import AverageMeter, DetectionPadCollator, GroupedRandomSampler, PseudoDetectionDataset, get_config_info, import_from_file\n'), ((3971, 3992), 'megengine.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (3990, 3992), True, 'import megengine.distributed as dist\n'), ((6214, 6244), 'megengine.tensor', 'mge.tensor', (["mini_batch['data']"], {}), "(mini_batch['data'])\n", (6224, 6244), True, 'import megengine as mge\n'), ((6266, 6299), 'megengine.tensor', 'mge.tensor', (["mini_batch['im_info']"], {}), "(mini_batch['im_info'])\n", (6276, 6299), True, 'import megengine as mge\n'), ((6322, 6356), 'megengine.tensor', 'mge.tensor', (["mini_batch['gt_boxes']"], {}), "(mini_batch['gt_boxes'])\n", (6332, 6356), True, 'import megengine as mge\n'), ((6625, 6655), 'megengine.tensor', 'mge.tensor', (["mini_batch['data']"], {}), "(mini_batch['data'])\n", (6635, 6655), True, 'import megengine as mge\n'), ((6677, 6710), 'megengine.tensor', 'mge.tensor', (["mini_batch['im_info']"], {}), "(mini_batch['im_info'])\n", (6687, 6710), True, 'import megengine as mge\n'), ((6733, 6767), 'megengine.tensor', 'mge.tensor', (["mini_batch['gt_boxes']"], {}), "(mini_batch['gt_boxes'])\n", (6743, 6767), True, 'import megengine as mge\n'), ((6976, 6991), 'megengine.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (6989, 6991), True, 'import megengine.distributed as dist\n'), ((4130, 4171), 'megengine.distributed.make_allreduce_cb', 'dist.make_allreduce_cb', (['"""SUM"""', 'dist.WORLD'], {}), "('SUM', dist.WORLD)\n", (4152, 4171), True, 'import megengine.distributed as dist\n'), ((4997, 5042), 'megengine.jit.SublinearMemoryConfig', 'jit.SublinearMemoryConfig', ([], {'genetic_nr_iter': '(50)'}), '(genetic_nr_iter=50)\n', (5022, 5042), True, 'import megengine.jit as jit\n'), ((9328, 9429), 'megengine.data.transform.ShortestEdgeResize', 'T.ShortestEdgeResize', (['cfg.train_image_short_size', 'cfg.train_image_max_size'], {'sample_style': '"""choice"""'}), "(cfg.train_image_short_size, cfg.train_image_max_size,\n sample_style='choice')\n", (9348, 9429), True, 'from 
megengine.data import transform as T\n'), ((9522, 9546), 'megengine.data.transform.RandomHorizontalFlip', 'T.RandomHorizontalFlip', ([], {}), '()\n', (9544, 9546), True, 'from megengine.data import transform as T\n'), ((9564, 9574), 'megengine.data.transform.ToMode', 'T.ToMode', ([], {}), '()\n', (9572, 9574), True, 'from megengine.data import transform as T\n')]
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import copy
import itertools
import os
from typing import Callable
import numpy as np
import pytest
import megengine as mge
import megengine.module.init as init
from megengine.core import tensor
from megengine.functional import cross_entropy_with_softmax, relu
from megengine.jit import trace
from megengine.module import Linear, Module
from megengine.optimizer import SGD, Optimizer
from megengine.test import assertTensorClose
batch_size = 64
data_shape = (batch_size, 2)
label_shape = (batch_size,)
def minibatch_generator():
while True:
inp_data = np.zeros((batch_size, 2))
label = np.zeros(batch_size, dtype=np.int32)
for i in range(batch_size):
# [x0, x1], sampled from U[-1, 1]
inp_data[i, :] = np.random.rand(2) * 2 - 1
label[i] = 0 if np.prod(inp_data[i]) < 0 else 1
yield inp_data.astype(np.float32), label.astype(np.int32)
class SimpleNet(Module):
def __init__(self):
self.mid_layers = 14
self.num_class = 2
super().__init__()
self.fc0 = Linear(self.num_class, self.mid_layers, bias=True)
fan_in, _ = init.calculate_fan_in_and_fan_out(self.fc0.weight)
init.normal_(self.fc0.weight, std=np.sqrt(float(1.0) / fan_in))
init.zeros_(self.fc0.bias)
self.fc1 = Linear(self.mid_layers, self.mid_layers, bias=True)
fan_in, _ = init.calculate_fan_in_and_fan_out(self.fc1.weight)
init.normal_(self.fc1.weight, std=np.sqrt(float(1.0) / fan_in))
init.zeros_(self.fc1.bias)
self.fc2 = Linear(self.mid_layers, self.num_class, bias=True)
fan_in, _ = init.calculate_fan_in_and_fan_out(self.fc2.weight)
init.normal_(self.fc2.weight, std=np.sqrt(float(1.0) / fan_in))
init.zeros_(self.fc2.bias)
def forward(self, x):
x = self.fc0(x)
x = relu(x) # Should use tanh but it's not stable now.
x = self.fc1(x)
x = relu(x) # Should use tanh but it's not stable now.
x = self.fc2(x)
return x
def generate_eager_step(net: Module, opt_factory: Callable[[Module], Optimizer]):
data_inp = tensor(np.zeros(data_shape), dtype=np.float32)
label_inp = tensor(np.zeros(label_shape), dtype=np.int32)
opt = opt_factory(net)
def step(data, label):
opt.zero_grad()
data_inp.set_value(data)
label_inp.set_value(label)
pred = net(data_inp)
loss = cross_entropy_with_softmax(pred, label_inp)
opt.backward(loss)
opt.step()
return loss.numpy()[0]
return step
def generate_static_step(net: Module, opt_factory: Callable[[Module], Optimizer]):
data = tensor(np.zeros(data_shape), dtype=np.float32)
label = tensor(np.zeros(label_shape), dtype=np.int32)
opt = opt_factory(net)
# Save state to reset parameters later.
state = copy.deepcopy(net.state_dict())
# Evaluate network in eager mode once.
pred = net(data)
loss = cross_entropy_with_softmax(pred, label)
opt.zero_grad()
grads = opt.backward(loss)
f = mge.graph.compile(loss, grads)
def step(data, label):
opt.zero_grad()
out = f(data=data, label=label)
opt.step()
loss = out[0][0]
return loss
# Reset parameters.
net.load_state_dict(state)
return step
def generate_trace_step(
net: Module, opt_factory: Callable[[Module], Optimizer], enable: bool
):
opt = opt_factory(net)
@trace
def train(data, label):
pred = net(data)
loss = cross_entropy_with_softmax(pred, label)
opt.zero_grad()
opt.backward(loss)
return loss
train.enabled = enable
def step(data, label):
out = train(data, label)
opt.step()
loss = out[0][0]
return loss
return step
def assert_network_equvilence(nets):
net_state = [net.state_dict() for net in nets]
for state in net_state[1:]:
assert len(net_state[0]) == len(state)
for k, v in net_state[0].items():
for state in net_state[1:]:
assert k in state
assertTensorClose(v, state[k])
@pytest.mark.slow
def test_eager_equvilence():
eager_net = SimpleNet()
trace_enable_net = copy.deepcopy(eager_net)
trace_disable_net = copy.deepcopy(eager_net)
opt_factory = lambda net: SGD(
net.parameters(requires_grad=True), lr=0.01, momentum=0.01
)
estep = generate_eager_step(eager_net, opt_factory)
te_step = generate_trace_step(trace_enable_net, opt_factory, True)
td_step = generate_trace_step(trace_disable_net, opt_factory, False)
assert_network_equvilence([eager_net, trace_enable_net, trace_disable_net])
# Use hard code number as limit, may increase if needed.
for data, label in itertools.islice(minibatch_generator(), 200):
eloss = estep(data, label)
te_loss = te_step(data, label)
td_loss = td_step(data, label)
assertTensorClose(eloss, te_loss)
assertTensorClose(eloss, td_loss)
assert_network_equvilence(
[eager_net, trace_enable_net, trace_disable_net,]
)
|
[
"megengine.test.assertTensorClose",
"megengine.graph.compile",
"megengine.module.init.calculate_fan_in_and_fan_out",
"megengine.module.Linear",
"megengine.module.init.zeros_",
"megengine.functional.cross_entropy_with_softmax",
"megengine.functional.relu"
] |
[((3348, 3387), 'megengine.functional.cross_entropy_with_softmax', 'cross_entropy_with_softmax', (['pred', 'label'], {}), '(pred, label)\n', (3374, 3387), False, 'from megengine.functional import cross_entropy_with_softmax, relu\n'), ((3448, 3478), 'megengine.graph.compile', 'mge.graph.compile', (['loss', 'grads'], {}), '(loss, grads)\n', (3465, 3478), True, 'import megengine as mge\n'), ((4617, 4641), 'copy.deepcopy', 'copy.deepcopy', (['eager_net'], {}), '(eager_net)\n', (4630, 4641), False, 'import copy\n'), ((4666, 4690), 'copy.deepcopy', 'copy.deepcopy', (['eager_net'], {}), '(eager_net)\n', (4679, 4690), False, 'import copy\n'), ((946, 971), 'numpy.zeros', 'np.zeros', (['(batch_size, 2)'], {}), '((batch_size, 2))\n', (954, 971), True, 'import numpy as np\n'), ((988, 1024), 'numpy.zeros', 'np.zeros', (['batch_size'], {'dtype': 'np.int32'}), '(batch_size, dtype=np.int32)\n', (996, 1024), True, 'import numpy as np\n'), ((1444, 1494), 'megengine.module.Linear', 'Linear', (['self.num_class', 'self.mid_layers'], {'bias': '(True)'}), '(self.num_class, self.mid_layers, bias=True)\n', (1450, 1494), False, 'from megengine.module import Linear, Module\n'), ((1515, 1565), 'megengine.module.init.calculate_fan_in_and_fan_out', 'init.calculate_fan_in_and_fan_out', (['self.fc0.weight'], {}), '(self.fc0.weight)\n', (1548, 1565), True, 'import megengine.module.init as init\n'), ((1646, 1672), 'megengine.module.init.zeros_', 'init.zeros_', (['self.fc0.bias'], {}), '(self.fc0.bias)\n', (1657, 1672), True, 'import megengine.module.init as init\n'), ((1693, 1744), 'megengine.module.Linear', 'Linear', (['self.mid_layers', 'self.mid_layers'], {'bias': '(True)'}), '(self.mid_layers, self.mid_layers, bias=True)\n', (1699, 1744), False, 'from megengine.module import Linear, Module\n'), ((1765, 1815), 'megengine.module.init.calculate_fan_in_and_fan_out', 'init.calculate_fan_in_and_fan_out', (['self.fc1.weight'], {}), '(self.fc1.weight)\n', (1798, 1815), True, 'import megengine.module.init as init\n'), ((1896, 1922), 'megengine.module.init.zeros_', 'init.zeros_', (['self.fc1.bias'], {}), '(self.fc1.bias)\n', (1907, 1922), True, 'import megengine.module.init as init\n'), ((1943, 1993), 'megengine.module.Linear', 'Linear', (['self.mid_layers', 'self.num_class'], {'bias': '(True)'}), '(self.mid_layers, self.num_class, bias=True)\n', (1949, 1993), False, 'from megengine.module import Linear, Module\n'), ((2014, 2064), 'megengine.module.init.calculate_fan_in_and_fan_out', 'init.calculate_fan_in_and_fan_out', (['self.fc2.weight'], {}), '(self.fc2.weight)\n', (2047, 2064), True, 'import megengine.module.init as init\n'), ((2145, 2171), 'megengine.module.init.zeros_', 'init.zeros_', (['self.fc2.bias'], {}), '(self.fc2.bias)\n', (2156, 2171), True, 'import megengine.module.init as init\n'), ((2235, 2242), 'megengine.functional.relu', 'relu', (['x'], {}), '(x)\n', (2239, 2242), False, 'from megengine.functional import cross_entropy_with_softmax, relu\n'), ((2323, 2330), 'megengine.functional.relu', 'relu', (['x'], {}), '(x)\n', (2327, 2330), False, 'from megengine.functional import cross_entropy_with_softmax, relu\n'), ((2522, 2542), 'numpy.zeros', 'np.zeros', (['data_shape'], {}), '(data_shape)\n', (2530, 2542), True, 'import numpy as np\n'), ((2585, 2606), 'numpy.zeros', 'np.zeros', (['label_shape'], {}), '(label_shape)\n', (2593, 2606), True, 'import numpy as np\n'), ((2816, 2859), 'megengine.functional.cross_entropy_with_softmax', 'cross_entropy_with_softmax', (['pred', 'label_inp'], {}), '(pred, label_inp)\n', (2842, 
2859), False, 'from megengine.functional import cross_entropy_with_softmax, relu\n'), ((3058, 3078), 'numpy.zeros', 'np.zeros', (['data_shape'], {}), '(data_shape)\n', (3066, 3078), True, 'import numpy as np\n'), ((3117, 3138), 'numpy.zeros', 'np.zeros', (['label_shape'], {}), '(label_shape)\n', (3125, 3138), True, 'import numpy as np\n'), ((3918, 3957), 'megengine.functional.cross_entropy_with_softmax', 'cross_entropy_with_softmax', (['pred', 'label'], {}), '(pred, label)\n', (3944, 3957), False, 'from megengine.functional import cross_entropy_with_softmax, relu\n'), ((5335, 5368), 'megengine.test.assertTensorClose', 'assertTensorClose', (['eloss', 'te_loss'], {}), '(eloss, te_loss)\n', (5352, 5368), False, 'from megengine.test import assertTensorClose\n'), ((5377, 5410), 'megengine.test.assertTensorClose', 'assertTensorClose', (['eloss', 'td_loss'], {}), '(eloss, td_loss)\n', (5394, 5410), False, 'from megengine.test import assertTensorClose\n'), ((4486, 4516), 'megengine.test.assertTensorClose', 'assertTensorClose', (['v', 'state[k]'], {}), '(v, state[k])\n', (4503, 4516), False, 'from megengine.test import assertTensorClose\n'), ((1137, 1154), 'numpy.random.rand', 'np.random.rand', (['(2)'], {}), '(2)\n', (1151, 1154), True, 'import numpy as np\n'), ((1191, 1211), 'numpy.prod', 'np.prod', (['inp_data[i]'], {}), '(inp_data[i])\n', (1198, 1211), True, 'import numpy as np\n')]
|
"""Movie models."""
import datetime
import typing
import pydantic
import sqlmodel
from app.models import mixins
from app.models import validators
if typing.TYPE_CHECKING:
from app.models.patron import Patron, PatronRead
class MovieBase(sqlmodel.SQLModel, mixins.ProposalMixin, mixins.LinksMixin):
"""Base Movie model."""
title_orig: str
title_en: str
title_it: str | None
release_date: datetime.date | None
running_time: pydantic.PositiveInt | None
notes: str | None
_normalize_title = pydantic.validator("title_orig",
"title_en",
"title_it",
allow_reuse=True)(
validators.normalize_title)
class Movie(MovieBase, mixins.TimestampsMixin, mixins.BaseMixin, table=True):
"""Movie database model."""
patron: "Patron" = sqlmodel.Relationship(
back_populates="movies", sa_relationship_kwargs={"lazy": "selectin"})
class MovieCreate(MovieBase):
"""Movie create model."""
class MovieRead(MovieBase):
"""Movie base model."""
id: pydantic.UUID4
class MovieReadWithPatron(MovieRead):
"""Movie read model with related patron."""
patron: "PatronRead" = None
class MovieUpdate(sqlmodel.SQLModel, mixins.LinksMixin):
"""Movie update model."""
# TODO: Set default to None when
# https://github.com/tiangolo/sqlmodel/issues/230 is resolved.
title_orig: str | None = ""
title_en: str | None = ""
title_it: str | None = ""
release_date: datetime.date | None = None
running_time: pydantic.PositiveInt | None = None
notes: str | None = None
_normalize_title = pydantic.validator("title_orig",
"title_en",
"title_it",
allow_reuse=True)(
validators.normalize_title)
|
[
"sqlmodel.Relationship"
] |
[((941, 1037), 'sqlmodel.Relationship', 'sqlmodel.Relationship', ([], {'back_populates': '"""movies"""', 'sa_relationship_kwargs': "{'lazy': 'selectin'}"}), "(back_populates='movies', sa_relationship_kwargs={\n 'lazy': 'selectin'})\n", (962, 1037), False, 'import sqlmodel\n'), ((529, 603), 'pydantic.validator', 'pydantic.validator', (['"""title_orig"""', '"""title_en"""', '"""title_it"""'], {'allow_reuse': '(True)'}), "('title_orig', 'title_en', 'title_it', allow_reuse=True)\n", (547, 603), False, 'import pydantic\n'), ((1742, 1816), 'pydantic.validator', 'pydantic.validator', (['"""title_orig"""', '"""title_en"""', '"""title_it"""'], {'allow_reuse': '(True)'}), "('title_orig', 'title_en', 'title_it', allow_reuse=True)\n", (1760, 1816), False, 'import pydantic\n')]
|
from datetime import datetime
from typing import TYPE_CHECKING, Dict, List, Optional
from pydantic import root_validator
from sqlalchemy import Column
from sqlmodel import Field, Relationship, SQLModel
from .event_attendance import EventAttendance
from .types import TimeStamp
if TYPE_CHECKING:
from .feedback import Feedback, FeedbackList
from .participant import Participant, ParticipantList
class EventBase(SQLModel):
name: str
code: str
valid_from: datetime = Field(
sa_column=Column(
TimeStamp(timezone=True),
default=datetime.now,
nullable=False,
)
)
valid_until: datetime = Field(
sa_column=Column(
TimeStamp(timezone=True),
default=datetime.now,
nullable=False,
)
)
enabled: bool = Field(default=True, nullable=False)
@root_validator()
def range_is_positive(cls, values: Dict[str, datetime]):
start, end = values.get("valid_from"), values.get("valid_until")
if start is None or end is None:
return values
elif end < start:
raise ValueError("validity range cannot be negative")
return values
class Event(EventBase, table=True):
__tablename__ = "events"
id: Optional[int] = Field(default=None, primary_key=True, nullable=False)
attendees: List["Participant"] = Relationship(
back_populates="attended",
link_model=EventAttendance,
)
feedback: List["Feedback"] = Relationship(
back_populates="event",
sa_relationship_kwargs={"cascade": "all, delete, delete-orphan"},
)
class EventCreate(SQLModel):
name: str
valid_from: datetime
valid_until: datetime
class EventList(SQLModel):
id: int
name: str
code: str
enabled: bool
class EventRead(EventBase):
id: int
feedback: List["FeedbackList"]
attendees: List["ParticipantList"]
class EventUpdate(SQLModel):
name: Optional[str]
valid_from: Optional[datetime]
valid_until: Optional[datetime]
enabled: Optional[bool]
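# Illustrative sketch (not part of the original module): the root_validator on
# EventBase rejects inverted validity ranges, e.g.
#
#     EventBase(name="Demo", code="demo",
#               valid_from=datetime(2022, 1, 2),
#               valid_until=datetime(2022, 1, 1))
#
# raises a ValidationError ("validity range cannot be negative"). Note that
# SQLModel models declared with table=True (such as Event) skip pydantic
# validation on instantiation, so the check applies to the non-table models.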
|
[
"sqlmodel.Field",
"sqlmodel.Relationship"
] |
[((838, 873), 'sqlmodel.Field', 'Field', ([], {'default': '(True)', 'nullable': '(False)'}), '(default=True, nullable=False)\n', (843, 873), False, 'from sqlmodel import Field, Relationship, SQLModel\n'), ((880, 896), 'pydantic.root_validator', 'root_validator', ([], {}), '()\n', (894, 896), False, 'from pydantic import root_validator\n'), ((1305, 1358), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)', 'nullable': '(False)'}), '(default=None, primary_key=True, nullable=False)\n', (1310, 1358), False, 'from sqlmodel import Field, Relationship, SQLModel\n'), ((1397, 1464), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""attended"""', 'link_model': 'EventAttendance'}), "(back_populates='attended', link_model=EventAttendance)\n", (1409, 1464), False, 'from sqlmodel import Field, Relationship, SQLModel\n'), ((1521, 1627), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""event"""', 'sa_relationship_kwargs': "{'cascade': 'all, delete, delete-orphan'}"}), "(back_populates='event', sa_relationship_kwargs={'cascade':\n 'all, delete, delete-orphan'})\n", (1533, 1627), False, 'from sqlmodel import Field, Relationship, SQLModel\n')]
|
"""Initial
Revision ID: ec941f1f8242
Revises: <PASSWORD>
Create Date: 2021-10-10 18:34:18.294594
"""
from alembic import op
import sqlalchemy as sa
import sqlmodel
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = 'd<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('plant',
sa.Column('id', sa.Integer(), nullable=True),
sa.Column('name', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_plant_id'), 'plant', ['id'], unique=False)
op.create_index(op.f('ix_plant_name'), 'plant', ['name'], unique=False)
op.drop_index('ix_hero_age', table_name='hero')
op.drop_index('ix_hero_id', table_name='hero')
op.drop_index('ix_hero_name', table_name='hero')
op.drop_index('ix_hero_secret_name', table_name='hero')
op.drop_table('hero')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('hero',
sa.Column('birth_date', sa.DATETIME(), nullable=True),
sa.Column('id', sa.INTEGER(), nullable=True),
sa.Column('name', sa.VARCHAR(), nullable=False),
sa.Column('secret_name', sa.VARCHAR(), nullable=False),
sa.Column('age', sa.INTEGER(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index('ix_hero_secret_name', 'hero', ['secret_name'], unique=False)
op.create_index('ix_hero_name', 'hero', ['name'], unique=False)
op.create_index('ix_hero_id', 'hero', ['id'], unique=False)
op.create_index('ix_hero_age', 'hero', ['age'], unique=False)
op.drop_index(op.f('ix_plant_name'), table_name='plant')
op.drop_index(op.f('ix_plant_id'), table_name='plant')
op.drop_table('plant')
# ### end Alembic commands ###
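# Illustrative usage (standard Alembic CLI, not specific to this revision file):
#
#     alembic upgrade head     # apply this migration
#     alembic downgrade -1     # roll back the most recent migration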
|
[
"sqlmodel.sql.sqltypes.AutoString"
] |
[((726, 773), 'alembic.op.drop_index', 'op.drop_index', (['"""ix_hero_age"""'], {'table_name': '"""hero"""'}), "('ix_hero_age', table_name='hero')\n", (739, 773), False, 'from alembic import op\n'), ((778, 824), 'alembic.op.drop_index', 'op.drop_index', (['"""ix_hero_id"""'], {'table_name': '"""hero"""'}), "('ix_hero_id', table_name='hero')\n", (791, 824), False, 'from alembic import op\n'), ((829, 877), 'alembic.op.drop_index', 'op.drop_index', (['"""ix_hero_name"""'], {'table_name': '"""hero"""'}), "('ix_hero_name', table_name='hero')\n", (842, 877), False, 'from alembic import op\n'), ((882, 937), 'alembic.op.drop_index', 'op.drop_index', (['"""ix_hero_secret_name"""'], {'table_name': '"""hero"""'}), "('ix_hero_secret_name', table_name='hero')\n", (895, 937), False, 'from alembic import op\n'), ((942, 963), 'alembic.op.drop_table', 'op.drop_table', (['"""hero"""'], {}), "('hero')\n", (955, 963), False, 'from alembic import op\n'), ((1429, 1506), 'alembic.op.create_index', 'op.create_index', (['"""ix_hero_secret_name"""', '"""hero"""', "['secret_name']"], {'unique': '(False)'}), "('ix_hero_secret_name', 'hero', ['secret_name'], unique=False)\n", (1444, 1506), False, 'from alembic import op\n'), ((1511, 1574), 'alembic.op.create_index', 'op.create_index', (['"""ix_hero_name"""', '"""hero"""', "['name']"], {'unique': '(False)'}), "('ix_hero_name', 'hero', ['name'], unique=False)\n", (1526, 1574), False, 'from alembic import op\n'), ((1579, 1638), 'alembic.op.create_index', 'op.create_index', (['"""ix_hero_id"""', '"""hero"""', "['id']"], {'unique': '(False)'}), "('ix_hero_id', 'hero', ['id'], unique=False)\n", (1594, 1638), False, 'from alembic import op\n'), ((1643, 1704), 'alembic.op.create_index', 'op.create_index', (['"""ix_hero_age"""', '"""hero"""', "['age']"], {'unique': '(False)'}), "('ix_hero_age', 'hero', ['age'], unique=False)\n", (1658, 1704), False, 'from alembic import op\n'), ((1829, 1851), 'alembic.op.drop_table', 'op.drop_table', (['"""plant"""'], {}), "('plant')\n", (1842, 1851), False, 'from alembic import op\n'), ((538, 567), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""'], {}), "('id')\n", (561, 567), True, 'import sqlalchemy as sa\n'), ((594, 613), 'alembic.op.f', 'op.f', (['"""ix_plant_id"""'], {}), "('ix_plant_id')\n", (598, 613), False, 'from alembic import op\n'), ((666, 687), 'alembic.op.f', 'op.f', (['"""ix_plant_name"""'], {}), "('ix_plant_name')\n", (670, 687), False, 'from alembic import op\n'), ((1389, 1418), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""'], {}), "('id')\n", (1412, 1418), True, 'import sqlalchemy as sa\n'), ((1723, 1744), 'alembic.op.f', 'op.f', (['"""ix_plant_name"""'], {}), "('ix_plant_name')\n", (1727, 1744), False, 'from alembic import op\n'), ((1784, 1803), 'alembic.op.f', 'op.f', (['"""ix_plant_id"""'], {}), "('ix_plant_id')\n", (1788, 1803), False, 'from alembic import op\n'), ((429, 441), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (439, 441), True, 'import sqlalchemy as sa\n'), ((481, 515), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (513, 515), False, 'import sqlmodel\n'), ((1140, 1153), 'sqlalchemy.DATETIME', 'sa.DATETIME', ([], {}), '()\n', (1151, 1153), True, 'import sqlalchemy as sa\n'), ((1191, 1203), 'sqlalchemy.INTEGER', 'sa.INTEGER', ([], {}), '()\n', (1201, 1203), True, 'import sqlalchemy as sa\n'), ((1243, 1255), 'sqlalchemy.VARCHAR', 'sa.VARCHAR', ([], {}), '()\n', (1253, 1255), True, 'import sqlalchemy as 
sa\n'), ((1303, 1315), 'sqlalchemy.VARCHAR', 'sa.VARCHAR', ([], {}), '()\n', (1313, 1315), True, 'import sqlalchemy as sa\n'), ((1355, 1367), 'sqlalchemy.INTEGER', 'sa.INTEGER', ([], {}), '()\n', (1365, 1367), True, 'import sqlalchemy as sa\n')]
|
import functools
import numpy as np
import pytest
import megengine
from megengine.autodiff.grad_manager import GradManager
from megengine.core.ops.builtin import GetVarShape, Reduce, TypeCvt
from megengine.core.tensor.utils import subgraph_fn
from megengine.device import CompNode, get_default_device
from megengine.jit import trace
_assert_allclose = functools.partial(np.testing.assert_allclose, atol=5e-6, rtol=5e-6)
@functools.lru_cache(maxsize=None)
def _get_batch_norm_fn(dtype, device, channels, ndim, interpret, gopt_level):
@subgraph_fn(
"BatchNormNd",
dtype=dtype,
device=device,
nr_inputs=4,
interpret=interpret,
gopt_level=gopt_level,
)
def batch_norm_nd(inputs, f, c):
input, eps, weight, bias = inputs[0:4]
reduce_shape = c(
(1, channels) + (1,) * (ndim - 2), dtype="int32", device=device
)
input_shape = f(GetVarShape(), input)
input_elems = f(Reduce(mode="product", axis=0), input_shape)
reduce_elems = f(Reduce(mode="product", axis=0), reduce_shape)
reduce_size = f("//", input_elems, reduce_elems)
reduce_size = f(TypeCvt(dtype=dtype), reduce_size)
channel_x1s = f(Reduce(mode="sum"), input, reduce_shape)
channel_x2s = f(Reduce(mode="sum_sqr"), input, reduce_shape)
channel_mean = f("/", channel_x1s, reduce_size)
channel_var = f(
"-", f("/", channel_x2s, reduce_size), f("*", channel_mean, channel_mean),
)
invsqrt_channel_var = f("**", f("+", channel_var, eps), c(-0.5))
inv_var_wt = f("*", invsqrt_channel_var, weight)
neg_channel_mean = f("-", channel_mean)
outvar = f(
"fma3", input, inv_var_wt, f("fma3", neg_channel_mean, inv_var_wt, bias),
)
return (outvar,), (True,)
return batch_norm_nd
@pytest.mark.parametrize("device", [get_default_device(), "cpux"])
@pytest.mark.parametrize("batch_size", [1, 8])
@pytest.mark.parametrize("channels", [3])
@pytest.mark.parametrize(
"use_trace, symbolic", [(False, None), (True, False), (True, True)]
)
@pytest.mark.parametrize("gopt_level", [None, 1, 2])
@pytest.mark.parametrize("dtype", ["float32"])
def test_subgraph(device, batch_size, channels, use_trace, symbolic, gopt_level, dtype):
device = CompNode(device)
def subgraph_batch_norm(inp, weight, bias, eps, diff):
inp = inp.detach()
with GradManager().attach(inp) as gm:
batch_norm_fn = _get_batch_norm_fn(
dtype, device, channels, ndim, interpret=False, gopt_level=gopt_level
)
out, *_ = batch_norm_fn(inp, eps, weight, bias)
gm.backward(out * 1e3 + 1e3, diff)
return out, inp.grad
def primitive_batch_norm(inp, weight, bias, eps, diff):
inp = inp.detach()
with GradManager().attach(inp) as gm:
batch_norm_fn = _get_batch_norm_fn(
dtype, device, channels, ndim, interpret=True, gopt_level=gopt_level
)
(out,) = batch_norm_fn(inp, eps, weight, bias)
gm.backward(out * 1e3 + 1e3, diff)
return out, inp.grad
if use_trace:
subgraph_batch_norm = trace(symbolic=symbolic)(subgraph_batch_norm)
primitive_batch_norm = trace(symbolic=symbolic)(primitive_batch_norm)
def rand_tensor(shape, dtype=dtype, device=device):
return megengine.tensor(np.random.random(shape), dtype=dtype, device=device)
    # skip this test because several reduces cannot be run sequentially with the opr cache
return
# test shape change
for image_shape in [(223, 223), (10, 20)]:
ndim = len(image_shape) + 2
input_shape = (batch_size, channels) + image_shape
param_shape = (1, channels) + (1,) * len(image_shape)
inp = rand_tensor(input_shape) * 1e3 + 1e3
weight = rand_tensor(param_shape)
bias = rand_tensor(param_shape)
eps = megengine.tensor(1e-5, dtype=dtype, device=device)
diff = rand_tensor(input_shape)
out1, grad1 = subgraph_batch_norm(inp, weight, bias, eps, diff)
out2, grad2 = primitive_batch_norm(inp, weight, bias, eps, diff)
_assert_allclose(out1.numpy(), out2.numpy())
_assert_allclose(grad1.numpy(), grad2.numpy())
@functools.lru_cache(maxsize=None)
def _get_mul_fn(dtype, device):
@subgraph_fn(
"Mul",
dtype=dtype,
device=device,
nr_inputs=2,
gopt_level=None,
jit_fusion=False,
custom_grad=True,
)
def mul(inputs, f, c):
x, y = inputs[0:2]
z = f("*", x, y)
(dz,) = yield (z,)
dx = f("*", dz, y)
dy = f("*", dz, x)
yield (dx, dy)
return mul
def test_subgraph_jit_backward():
x_np = np.random.rand(3, 4, 5).astype("float32")
x1 = megengine.Tensor(x_np)
x2 = megengine.Tensor(x_np)
mul = _get_mul_fn(x1.dtype, x1.device)
gm = GradManager()
gm.attach([x1, x2])
with gm:
y1 = x1 * x1
y2 = mul(x2, x2)
gm.backward(y1)
with gm:
y1 = x1 * x1
y2 = mul(x2, x2)
gm.backward(y1 + y2)
with gm:
y1 = x1 * x1
y2 = mul(x2, x2)
gm.backward(y2)
|
[
"megengine.core.ops.builtin.GetVarShape",
"megengine.tensor",
"megengine.device.CompNode",
"megengine.Tensor",
"megengine.device.get_default_device",
"megengine.jit.trace",
"megengine.core.tensor.utils.subgraph_fn",
"megengine.autodiff.grad_manager.GradManager",
"megengine.core.ops.builtin.Reduce",
"megengine.core.ops.builtin.TypeCvt"
] |
[((355, 424), 'functools.partial', 'functools.partial', (['np.testing.assert_allclose'], {'atol': '(5e-06)', 'rtol': '(5e-06)'}), '(np.testing.assert_allclose, atol=5e-06, rtol=5e-06)\n', (372, 424), False, 'import functools\n'), ((426, 459), 'functools.lru_cache', 'functools.lru_cache', ([], {'maxsize': 'None'}), '(maxsize=None)\n', (445, 459), False, 'import functools\n'), ((1944, 1989), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""batch_size"""', '[1, 8]'], {}), "('batch_size', [1, 8])\n", (1967, 1989), False, 'import pytest\n'), ((1991, 2031), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""channels"""', '[3]'], {}), "('channels', [3])\n", (2014, 2031), False, 'import pytest\n'), ((2033, 2130), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""use_trace, symbolic"""', '[(False, None), (True, False), (True, True)]'], {}), "('use_trace, symbolic', [(False, None), (True, False\n ), (True, True)])\n", (2056, 2130), False, 'import pytest\n'), ((2133, 2184), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""gopt_level"""', '[None, 1, 2]'], {}), "('gopt_level', [None, 1, 2])\n", (2156, 2184), False, 'import pytest\n'), ((2186, 2231), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', "['float32']"], {}), "('dtype', ['float32'])\n", (2209, 2231), False, 'import pytest\n'), ((4331, 4364), 'functools.lru_cache', 'functools.lru_cache', ([], {'maxsize': 'None'}), '(maxsize=None)\n', (4350, 4364), False, 'import functools\n'), ((543, 658), 'megengine.core.tensor.utils.subgraph_fn', 'subgraph_fn', (['"""BatchNormNd"""'], {'dtype': 'dtype', 'device': 'device', 'nr_inputs': '(4)', 'interpret': 'interpret', 'gopt_level': 'gopt_level'}), "('BatchNormNd', dtype=dtype, device=device, nr_inputs=4,\n interpret=interpret, gopt_level=gopt_level)\n", (554, 658), False, 'from megengine.core.tensor.utils import subgraph_fn\n'), ((2334, 2350), 'megengine.device.CompNode', 'CompNode', (['device'], {}), '(device)\n', (2342, 2350), False, 'from megengine.device import CompNode, get_default_device\n'), ((4402, 4518), 'megengine.core.tensor.utils.subgraph_fn', 'subgraph_fn', (['"""Mul"""'], {'dtype': 'dtype', 'device': 'device', 'nr_inputs': '(2)', 'gopt_level': 'None', 'jit_fusion': '(False)', 'custom_grad': '(True)'}), "('Mul', dtype=dtype, device=device, nr_inputs=2, gopt_level=None,\n jit_fusion=False, custom_grad=True)\n", (4413, 4518), False, 'from megengine.core.tensor.utils import subgraph_fn\n'), ((4875, 4897), 'megengine.Tensor', 'megengine.Tensor', (['x_np'], {}), '(x_np)\n', (4891, 4897), False, 'import megengine\n'), ((4907, 4929), 'megengine.Tensor', 'megengine.Tensor', (['x_np'], {}), '(x_np)\n', (4923, 4929), False, 'import megengine\n'), ((4982, 4995), 'megengine.autodiff.grad_manager.GradManager', 'GradManager', ([], {}), '()\n', (4993, 4995), False, 'from megengine.autodiff.grad_manager import GradManager\n'), ((3981, 4032), 'megengine.tensor', 'megengine.tensor', (['(1e-05)'], {'dtype': 'dtype', 'device': 'device'}), '(1e-05, dtype=dtype, device=device)\n', (3997, 4032), False, 'import megengine\n'), ((1912, 1932), 'megengine.device.get_default_device', 'get_default_device', ([], {}), '()\n', (1930, 1932), False, 'from megengine.device import CompNode, get_default_device\n'), ((930, 943), 'megengine.core.ops.builtin.GetVarShape', 'GetVarShape', ([], {}), '()\n', (941, 943), False, 'from megengine.core.ops.builtin import GetVarShape, Reduce, TypeCvt\n'), ((976, 1006), 'megengine.core.ops.builtin.Reduce', 'Reduce', ([], {'mode': 
'"""product"""', 'axis': '(0)'}), "(mode='product', axis=0)\n", (982, 1006), False, 'from megengine.core.ops.builtin import GetVarShape, Reduce, TypeCvt\n'), ((1046, 1076), 'megengine.core.ops.builtin.Reduce', 'Reduce', ([], {'mode': '"""product"""', 'axis': '(0)'}), "(mode='product', axis=0)\n", (1052, 1076), False, 'from megengine.core.ops.builtin import GetVarShape, Reduce, TypeCvt\n'), ((1173, 1193), 'megengine.core.ops.builtin.TypeCvt', 'TypeCvt', ([], {'dtype': 'dtype'}), '(dtype=dtype)\n', (1180, 1193), False, 'from megengine.core.ops.builtin import GetVarShape, Reduce, TypeCvt\n'), ((1232, 1250), 'megengine.core.ops.builtin.Reduce', 'Reduce', ([], {'mode': '"""sum"""'}), "(mode='sum')\n", (1238, 1250), False, 'from megengine.core.ops.builtin import GetVarShape, Reduce, TypeCvt\n'), ((1297, 1319), 'megengine.core.ops.builtin.Reduce', 'Reduce', ([], {'mode': '"""sum_sqr"""'}), "(mode='sum_sqr')\n", (1303, 1319), False, 'from megengine.core.ops.builtin import GetVarShape, Reduce, TypeCvt\n'), ((3241, 3265), 'megengine.jit.trace', 'trace', ([], {'symbolic': 'symbolic'}), '(symbolic=symbolic)\n', (3246, 3265), False, 'from megengine.jit import trace\n'), ((3318, 3342), 'megengine.jit.trace', 'trace', ([], {'symbolic': 'symbolic'}), '(symbolic=symbolic)\n', (3323, 3342), False, 'from megengine.jit import trace\n'), ((3454, 3477), 'numpy.random.random', 'np.random.random', (['shape'], {}), '(shape)\n', (3470, 3477), True, 'import numpy as np\n'), ((4824, 4847), 'numpy.random.rand', 'np.random.rand', (['(3)', '(4)', '(5)'], {}), '(3, 4, 5)\n', (4838, 4847), True, 'import numpy as np\n'), ((2451, 2464), 'megengine.autodiff.grad_manager.GradManager', 'GradManager', ([], {}), '()\n', (2462, 2464), False, 'from megengine.autodiff.grad_manager import GradManager\n'), ((2873, 2886), 'megengine.autodiff.grad_manager.GradManager', 'GradManager', ([], {}), '()\n', (2884, 2886), False, 'from megengine.autodiff.grad_manager import GradManager\n')]
|
"""
Linearization of higher order solutions for the purposes of visualization.
"""
from __future__ import absolute_import
import numpy as nm
from sfepy.linalg import dot_sequences
from sfepy.discrete.fem.refine import refine_reference
from six.moves import range
def get_eval_dofs(dofs, dof_conn, ps, ori=None):
"""
Get default function for evaluating field DOFs given a list of elements and
reference element coordinates.
"""
def _eval(iels, rx):
edofs = dofs[dof_conn[iels]]
if ori is not None:
eori = ori[iels]
else:
eori = None
bf = ps.eval_base(rx, ori=eori, force_axis=True)[...,0,:]
rvals = dot_sequences(bf, edofs)
return rvals
return _eval
def get_eval_coors(coors, conn, ps):
"""
Get default function for evaluating physical coordinates given a list of
elements and reference element coordinates.
"""
def _eval(iels, rx):
ecoors = coors[conn[iels]]
aux = ecoors.transpose((0, 2, 1))
bf = ps.eval_base(rx).squeeze()
phys_coors = nm.dot(aux, bf.T).transpose((0, 2, 1))
return phys_coors
return _eval
def create_output(eval_dofs, eval_coors, n_el, ps, min_level=0, max_level=2,
eps=1e-4):
"""
Create mesh with linear elements that approximates DOFs returned by
`eval_dofs()` corresponding to a higher order approximation with a relative
precision given by `eps`. The DOFs are evaluated in physical coordinates
returned by `eval_coors()`.
"""
def _get_msd(iels, rx, ree):
rvals = eval_dofs(iels, rx)
rng = rvals.max() - rvals.min()
n_components = rvals.shape[-1]
msd = 0.0
for ic in range(n_components):
rval = rvals[..., ic]
sd = rval[:, ree]
# ~ max. second derivative.
msd += nm.abs(sd[..., 0] + sd[..., 2]
- 2.0 * sd[..., 1]).max(axis=-1)
msd /= n_components
return msd, rng
rx0 = ps.geometry.coors
rc0 = ps.geometry.conn[None, :]
rx, rc, ree = refine_reference(ps.geometry, 1)
factor = rc.shape[0] / rc0.shape[0]
iels = nm.arange(n_el)
msd, rng = _get_msd(iels, rx, ree)
eps_r = rng * eps
flag = msd > eps_r
iels0 = flag0 = None
coors = []
conns = []
vdofs = []
inod = 0
for level in range(max_level + 1):
if level < min_level:
flag.fill(True) # Force refinement everywhere.
elif level == max_level:
# Last level - take everything.
flag.fill(False)
# Deal with finished elements.
if flag0 is not None:
ii = nm.searchsorted(iels0, iels)
expand_flag0 = flag0[ii].repeat(factor, axis=1)
else:
expand_flag0 = nm.ones_like(flag)
ie, ir = nm.where((flag == False) & (expand_flag0 == True))
if len(ie):
uie, iies = nm.unique(ie, return_inverse=True)
            # Each (sub-)element has its own coordinates - no shared vertices.
xes = eval_coors(iels[uie], rx0)
des = eval_dofs(iels[uie], rx0)
# Vectorize (how??) or use cython?
cc = []
vd = []
for ii, iie in enumerate(iies):
ce = rc0[ir[ii]]
xe = xes[iie]
cc.append(xe[ce])
de = des[iie]
vd.append(de[ce])
cc = nm.vstack(cc)
vd = nm.vstack(vd)
nc = cc.shape[0]
np = rc0.shape[1]
conn = nm.arange(nc, dtype=nm.int32).reshape((nc // np, np))
coors.append(cc)
conns.append(conn + inod)
vdofs.append(vd)
inod += nc
if not flag.any():
break
iels0 = iels
flag0 = flag
# Deal with elements to refine.
if level < max_level:
eflag = flag.sum(axis=1, dtype=nm.bool)
iels = iels[eflag]
rc0 = rc
rx0 = rx
rx, rc, ree = refine_reference(ps.geometry, level + 2)
msd, rng = _get_msd(iels, rx, ree)
eps_r = rng * eps
flag = msd > eps_r
all_coors = nm.concatenate(coors, axis=0)
conn = nm.concatenate(conns, axis=0)
all_vdofs = nm.concatenate(vdofs, axis=0)
mat_ids = nm.zeros(conn.shape[0], dtype=nm.int32)
return level, all_coors, conn, all_vdofs, mat_ids
|
[
"sfepy.linalg.dot_sequences",
"sfepy.discrete.fem.refine.refine_reference"
] |
[((2121, 2153), 'sfepy.discrete.fem.refine.refine_reference', 'refine_reference', (['ps.geometry', '(1)'], {}), '(ps.geometry, 1)\n', (2137, 2153), False, 'from sfepy.discrete.fem.refine import refine_reference\n'), ((2207, 2222), 'numpy.arange', 'nm.arange', (['n_el'], {}), '(n_el)\n', (2216, 2222), True, 'import numpy as nm\n'), ((2410, 2430), 'six.moves.range', 'range', (['(max_level + 1)'], {}), '(max_level + 1)\n', (2415, 2430), False, 'from six.moves import range\n'), ((4269, 4298), 'numpy.concatenate', 'nm.concatenate', (['coors'], {'axis': '(0)'}), '(coors, axis=0)\n', (4283, 4298), True, 'import numpy as nm\n'), ((4310, 4339), 'numpy.concatenate', 'nm.concatenate', (['conns'], {'axis': '(0)'}), '(conns, axis=0)\n', (4324, 4339), True, 'import numpy as nm\n'), ((4356, 4385), 'numpy.concatenate', 'nm.concatenate', (['vdofs'], {'axis': '(0)'}), '(vdofs, axis=0)\n', (4370, 4385), True, 'import numpy as nm\n'), ((4401, 4440), 'numpy.zeros', 'nm.zeros', (['conn.shape[0]'], {'dtype': 'nm.int32'}), '(conn.shape[0], dtype=nm.int32)\n', (4409, 4440), True, 'import numpy as nm\n'), ((687, 711), 'sfepy.linalg.dot_sequences', 'dot_sequences', (['bf', 'edofs'], {}), '(bf, edofs)\n', (700, 711), False, 'from sfepy.linalg import dot_sequences\n'), ((1748, 1767), 'six.moves.range', 'range', (['n_components'], {}), '(n_components)\n', (1753, 1767), False, 'from six.moves import range\n'), ((2883, 2933), 'numpy.where', 'nm.where', (['((flag == False) & (expand_flag0 == True))'], {}), '((flag == False) & (expand_flag0 == True))\n', (2891, 2933), True, 'import numpy as nm\n'), ((2715, 2743), 'numpy.searchsorted', 'nm.searchsorted', (['iels0', 'iels'], {}), '(iels0, iels)\n', (2730, 2743), True, 'import numpy as nm\n'), ((2846, 2864), 'numpy.ones_like', 'nm.ones_like', (['flag'], {}), '(flag)\n', (2858, 2864), True, 'import numpy as nm\n'), ((2978, 3012), 'numpy.unique', 'nm.unique', (['ie'], {'return_inverse': '(True)'}), '(ie, return_inverse=True)\n', (2987, 3012), True, 'import numpy as nm\n'), ((3491, 3504), 'numpy.vstack', 'nm.vstack', (['cc'], {}), '(cc)\n', (3500, 3504), True, 'import numpy as nm\n'), ((3522, 3535), 'numpy.vstack', 'nm.vstack', (['vd'], {}), '(vd)\n', (3531, 3535), True, 'import numpy as nm\n'), ((4102, 4142), 'sfepy.discrete.fem.refine.refine_reference', 'refine_reference', (['ps.geometry', '(level + 2)'], {}), '(ps.geometry, level + 2)\n', (4118, 4142), False, 'from sfepy.discrete.fem.refine import refine_reference\n'), ((1095, 1112), 'numpy.dot', 'nm.dot', (['aux', 'bf.T'], {}), '(aux, bf.T)\n', (1101, 1112), True, 'import numpy as nm\n'), ((1893, 1943), 'numpy.abs', 'nm.abs', (['(sd[..., 0] + sd[..., 2] - 2.0 * sd[..., 1])'], {}), '(sd[..., 0] + sd[..., 2] - 2.0 * sd[..., 1])\n', (1899, 1943), True, 'import numpy as nm\n'), ((3615, 3644), 'numpy.arange', 'nm.arange', (['nc'], {'dtype': 'nm.int32'}), '(nc, dtype=nm.int32)\n', (3624, 3644), True, 'import numpy as nm\n')]
|
# -*- coding: utf-8 -*-
# @Time : 2022/1/2 17:50
# @Author : WhaleFall
# @Site :
# @File : __init__.py.py
# @Software: PyCharm
# Flask application initialization, factory function
from flask import Flask
from flask_login import LoginManager
from config import config
from sqlmodel import create_engine, SQLModel
# Instantiate a login component
login_manager = LoginManager()
login_manager.login_view = 'auth.login'  # blueprint that handles login
login_manager.login_message = "Please log in first!"
def create_app(config_name):
    """
    Factory function: specify a configuration type.
    The program entry file must never share the name `app`; a hard-learned lesson!!
    """
    app = Flask(__name__)  # instantiate the app
    app.config.from_object(config[config_name])  # read settings from the config class
    config[config_name].init_app(app)  # call the static method to initialize components
    # Register components
    login_manager.init_app(app)  # login component
    # Database
    from app import models
    app.config['engine'] = create_engine(config[config_name].SQLALCHEMY_DATABASE_URI, echo=True)
    # Register blueprints
from .main import main
app.register_blueprint(main)
from .dali import dali
app.register_blueprint(dali)
from .admin import admin
app.register_blueprint(admin)
return app
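# Illustrative usage sketch (assumes the config dict defines a "default" key;
# adjust to the keys actually declared in config.py):
#
#     app = create_app("default")
#     app.run(debug=True)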
|
[
"sqlmodel.create_engine"
] |
[((307, 321), 'flask_login.LoginManager', 'LoginManager', ([], {}), '()\n', (319, 321), False, 'from flask_login import LoginManager\n'), ((518, 533), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (523, 533), False, 'from flask import Flask\n'), ((770, 839), 'sqlmodel.create_engine', 'create_engine', (['config[config_name].SQLALCHEMY_DATABASE_URI'], {'echo': '(True)'}), '(config[config_name].SQLALCHEMY_DATABASE_URI, echo=True)\n', (783, 839), False, 'from sqlmodel import create_engine, SQLModel\n')]
|