code (string, 110–64.5k chars) | apis (list) | extract_api (string, 123–69.9k chars)
---|---|---
from datetime import datetime, date
from typing import Optional
from fastapi import APIRouter
from sqlmodel import Field, SQLModel
router = APIRouter()
class History(SQLModel, table=True):
    id: Optional[int] = Field(default=None, primary_key=True)
    patient_id: int
    hospital_id: Optional[int]
    hospital_node_id: Optional[int]
    hospital_room: str
    discipline_group_id: int
    discipline_id: int
    date: date
    source: str
    created_at: datetime
    updated_at: datetime
    created_by: int
    updated_by: Optional[int] = None

class HistoryDoctor(SQLModel, table=True):
    id: Optional[int] = Field(default=None, primary_key=True)
    history_id: int
    doctor_id: int
    created_at: datetime
    updated_at: datetime
    created_by: int
    updated_by: Optional[int] = None

class HistoryModuleMap(SQLModel, table=True):
    id: Optional[int] = Field(default=None, primary_key=True)
    history_id: int
    module_id: int
    created_at: datetime
    updated_at: datetime
    created_by: int
    updated_by: Optional[int] = None

class HistoryTag(SQLModel, table=True):
    id: Optional[int] = Field(default=None, primary_key=True)
    history_id: int
    tag_id: int
    created_at: datetime
    updated_at: datetime
    created_by: int
    updated_by: Optional[int] = None
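# Hedged usage sketch (added for illustration; the engine and the endpoint
# below are assumptions, not part of the original sample):
#
#     from sqlmodel import Session, create_engine, select
#
#     engine = create_engine("sqlite://")
#     SQLModel.metadata.create_all(engine)
#
#     @router.get("/histories/{patient_id}")
#     def read_histories(patient_id: int):
#         with Session(engine) as session:
#             return session.exec(
#                 select(History).where(History.patient_id == patient_id)
#             ).all()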
|
[
"sqlmodel.Field"
] |
[((142, 153), 'fastapi.APIRouter', 'APIRouter', ([], {}), '()\n', (151, 153), False, 'from fastapi import APIRouter\n'), ((217, 254), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (222, 254), False, 'from sqlmodel import Field, SQLModel\n'), ((624, 661), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (629, 661), False, 'from sqlmodel import Field, SQLModel\n'), ((880, 917), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (885, 917), False, 'from sqlmodel import Field, SQLModel\n'), ((1130, 1167), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (1135, 1167), False, 'from sqlmodel import Field, SQLModel\n')]
|
r"""
Diametrically point loaded 2-D disk with postprocessing and probes. See
:ref:`sec-primer`.
Find :math:`\ul{u}` such that:
.. math::
\int_{\Omega} D_{ijkl}\ e_{ij}(\ul{v}) e_{kl}(\ul{u})
= 0
\;, \quad \forall \ul{v} \;,
where
.. math::
D_{ijkl} = \mu (\delta_{ik} \delta_{jl}+\delta_{il} \delta_{jk}) +
\lambda \ \delta_{ij} \delta_{kl}
\;.
"""
from __future__ import absolute_import
from examples.linear_elasticity.its2D_1 import *
from sfepy.mechanics.matcoefs import stiffness_from_youngpoisson
from sfepy.postprocess.probes_vtk import Probe
import os
from six.moves import range
def stress_strain(out, pb, state, extend=False):
    """
    Calculate and output strain and stress for given displacements.
    """
    from sfepy.base.base import Struct
    import matplotlib.pyplot as plt
    import matplotlib.font_manager as fm

    ev = pb.evaluate
    strain = ev('ev_cauchy_strain.2.Omega(u)', mode='el_avg')
    stress = ev('ev_cauchy_stress.2.Omega(Asphalt.D, u)', mode='el_avg')

    out['cauchy_strain'] = Struct(name='output_data', mode='cell',
                                  data=strain, dofs=None)
    out['cauchy_stress'] = Struct(name='output_data', mode='cell',
                                  data=stress, dofs=None)

    probe = Probe(out, pb.domain.mesh, probe_view=True)

    ps0 = [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]
    ps1 = [[75.0, 0.0, 0.0], [0.0, 75.0, 0.0]]
    n_point = 10

    labels = ['%s -> %s' % (p0, p1) for p0, p1 in zip(ps0, ps1)]
    probes = []
    for ip in range(len(ps0)):
        p0, p1 = ps0[ip], ps1[ip]
        probes.append('line%d' % ip)
        probe.add_line_probe('line%d' % ip, p0, p1, n_point)

    for ip, label in zip(probes, labels):
        fig = plt.figure()
        plt.clf()
        fig.subplots_adjust(hspace=0.4)

        plt.subplot(311)
        pars, vals = probe(ip, 'u')
        for ic in range(vals.shape[1] - 1):
            plt.plot(pars, vals[:, ic], label=r'$u_{%d}$' % (ic + 1),
                     lw=1, ls='-', marker='+', ms=3)
        plt.ylabel('displacements')
        plt.xlabel('probe %s' % label, fontsize=8)
        plt.legend(loc='best', prop=fm.FontProperties(size=10))

        sym_indices = [0, 4, 1]
        sym_labels = ['11', '22', '12']

        plt.subplot(312)
        pars, vals = probe(ip, 'cauchy_strain')
        for ii, ic in enumerate(sym_indices):
            plt.plot(pars, vals[:, ic], label=r'$e_{%s}$' % sym_labels[ii],
                     lw=1, ls='-', marker='+', ms=3)
        plt.ylabel('Cauchy strain')
        plt.xlabel('probe %s' % label, fontsize=8)
        plt.legend(loc='best', prop=fm.FontProperties(size=8))

        plt.subplot(313)
        pars, vals = probe(ip, 'cauchy_stress')
        for ii, ic in enumerate(sym_indices):
            plt.plot(pars, vals[:, ic], label=r'$\sigma_{%s}$' % sym_labels[ii],
                     lw=1, ls='-', marker='+', ms=3)
        plt.ylabel('Cauchy stress')
        plt.xlabel('probe %s' % label, fontsize=8)
        plt.legend(loc='best', prop=fm.FontProperties(size=8))

        opts = pb.conf.options
        filename_results = os.path.join(opts.get('output_dir'),
                                        'its2D_probe_%s.png' % ip)
        fig.savefig(filename_results)

    return out

materials['Asphalt'][0].update({'D' : stiffness_from_youngpoisson(2, young, poisson)})

options.update({
    'post_process_hook' : 'stress_strain',
})
|
[
"sfepy.postprocess.probes_vtk.Probe",
"sfepy.mechanics.matcoefs.stiffness_from_youngpoisson",
"sfepy.base.base.Struct"
] |
[((1052, 1115), 'sfepy.base.base.Struct', 'Struct', ([], {'name': '"""output_data"""', 'mode': '"""cell"""', 'data': 'strain', 'dofs': 'None'}), "(name='output_data', mode='cell', data=strain, dofs=None)\n", (1058, 1115), False, 'from sfepy.base.base import Struct\n'), ((1177, 1240), 'sfepy.base.base.Struct', 'Struct', ([], {'name': '"""output_data"""', 'mode': '"""cell"""', 'data': 'stress', 'dofs': 'None'}), "(name='output_data', mode='cell', data=stress, dofs=None)\n", (1183, 1240), False, 'from sfepy.base.base import Struct\n'), ((1288, 1331), 'sfepy.postprocess.probes_vtk.Probe', 'Probe', (['out', 'pb.domain.mesh'], {'probe_view': '(True)'}), '(out, pb.domain.mesh, probe_view=True)\n', (1293, 1331), False, 'from sfepy.postprocess.probes_vtk import Probe\n'), ((1748, 1760), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1758, 1760), True, 'import matplotlib.pyplot as plt\n'), ((1769, 1778), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (1776, 1778), True, 'import matplotlib.pyplot as plt\n'), ((1827, 1843), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(311)'], {}), '(311)\n', (1838, 1843), True, 'import matplotlib.pyplot as plt\n'), ((1898, 1922), 'six.moves.range', 'range', (['(vals.shape[1] - 1)'], {}), '(vals.shape[1] - 1)\n', (1903, 1922), False, 'from six.moves import range\n'), ((2054, 2081), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""displacements"""'], {}), "('displacements')\n", (2064, 2081), True, 'import matplotlib.pyplot as plt\n'), ((2090, 2132), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('probe %s' % label)"], {'fontsize': '(8)'}), "('probe %s' % label, fontsize=8)\n", (2100, 2132), True, 'import matplotlib.pyplot as plt\n'), ((2279, 2295), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(312)'], {}), '(312)\n', (2290, 2295), True, 'import matplotlib.pyplot as plt\n'), ((2526, 2553), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Cauchy strain"""'], {}), "('Cauchy strain')\n", (2536, 2553), True, 'import matplotlib.pyplot as plt\n'), ((2562, 2604), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('probe %s' % label)"], {'fontsize': '(8)'}), "('probe %s' % label, fontsize=8)\n", (2572, 2604), True, 'import matplotlib.pyplot as plt\n'), ((2677, 2693), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(313)'], {}), '(313)\n', (2688, 2693), True, 'import matplotlib.pyplot as plt\n'), ((2929, 2956), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Cauchy stress"""'], {}), "('Cauchy stress')\n", (2939, 2956), True, 'import matplotlib.pyplot as plt\n'), ((2965, 3007), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('probe %s' % label)"], {'fontsize': '(8)'}), "('probe %s' % label, fontsize=8)\n", (2975, 3007), True, 'import matplotlib.pyplot as plt\n'), ((3328, 3374), 'sfepy.mechanics.matcoefs.stiffness_from_youngpoisson', 'stiffness_from_youngpoisson', (['(2)', 'young', 'poisson'], {}), '(2, young, poisson)\n', (3355, 3374), False, 'from sfepy.mechanics.matcoefs import stiffness_from_youngpoisson\n'), ((1936, 2028), 'matplotlib.pyplot.plot', 'plt.plot', (['pars', 'vals[:, ic]'], {'label': "('$u_{%d}$' % (ic + 1))", 'lw': '(1)', 'ls': '"""-"""', 'marker': '"""+"""', 'ms': '(3)'}), "(pars, vals[:, ic], label='$u_{%d}$' % (ic + 1), lw=1, ls='-',\n marker='+', ms=3)\n", (1944, 2028), True, 'import matplotlib.pyplot as plt\n'), ((2402, 2500), 'matplotlib.pyplot.plot', 'plt.plot', (['pars', 'vals[:, ic]'], {'label': "('$e_{%s}$' % sym_labels[ii])", 'lw': '(1)', 'ls': '"""-"""', 'marker': '"""+"""', 'ms': '(3)'}), "(pars, vals[:, ic], label='$e_{%s}$' % sym_labels[ii], lw=1, ls='-',\n marker='+', ms=3)\n", (2410, 2500), True, 'import matplotlib.pyplot as plt\n'), ((2800, 2904), 'matplotlib.pyplot.plot', 'plt.plot', (['pars', 'vals[:, ic]'], {'label': "('$\\\\sigma_{%s}$' % sym_labels[ii])", 'lw': '(1)', 'ls': '"""-"""', 'marker': '"""+"""', 'ms': '(3)'}), "(pars, vals[:, ic], label='$\\\\sigma_{%s}$' % sym_labels[ii], lw=1,\n ls='-', marker='+', ms=3)\n", (2808, 2904), True, 'import matplotlib.pyplot as plt\n'), ((2169, 2195), 'matplotlib.font_manager.FontProperties', 'fm.FontProperties', ([], {'size': '(10)'}), '(size=10)\n', (2186, 2195), True, 'import matplotlib.font_manager as fm\n'), ((2641, 2666), 'matplotlib.font_manager.FontProperties', 'fm.FontProperties', ([], {'size': '(8)'}), '(size=8)\n', (2658, 2666), True, 'import matplotlib.font_manager as fm\n'), ((3044, 3069), 'matplotlib.font_manager.FontProperties', 'fm.FontProperties', ([], {'size': '(8)'}), '(size=8)\n', (3061, 3069), True, 'import matplotlib.font_manager as fm\n')]
|
from __future__ import absolute_import
import os
import sfepy
from sfepy.base.base import load_classes, insert_static_method
from .solvers import *
from .eigen import eig
solver_files = sfepy.get_paths('sfepy/solvers/*.py')
remove = ['setup.py', 'solvers.py', 'petsc_worker.py']
solver_files = [name for name in solver_files
                if os.path.basename(name) not in remove]

solver_table = load_classes(solver_files,
                            [LinearSolver, NonlinearSolver,
                             TimeSteppingSolver, EigenvalueSolver,
                             OptimizationSolver], package_name='sfepy.solvers')

def register_solver(cls):
    """
    Register a custom solver.
    """
    solver_table[cls.name] = cls

def any_from_conf(conf, **kwargs):
    """Create an instance of a solver class according to the configuration."""
    return solver_table[conf.kind](conf, **kwargs)

insert_static_method(Solver, any_from_conf)
del any_from_conf
del sfepy
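# Illustrative sketch (an assumption, not part of the original module): a
# custom solver subclassing one of the bases above can be made available to
# problem configurations by registering it under a unique `name`:
#
#     class MyNewton(NonlinearSolver):
#         name = 'nls.my_newton'
#
#         def __call__(self, rhs, x0=None, **kwargs):
#             ...
#
#     register_solver(MyNewton)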
|
[
"sfepy.get_paths",
"sfepy.base.base.insert_static_method",
"sfepy.base.base.load_classes"
] |
[((187, 224), 'sfepy.get_paths', 'sfepy.get_paths', (['"""sfepy/solvers/*.py"""'], {}), "('sfepy/solvers/*.py')\n", (202, 224), False, 'import sfepy\n'), ((398, 554), 'sfepy.base.base.load_classes', 'load_classes', (['solver_files', '[LinearSolver, NonlinearSolver, TimeSteppingSolver, EigenvalueSolver,\n OptimizationSolver]'], {'package_name': '"""sfepy.solvers"""'}), "(solver_files, [LinearSolver, NonlinearSolver,\n TimeSteppingSolver, EigenvalueSolver, OptimizationSolver], package_name\n ='sfepy.solvers')\n", (410, 554), False, 'from sfepy.base.base import load_classes, insert_static_method\n'), ((904, 947), 'sfepy.base.base.insert_static_method', 'insert_static_method', (['Solver', 'any_from_conf'], {}), '(Solver, any_from_conf)\n', (924, 947), False, 'from sfepy.base.base import load_classes, insert_static_method\n'), ((345, 367), 'os.path.basename', 'os.path.basename', (['name'], {}), '(name)\n', (361, 367), False, 'import os\n')]
|
# Copyright (c) Megvii, Inc. and its affiliates.
import megengine as mge
import megengine.functional as F
import megengine.module as M
from .resnet import BasicBlock
class STN(M.Module):
    """Spatial transformer network from
    `"Spatial Transformer Networks" <https://arxiv.org/pdf/1506.02025.pdf>`_.
    Some implementation details are heavily simplified while good performance
    is maintained.
    """
    def __init__(self, input_size=112):
        assert input_size == 112, f"expected input_size == 112, got {input_size}"
        super().__init__()
        self.input_size = input_size
        self.stem = M.Sequential(
            M.Conv2d(3, 8, kernel_size=3, stride=2, padding=1, bias=False),
            M.BatchNorm2d(8),
            M.ReLU(),
            M.MaxPool2d(kernel_size=2, stride=2),
            BasicBlock(8, 16),
            BasicBlock(16, 32, stride=2),
            BasicBlock(32, 64, stride=2),
        )
        self.fc = M.Linear(64, 9)

    def _get_transformed_image(self, image, mat3x3):
        """apply perspective transform to the image
        note: there is NO need to guarantee the bottom right element equals 1

        Args:
            image (Tensor): input images (shape: n * 3 * 112 * 112)
            mat3x3 (Tensor): perspective matrix (shape: n * 3 * 3)

        Returns:
            transformed_image (Tensor): perspectively transformed image
        """
        s = self.input_size
        transformed_image = F.warp_perspective(image, mat3x3, [s, s])
        return transformed_image

    def _get_mat3x3(self, image):
        """get the perspective matrix used in the transformation
        note: there are only 8 degrees of freedom in a perspective matrix,
        while the output matrix has 9 variables.

        Args:
            image (Tensor): input images (shape: n * 3 * 112 * 112)

        Returns:
            mat3x3 (Tensor): perspective matrix (shape: n * 3 * 3)
        """
        x = self.stem(image)
        x = F.avg_pool2d(x, 7)
        x = F.flatten(x, 1)
        x = self.fc(x)

        s = self.input_size
        # 0.01 here is a magic number. it aims to maintain the identity
        # transform at the early stage of training
        residual = x.reshape(-1, 3, 3) * 0.01
        base = mge.tensor([[1, 0, 0], [0, 1, 0], [0, 0, 1]]).astype("float32")
        base = F.broadcast_to(base, residual.shape)
        left_scale = mge.tensor([[s, 0, 0], [0, s, 0], [0, 0, 1]]).astype("float32")
        left_scale = F.broadcast_to(left_scale, residual.shape)
        right_scale = mge.tensor([[1 / s, 0, 0], [0, 1 / s, 0], [0, 0, 1]]).astype("float32")
        right_scale = F.broadcast_to(right_scale, residual.shape)
        mat3x3 = F.matmul(left_scale, F.matmul(base + residual, right_scale))
        return mat3x3

    def forward(self, image):
        mat3x3 = self._get_mat3x3(image)
        transformed_image = self._get_transformed_image(image, mat3x3)
        return transformed_image
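# Hedged usage sketch (added for illustration; the input shapes follow the
# docstrings above, the random input is an assumption):
#
#     import numpy as np
#
#     stn = STN()
#     images = mge.tensor(np.random.rand(4, 3, 112, 112).astype("float32"))
#     aligned = stn(images)  # same shape as `images`, warped per sample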
|
[
"megengine.functional.broadcast_to",
"megengine.functional.flatten",
"megengine.module.ReLU",
"megengine.tensor",
"megengine.module.MaxPool2d",
"megengine.functional.matmul",
"megengine.module.Conv2d",
"megengine.functional.warp_perspective",
"megengine.module.Linear",
"megengine.module.BatchNorm2d",
"megengine.functional.avg_pool2d"
] |
[((944, 959), 'megengine.module.Linear', 'M.Linear', (['(64)', '(9)'], {}), '(64, 9)\n', (952, 959), True, 'import megengine.module as M\n'), ((1452, 1493), 'megengine.functional.warp_perspective', 'F.warp_perspective', (['image', 'mat3x3', '[s, s]'], {}), '(image, mat3x3, [s, s])\n', (1470, 1493), True, 'import megengine.functional as F\n'), ((1960, 1978), 'megengine.functional.avg_pool2d', 'F.avg_pool2d', (['x', '(7)'], {}), '(x, 7)\n', (1972, 1978), True, 'import megengine.functional as F\n'), ((1991, 2006), 'megengine.functional.flatten', 'F.flatten', (['x', '(1)'], {}), '(x, 1)\n', (2000, 2006), True, 'import megengine.functional as F\n'), ((2304, 2340), 'megengine.functional.broadcast_to', 'F.broadcast_to', (['base', 'residual.shape'], {}), '(base, residual.shape)\n', (2318, 2340), True, 'import megengine.functional as F\n'), ((2447, 2489), 'megengine.functional.broadcast_to', 'F.broadcast_to', (['left_scale', 'residual.shape'], {}), '(left_scale, residual.shape)\n', (2461, 2489), True, 'import megengine.functional as F\n'), ((2606, 2649), 'megengine.functional.broadcast_to', 'F.broadcast_to', (['right_scale', 'residual.shape'], {}), '(right_scale, residual.shape)\n', (2620, 2649), True, 'import megengine.functional as F\n'), ((635, 697), 'megengine.module.Conv2d', 'M.Conv2d', (['(3)', '(8)'], {'kernel_size': '(3)', 'stride': '(2)', 'padding': '(1)', 'bias': '(False)'}), '(3, 8, kernel_size=3, stride=2, padding=1, bias=False)\n', (643, 697), True, 'import megengine.module as M\n'), ((711, 727), 'megengine.module.BatchNorm2d', 'M.BatchNorm2d', (['(8)'], {}), '(8)\n', (724, 727), True, 'import megengine.module as M\n'), ((741, 749), 'megengine.module.ReLU', 'M.ReLU', ([], {}), '()\n', (747, 749), True, 'import megengine.module as M\n'), ((763, 799), 'megengine.module.MaxPool2d', 'M.MaxPool2d', ([], {'kernel_size': '(2)', 'stride': '(2)'}), '(kernel_size=2, stride=2)\n', (774, 799), True, 'import megengine.module as M\n'), ((2688, 2726), 'megengine.functional.matmul', 'F.matmul', (['(base + residual)', 'right_scale'], {}), '(base + residual, right_scale)\n', (2696, 2726), True, 'import megengine.functional as F\n'), ((2225, 2270), 'megengine.tensor', 'mge.tensor', (['[[1, 0, 0], [0, 1, 0], [0, 0, 1]]'], {}), '([[1, 0, 0], [0, 1, 0], [0, 0, 1]])\n', (2235, 2270), True, 'import megengine as mge\n'), ((2362, 2407), 'megengine.tensor', 'mge.tensor', (['[[s, 0, 0], [0, s, 0], [0, 0, 1]]'], {}), '([[s, 0, 0], [0, s, 0], [0, 0, 1]])\n', (2372, 2407), True, 'import megengine as mge\n'), ((2512, 2565), 'megengine.tensor', 'mge.tensor', (['[[1 / s, 0, 0], [0, 1 / s, 0], [0, 0, 1]]'], {}), '([[1 / s, 0, 0], [0, 1 / s, 0], [0, 0, 1]])\n', (2522, 2565), True, 'import megengine as mge\n')]
|
from sqlmodel import Session, select
from sqlalchemy.orm import joinedload
from .models import Pessoa, Livro, engine
def create_livros(titulo: str, pessoa_id: int):
    livro = Livro(titulo=titulo, pessoa_id=pessoa_id)
    with Session(engine) as session:
        session.add(livro)
        session.commit()
        session.refresh(livro)
        return livro

def get_livros():
    query = select(Livro).options(joinedload('*'))
    with Session(engine) as session:
        result = session.execute(query).scalars().unique().all()
        return result

def create_pessoas(idade: int, nome: str):
    person = Pessoa(nome=nome, idade=idade)
    with Session(engine) as session:
        session.add(person)
        session.commit()
        session.refresh(person)
        return person

def get_pessoas(
    id: int = None,
    idade: int = None,
    limit: int = 5,
):
    query = select(Pessoa)
    if id:
        query = query.where(Pessoa.id == id)
    if idade:
        query = query.where(Pessoa.idade == idade)
    if limit:
        query = query.limit(limit)
    with Session(engine) as session:
        result = session.execute(query).scalars().all()
        return result
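# Hedged usage sketch (added for illustration; assumes the tables behind
# `engine` already exist):
#
#     pessoa = create_pessoas(idade=30, nome="Ana")
#     create_livros(titulo="Dom Casmurro", pessoa_id=pessoa.id)
#     print(get_pessoas(limit=10))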
|
[
"sqlmodel.select",
"sqlmodel.Session"
] |
[((878, 892), 'sqlmodel.select', 'select', (['Pessoa'], {}), '(Pessoa)\n', (884, 892), False, 'from sqlmodel import Session, select\n'), ((231, 246), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (238, 246), False, 'from sqlmodel import Session, select\n'), ((413, 428), 'sqlalchemy.orm.joinedload', 'joinedload', (['"""*"""'], {}), "('*')\n", (423, 428), False, 'from sqlalchemy.orm import joinedload\n'), ((439, 454), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (446, 454), False, 'from sqlmodel import Session, select\n'), ((650, 665), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (657, 665), False, 'from sqlmodel import Session, select\n'), ((1074, 1089), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (1081, 1089), False, 'from sqlmodel import Session, select\n'), ((391, 404), 'sqlmodel.select', 'select', (['Livro'], {}), '(Livro)\n', (397, 404), False, 'from sqlmodel import Session, select\n')]
|
from sqlmodel import create_engine, SQLModel, Session
sqlite_file_name = "database.db"
sqlite_url = f"sqlite:///{sqlite_file_name}"
connect_args = {"check_same_thread": False}
engine = create_engine(sqlite_url, echo=True, connect_args=connect_args)
def init_db():
    SQLModel.metadata.create_all(engine)

def get_session():
    with Session(engine) as session:
        yield session
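# Hedged usage sketch (an assumption: this generator is meant as a FastAPI
# dependency, which is the usual pattern for this layout):
#
#     from fastapi import Depends, FastAPI
#
#     app = FastAPI()
#
#     @app.on_event("startup")
#     def on_startup():
#         init_db()
#
#     @app.get("/ping")
#     def ping(session: Session = Depends(get_session)):
#         return {"ok": session.is_active}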
|
[
"sqlmodel.Session",
"sqlmodel.SQLModel.metadata.create_all",
"sqlmodel.create_engine"
] |
[((188, 251), 'sqlmodel.create_engine', 'create_engine', (['sqlite_url'], {'echo': '(True)', 'connect_args': 'connect_args'}), '(sqlite_url, echo=True, connect_args=connect_args)\n', (201, 251), False, 'from sqlmodel import create_engine, SQLModel, Session\n'), ((273, 309), 'sqlmodel.SQLModel.metadata.create_all', 'SQLModel.metadata.create_all', (['engine'], {}), '(engine)\n', (301, 309), False, 'from sqlmodel import create_engine, SQLModel, Session\n'), ((340, 355), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (347, 355), False, 'from sqlmodel import create_engine, SQLModel, Session\n')]
|
from sqlalchemy import inspect
from sqlalchemy.engine.reflection import Inspector
from sqlmodel import create_engine
def test_create_db_and_table(clear_sqlmodel):
    from docs_src.tutorial.create_db_and_table import tutorial003 as mod

    mod.sqlite_url = "sqlite://"
    mod.engine = create_engine(mod.sqlite_url)
    mod.create_db_and_tables()
    insp: Inspector = inspect(mod.engine)
    assert insp.has_table(str(mod.Hero.__tablename__))
|
[
"sqlmodel.create_engine"
] |
[((289, 318), 'sqlmodel.create_engine', 'create_engine', (['mod.sqlite_url'], {}), '(mod.sqlite_url)\n', (302, 318), False, 'from sqlmodel import create_engine\n'), ((323, 349), 'docs_src.tutorial.create_db_and_table.tutorial003.create_db_and_tables', 'mod.create_db_and_tables', ([], {}), '()\n', (347, 349), True, 'from docs_src.tutorial.create_db_and_table import tutorial003 as mod\n'), ((372, 391), 'sqlalchemy.inspect', 'inspect', (['mod.engine'], {}), '(mod.engine)\n', (379, 391), False, 'from sqlalchemy import inspect\n')]
|
# -*- coding: utf-8 -*-
"""
Fields for the Discontinuous Galerkin method
"""
import numpy as nm
import six
from numpy.lib.stride_tricks import as_strided
from six.moves import range

from sfepy.base.base import (output, assert_, Struct)
from sfepy.discrete import Integral, PolySpace
from sfepy.discrete.common.fields import parse_shape
from sfepy.discrete.fem.fields_base import FEField
from sfepy.discrete.fem.mappings import VolumeMapping

def get_unraveler(n_el_nod, n_cell):
    """Returns a function for unraveling, i.e. unpacking dof data from a
    serialized array of shape (n_el_nod*n_cell, 1) to (n_cell, n_el_nod, 1).

    The unraveler returns a non-writeable view into the input array.

    Parameters
    ----------
    n_el_nod : int
        expected dimensions of the dofs array
    n_cell : int

    Returns
    -------
    unravel : callable
    """
    def unravel(u):
        """Returns a non-writeable view into the input array, reshaped from
        (n*m, 1) to (m, n, 1).

        Parameters
        ----------
        u : array_like
            solution in shape (n*m, 1)

        Returns
        -------
        u : ndarray
            unraveled solution in shape (m, n, 1)
        """
        ustride1 = u.strides[0]
        ur = as_strided(u,
                        shape=(n_cell, n_el_nod, 1),
                        strides=(n_el_nod * ustride1, ustride1, ustride1),
                        writeable=False)
        return ur

    return unravel

def get_raveler(n_el_nod, n_cell):
    """Returns a function for raveling, i.e. packing dof data from a
    two-dimensional array of shape (n_cell, n_el_nod, 1) to (n_el_nod*n_cell, 1).

    The raveler returns a view into the input array.

    Parameters
    ----------
    n_el_nod : int
        expected dimensions of the dofs array
    n_cell : int

    Returns
    -------
    ravel : callable
    """
    def ravel(u):
        """Returns a view into the input array, reshaped from (m, n, 1)
        to (n*m, 1).

        Parameters
        ----------
        u : array_like

        Returns
        -------
        u : ndarray
        """
        # ustride1 = u.strides[0]
        # ur = as_strided(u, shape=(n_el_nod*n_cell, 1),
        #                strides=(n_cell*ustride1, ustride1))
        ur = nm.ravel(u)[:, None]
        # possibly use according to
        # https://docs.scipy.org/doc/numpy/reference/generated/numpy.ravel.html
        # ur = u.reshape(-1)
        return ur

    return ravel
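# Illustrative round-trip sketch (added; the shapes follow the docstrings
# above, the numbers are arbitrary):
#
#     u = nm.arange(6, dtype=nm.float64)[:, None]   # (n_el_nod*n_cell, 1)
#     unravel = get_unraveler(n_el_nod=2, n_cell=3)
#     ravel = get_raveler(n_el_nod=2, n_cell=3)
#     assert unravel(u).shape == (3, 2, 1)
#     assert nm.array_equal(ravel(unravel(u)), u)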
# mapping between geometry element types
# and their facets types
# TODO move to sfepy/discrete/fem/geometry_element.py?
cell_facet_gel_name = {
    "1_2": "0_1",
    "2_3": "1_2",
    "2_4": "1_2",
    "3_4": "2_3",
    "3_8": "2_4",
}

def get_gel(region):
    """
    Parameters
    ----------
    region : sfepy.discrete.common.region.Region

    Returns
    -------
    gel :
        base geometry element of the region
    """
    cmesh = region.domain.cmesh
    for key, gel in six.iteritems(region.domain.geom_els):
        ct = cmesh.cell_types
        if (ct[region.cells] == cmesh.key_to_index[gel.name]).all():
            return gel
        else:
            raise ValueError('Region {} contains multiple'
                             ' reference geometries!'.format(region))
class DGField(FEField):
    """Class for usage with DG terms, provides functionality for the
    Discontinuous Galerkin method like neighbour look-up, projection to the
    discontinuous basis and correct DOF treatment.
    """
    family_name = 'volume_DG_legendre_discontinuous'
    is_surface = False

    def __init__(self, name, dtype, shape, region, space="H1",
                 poly_space_base="legendre", approx_order=1, integral=None):
        """
        Creates a DGField, with Legendre polyspace and a default integral
        corresponding to 2 * approx_order.

        Parameters
        ----------
        name : string
        dtype : type
        shape : string
            'vector', 'scalar' or something else
        region : sfepy.discrete.common.region.Region
        space : string
            default "H1"
        poly_space_base : PolySpace
            optionally force the polyspace
        approx_order : 0 for FVM, default 1
        integral : Integral
            if None, an integral of order 2*approx_order is created
        """
        shape = parse_shape(shape, region.domain.shape.dim)
        Struct.__init__(self, name=name, dtype=dtype, shape=shape,
                        region=region)

        if isinstance(approx_order, tuple):
            self.approx_order = approx_order[0]
        else:
            self.approx_order = approx_order

        # geometry
        self.domain = region.domain
        self.region = region
        self.dim = region.tdim
        self._setup_geometry()
        self._setup_connectivity()
        # TODO treat domains embedded into higher dimensional spaces?
        self.n_el_facets = self.dim + 1 if self.gel.is_simplex else 2**self.dim

        # approximation space
        self.space = space
        self.poly_space_base = poly_space_base
        self.force_bubble = False
        self._create_interpolant()

        # DOFs
        self._setup_shape()
        self._setup_all_dofs()

        self.ravel_sol = get_raveler(self.n_el_nod, self.n_cell)
        self.unravel_sol = get_unraveler(self.n_el_nod, self.n_cell)

        # integral
        self.clear_qp_base()
        self.clear_facet_qp_base()
        if integral is None:
            self.integral = Integral("dg_fi", order=2 * self.approx_order)
        else:
            self.integral = integral

        self.ori = None
        self.basis_transform = None

        # mapping
        self.mappings = {}
        self.mapping = self.create_mapping(self.region, self.integral,
                                           "volume", return_mapping=True)[1]
        self.mappings0 = {}

        # neighbour facet mapping and data caches
        # TODO use lru cache or a different method?
        self.clear_facet_neighbour_idx_cache()
        self.clear_normals_cache()
        self.clear_facet_vols_cache()
        self.boundary_facet_local_idx = {}

    def _create_interpolant(self):
        name = self.gel.name + '_DG_legendre'
        ps = PolySpace.any_from_args(name, self.gel, self.approx_order,
                                     base=self.poly_space_base,
                                     force_bubble=False)
        self.poly_space = ps
        # 'legendre_simplex' is created for '1_2'.
        if self.gel.name in ["2_4", "3_8"]:
            self.extended = True
        else:
            self.extended = False

    def _setup_all_dofs(self):
        """Sets up all the different kinds of DOFs; for DG, bubble DOFs only."""
        self.n_el_nod = self.poly_space.n_nod
        self.n_vertex_dof = 0  # in DG we will probably never need vertex DOFs
        self.n_edge_dof = 0  # use facet DOFs for AFS methods
        self.n_face_dof = 0  # use facet DOFs for AFS methods

        (self.n_bubble_dof,
         self.bubble_remap,
         self.bubble_dofs) = self._setup_bubble_dofs()

        self.n_nod = self.n_vertex_dof + self.n_edge_dof \
                     + self.n_face_dof + self.n_bubble_dof

    def _setup_bubble_dofs(self):
        """Creates DOF information for so-called element, cell or bubble DOFs
        - the only DOFs used in DG.
        n_dof = n_cells * n_el_nod
        remap is an optional remapping between cells
        dofs is a mapping between dofs and cells

        Returns
        -------
        n_dof : int
        remap : ndarray
        dofs : ndarray
        """
        self.n_cell = self.region.get_n_cells(self.is_surface)
        n_dof = self.n_cell * self.n_el_nod

        dofs = nm.arange(n_dof, dtype=nm.int32)\
                 .reshape(self.n_cell, self.n_el_nod)
        remap = nm.arange(self.n_cell)
        self.econn = dofs
        self.dofs2cells = nm.repeat(nm.arange(self.n_cell), self.n_el_nod)

        return n_dof, remap, dofs

    def _setup_shape(self):
        """What is shape used for and what does it really mean?
        Does it represent the shape of the problem?
        """
        self.n_components = nm.prod(self.shape)
        self.val_shape = self.shape

    def _setup_geometry(self):
        """Setup the field region geometry."""
        # get_gel extracts the highest dimension geometry from self.region
        self.gel = get_gel(self.region)

    def _setup_connectivity(self):
        """Forces self.domain.mesh to build the necessary connectivities
        so they are available in self.get_nbrhd_dofs.
        """
        self.region.domain.mesh.cmesh.setup_connectivity(self.dim, self.dim)
        self.region.domain.mesh.cmesh.setup_connectivity(self.dim - 1, self.dim)
        self.region.domain.mesh.cmesh.setup_connectivity(self.dim, self.dim - 1)

    def get_coor(self, nods=None):
        """Returns coors for matching nodes.
        # TODO revise DG_EPBC and EPBC matching?

        Parameters
        ----------
        nods :
            if None use all nodes (Default value = None)

        Returns
        -------
        coors : ndarray
            coors on surface
        """
        if nods is None:
            nods = self.bubble_dofs

        cells = self.dofs2cells[nods]
        coors = self.domain.mesh.cmesh.get_centroids(self.dim)[cells]
        eps = min(self.domain.cmesh.get_volumes(self.dim)) / (self.n_el_nod + 2)
        if self.dim == 1:
            extended_coors = nm.zeros(nm.shape(coors)[:-1] + (2,))
            extended_coors[:, 0] = coors[:, 0]
            coors = extended_coors

        # shift centroid coors to lie within cells but be different for each
        # dof; use coors of facet QPs?
        coors += eps * nm.repeat(nm.arange(self.n_el_nod),
                                  len(nm.unique(cells)))[:, None]

        return coors
    def clear_facet_qp_base(self):
        """Clears the facet_qp_base cache."""
        self.facet_bf = {}
        self.facet_qp = None
        self.facet_whs = None

    def _transform_qps_to_facets(self, qps, geo_name):
        """Transforms points given in qps to all facets of the reference
        element with geometry geo_name.

        Parameters
        ----------
        qps :
            qps corresponding to the facet dimension to be transformed
        geo_name :
            element type

        Returns
        -------
        tqps : ndarray
            tqps is of shape shape(qps) + (n_el_facets, geo dim)
        """
        if geo_name == "1_2":
            tqps = nm.zeros(nm.shape(qps) + (2, 1,))
            tqps[..., 0, 0] = 0.
            tqps[..., 1, 0] = 1.
        elif geo_name == "2_3":
            tqps = nm.zeros(nm.shape(qps) + (3, 2,))
            # 0.
            tqps[..., 0, 0] = qps  # x = 0 + t
            tqps[..., 0, 1] = 0.  # y = 0
            # 1.
            tqps[..., 1, 0] = 1 - qps  # x = 1 - t
            tqps[..., 1, 1] = qps  # y = t
            # 2.
            tqps[..., 2, 0] = 0  # x = 0
            tqps[..., 2, 1] = 1 - qps  # y = 1 - t
        elif geo_name == "2_4":
            tqps = nm.zeros(nm.shape(qps) + (4, 2,))
            # 0.
            tqps[..., 0, 0] = qps  # x = t
            tqps[..., 0, 1] = 0.  # y = 0
            # 1.
            tqps[..., 1, 0] = 1  # x = 1
            tqps[..., 1, 1] = qps  # y = t
            # 2.
            tqps[..., 2, 0] = 1 - qps  # x = 1 - t
            tqps[..., 2, 1] = 1  # y = 1
            # 3.
            tqps[..., 3, 0] = 0  # x = 0
            tqps[..., 3, 1] = 1 - qps  # y = 1 - t
        elif geo_name == "3_4":
            # tqps = nm.zeros(nm.shape(qps) + (4, 3,))
            raise NotImplementedError("Geometry {} not supported, yet"
                                      .format(geo_name))
        elif geo_name == "3_8":
            # tqps = nm.zeros(nm.shape(qps) + (8, 3,))
            raise NotImplementedError("Geometry {} not supported, yet"
                                      .format(geo_name))
        else:
            raise NotImplementedError("Geometry {} not supported, yet"
                                      .format(geo_name))
        return tqps

    def get_facet_qp(self):
        """Returns quadrature points on all facets of the reference element
        in an array of shape (n_qp, 1, n_el_facets, dim).

        Returns
        -------
        qps : ndarray
            quadrature points
        weights : ndarray
            Still need to be transformed to actual facets!
        """
        if self.dim == 1:
            facet_qps = self._transform_qps_to_facets(nm.zeros((1, 1)), "1_2")
            weights = nm.ones((1, 1, 1))
        else:
            qps, weights = self.integral.get_qp(cell_facet_gel_name[self.gel.name])
            weights = weights[None, :, None]
            facet_qps = self._transform_qps_to_facets(qps, self.gel.name)

        return facet_qps, weights

    def get_facet_base(self, derivative=False, base_only=False):
        """
        Returns values of the base in facet quadrature points; the data shape
        is a bit crazy right now:
            (number of qps, 1, n_el_facets, 1, n_el_nod)
        and for the derivative:
            (1, number of qps, (dim,) * derivative, n_el_facets, 1, n_el_nod)

        Parameters
        ----------
        derivative : truthy or integer
        base_only : do not return weights

        Returns
        -------
        facet_bf : ndarray
            values of basis functions in facet qps
        weights : ndarray, optionally
            weights of qps
        """
        if derivative:
            diff = int(derivative)
        else:
            diff = 0

        if diff in self.facet_bf:
            facet_bf = self.facet_bf[diff]
            whs = self.facet_whs
        else:
            qps, whs = self.get_facet_qp()
            ps = self.poly_space
            self.facet_qp = qps
            self.facet_whs = whs
            if derivative:
                facet_bf = nm.zeros((1,) + nm.shape(qps)[:-1] +
                                   (self.dim,) * diff + (self.n_el_nod,))
            else:
                facet_bf = nm.zeros(nm.shape(qps)[:-1] + (1, self.n_el_nod,))

            for i in range(self.n_el_facets):
                facet_bf[..., i, :, :] = \
                    ps.eval_base(qps[..., i, :], diff=diff,
                                 transform=self.basis_transform)
            self.facet_bf[diff] = facet_bf

        if base_only:
            return facet_bf
        else:
            return facet_bf, whs
    def clear_facet_neighbour_idx_cache(self, region=None):
        """
        If region is None, clears the whole cache!

        Parameters
        ----------
        region : sfepy.discrete.common.region.Region
            If None clear all.
        """
        if region is None:
            self.facet_neighbour_index = {}
        else:
            self.facet_neighbour_index.pop(region.name)

    def get_facet_neighbor_idx(self, region=None, eq_map=None):
        """
        Returns the index of cell neighbours sharing a facet, along with the
        local index of the facet within the neighbour; also treats periodic
        boundary conditions, i.e. plugs in correct neighbours for cells on a
        periodic boundary. Where there are no neighbours specified, puts -1
        instead of the neighbour and facet id.

        Caches the neighbour index in self.facet_neighbours.

        Parameters
        ----------
        region : sfepy.discrete.common.region.Region
            Main region, must contain cells.
        eq_map :
            eq_map from state variable containing information on
            EPBC and DG EPBC. (Default value = None)

        Returns
        -------
        facet_neighbours : ndarray
            Shape is (n_cell, n_el_facet, 2); the first value is the index of
            the neighbouring cell, the second is the index of the facet in
            said nb. cell.
        """
        if region is None or eq_map is None:
            # HOTFIX enabling the limiter to obtain connectivity data without
            # knowing eq_map or region
            if self.region.name in self.facet_neighbour_index:
                return self.facet_neighbour_index[self.region.name]
            else:
                raise ValueError("No facet neighbour mapping for main " +
                                 "region {}".format(self.region.name) +
                                 " cached yet, call with region and " +
                                 "eq_map first.")

        if region.name in self.facet_neighbour_index:
            return self.facet_neighbour_index[region.name]

        dim, n_cell, n_el_facets = self.get_region_info(region)

        cmesh = region.domain.mesh.cmesh
        cells = region.cells

        facet_neighbours = nm.zeros((n_cell, n_el_facets, 2), dtype=nm.int32)

        c2fi, c2fo = cmesh.get_incident(dim - 1, cells, dim, ret_offsets=True)

        for ic, o1 in enumerate(c2fo[:-1]):  # loop over cells
            o2 = c2fo[ic + 1]

            # get neighbours per facet of the cell
            c2ci, c2co = cmesh.get_incident(dim, c2fi[o1:o2], dim - 1,
                                            ret_offsets=True)
            ii = cmesh.get_local_ids(c2fi[o1:o2], dim - 1, c2ci, c2co, dim)
            fis = nm.c_[c2ci, ii]

            nbrs = []
            for ifa, of1 in enumerate(c2co[:-1]):  # loop over facets
                of2 = c2co[ifa + 1]
                if of2 == (of1 + 1):  # facet has only one cell
                    # Surface facet.
                    nbrs.append([-1, -1])  # c2ci[of1]) # append no neighbours
                else:
                    if c2ci[of1] == cells[ic]:  # do not append the cell itself
                        nbrs.append(fis[of2 - 1])
                    else:
                        nbrs.append(fis[of1])
            facet_neighbours[ic, :, :] = nbrs

        facet_neighbours = \
            self._set_fem_periodic_facet_neighbours(facet_neighbours, eq_map)
        facet_neighbours = \
            self._set_dg_periodic_facet_neighbours(facet_neighbours, eq_map)

        # cache results
        self.facet_neighbour_index[region.name] = facet_neighbours

        return facet_neighbours

    def _set_dg_periodic_facet_neighbours(self, facet_neighbours, eq_map):
        """
        Parameters
        ----------
        facet_neighbours : array_like
            Shape is (n_cell, n_el_facet, 2); the first value is the index of
            the neighbouring cell, the second is the index of the facet in
            said nb. cell.
        eq_map :
            must contain dg_ep_bc, a list of pairs of slave and master
            boundary cell boundary facet mappings

        Returns
        -------
        facet_neighbours : ndarray
            Updated incidence array.
        """
        # treat DG EPBC - these are definitely preferred
        if eq_map.n_dg_epbc > 0 and self.gel.name not in ["1_2", "2_4", "3_6"]:
            raise ValueError(
                "Periodic boundary conditions not supported " +
                "for geometry {} elements.".format(self.gel.name))

        dg_epbc = eq_map.dg_epbc

        for master_bc2bfi, slave_bc2bfi in dg_epbc:
            # set neighbours of periodic cells to one another
            facet_neighbours[master_bc2bfi[:, 0], master_bc2bfi[:, 1], 0] = \
                slave_bc2bfi[:, 0]
            facet_neighbours[slave_bc2bfi[:, 0], slave_bc2bfi[:, 1], 0] = \
                master_bc2bfi[:, 0]

            # set neighbour facets
            facet_neighbours[slave_bc2bfi[:, 0], slave_bc2bfi[:, 1], 1] = \
                master_bc2bfi[:, 1]
            facet_neighbours[master_bc2bfi[:, 0], master_bc2bfi[:, 1], 1] = \
                slave_bc2bfi[:, 1]

        return facet_neighbours
    def _set_fem_periodic_facet_neighbours(self, facet_neighbours, eq_map):
        """Maybe remove after DG EPBC revision in self.get_coor.

        Parameters
        ----------
        facet_neighbours : array_like
            Shape is (n_cell, n_el_facet, 2); the first value is the index of
            the neighbouring cell, the second is the index of the facet in
            said nb. cell.
        eq_map :
            eq_map from state variable containing information on
            EPBC and DG EPBC.

        Returns
        -------
        facet_neighbours : ndarray
            Updated incidence array.
        """
        # treat classical FEM EPBCs - we need to correct neighbours
        if eq_map.n_epbc > 0:
            # set neighbours of periodic cells to one another
            mcells = nm.unique(self.dofs2cells[eq_map.master])
            scells = nm.unique(self.dofs2cells[eq_map.slave])
            mcells_facets = nm.array(
                nm.where(facet_neighbours[mcells] == -1))[1, 0]  # facets mcells
            scells_facets = nm.array(
                nm.where(facet_neighbours[scells] == -1))[1, 0]  # facets scells
            # [1, 0] above: first we need the second axis to get the axis on
            # which facet indices are stored, second we drop the axis with the
            # neighbour local facet index,
            #
            # for multiple s/mcells this will have to be
            # something like 1 + 2*nm.arange(len(mcells)) - to skip double
            # entries for -1 tags in neighbours and neighbour local facet idx

            # set neighbours of mcells to scells
            facet_neighbours[mcells, mcells_facets, 0] = scells
            # set neighbour facets to facets of scell missing neighbour
            facet_neighbours[mcells, mcells_facets, 1] = scells_facets
            # we do not need to distinguish EBC and EPBC cells, EBC overwrite
            # EPBC, we only need to fix shapes

            # set neighbours of scells to mcells
            facet_neighbours[scells, scells_facets, 0] = mcells
            # set neighbour facets to facets of mcell missing neighbour
            facet_neighbours[scells, scells_facets, 1] = mcells_facets

        return facet_neighbours

    @staticmethod
    def get_region_info(region):
        """
        Extracts the information about a region needed in various methods of
        DGField.

        Parameters
        ----------
        region : sfepy.discrete.common.region.Region

        Returns
        -------
        dim, n_cell, n_el_facets
        """
        if not region.has_cells():
            raise ValueError("Region {} has no cells".format(region.name))
        n_cell = region.get_n_cells()
        dim = region.tdim
        gel = get_gel(region)
        n_el_facets = dim + 1 if gel.is_simplex else 2 ** dim
        return dim, n_cell, n_el_facets
    def get_both_facet_state_vals(self, state, region,
                                  derivative=None, reduce_nod=True):
        """Computes values of the variable represented by dofs in quadrature
        points located at facets; returns both values - inner and outer -
        along with weights.

        Parameters
        ----------
        state : state variable containing BC info
        region : sfepy.discrete.common.region.Region
        derivative : compute derivative if truthy,
            compute n-th derivative if a number (Default value = None)
        reduce_nod : if False DOES NOT sum nodes into values at QPs
            (Default value = True)

        Returns
        -------
        inner_facet_values (n_cell, n_el_facets, n_qp),
        outer facet values (n_cell, n_el_facets, n_qp),
        weights,
        if derivative is True:
            inner_facet_values (n_cell, n_el_facets, dim, n_qp),
            outer_facet values (n_cell, n_el_facets, dim, n_qp)
        """
        if derivative:
            diff = int(derivative)
        else:
            diff = 0
        unreduce_nod = int(not reduce_nod)

        inner_base_vals, outer_base_vals, whs = \
            self.get_both_facet_base_vals(state, region, derivative=derivative)
        dofs = self.unravel_sol(state.data[0])

        n_qp = whs.shape[-1]
        outputs_shape = (self.n_cell, self.n_el_facets) + \
                        (self.n_el_nod,) * unreduce_nod + \
                        (self.dim,) * diff + \
                        (n_qp,)

        inner_facet_vals = nm.zeros(outputs_shape)
        if unreduce_nod:
            inner_facet_vals[:] = nm.einsum('id...,idf...->ifd...',
                                              dofs, inner_base_vals)
        else:
            inner_facet_vals[:] = nm.einsum('id...,id...->i...',
                                              dofs, inner_base_vals)

        per_facet_neighbours = self.get_facet_neighbor_idx(region, state.eq_map)

        outer_facet_vals = nm.zeros(outputs_shape)
        for facet_n in range(self.n_el_facets):
            if unreduce_nod:
                outer_facet_vals[:, facet_n, :] = \
                    nm.einsum('id...,id...->id...',
                              dofs[per_facet_neighbours[:, facet_n, 0]],
                              outer_base_vals[:, :, facet_n])
            else:
                outer_facet_vals[:, facet_n, :] = \
                    nm.einsum('id...,id...->i...',
                              dofs[per_facet_neighbours[:, facet_n, 0]],
                              outer_base_vals[:, :, facet_n])

        boundary_cells = nm.array(nm.where(per_facet_neighbours[:, :, 0] < 0)).T
        outer_facet_vals[boundary_cells[:, 0], boundary_cells[:, 1]] = 0.0
        # TODO detect and print boundary cells without defined BCs?
        for ebc, ebc_vals in zip(state.eq_map.dg_ebc.get(diff, []),
                                 state.eq_map.dg_ebc_val.get(diff, [])):
            if unreduce_nod:
                raise NotImplementedError(
                    "Unreduced DOFs are not available for boundary " +
                    "outer facets")
                outer_facet_vals[ebc[:, 0], ebc[:, 1], :] = \
                    nm.einsum("id,id...->id...",
                              ebc_vals, inner_base_vals[0, :, ebc[:, 1]])
            else:
                # fix flipping qp order to accommodate the
                # opposite facet orientation of neighbours
                outer_facet_vals[ebc[:, 0], ebc[:, 1], :] = ebc_vals[:, ::-1]

        # flip of outer_facet_vals moved to get_both_facet_base_vals
        return inner_facet_vals, outer_facet_vals, whs
    def get_both_facet_base_vals(self, state, region, derivative=None):
        """Returns values of the basis functions in quadrature points on
        facets, broadcast to all cells - inner to the element as well as
        outer ones - along with the qp weights broadcast and transformed to
        elements.

        Contains a quick fix to flip facet QPs for the right integration
        order.

        Parameters
        ----------
        state : used to get EPBC info
        region : sfepy.discrete.common.region.Region for connectivity
        derivative : if you need the derivative
            (Default value = None)

        Returns
        -------
        outer_facet_base_vals:
        inner_facet_base_vals:
            shape (n_cell, n_el_nod, n_el_facet, n_qp) or
            (n_cell, n_el_nod, n_el_facet, dim, n_qp)
            when derivative is True or 1
        whs: shape (n_cell, n_el_facet, n_qp)
        """
        if derivative:
            diff = int(derivative)
        else:
            diff = 0

        facet_bf, whs = self.get_facet_base(derivative=derivative)
        n_qp = nm.shape(whs)[1]
        facet_vols = self.get_facet_vols(region)
        whs = facet_vols * whs[None, :, :, 0]

        base_shape = (self.n_cell, self.n_el_nod, self.n_el_facets) + \
                     (self.dim,) * diff + \
                     (n_qp,)
        inner_facet_base_vals = nm.zeros(base_shape)
        outer_facet_base_vals = nm.zeros(base_shape)

        if derivative:
            inner_facet_base_vals[:] = facet_bf[0, :, 0, :, :, :]\
                .swapaxes(-2, -3).T
        else:
            inner_facet_base_vals[:] = facet_bf[:, 0, :, 0, :].T

        per_facet_neighbours = self.get_facet_neighbor_idx(region, state.eq_map)

        # numpy prepends the shape resulting from multiple
        # indexing before the remaining shape
        if derivative:
            outer_facet_base_vals[:] = \
                inner_facet_base_vals[0, :, per_facet_neighbours[:, :, 1]]\
                .swapaxes(-3, -4)
        else:
            outer_facet_base_vals[:] = \
                inner_facet_base_vals[0, :, per_facet_neighbours[:, :, 1]]\
                .swapaxes(-2, -3)

        # fix to flip facet QPs for the right integration order
        return inner_facet_base_vals, outer_facet_base_vals[..., ::-1], whs
    def clear_normals_cache(self, region=None):
        """Clears the normals cache for a given region or all regions.

        Parameters
        ----------
        region : sfepy.discrete.common.region.Region
            region to clear the cache for, or None to clear all
        """
        if region is None:
            self.normals_cache = {}
        else:
            if isinstance(region, str):
                self.normals_cache.pop(region)
            else:
                self.normals_cache.pop(region.name)

    def get_cell_normals_per_facet(self, region):
        """Caches results, use clear_normals_cache to clear the cache.

        Parameters
        ----------
        region : sfepy.discrete.common.region.Region
            Main region, must contain cells.

        Returns
        -------
        normals : ndarray
            normals of facets in an array of shape (n_cell, n_el_facets, dim)
        """
        if region.name in self.normals_cache:
            return self.normals_cache[region.name]

        dim, n_cell, n_el_facets = self.get_region_info(region)

        cmesh = region.domain.mesh.cmesh
        normals = cmesh.get_facet_normals()
        normals_out = nm.zeros((n_cell, n_el_facets, dim))

        c2f = cmesh.get_conn(dim, dim - 1)
        for ic, o1 in enumerate(c2f.offsets[:-1]):
            o2 = c2f.offsets[ic + 1]
            for ifal, ifa in enumerate(c2f.indices[o1:o2]):
                normals_out[ic, ifal] = normals[o1 + ifal]

        self.normals_cache[region.name] = normals_out

        return normals_out

    def clear_facet_vols_cache(self, region=None):
        """Clears the facet volume cache for a given region or all regions.

        Parameters
        ----------
        region : sfepy.discrete.common.region.Region
            region to clear the cache for, or None to clear all
        """
        if region is None:
            self.facet_vols_cache = {}
        else:
            if isinstance(region, str):
                self.facet_vols_cache.pop(region)
            else:
                self.facet_vols_cache.pop(region.name)

    def get_facet_vols(self, region):
        """Caches results, use clear_facet_vols_cache to clear the cache.

        Parameters
        ----------
        region : sfepy.discrete.common.region.Region

        Returns
        -------
        vols_out : ndarray
            volumes of the facets by cells, shape (n_cell, n_el_facets, 1)
        """
        if region.name in self.facet_vols_cache:
            return self.facet_vols_cache[region.name]

        dim, n_cell, n_el_facets = self.get_region_info(region)

        cmesh = region.domain.mesh.cmesh

        if dim == 1:
            vols = nm.ones((cmesh.num[0], 1))
        else:
            vols = cmesh.get_volumes(dim - 1)[:, None]

        vols_out = nm.zeros((n_cell, n_el_facets, 1))

        c2f = cmesh.get_conn(dim, dim - 1)
        for ic, o1 in enumerate(c2f.offsets[:-1]):
            o2 = c2f.offsets[ic + 1]
            for ifal, ifa in enumerate(c2f.indices[o1:o2]):
                vols_out[ic, ifal] = vols[ifa]

        self.facet_vols_cache[region.name] = vols_out

        return vols_out
    def get_data_shape(self, integral, integration='volume', region_name=None):
        """Returns the data shape
        (n_nod, n_qp, self.gel.dim, self.n_el_nod).

        Parameters
        ----------
        integral : integral used
        integration :
            'volume' is the only supported value (Default value = 'volume')
        region_name : not used
            (Default value = None)

        Returns
        -------
        data_shape : tuple
        """
        if integration in ('volume',):
            # from FEField.get_data_shape()
            _, weights = integral.get_qp(self.gel.name)
            n_qp = weights.shape[0]
            data_shape = (self.n_cell, n_qp, self.gel.dim, self.n_el_nod)
            # econn.shape[1] == n_el_nod, i.e. the number of nodes in element
        else:
            raise NotImplementedError('unsupported integration! (%s)'
                                      % integration)
        return data_shape

    def get_econn(self, conn_type, region, is_trace=False, integration=None):
        """Getter for econn.

        Parameters
        ----------
        conn_type : string or Struct
            'volume' is the only supported value
        region : sfepy.discrete.common.region.Region
        is_trace : ignored
            (Default value = False)
        integration : ignored
            (Default value = None)

        Returns
        -------
        econn : ndarray
            connectivity information
        """
        ct = conn_type.type if isinstance(conn_type, Struct) else conn_type

        if ct == 'volume':
            if region.name == self.region.name:
                conn = self.econn
            else:
                raise ValueError("Bad region for the field")
        else:
            raise ValueError('unknown connectivity type! (%s)' % ct)
        return conn

    def setup_extra_data(self, geometry, info, is_trace):
        """This is called in create_adof_conns(conn_info, var_indx=None,
        active_only=True, verbose=True)
        for each variable but has no effect.

        Parameters
        ----------
        geometry :
            ignored
        info :
            set to self.info
        is_trace :
            set to self.trace
        """
        # placeholder, what is this used for?
        # dct = info.dc_type.type
        self.info = info
        self.is_trace = is_trace

    def get_dofs_in_region(self, region, merge=True):
        """Return indices of DOFs that belong to the given region.
        Not used in BC treatment.

        Parameters
        ----------
        region : sfepy.discrete.common.region.Region
        merge : bool
            merge the dof tuple into one numpy array, default True

        Returns
        -------
        dofs : ndarray
        """
        dofs = []
        if region.has_cells():  # main region or its part
            els = nm.ravel(self.bubble_remap[region.cells])
            eldofs = self.bubble_dofs[els[els >= 0]]
            dofs.append(eldofs)
        else:
            # return indices of cells adjacent to boundary facets
            dim = self.dim
            cmesh = region.domain.mesh.cmesh
            bc_cells = cmesh.get_incident(dim, region.facets, dim - 1)
            bc_dofs = self.bubble_dofs[bc_cells]
            dofs.append(bc_dofs)

        if merge:
            dofs = nm.concatenate(dofs)

        return dofs

    def get_bc_facet_idx(self, region):
        """Caches results in self.boundary_facet_local_idx.

        Parameters
        ----------
        region : sfepy.discrete.common.region.Region
            surface region defining BCs

        Returns
        -------
        bc2bfi : ndarray
            index of cells on the boundary along with corresponding facets
        """
        if region.name in self.boundary_facet_local_idx:
            return self.boundary_facet_local_idx[region.name]

        bc2bfi = region.get_facet_indices()
        self.boundary_facet_local_idx[region.name] = bc2bfi

        return bc2bfi
    def create_mapping(self, region, integral, integration,
                       return_mapping=True):
        """Creates and returns the mapping.

        Parameters
        ----------
        region : sfepy.discrete.common.region.Region
        integral : Integral
        integration : str
            'volume' is the only accepted option
        return_mapping : default True
            (Default value = True)

        Returns
        -------
        mapping : VolumeMapping
        """
        domain = self.domain
        coors = domain.get_mesh_coors(actual=True)
        dconn = domain.get_conn()
        # from FEField
        if integration == 'volume':
            qp = self.get_qp('v', integral)
            # qp = self.integral.get_qp(self.gel.name)
            iels = region.get_cells()

            geo_ps = self.gel.poly_space
            ps = self.poly_space
            bf = self.get_base('v', 0, integral, iels=iels)

            conn = nm.take(dconn, iels.astype(nm.int32), axis=0)
            mapping = VolumeMapping(coors, conn, poly_space=geo_ps)
            vg = mapping.get_mapping(qp.vals, qp.weights, poly_space=ps,
                                     ori=self.ori,
                                     transform=self.basis_transform)

            out = vg
        else:
            raise ValueError('unsupported integration geometry type: %s'
                             % integration)

        if out is not None:
            # Store the integral used.
            out.integral = integral
            out.qp = qp
            out.ps = ps
            # Update base.
            out.bf[:] = bf

        if return_mapping:
            out = (out, mapping)

        return out
    def set_dofs(self, fun=0.0, region=None, dpn=None, warn=None):
        """Compute the projection of fun into the basis; alternatively set
        DOFs directly to a provided value or values, either in the main
        volume region or in a boundary region.

        Parameters
        ----------
        fun : callable, scalar or array corresponding to dofs
            (Default value = 0.0)
        region : sfepy.discrete.common.region.Region
            region to set DOFs on (Default value = None)
        dpn : number of dofs per element
            (Default value = None)
        warn :
            (Default value = None)

        Returns
        -------
        nods : ndarray
        vals : ndarray
        """
        if region is None:
            region = self.region
            return self.set_cell_dofs(fun, region, dpn, warn)
        elif region.has_cells():
            return self.set_cell_dofs(fun, region, dpn, warn)
        elif region.kind_tdim == self.dim - 1:
            nods, vals = self.set_facet_dofs(fun, region, dpn, warn)
            return nods, vals

    def set_cell_dofs(self, fun=0.0, region=None, dpn=None, warn=None):
        """
        Compute the projection of fun onto the basis in the main region;
        alternatively set DOFs directly to a provided value or values.

        Parameters
        ----------
        fun : callable, scalar or array corresponding to dofs
            (Default value = 0.0)
        region : sfepy.discrete.common.region.Region
            region to set DOFs on (Default value = None)
        dpn : number of dofs per element
            (Default value = None)
        warn : not used
            (Default value = None)

        Returns
        -------
        nods : ndarray
        vals : ndarray
        """
        aux = self.get_dofs_in_region(region)
        nods = nm.unique(nm.hstack(aux))

        if nm.isscalar(fun):
            vals = nm.zeros(aux.shape)
            vals[:, 0] = fun
            vals = nm.hstack(vals)
        elif isinstance(fun, nm.ndarray):
            # useful for testing, allows passing a complete array of dofs as IC
            if nm.shape(fun) == nm.shape(nods):
                vals = fun
        elif callable(fun):
            qp, weights = self.integral.get_qp(self.gel.name)
            coors = self.mapping.get_physical_qps(qp)

            base_vals_qp = self.poly_space.eval_base(qp)[:, 0, :]
            # this drops the redundant axis that is returned by eval_base due
            # to consistency with derivatives

            # left-hand side, so far only for orthogonal bases;
            # for the Legendre basis this can be calculated exactly,
            # in 1D it is: 1 / (2 * nm.arange(self.n_el_nod) + 1)
            lhs_diag = nm.einsum("q,q...->...", weights, base_vals_qp ** 2)

            rhs_vec = nm.einsum("q,q...,iq...->i...",
                               weights, base_vals_qp, fun(coors))

            vals = (rhs_vec / lhs_diag)

            # plot for 1D
            # from utils.visualizer import plot1D_legendre_dofs, \
            #     reconstruct_legendre_dofs
            # import matplotlib.pyplot as plt
            # plot1D_legendre_dofs(self.domain.mesh.coors, (vals,), fun)
            # ww, xx = reconstruct_legendre_dofs(self.domain.mesh.coors, 1,
            #                                    vals.T[..., None, None])
            # plt.plot(xx, ww[:, 0], label="reconstructed dofs")
            # plt.show()

        return nods, vals
    def set_facet_dofs(self, fun, region, dpn, warn):
        """Compute the projection of fun onto the basis on facets;
        alternatively set DOFs directly to a provided value or values.

        Parameters
        ----------
        fun : callable, scalar or array corresponding to dofs
        region : sfepy.discrete.common.region.Region
            region to set DOFs on
        dpn : int
            number of dofs per element
        warn :
            not used

        Returns
        -------
        nods : ndarray
        vals : ndarray
        """
        raise NotImplementedError(
            "Setting facet DOFs is not supported with DGField, " +
            "use values at qp directly. " +
            "This is usually a result of using ebc instead of dgebc")

        # NOTE: the code below is unreachable due to the raise above; it is
        # kept from the original source.
        aux = self.get_dofs_in_region(region)
        nods = nm.unique(nm.hstack(aux))

        if nm.isscalar(fun):
            vals = nm.zeros(aux.shape)
            vals[:, 0] = fun
            vals = nm.hstack(vals)
        elif isinstance(fun, nm.ndarray):
            assert_(len(fun) == dpn)
            vals = nm.zeros(aux.shape)
            vals[:, 0] = nm.repeat(fun, vals.shape[0])
        elif callable(fun):
            vals = nm.zeros(aux.shape)
            # set zero DOF to value fun, set other DOFs to zero

            # get facet QPs
            qp, weights = self.get_facet_qp()
            weights = weights[0, :, 0]
            qp = qp[:, 0, :, :]
            # get facet weights?

            # get coors
            bc2bfi = self.get_bc_facet_idx(region)
            coors = self.mapping.get_physical_qps(qp)

            # get_physical_qps returns data in a strange format, swapping
            # some axes and flipping the qp order
            bcoors = coors[bc2bfi[:, 1], ::-1, bc2bfi[:, 0], :]

            # get facet basis vals
            base_vals_qp = self.poly_space.eval_base(qp)[:, 0, 0, :]

            # solve for boundary cell DOFs
            bc_val = fun(bcoors)
            # this returns a singular matrix - the projection on the boundary
            # should be into the facet dim space
            # lhs = nm.einsum("q,qd,qc->dc", weights, base_vals_qp, base_vals_qp)
            # inv_lhs = nm.linalg.inv(lhs)
            # rhs_vec = nm.einsum("q,q...,iq...->i...",
            #                     weights, base_vals_qp, bc_val)

        return nods, vals
    def get_bc_facet_values(self, fun, region, ret_coors=False, diff=0):
        """Returns values of fun in the facet QPs of the region.

        Parameters
        ----------
        diff : derivative, 0 or 1 supported
        fun : Function, value or values to set the qps values to
        region : sfepy.discrete.common.region.Region
            boundary region
        ret_coors : default False,
            Return physical coors of qps in shape (n_cell, n_qp, dim).

        Returns
        -------
        vals : ndarray
            In shape (n_cell,) + (self.dim,) * diff + (n_qp,)
        """
        if region.has_cells():
            raise NotImplementedError(
                "Region {} has cells and can't be used as a boundary region".
                format(region))

        # get facet QPs
        qp, weights = self.get_facet_qp()
        weights = weights[0, :, 0]
        qp = qp[:, 0, :, :]
        n_qp = qp.shape[0]
        # get facet weights?

        # get physical coors
        bc2bfi = self.get_bc_facet_idx(region)
        n_cell = bc2bfi.shape[0]
        coors = self.mapping.get_physical_qps(qp)

        # get_physical_qps returns data in a strange format,
        # swapping some axes and flipping the qp order;
        # to get coors in shape (n_facet, n_qp, n_cell, dim):
        if len(coors.shape) == 3:
            coors = coors[:, None, :, :]  # add axis for qps when it is missing
        coors = coors.swapaxes(0, 2)
        bcoors = coors[bc2bfi[:, 1], ::-1, bc2bfi[:, 0], :]

        diff_shape = (self.dim,) * diff
        output_shape = (n_cell,) + diff_shape + (n_qp,)
        vals = nm.zeros(output_shape)
        # we do not need the last axis of coors, values are scalars

        if nm.isscalar(fun):
            if sum(diff_shape) > 1:
                output(("Warning: Setting gradient of shape {} "
                        "in region {} with scalar value {}")
                       .format(diff_shape, region.name, fun))
            vals[:] = fun

        elif isinstance(fun, nm.ndarray):
            try:
                vals[:] = fun[:, None]
            except ValueError:
                raise ValueError(("Provided values of shape {} could not" +
                                  " be used to set BC qps of shape {} in " +
                                  "region {}")
                                 .format(fun.shape, vals.shape, region.name))

        elif callable(fun):
            # get boundary values
            vals[:] = fun(bcoors)

        if ret_coors:
            return bcoors, vals
        return vals
    def get_nodal_values(self, dofs, region, ref_nodes=None):
        """Computes the nodal representation of the DOFs.

        Parameters
        ----------
        dofs : array_like
            dofs to transform to nodes
        region : ignored
        ref_nodes : array_like
            reference nodes to use instead of the default qps
            (Default value = None)

        Returns
        -------
        nodes : ndarray
        nodal_vals : ndarray
        """
        if ref_nodes is None:
            # poly_space could provide special nodes
            ref_nodes = self.get_qp('v', self.integral).vals
        base_vals_node = self.poly_space.eval_base(ref_nodes)[:, 0, :]
        dofs = self.unravel_sol(dofs[:, 0])

        nodal_vals = nm.sum(dofs * base_vals_node.T, axis=1)
        nodes = self.mapping.get_physical_qps(ref_nodes)

        # import matplotlib.pyplot as plt
        # plt.plot(nodes[:, 0], nodal_vals)
        # plt.show()

        return nodes, nodal_vals
def create_output(self, dofs, var_name, dof_names=None,
key=None, extend=True, fill_value=None,
linearization=None):
"""Converts the DOFs corresponding to the field to a dictionary of
output data usable by Mesh.write().
For 1D puts DOFs into vairables u_modal{0} ... u_modal{n}, where
n = approx_order and marks them for writing as cell data.
For 2+D puts dofs into name_cell_nodes and creates sturct with:
mode = "cell_nodes", data and iterpolation scheme.
Also get node values and adds them to dictionary as cell_nodes
Parameters
----------
dofs : ndarray, shape (n_nod, n_component)
The array of DOFs reshaped so that each column corresponds
to one component.
var_name : str
The variable name corresponding to `dofs`.
dof_names : tuple of str
The names of DOF components. (Default value = None)
key : str, optional
The key to be used in the output dictionary instead of the
variable name. (Default value = None)
extend : bool, not used
Extend the DOF values to cover the whole domain.
(Default value = True)
fill_value : float or complex, not used
The value used to fill the missing DOF values if `extend` is True.
(Default value = None)
linearization : Struct or None, not used
The linearization configuration for higher order approximations.
(Default value = None)
Returns
-------
out : dict
"""
out = {}
udofs = self.unravel_sol(dofs)
name = var_name if key is None else key
if self.dim == 1:
for i in range(self.n_el_nod):
out[name + "_modal{}".format(i)] = \
Struct(mode="cell", data=udofs[:, i, None, None])
else:
interpolation_scheme = self.poly_space.get_interpol_scheme()
unravel = get_unraveler(self.n_el_nod, self.n_cell)
out[name + "_cell_nodes"] = Struct(mode="cell_nodes",
data=unravel(dofs)[..., 0],
scheme=interpolation_scheme)
return out
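# A rough usage sketch (assumes an initialized DG field `field` and a raveled
# DOF vector `dofs` of length field.n_cell * field.n_el_nod; the names here
# are illustrative, not part of the class above):
# out = field.create_output(dofs, "u")
# for key, val in out.items():
#     print(key, val.mode)  # "cell" in 1D, "cell_nodes" otherwise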
|
[
"sfepy.discrete.fem.mappings.VolumeMapping",
"sfepy.base.base.Struct.__init__",
"sfepy.discrete.Integral",
"sfepy.base.base.Struct",
"sfepy.discrete.common.fields.parse_shape",
"sfepy.discrete.PolySpace.any_from_args"
] |
[((3017, 3054), 'six.iteritems', 'six.iteritems', (['region.domain.geom_els'], {}), '(region.domain.geom_els)\n', (3030, 3054), False, 'import six\n'), ((1255, 1369), 'numpy.lib.stride_tricks.as_strided', 'as_strided', (['u'], {'shape': '(n_cell, n_el_nod, 1)', 'strides': '(n_el_nod * ustride1, ustride1, ustride1)', 'writeable': '(False)'}), '(u, shape=(n_cell, n_el_nod, 1), strides=(n_el_nod * ustride1,\n ustride1, ustride1), writeable=False)\n', (1265, 1369), False, 'from numpy.lib.stride_tricks import as_strided\n'), ((4382, 4425), 'sfepy.discrete.common.fields.parse_shape', 'parse_shape', (['shape', 'region.domain.shape.dim'], {}), '(shape, region.domain.shape.dim)\n', (4393, 4425), False, 'from sfepy.discrete.common.fields import parse_shape\n'), ((4434, 4507), 'sfepy.base.base.Struct.__init__', 'Struct.__init__', (['self'], {'name': 'name', 'dtype': 'dtype', 'shape': 'shape', 'region': 'region'}), '(self, name=name, dtype=dtype, shape=shape, region=region)\n', (4449, 4507), False, 'from sfepy.base.base import output, assert_, Struct\n'), ((6283, 6393), 'sfepy.discrete.PolySpace.any_from_args', 'PolySpace.any_from_args', (['name', 'self.gel', 'self.approx_order'], {'base': 'self.poly_space_base', 'force_bubble': '(False)'}), '(name, self.gel, self.approx_order, base=self.\n poly_space_base, force_bubble=False)\n', (6306, 6393), False, 'from sfepy.discrete import Integral, PolySpace\n'), ((7878, 7900), 'numpy.arange', 'nm.arange', (['self.n_cell'], {}), '(self.n_cell)\n', (7887, 7900), True, 'import numpy as nm\n'), ((8214, 8233), 'numpy.prod', 'nm.prod', (['self.shape'], {}), '(self.shape)\n', (8221, 8233), True, 'import numpy as nm\n'), ((16796, 16846), 'numpy.zeros', 'nm.zeros', (['(n_cell, n_el_facets, 2)'], {'dtype': 'nm.int32'}), '((n_cell, n_el_facets, 2), dtype=nm.int32)\n', (16804, 16846), True, 'import numpy as nm\n'), ((24365, 24388), 'numpy.zeros', 'nm.zeros', (['outputs_shape'], {}), '(outputs_shape)\n', (24373, 24388), True, 'import numpy as nm\n'), ((24805, 24828), 'numpy.zeros', 'nm.zeros', (['outputs_shape'], {}), '(outputs_shape)\n', (24813, 24828), True, 'import numpy as nm\n'), ((24852, 24875), 'six.moves.range', 'range', (['self.n_el_facets'], {}), '(self.n_el_facets)\n', (24857, 24875), False, 'from six.moves import range\n'), ((27902, 27922), 'numpy.zeros', 'nm.zeros', (['base_shape'], {}), '(base_shape)\n', (27910, 27922), True, 'import numpy as nm\n'), ((27955, 27975), 'numpy.zeros', 'nm.zeros', (['base_shape'], {}), '(base_shape)\n', (27963, 27975), True, 'import numpy as nm\n'), ((30071, 30107), 'numpy.zeros', 'nm.zeros', (['(n_cell, n_el_facets, dim)'], {}), '((n_cell, n_el_facets, dim))\n', (30079, 30107), True, 'import numpy as nm\n'), ((31696, 31730), 'numpy.zeros', 'nm.zeros', (['(n_cell, n_el_facets, 1)'], {}), '((n_cell, n_el_facets, 1))\n', (31704, 31730), True, 'import numpy as nm\n'), ((39779, 39795), 'numpy.isscalar', 'nm.isscalar', (['fun'], {}), '(fun)\n', (39790, 39795), True, 'import numpy as nm\n'), ((42261, 42277), 'numpy.isscalar', 'nm.isscalar', (['fun'], {}), '(fun)\n', (42272, 42277), True, 'import numpy as nm\n'), ((45384, 45406), 'numpy.zeros', 'nm.zeros', (['output_shape'], {}), '(output_shape)\n', (45392, 45406), True, 'import numpy as nm\n'), ((45483, 45499), 'numpy.isscalar', 'nm.isscalar', (['fun'], {}), '(fun)\n', (45494, 45499), True, 'import numpy as nm\n'), ((47230, 47269), 'numpy.sum', 'nm.sum', (['(dofs * base_vals_node.T)'], {'axis': '(1)'}), '(dofs * base_vals_node.T, axis=1)\n', (47236, 47269), True, 'import numpy as 
nm\n'), ((2327, 2338), 'numpy.ravel', 'nm.ravel', (['u'], {}), '(u)\n', (2335, 2338), True, 'import numpy as nm\n'), ((5541, 5587), 'sfepy.discrete.Integral', 'Integral', (['"""dg_fi"""'], {'order': '(2 * self.approx_order)'}), "('dg_fi', order=2 * self.approx_order)\n", (5549, 5587), False, 'from sfepy.discrete import Integral, PolySpace\n'), ((7963, 7985), 'numpy.arange', 'nm.arange', (['self.n_cell'], {}), '(self.n_cell)\n', (7972, 7985), True, 'import numpy as nm\n'), ((12657, 12675), 'numpy.ones', 'nm.ones', (['(1, 1, 1)'], {}), '((1, 1, 1))\n', (12664, 12675), True, 'import numpy as nm\n'), ((14223, 14246), 'six.moves.range', 'range', (['self.n_el_facets'], {}), '(self.n_el_facets)\n', (14228, 14246), False, 'from six.moves import range\n'), ((20616, 20657), 'numpy.unique', 'nm.unique', (['self.dofs2cells[eq_map.master]'], {}), '(self.dofs2cells[eq_map.master])\n', (20625, 20657), True, 'import numpy as nm\n'), ((20679, 20719), 'numpy.unique', 'nm.unique', (['self.dofs2cells[eq_map.slave]'], {}), '(self.dofs2cells[eq_map.slave])\n', (20688, 20719), True, 'import numpy as nm\n'), ((24448, 24504), 'numpy.einsum', 'nm.einsum', (['"""id...,idf...->ifd..."""', 'dofs', 'inner_base_vals'], {}), "('id...,idf...->ifd...', dofs, inner_base_vals)\n", (24457, 24504), True, 'import numpy as nm\n'), ((24597, 24650), 'numpy.einsum', 'nm.einsum', (['"""id...,id...->i..."""', 'dofs', 'inner_base_vals'], {}), "('id...,id...->i...', dofs, inner_base_vals)\n", (24606, 24650), True, 'import numpy as nm\n'), ((27612, 27625), 'numpy.shape', 'nm.shape', (['whs'], {}), '(whs)\n', (27620, 27625), True, 'import numpy as nm\n'), ((31580, 31606), 'numpy.ones', 'nm.ones', (['(cmesh.num[0], 1)'], {}), '((cmesh.num[0], 1))\n', (31587, 31606), True, 'import numpy as nm\n'), ((35020, 35061), 'numpy.ravel', 'nm.ravel', (['self.bubble_remap[region.cells]'], {}), '(self.bubble_remap[region.cells])\n', (35028, 35061), True, 'import numpy as nm\n'), ((35490, 35510), 'numpy.concatenate', 'nm.concatenate', (['dofs'], {}), '(dofs)\n', (35504, 35510), True, 'import numpy as nm\n'), ((37212, 37257), 'sfepy.discrete.fem.mappings.VolumeMapping', 'VolumeMapping', (['coors', 'conn'], {'poly_space': 'geo_ps'}), '(coors, conn, poly_space=geo_ps)\n', (37225, 37257), False, 'from sfepy.discrete.fem.mappings import VolumeMapping\n'), ((39751, 39765), 'numpy.hstack', 'nm.hstack', (['aux'], {}), '(aux)\n', (39760, 39765), True, 'import numpy as nm\n'), ((39816, 39835), 'numpy.zeros', 'nm.zeros', (['aux.shape'], {}), '(aux.shape)\n', (39824, 39835), True, 'import numpy as nm\n'), ((39884, 39899), 'numpy.hstack', 'nm.hstack', (['vals'], {}), '(vals)\n', (39893, 39899), True, 'import numpy as nm\n'), ((42233, 42247), 'numpy.hstack', 'nm.hstack', (['aux'], {}), '(aux)\n', (42242, 42247), True, 'import numpy as nm\n'), ((42298, 42317), 'numpy.zeros', 'nm.zeros', (['aux.shape'], {}), '(aux.shape)\n', (42306, 42317), True, 'import numpy as nm\n'), ((42366, 42381), 'numpy.hstack', 'nm.hstack', (['vals'], {}), '(vals)\n', (42375, 42381), True, 'import numpy as nm\n'), ((49323, 49343), 'six.moves.range', 'range', (['self.n_el_nod'], {}), '(self.n_el_nod)\n', (49328, 49343), False, 'from six.moves import range\n'), ((7772, 7804), 'numpy.arange', 'nm.arange', (['n_dof'], {'dtype': 'nm.int32'}), '(n_dof, dtype=nm.int32)\n', (7781, 7804), True, 'import numpy as nm\n'), ((12610, 12626), 'numpy.zeros', 'nm.zeros', (['(1, 1)'], {}), '((1, 1))\n', (12618, 12626), True, 'import numpy as nm\n'), ((24978, 25088), 'numpy.einsum', 'nm.einsum', 
(['"""id...,id...->id..."""', 'dofs[per_facet_neighbours[:, facet_n, 0]]', 'outer_base_vals[:, :, facet_n]'], {}), "('id...,id...->id...', dofs[per_facet_neighbours[:, facet_n, 0]],\n outer_base_vals[:, :, facet_n])\n", (24987, 25088), True, 'import numpy as nm\n'), ((25235, 25344), 'numpy.einsum', 'nm.einsum', (['"""id...,id...->i..."""', 'dofs[per_facet_neighbours[:, facet_n, 0]]', 'outer_base_vals[:, :, facet_n]'], {}), "('id...,id...->i...', dofs[per_facet_neighbours[:, facet_n, 0]],\n outer_base_vals[:, :, facet_n])\n", (25244, 25344), True, 'import numpy as nm\n'), ((25436, 25479), 'numpy.where', 'nm.where', (['(per_facet_neighbours[:, :, 0] < 0)'], {}), '(per_facet_neighbours[:, :, 0] < 0)\n', (25444, 25479), True, 'import numpy as nm\n'), ((26027, 26099), 'numpy.einsum', 'nm.einsum', (['"""id,id...->id..."""', 'ebc_vals', 'inner_base_vals[0, :, ebc[:, 1]]'], {}), "('id,id...->id...', ebc_vals, inner_base_vals[0, :, ebc[:, 1]])\n", (26036, 26099), True, 'import numpy as nm\n'), ((42481, 42500), 'numpy.zeros', 'nm.zeros', (['aux.shape'], {}), '(aux.shape)\n', (42489, 42500), True, 'import numpy as nm\n'), ((42526, 42555), 'numpy.repeat', 'nm.repeat', (['fun', 'vals.shape[0]'], {}), '(fun, vals.shape[0])\n', (42535, 42555), True, 'import numpy as nm\n'), ((49418, 49467), 'sfepy.base.base.Struct', 'Struct', ([], {'mode': '"""cell"""', 'data': 'udofs[:, i, None, None]'}), "(mode='cell', data=udofs[:, i, None, None])\n", (49424, 49467), False, 'from sfepy.base.base import output, assert_, Struct\n'), ((9787, 9811), 'numpy.arange', 'nm.arange', (['self.n_el_nod'], {}), '(self.n_el_nod)\n', (9796, 9811), True, 'import numpy as nm\n'), ((10587, 10600), 'numpy.shape', 'nm.shape', (['qps'], {}), '(qps)\n', (10595, 10600), True, 'import numpy as nm\n'), ((20774, 20814), 'numpy.where', 'nm.where', (['(facet_neighbours[mcells] == -1)'], {}), '(facet_neighbours[mcells] == -1)\n', (20782, 20814), True, 'import numpy as nm\n'), ((20893, 20933), 'numpy.where', 'nm.where', (['(facet_neighbours[scells] == -1)'], {}), '(facet_neighbours[scells] == -1)\n', (20901, 20933), True, 'import numpy as nm\n'), ((40036, 40049), 'numpy.shape', 'nm.shape', (['fun'], {}), '(fun)\n', (40044, 40049), True, 'import numpy as nm\n'), ((40053, 40067), 'numpy.shape', 'nm.shape', (['nods'], {}), '(nods)\n', (40061, 40067), True, 'import numpy as nm\n'), ((40636, 40688), 'numpy.einsum', 'nm.einsum', (['"""q,q...->..."""', 'weights', '(base_vals_qp ** 2)'], {}), "('q,q...->...', weights, base_vals_qp ** 2)\n", (40645, 40688), True, 'import numpy as nm\n'), ((42604, 42623), 'numpy.zeros', 'nm.zeros', (['aux.shape'], {}), '(aux.shape)\n', (42612, 42623), True, 'import numpy as nm\n'), ((9528, 9543), 'numpy.shape', 'nm.shape', (['coors'], {}), '(coors)\n', (9536, 9543), True, 'import numpy as nm\n'), ((9850, 9866), 'numpy.unique', 'nm.unique', (['cells'], {}), '(cells)\n', (9859, 9866), True, 'import numpy as nm\n'), ((10738, 10751), 'numpy.shape', 'nm.shape', (['qps'], {}), '(qps)\n', (10746, 10751), True, 'import numpy as nm\n'), ((11149, 11162), 'numpy.shape', 'nm.shape', (['qps'], {}), '(qps)\n', (11157, 11162), True, 'import numpy as nm\n'), ((14159, 14172), 'numpy.shape', 'nm.shape', (['qps'], {}), '(qps)\n', (14167, 14172), True, 'import numpy as nm\n'), ((14009, 14022), 'numpy.shape', 'nm.shape', (['qps'], {}), '(qps)\n', (14017, 14022), True, 'import numpy as nm\n')]
|
from typing import Optional
from sqlmodel import Field, SQLModel
from datetime import datetime
class Calendar(SQLModel, table=True):
"""Create an SQLModel for a calendar"""
id: Optional[int] = Field(default=None, primary_key=True)
date: datetime
year_number: int
year_name: str
quarter_number: int
quarter_name: str
month_number: int
month_name: str
week_number: int
week_name: str
week_day_number: int
week_day_name: str
__table_args__ = {"schema": "app_db"}
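# A minimal usage sketch (connection URL and values are illustrative):
# from sqlmodel import Session, create_engine
# engine = create_engine("postgresql://user:pass@localhost/appdb")
# SQLModel.metadata.create_all(engine)
# with Session(engine) as session:
#     session.add(Calendar(date=datetime(2022, 1, 1), year_number=2022,
#                          year_name="2022", quarter_number=1,
#                          quarter_name="Q1", month_number=1,
#                          month_name="January", week_number=52,
#                          week_name="W52", week_day_number=6,
#                          week_day_name="Saturday"))
#     session.commit()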
|
[
"sqlmodel.Field"
] |
[((204, 241), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (209, 241), False, 'from sqlmodel import Field, SQLModel\n')]
|
import numpy as np
from megengine import Tensor
import megengine.functional as F
class AnchorGenerator():
"""default anchor generator for fpn.
This class generate anchors by feature map in level.
"""
def __init__(self, base_size=16, ratios=[0.5, 1, 2],
base_scale=2):
self.base_size = base_size
self.base_scale = np.array(base_scale)
self.anchor_ratios = ratios
def _whctrs(self, anchor):
"""convert anchor box into (w, h, ctr_x, ctr_y)
"""
w = anchor[:, 2] - anchor[:, 0] + 1
h = anchor[:, 3] - anchor[:, 1] + 1
x_ctr = anchor[:, 0] + 0.5 * (w - 1)
y_ctr = anchor[:, 1] + 0.5 * (h - 1)
return w, h, x_ctr, y_ctr
def get_plane_anchors(self, anchor_scales: np.ndarray):
"""get anchors per location on feature map.
The anchor number is anchor_scales x anchor_ratios
"""
base_anchor = Tensor([0, 0, self.base_size - 1, self.base_size - 1])
base_anchor = base_anchor.reshape(1, -1)
w, h, x_ctr, y_ctr = self._whctrs(base_anchor)
# ratio enumerate
size = w * h
size_ratios = size / self.anchor_ratios
ws = F.sqrt(size_ratios)
hs = ws * self.anchor_ratios
# ws = size_ratios.sqrt().round()
# hs = (ws * self.anchor_ratios).round()
# scale enumerate
anchor_scales = anchor_scales.reshape(1, -1).astype(np.float32)
ws = F.expand_dims(ws, 1)
hs = F.expand_dims(hs, 1)
ws = (ws * anchor_scales).reshape(-1, 1)
hs = (hs * anchor_scales).reshape(-1, 1)
# make anchors
anchors = F.concat(
[
x_ctr - 0.5 * (ws - 1),
y_ctr - 0.5 * (hs - 1),
x_ctr + 0.5 * (ws - 1),
y_ctr + 0.5 * (hs - 1),
],
axis=1,
)
return anchors.astype(np.float32)
def get_center_offsets(self, featmap, stride):
# f_shp = featmap.shape
# fm_height, fm_width = f_shp[-2], f_shp[-1]
fm_height, fm_width = featmap.shape[2:]
shift_x = F.linspace(0, fm_width - 1, fm_width) * stride
shift_y = F.linspace(0, fm_height - 1, fm_height) * stride
# make the mesh grid of shift_x and shift_y
mesh_shape = (fm_height, fm_width)
broad_shift_x = F.broadcast_to(shift_x.reshape(1, -1), mesh_shape)
broad_shift_y = F.broadcast_to(shift_y.reshape(-1, 1), mesh_shape)
# broad_shift_x = shift_x.reshape(-1, shift_x.shape[0]).broadcast_to(*mesh_shape)
# broad_shift_y = shift_y.reshape(shift_y.shape[0], -1).broadcast_to(*mesh_shape)
flatten_shift_x = broad_shift_x.flatten()
flatten_shift_y = broad_shift_y.flatten()
shifts = F.stack([flatten_shift_x, flatten_shift_y, flatten_shift_x, flatten_shift_y], axis=1)
# flatten_shift_x = F.add_axis(broad_shift_x.reshape(-1), 1)
# flatten_shift_y = F.add_axis(broad_shift_y.reshape(-1), 1)
# shifts = F.concat(
# [flatten_shift_x, flatten_shift_y, flatten_shift_x, flatten_shift_y,],
# axis=1)
return shifts
def get_anchors_by_feature(self, featmap, stride):
# shifts shape: [A, 4]
shifts = self.get_center_offsets(featmap, stride)
# plane_anchors shape: [B, 4], e.g. B=3
plane_anchors = self.get_plane_anchors(self.base_scale * stride)
# all_anchors = shifts.repeat(1,3) + cell_anchors.flatten()
all_anchors = F.expand_dims(plane_anchors, 0) + F.expand_dims(shifts, 1)
all_anchors = all_anchors.reshape(-1, 4)
return all_anchors
def __call__(self, featmap, stride):
return self.get_anchors_by_feature(featmap, stride)
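# A minimal usage sketch (shapes and stride are illustrative):
# gen = AnchorGenerator(ratios=[0.5, 1, 2], base_scale=4)
# featmap = F.zeros((1, 256, 32, 32))  # (N, C, H, W) feature map
# anchors = gen(featmap, stride=16)    # -> (32 * 32 * 3, 4) anchor boxes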
|
[
"megengine.functional.linspace",
"megengine.Tensor",
"megengine.functional.stack",
"megengine.functional.sqrt",
"megengine.functional.expand_dims",
"megengine.functional.concat"
] |
[((364, 384), 'numpy.array', 'np.array', (['base_scale'], {}), '(base_scale)\n', (372, 384), True, 'import numpy as np\n'), ((939, 993), 'megengine.Tensor', 'Tensor', (['[0, 0, self.base_size - 1, self.base_size - 1]'], {}), '([0, 0, self.base_size - 1, self.base_size - 1])\n', (945, 993), False, 'from megengine import Tensor\n'), ((1231, 1250), 'megengine.functional.sqrt', 'F.sqrt', (['size_ratios'], {}), '(size_ratios)\n', (1237, 1250), True, 'import megengine.functional as F\n'), ((1490, 1510), 'megengine.functional.expand_dims', 'F.expand_dims', (['ws', '(1)'], {}), '(ws, 1)\n', (1503, 1510), True, 'import megengine.functional as F\n'), ((1524, 1544), 'megengine.functional.expand_dims', 'F.expand_dims', (['hs', '(1)'], {}), '(hs, 1)\n', (1537, 1544), True, 'import megengine.functional as F\n'), ((1684, 1802), 'megengine.functional.concat', 'F.concat', (['[x_ctr - 0.5 * (ws - 1), y_ctr - 0.5 * (hs - 1), x_ctr + 0.5 * (ws - 1), \n y_ctr + 0.5 * (hs - 1)]'], {'axis': '(1)'}), '([x_ctr - 0.5 * (ws - 1), y_ctr - 0.5 * (hs - 1), x_ctr + 0.5 * (ws -\n 1), y_ctr + 0.5 * (hs - 1)], axis=1)\n', (1692, 1802), True, 'import megengine.functional as F\n'), ((2826, 2916), 'megengine.functional.stack', 'F.stack', (['[flatten_shift_x, flatten_shift_y, flatten_shift_x, flatten_shift_y]'], {'axis': '(1)'}), '([flatten_shift_x, flatten_shift_y, flatten_shift_x, flatten_shift_y\n ], axis=1)\n', (2833, 2916), True, 'import megengine.functional as F\n'), ((2159, 2196), 'megengine.functional.linspace', 'F.linspace', (['(0)', '(fm_width - 1)', 'fm_width'], {}), '(0, fm_width - 1, fm_width)\n', (2169, 2196), True, 'import megengine.functional as F\n'), ((2224, 2263), 'megengine.functional.linspace', 'F.linspace', (['(0)', '(fm_height - 1)', 'fm_height'], {}), '(0, fm_height - 1, fm_height)\n', (2234, 2263), True, 'import megengine.functional as F\n'), ((3566, 3597), 'megengine.functional.expand_dims', 'F.expand_dims', (['plane_anchors', '(0)'], {}), '(plane_anchors, 0)\n', (3579, 3597), True, 'import megengine.functional as F\n'), ((3600, 3624), 'megengine.functional.expand_dims', 'F.expand_dims', (['shifts', '(1)'], {}), '(shifts, 1)\n', (3613, 3624), True, 'import megengine.functional as F\n')]
|
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
import megengine.optimizer as optim
class AdaptiveAvgPool2d(M.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return F.mean(F.mean(x, axis=-2, keepdims=True), axis=-1, keepdims=True)
class AdaptiveMaxPool2d(M.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return F.max(F.max(x, axis=-2, keepdims=True), axis=-1, keepdims=True)
class ChannelAttention(M.Module):
def __init__(self, in_planes, ratio=16):
super().__init__()
self.avg_pool = AdaptiveAvgPool2d()
self.max_pool = AdaptiveMaxPool2d()
self.sharedMLP = M.Sequential(
M.Conv2d(in_planes, in_planes // ratio, 1, bias=False), M.ReLU(),
M.Conv2d(in_planes // ratio, in_planes, 1, bias=False))
self.sigmoid = M.Sigmoid()
def forward(self, x):
avgout = self.sharedMLP(self.avg_pool(x))
maxout = self.sharedMLP(self.max_pool(x))
return self.sigmoid(avgout + maxout)
class SpatialAttention(M.Module):
def __init__(self, kernel_size=3):
super().__init__()
self.conv = M.Conv2d(2,1,kernel_size, padding=1, bias=False)
self.sigmoid = M.Sigmoid()
self.concat = F.concat
self.mean = F.mean
self.max = F.max
def forward(self, x):
avgout = self.mean(x, 1, True)
maxout = self.max(x, 1, True)
x = self.concat([avgout, maxout], 1)
x = self.conv(x)
return self.sigmoid(x)
class CBAM(M.Module):
def __init__(self, planes):
super().__init__()
self.ca = ChannelAttention(planes)
self.sa = SpatialAttention()
def forward(self, x):
x = self.ca(x) * x
out = self.sa(x) * x
return out
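# Note: CBAM applies the two gates sequentially - channel attention first
# (which feature maps matter), then spatial attention (where to look); each
# gate is a sigmoid map multiplied elementwise onto the features.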
if __name__ == "__main__":
data = mge.tensor(np.random.random((1, 16, 10, 10)).astype(np.float32))
model = CBAM(16)
opt = optim.SGD(model.parameters(), lr=0.1)
for i in range(5):
opt.zero_grad()
loss = model(data).mean()
opt.backward(loss)
opt.step()
print("loss = {:.3f}".format(loss.numpy()[0]))
|
[
"megengine.module.Sigmoid",
"megengine.module.ReLU",
"megengine.functional.mean",
"megengine.functional.max",
"megengine.module.Conv2d"
] |
[((933, 944), 'megengine.module.Sigmoid', 'M.Sigmoid', ([], {}), '()\n', (942, 944), True, 'import megengine.module as M\n'), ((1239, 1289), 'megengine.module.Conv2d', 'M.Conv2d', (['(2)', '(1)', 'kernel_size'], {'padding': '(1)', 'bias': '(False)'}), '(2, 1, kernel_size, padding=1, bias=False)\n', (1247, 1289), True, 'import megengine.module as M\n'), ((1311, 1322), 'megengine.module.Sigmoid', 'M.Sigmoid', ([], {}), '()\n', (1320, 1322), True, 'import megengine.module as M\n'), ((278, 311), 'megengine.functional.mean', 'F.mean', (['x'], {'axis': '(-2)', 'keepdims': '(True)'}), '(x, axis=-2, keepdims=True)\n', (284, 311), True, 'import megengine.functional as F\n'), ((472, 504), 'megengine.functional.max', 'F.max', (['x'], {'axis': '(-2)', 'keepdims': '(True)'}), '(x, axis=-2, keepdims=True)\n', (477, 504), True, 'import megengine.functional as F\n'), ((776, 830), 'megengine.module.Conv2d', 'M.Conv2d', (['in_planes', '(in_planes // ratio)', '(1)'], {'bias': '(False)'}), '(in_planes, in_planes // ratio, 1, bias=False)\n', (784, 830), True, 'import megengine.module as M\n'), ((832, 840), 'megengine.module.ReLU', 'M.ReLU', ([], {}), '()\n', (838, 840), True, 'import megengine.module as M\n'), ((854, 908), 'megengine.module.Conv2d', 'M.Conv2d', (['(in_planes // ratio)', 'in_planes', '(1)'], {'bias': '(False)'}), '(in_planes // ratio, in_planes, 1, bias=False)\n', (862, 908), True, 'import megengine.module as M\n'), ((1926, 1959), 'numpy.random.random', 'np.random.random', (['(1, 16, 10, 10)'], {}), '((1, 16, 10, 10))\n', (1942, 1959), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# This code was adapted from http://sfepy.org/doc-devel/mat_optim.html.
from __future__ import print_function
from __future__ import absolute_import
import sys
sys.path.append('.')
import matplotlib as mlp
import matplotlib.pyplot as plt
from matplotlib.collections import PolyCollection
from mpl_toolkits.mplot3d.art3d import Poly3DCollection, Line3DCollection
import numpy as np
from sfepy.base.base import Struct, output
from sfepy.base.log import Log
from sfepy import data_dir
class MaterialSimulator(object):
@staticmethod
def create_app(filename, is_homog=False, **kwargs):
from sfepy.base.conf import ProblemConf, get_standard_keywords
from sfepy.homogenization.homogen_app import HomogenizationApp
from sfepy.applications import PDESolverApp
required, other = get_standard_keywords()
if is_homog:
required.remove('equations')
conf = ProblemConf.from_file(filename, required, other,
define_args=kwargs)
options = Struct(output_filename_trunk=None,
save_ebc=False,
save_ebc_nodes=False,
save_regions=False,
save_regions_as_groups=False,
save_field_meshes=False,
solve_not=False,
)
output.set_output(filename='sfepy_log.txt', quiet=True)
if is_homog:
app = HomogenizationApp(conf, options, 'material_opt_micro:')
else:
app = PDESolverApp(conf, options, 'material_opt_macro:')
app.conf.opt_data = {}
opts = conf.options
if hasattr(opts, 'parametric_hook'): # Parametric study.
parametric_hook = conf.get_function(opts.parametric_hook)
app.parametrize(parametric_hook)
return app
def __init__(self, macro_fn, micro_fn, phis, plot_meshes_bool=False):
self.macro_app = self.create_app(macro_fn, is_homog=False, is_opt=True)
self.micro_app = self.create_app(micro_fn, is_homog=True, is_opt=True)
self.phis = phis
self.plot_meshes_bool = plot_meshes_bool
@staticmethod
def rotate_mat(D, angle):
s = np.sin(angle)
c = np.cos(angle)
s2 = s**2
c2 = c**2
sc = s * c
T = np.array([[c2, 0, s2, 0, 2*sc,0],
[0, 1, 0, 0, 0, 0],
[s2, 0, c2, 0, -2*sc, 0],
[0, 0, 0, c, 0, -s],
[-sc, 0, sc, 0, c2 - s2, 0],
[0, 0, 0, s, 0, c]])
return np.dot(np.dot(T, D), T.T)
def plot_meshes(self):
# plot mesh for micro problem
pb = self.micro_app.problem
coors = pb.domain.mesh.coors
#print(set(coors[:,2]))
graph = pb.domain.mesh.get_conn(pb.domain.mesh.descs[0])
graph_slice = np.zeros((graph.shape[0], 4))
for j in range(graph.shape[0]):
graph_slice[j,:] = graph[j,coors[graph[j,:],2] == 0]
cells_matrix = pb.domain.regions['Ym'].get_cells()
cells_fibers = pb.domain.regions['Yf'].get_cells()
fig = plt.figure(figsize = (12, 5))
ax = fig.add_subplot(121)
pc = PolyCollection(verts=coors[graph[cells_matrix,0:4],:2], facecolors='white',
edgecolors='black')
ax.add_collection(pc)
pc = PolyCollection(verts=coors[graph[cells_fibers,0:4],:2], facecolors='gray',
edgecolors='black')
ax.add_collection(pc)
ax.axis('equal')
ax.set_title('2D plot of microstructure')
ax = fig.add_subplot(122, projection='3d')
for e in range(graph.shape[0]):
if e in cells_fibers:
color = 'gray'
else:
color = 'white'
tupleList = coors[graph[e,:],:]
vertices = [[0, 1, 2, 3], [4, 5, 6, 7],
[0, 1, 5, 4], [1, 2, 6, 5], [2, 3, 7, 6], [3, 0, 4, 7]]
verts = [[tupleList[vertices[ix][iy]] for iy in range(len(vertices[0]))]
for ix in range(len(vertices))]
pc3d = Poly3DCollection(verts=verts, facecolors=color,
edgecolors='black', linewidths=1, alpha=0.5)
ax.add_collection3d(pc3d)
ax.set_title('3D plot of microstructure')
plt.show(fig)
# plot mesh for macro problem
pb = self.macro_app.problem
coors = pb.domain.mesh.coors
graph = pb.domain.mesh.get_conn(pb.domain.mesh.descs[0])
fig2 = plt.figure(figsize=(5,6))
ax = fig2.add_subplot(111, projection='3d')
for e in range(graph.shape[0]):
tupleList = coors[graph[e,:],:]
vertices = [[0, 1, 2, 3], [4, 5, 6, 7],
[0, 1, 5, 4], [1, 2, 6, 5], [2, 3, 7, 6], [3, 0, 4, 7]]
verts = [[tupleList[vertices[ix][iy]] for iy in range(len(vertices[0]))]
for ix in range(len(vertices))]
pc3d = Poly3DCollection(verts=verts, facecolors='white',
edgecolors='black', linewidths=1, alpha=0.5)
ax.add_collection3d(pc3d)
ax.set_xlim3d(-0.03, 0.03)
ax.set_ylim3d(-0.01, 0.01)
ax.set_zlim3d(-0.01, 0.1)
ax.set_title('3D plot of macro system')
plt.show(fig2)
return None
def mat_eval(self, x):
mic_od = self.micro_app.conf.opt_data
mac_od = self.macro_app.conf.opt_data
mic_od['coefs'] = {}
mic_od['mat_params'] = x_norm2real(x)
self.micro_app()
D = mic_od['D_homog']
comp_k = []
for phi in self.phis:
#print('phi = %d' % phi)
mac_od['D_homog'] = self.rotate_mat(D, np.deg2rad(phi))
self.macro_app()
comp_k.append(mac_od['k'])
# added by Audrey: get a plot of a slice of the mesh
if self.plot_meshes_bool:
self.plot_meshes()
return comp_k
def bounds():
x_L = [120e9, 0.2, 2e9, 0.2]
x_U = [200e9, 0.45, 8e9, 0.45]
return x_L, x_U
def x_norm2real(x):
x_L, x_U = np.array(bounds())
return x * (x_U - x_L) + x_L
def x_real2norm(x):
x_L, x_U = np.array(bounds())
return (x - x_L) / (x_U - x_L)
micro_filename = data_dir + '/examples/homogenization/' + 'homogenization_opt.py'
macro_filename = data_dir + '/examples/homogenization/' + 'linear_elasticity_opt.py'
def one_simulation(x0, plot_meshes_bool=False):
"""
    This function is the main callable here: it takes as input the parameter vector,
    here x0=[E_fiber, nu_fiber, E_matrix, nu_matrix], and returns the simulated output
    (here the slope of the force-elongation curve obtained during a tensile test),
    to be compared with the measured data.
"""
x0 = x0.reshape((-1, ))
phis = [0, 30, 60, 90]
#exp_data = zip([0, 30, 60, 90], [1051140., 197330., 101226., 95474.])
ms = MaterialSimulator(macro_filename, micro_filename,
phis,
plot_meshes_bool=plot_meshes_bool)
qoi = ms.mat_eval(x0)
return qoi
def one_simulation_2params(x0, plot_meshes_bool=False):
x0 = x0.reshape((-1, ))
x0 = np.array([x0[0], 0.45, x0[1], 0.])
phis = [0, 30, 60, 90]
#exp_data = zip([0, 30, 60, 90], [1051140., 197330., 101226., 95474.])
ms = MaterialSimulator(macro_filename, micro_filename,
phis, plot_meshes_bool=plot_meshes_bool)
qoi = ms.mat_eval(x0)
return qoi
def one_simulation_2params_rvs(x0, plot_meshes_bool=False):
x0 = x0.reshape((-1, ))
x0 = np.array([x0[0], 0.45, x0[1], 0.])
phis = [0, 30, 60, 90]
ms = MaterialSimulator(macro_filename, micro_filename,
phis,
plot_meshes_bool=plot_meshes_bool)
qoi = ms.mat_eval(x0)
qoi = np.tile(np.array(qoi), 100)
return qoi
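# A rough usage sketch (parameter values are illustrative; they are mapped
# into the normalized [0, 1] space that one_simulation expects):
# x0 = x_real2norm(np.array([150e9, 0.3, 4e9, 0.3]))
# slopes = one_simulation(x0)  # one slope per phi in [0, 30, 60, 90]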
|
[
"sfepy.base.conf.get_standard_keywords",
"sfepy.base.base.output.set_output",
"sfepy.base.conf.ProblemConf.from_file",
"sfepy.base.base.Struct",
"sfepy.homogenization.homogen_app.HomogenizationApp",
"sfepy.applications.PDESolverApp"
] |
[((184, 204), 'sys.path.append', 'sys.path.append', (['"""."""'], {}), "('.')\n", (199, 204), False, 'import sys\n'), ((7258, 7293), 'numpy.array', 'np.array', (['[x0[0], 0.45, x0[1], 0.0]'], {}), '([x0[0], 0.45, x0[1], 0.0])\n', (7266, 7293), True, 'import numpy as np\n'), ((7662, 7697), 'numpy.array', 'np.array', (['[x0[0], 0.45, x0[1], 0.0]'], {}), '([x0[0], 0.45, x0[1], 0.0])\n', (7670, 7697), True, 'import numpy as np\n'), ((839, 862), 'sfepy.base.conf.get_standard_keywords', 'get_standard_keywords', ([], {}), '()\n', (860, 862), False, 'from sfepy.base.conf import ProblemConf, get_standard_keywords\n'), ((941, 1009), 'sfepy.base.conf.ProblemConf.from_file', 'ProblemConf.from_file', (['filename', 'required', 'other'], {'define_args': 'kwargs'}), '(filename, required, other, define_args=kwargs)\n', (962, 1009), False, 'from sfepy.base.conf import ProblemConf, get_standard_keywords\n'), ((1065, 1238), 'sfepy.base.base.Struct', 'Struct', ([], {'output_filename_trunk': 'None', 'save_ebc': '(False)', 'save_ebc_nodes': '(False)', 'save_regions': '(False)', 'save_regions_as_groups': '(False)', 'save_field_meshes': '(False)', 'solve_not': '(False)'}), '(output_filename_trunk=None, save_ebc=False, save_ebc_nodes=False,\n save_regions=False, save_regions_as_groups=False, save_field_meshes=\n False, solve_not=False)\n', (1071, 1238), False, 'from sfepy.base.base import Struct, output\n'), ((1415, 1470), 'sfepy.base.base.output.set_output', 'output.set_output', ([], {'filename': '"""sfepy_log.txt"""', 'quiet': '(True)'}), "(filename='sfepy_log.txt', quiet=True)\n", (1432, 1470), False, 'from sfepy.base.base import Struct, output\n'), ((2281, 2294), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (2287, 2294), True, 'import numpy as np\n'), ((2307, 2320), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (2313, 2320), True, 'import numpy as np\n'), ((2388, 2552), 'numpy.array', 'np.array', (['[[c2, 0, s2, 0, 2 * sc, 0], [0, 1, 0, 0, 0, 0], [s2, 0, c2, 0, -2 * sc, 0],\n [0, 0, 0, c, 0, -s], [-sc, 0, sc, 0, c2 - s2, 0], [0, 0, 0, s, 0, c]]'], {}), '([[c2, 0, s2, 0, 2 * sc, 0], [0, 1, 0, 0, 0, 0], [s2, 0, c2, 0, -2 *\n sc, 0], [0, 0, 0, c, 0, -s], [-sc, 0, sc, 0, c2 - s2, 0], [0, 0, 0, s, \n 0, c]])\n', (2396, 2552), True, 'import numpy as np\n'), ((2949, 2978), 'numpy.zeros', 'np.zeros', (['(graph.shape[0], 4)'], {}), '((graph.shape[0], 4))\n', (2957, 2978), True, 'import numpy as np\n'), ((3216, 3243), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 5)'}), '(figsize=(12, 5))\n', (3226, 3243), True, 'import matplotlib.pyplot as plt\n'), ((3293, 3395), 'matplotlib.collections.PolyCollection', 'PolyCollection', ([], {'verts': 'coors[graph[cells_matrix, 0:4], :2]', 'facecolors': '"""white"""', 'edgecolors': '"""black"""'}), "(verts=coors[graph[cells_matrix, 0:4], :2], facecolors=\n 'white', edgecolors='black')\n", (3307, 3395), False, 'from matplotlib.collections import PolyCollection\n'), ((3445, 3545), 'matplotlib.collections.PolyCollection', 'PolyCollection', ([], {'verts': 'coors[graph[cells_fibers, 0:4], :2]', 'facecolors': '"""gray"""', 'edgecolors': '"""black"""'}), "(verts=coors[graph[cells_fibers, 0:4], :2], facecolors='gray',\n edgecolors='black')\n", (3459, 3545), False, 'from matplotlib.collections import PolyCollection\n'), ((4392, 4405), 'matplotlib.pyplot.show', 'plt.show', (['fig'], {}), '(fig)\n', (4400, 4405), True, 'import matplotlib.pyplot as plt\n'), ((4606, 4632), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 6)'}), '(figsize=(5, 6))\n', 
(4616, 4632), True, 'import matplotlib.pyplot as plt\n'), ((5356, 5370), 'matplotlib.pyplot.show', 'plt.show', (['fig2'], {}), '(fig2)\n', (5364, 5370), True, 'import matplotlib.pyplot as plt\n'), ((7923, 7936), 'numpy.array', 'np.array', (['qoi'], {}), '(qoi)\n', (7931, 7936), True, 'import numpy as np\n'), ((1511, 1566), 'sfepy.homogenization.homogen_app.HomogenizationApp', 'HomogenizationApp', (['conf', 'options', '"""material_opt_micro:"""'], {}), "(conf, options, 'material_opt_micro:')\n", (1528, 1566), False, 'from sfepy.homogenization.homogen_app import HomogenizationApp\n'), ((1600, 1650), 'sfepy.applications.PDESolverApp', 'PDESolverApp', (['conf', 'options', '"""material_opt_macro:"""'], {}), "(conf, options, 'material_opt_macro:')\n", (1612, 1650), False, 'from sfepy.applications import PDESolverApp\n'), ((2672, 2684), 'numpy.dot', 'np.dot', (['T', 'D'], {}), '(T, D)\n', (2678, 2684), True, 'import numpy as np\n'), ((4186, 4282), 'mpl_toolkits.mplot3d.art3d.Poly3DCollection', 'Poly3DCollection', ([], {'verts': 'verts', 'facecolors': 'color', 'edgecolors': '"""black"""', 'linewidths': '(1)', 'alpha': '(0.5)'}), "(verts=verts, facecolors=color, edgecolors='black',\n linewidths=1, alpha=0.5)\n", (4202, 4282), False, 'from mpl_toolkits.mplot3d.art3d import Poly3DCollection, Line3DCollection\n'), ((5046, 5144), 'mpl_toolkits.mplot3d.art3d.Poly3DCollection', 'Poly3DCollection', ([], {'verts': 'verts', 'facecolors': '"""white"""', 'edgecolors': '"""black"""', 'linewidths': '(1)', 'alpha': '(0.5)'}), "(verts=verts, facecolors='white', edgecolors='black',\n linewidths=1, alpha=0.5)\n", (5062, 5144), False, 'from mpl_toolkits.mplot3d.art3d import Poly3DCollection, Line3DCollection\n'), ((5782, 5797), 'numpy.deg2rad', 'np.deg2rad', (['phi'], {}), '(phi)\n', (5792, 5797), True, 'import numpy as np\n')]
|
from typing import TYPE_CHECKING, Union
from uuid import UUID
from sqlalchemy.schema import Column, ForeignKey, UniqueConstraint
from sqlmodel import Field, Relationship
from sqlmodel.sql.sqltypes import GUID
from joj.horse.models.base import BaseORMModel
from joj.horse.models.domain import Domain
from joj.horse.models.domain_role import DomainRole
from joj.horse.models.permission import DefaultRole
from joj.horse.utils.errors import BizError, ErrorCode
if TYPE_CHECKING:
from joj.horse.models.user import User
class DomainUser(BaseORMModel, table=True): # type: ignore[call-arg]
__tablename__ = "domain_users"
__table_args__ = (UniqueConstraint("domain_id", "user_id"),)
role: str
domain_id: UUID = Field(
sa_column=Column(
GUID, ForeignKey("domains.id", ondelete="CASCADE"), nullable=False
)
)
domain: "Domain" = Relationship(back_populates="users")
user_id: UUID = Field(
sa_column=Column(
GUID, ForeignKey("users.id", ondelete="CASCADE"), nullable=False
)
)
user: "User" = Relationship(back_populates="domain_users")
@classmethod
async def add_domain_user(
cls, domain_id: UUID, user_id: UUID, role: Union[str, DefaultRole]
) -> "DomainUser":
role = str(role)
        # check that the user is not already in the domain
domain_user = await DomainUser.get_or_none(domain_id=domain_id, user_id=user_id)
if domain_user is not None:
raise BizError(ErrorCode.UserAlreadyInDomainBadRequestError)
# check domain role
await DomainRole.ensure_exists(domain_id=domain_id, role=role)
# add member
domain_user = DomainUser(domain_id=domain_id, user_id=user_id, role=role)
return domain_user
@classmethod
async def update_domain_user(
cls, domain_id: UUID, user_id: UUID, role: Union[str, DefaultRole]
) -> "DomainUser":
role = str(role)
        # check that the user is already in the domain
domain_user = await DomainUser.get_or_none(domain_id=domain_id, user_id=user_id)
if domain_user is None:
raise BizError(ErrorCode.UserAlreadyInDomainBadRequestError)
# check domain role
await DomainRole.ensure_exists(domain_id=domain_id, role=role)
# update role
domain_user.role = role
return domain_user
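# A rough usage sketch (session handling and the role name are illustrative;
# the caller is responsible for persisting the returned object):
# domain_user = await DomainUser.add_domain_user(
#     domain_id=domain.id, user_id=user.id, role=DefaultRole.USER)
# session.add(domain_user)
# await session.commit()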
|
[
"sqlmodel.Relationship"
] |
[((883, 919), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""users"""'}), "(back_populates='users')\n", (895, 919), False, 'from sqlmodel import Field, Relationship\n'), ((1086, 1129), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""domain_users"""'}), "(back_populates='domain_users')\n", (1098, 1129), False, 'from sqlmodel import Field, Relationship\n'), ((651, 691), 'sqlalchemy.schema.UniqueConstraint', 'UniqueConstraint', (['"""domain_id"""', '"""user_id"""'], {}), "('domain_id', 'user_id')\n", (667, 691), False, 'from sqlalchemy.schema import Column, ForeignKey, UniqueConstraint\n'), ((1473, 1527), 'joj.horse.utils.errors.BizError', 'BizError', (['ErrorCode.UserAlreadyInDomainBadRequestError'], {}), '(ErrorCode.UserAlreadyInDomainBadRequestError)\n', (1481, 1527), False, 'from joj.horse.utils.errors import BizError, ErrorCode\n'), ((1570, 1626), 'joj.horse.models.domain_role.DomainRole.ensure_exists', 'DomainRole.ensure_exists', ([], {'domain_id': 'domain_id', 'role': 'role'}), '(domain_id=domain_id, role=role)\n', (1594, 1626), False, 'from joj.horse.models.domain_role import DomainRole\n'), ((2099, 2153), 'joj.horse.utils.errors.BizError', 'BizError', (['ErrorCode.UserAlreadyInDomainBadRequestError'], {}), '(ErrorCode.UserAlreadyInDomainBadRequestError)\n', (2107, 2153), False, 'from joj.horse.utils.errors import BizError, ErrorCode\n'), ((2196, 2252), 'joj.horse.models.domain_role.DomainRole.ensure_exists', 'DomainRole.ensure_exists', ([], {'domain_id': 'domain_id', 'role': 'role'}), '(domain_id=domain_id, role=role)\n', (2220, 2252), False, 'from joj.horse.models.domain_role import DomainRole\n'), ((783, 827), 'sqlalchemy.schema.ForeignKey', 'ForeignKey', (['"""domains.id"""'], {'ondelete': '"""CASCADE"""'}), "('domains.id', ondelete='CASCADE')\n", (793, 827), False, 'from sqlalchemy.schema import Column, ForeignKey, UniqueConstraint\n'), ((992, 1034), 'sqlalchemy.schema.ForeignKey', 'ForeignKey', (['"""users.id"""'], {'ondelete': '"""CASCADE"""'}), "('users.id', ondelete='CASCADE')\n", (1002, 1034), False, 'from sqlalchemy.schema import Column, ForeignKey, UniqueConstraint\n')]
|
import functools
import numpy as np
import pytest
import megengine
from megengine.autodiff.grad_manager import GradManager
from megengine.core.ops.builtin import GetVarShape, Reduce, TypeCvt
from megengine.core.tensor.utils import subgraph_fn
from megengine.device import CompNode, get_default_device
from megengine.jit import trace
_assert_allclose = functools.partial(np.testing.assert_allclose, atol=5e-6, rtol=5e-6)
@functools.lru_cache(maxsize=None)
def _get_batch_norm_fn(dtype, device, channels, ndim, interpret, gopt_level):
@subgraph_fn(
"BatchNormNd",
dtype=dtype,
device=device,
nr_inputs=4,
interpret=interpret,
gopt_level=gopt_level,
)
def batch_norm_nd(inputs, f, c):
input, eps, weight, bias = inputs[0:4]
reduce_shape = c(
(1, channels) + (1,) * (ndim - 2), dtype="int32", device=device
)
input_shape = f(GetVarShape(), input)
input_elems = f(Reduce(mode="product", axis=0), input_shape)
reduce_elems = f(Reduce(mode="product", axis=0), reduce_shape)
reduce_size = f("//", input_elems, reduce_elems)
reduce_size = f(TypeCvt(dtype=dtype), reduce_size)
channel_x1s = f(Reduce(mode="sum"), input, reduce_shape)
channel_x2s = f(Reduce(mode="sum_sqr"), input, reduce_shape)
channel_mean = f("/", channel_x1s, reduce_size)
channel_var = f(
"-", f("/", channel_x2s, reduce_size), f("*", channel_mean, channel_mean),
)
invsqrt_channel_var = f("**", f("+", channel_var, eps), c(-0.5))
inv_var_wt = f("*", invsqrt_channel_var, weight)
neg_channel_mean = f("-", channel_mean)
outvar = f(
"fma3", input, inv_var_wt, f("fma3", neg_channel_mean, inv_var_wt, bias),
)
return (outvar,), (True,)
return batch_norm_nd
@pytest.mark.parametrize("device", [get_default_device(), "cpux"])
@pytest.mark.parametrize("batch_size", [1, 8])
@pytest.mark.parametrize("channels", [3])
@pytest.mark.parametrize(
"use_trace, symbolic", [(False, None), (True, False), (True, True)]
)
@pytest.mark.parametrize("gopt_level", [None, 1, 2])
@pytest.mark.parametrize("dtype", ["float32"])
def test_subgraph(device, batch_size, channels, use_trace, symbolic, gopt_level, dtype):
device = CompNode(device)
def subgraph_batch_norm(inp, weight, bias, eps, diff):
inp = inp.detach()
with GradManager().attach(inp) as gm:
batch_norm_fn = _get_batch_norm_fn(
dtype, device, channels, ndim, interpret=False, gopt_level=gopt_level
)
out, *_ = batch_norm_fn(inp, eps, weight, bias)
gm.backward(out * 1e3 + 1e3, diff)
return out, inp.grad
def primitive_batch_norm(inp, weight, bias, eps, diff):
inp = inp.detach()
with GradManager().attach(inp) as gm:
batch_norm_fn = _get_batch_norm_fn(
dtype, device, channels, ndim, interpret=True, gopt_level=gopt_level
)
(out,) = batch_norm_fn(inp, eps, weight, bias)
gm.backward(out * 1e3 + 1e3, diff)
return out, inp.grad
if use_trace:
subgraph_batch_norm = trace(symbolic=symbolic)(subgraph_batch_norm)
primitive_batch_norm = trace(symbolic=symbolic)(primitive_batch_norm)
def rand_tensor(shape, dtype=dtype, device=device):
return megengine.tensor(np.random.random(shape), dtype=dtype, device=device)
# test shape change
for image_shape in [(223, 223), (10, 20)]:
ndim = len(image_shape) + 2
input_shape = (batch_size, channels) + image_shape
param_shape = (1, channels) + (1,) * len(image_shape)
inp = rand_tensor(input_shape) * 1e3 + 1e3
weight = rand_tensor(param_shape)
bias = rand_tensor(param_shape)
eps = megengine.tensor(1e-5, dtype=dtype, device=device)
diff = rand_tensor(input_shape)
out1, grad1 = subgraph_batch_norm(inp, weight, bias, eps, diff)
out2, grad2 = primitive_batch_norm(inp, weight, bias, eps, diff)
_assert_allclose(out1.numpy(), out2.numpy())
_assert_allclose(grad1.numpy(), grad2.numpy())
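# The parametrize decorators above expand this test into a grid over
# device x batch_size x trace mode x gopt_level x dtype; run it with
# pytest, e.g. (file name illustrative):
# pytest -q test_subgraph.py -k "cpux"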
|
[
"megengine.core.ops.builtin.GetVarShape",
"megengine.core.tensor.utils.subgraph_fn",
"megengine.device.CompNode",
"megengine.jit.trace",
"megengine.device.get_default_device",
"megengine.core.ops.builtin.Reduce",
"megengine.tensor",
"megengine.core.ops.builtin.TypeCvt",
"megengine.autodiff.grad_manager.GradManager"
] |
[((355, 424), 'functools.partial', 'functools.partial', (['np.testing.assert_allclose'], {'atol': '(5e-06)', 'rtol': '(5e-06)'}), '(np.testing.assert_allclose, atol=5e-06, rtol=5e-06)\n', (372, 424), False, 'import functools\n'), ((426, 459), 'functools.lru_cache', 'functools.lru_cache', ([], {'maxsize': 'None'}), '(maxsize=None)\n', (445, 459), False, 'import functools\n'), ((1944, 1989), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""batch_size"""', '[1, 8]'], {}), "('batch_size', [1, 8])\n", (1967, 1989), False, 'import pytest\n'), ((1991, 2031), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""channels"""', '[3]'], {}), "('channels', [3])\n", (2014, 2031), False, 'import pytest\n'), ((2033, 2130), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""use_trace, symbolic"""', '[(False, None), (True, False), (True, True)]'], {}), "('use_trace, symbolic', [(False, None), (True, False\n ), (True, True)])\n", (2056, 2130), False, 'import pytest\n'), ((2133, 2184), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""gopt_level"""', '[None, 1, 2]'], {}), "('gopt_level', [None, 1, 2])\n", (2156, 2184), False, 'import pytest\n'), ((2186, 2231), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', "['float32']"], {}), "('dtype', ['float32'])\n", (2209, 2231), False, 'import pytest\n'), ((543, 658), 'megengine.core.tensor.utils.subgraph_fn', 'subgraph_fn', (['"""BatchNormNd"""'], {'dtype': 'dtype', 'device': 'device', 'nr_inputs': '(4)', 'interpret': 'interpret', 'gopt_level': 'gopt_level'}), "('BatchNormNd', dtype=dtype, device=device, nr_inputs=4,\n interpret=interpret, gopt_level=gopt_level)\n", (554, 658), False, 'from megengine.core.tensor.utils import subgraph_fn\n'), ((2334, 2350), 'megengine.device.CompNode', 'CompNode', (['device'], {}), '(device)\n', (2342, 2350), False, 'from megengine.device import CompNode, get_default_device\n'), ((3884, 3935), 'megengine.tensor', 'megengine.tensor', (['(1e-05)'], {'dtype': 'dtype', 'device': 'device'}), '(1e-05, dtype=dtype, device=device)\n', (3900, 3935), False, 'import megengine\n'), ((1912, 1932), 'megengine.device.get_default_device', 'get_default_device', ([], {}), '()\n', (1930, 1932), False, 'from megengine.device import CompNode, get_default_device\n'), ((930, 943), 'megengine.core.ops.builtin.GetVarShape', 'GetVarShape', ([], {}), '()\n', (941, 943), False, 'from megengine.core.ops.builtin import GetVarShape, Reduce, TypeCvt\n'), ((976, 1006), 'megengine.core.ops.builtin.Reduce', 'Reduce', ([], {'mode': '"""product"""', 'axis': '(0)'}), "(mode='product', axis=0)\n", (982, 1006), False, 'from megengine.core.ops.builtin import GetVarShape, Reduce, TypeCvt\n'), ((1046, 1076), 'megengine.core.ops.builtin.Reduce', 'Reduce', ([], {'mode': '"""product"""', 'axis': '(0)'}), "(mode='product', axis=0)\n", (1052, 1076), False, 'from megengine.core.ops.builtin import GetVarShape, Reduce, TypeCvt\n'), ((1173, 1193), 'megengine.core.ops.builtin.TypeCvt', 'TypeCvt', ([], {'dtype': 'dtype'}), '(dtype=dtype)\n', (1180, 1193), False, 'from megengine.core.ops.builtin import GetVarShape, Reduce, TypeCvt\n'), ((1232, 1250), 'megengine.core.ops.builtin.Reduce', 'Reduce', ([], {'mode': '"""sum"""'}), "(mode='sum')\n", (1238, 1250), False, 'from megengine.core.ops.builtin import GetVarShape, Reduce, TypeCvt\n'), ((1297, 1319), 'megengine.core.ops.builtin.Reduce', 'Reduce', ([], {'mode': '"""sum_sqr"""'}), "(mode='sum_sqr')\n", (1303, 1319), False, 'from megengine.core.ops.builtin import GetVarShape, Reduce, 
TypeCvt\n'), ((3241, 3265), 'megengine.jit.trace', 'trace', ([], {'symbolic': 'symbolic'}), '(symbolic=symbolic)\n', (3246, 3265), False, 'from megengine.jit import trace\n'), ((3318, 3342), 'megengine.jit.trace', 'trace', ([], {'symbolic': 'symbolic'}), '(symbolic=symbolic)\n', (3323, 3342), False, 'from megengine.jit import trace\n'), ((3454, 3477), 'numpy.random.random', 'np.random.random', (['shape'], {}), '(shape)\n', (3470, 3477), True, 'import numpy as np\n'), ((2451, 2464), 'megengine.autodiff.grad_manager.GradManager', 'GradManager', ([], {}), '()\n', (2462, 2464), False, 'from megengine.autodiff.grad_manager import GradManager\n'), ((2873, 2886), 'megengine.autodiff.grad_manager.GradManager', 'GradManager', ([], {}), '()\n', (2884, 2886), False, 'from megengine.autodiff.grad_manager import GradManager\n')]
|
from __future__ import absolute_import
import hashlib
import numpy as nm
import warnings
import scipy.sparse as sps
import six
from six.moves import range
warnings.simplefilter('ignore', sps.SparseEfficiencyWarning)
from sfepy.base.base import output, get_default, assert_, try_imports
from sfepy.base.timing import Timer
from sfepy.solvers.solvers import LinearSolver
def solve(mtx, rhs, solver_class=None, solver_conf=None):
"""
Solve the linear system with the matrix `mtx` and the right-hand side
`rhs`.
Convenience wrapper around the linear solver classes below.
"""
solver_class = get_default(solver_class, ScipyDirect)
solver_conf = get_default(solver_conf, {})
solver = solver_class(solver_conf, mtx=mtx)
solution = solver(rhs)
return solution
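# A minimal sketch of the wrapper above (matrix and right-hand side are
# illustrative):
# mtx = sps.csr_matrix(nm.array([[2.0, 1.0], [1.0, 3.0]]))
# sol = solve(mtx, nm.array([3.0, 4.0]))  # ScipyDirect by default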
def _get_cs_matrix_hash(mtx, chunk_size=100000):
def _gen_array_chunks(arr):
ii = 0
while len(arr[ii:]):
yield arr[ii:ii+chunk_size].tobytes()
ii += chunk_size
sha1 = hashlib.sha1()
for chunk in _gen_array_chunks(mtx.indptr):
sha1.update(chunk)
for chunk in _gen_array_chunks(mtx.indices):
sha1.update(chunk)
for chunk in _gen_array_chunks(mtx.data):
sha1.update(chunk)
digest = sha1.hexdigest()
return digest
def _is_new_matrix(mtx, mtx_digest, force_reuse=False):
if not isinstance(mtx, sps.csr_matrix):
return True, mtx_digest
if force_reuse:
return False, mtx_digest
id0, digest0 = mtx_digest
id1 = id(mtx)
digest1 = _get_cs_matrix_hash(mtx)
if (id1 == id0) and (digest1 == digest0):
return False, (id1, digest1)
return True, (id1, digest1)
def standard_call(call):
"""
Decorator handling argument preparation and timing for linear solvers.
"""
def _standard_call(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, context=None,
**kwargs):
timer = Timer(start=True)
conf = get_default(conf, self.conf)
mtx = get_default(mtx, self.mtx)
status = get_default(status, self.status)
context = get_default(context, self.context)
assert_(mtx.shape[0] == mtx.shape[1] == rhs.shape[0])
if x0 is not None:
assert_(x0.shape[0] == rhs.shape[0])
result = call(self, rhs, x0, conf, eps_a, eps_r, i_max, mtx, status,
context=context, **kwargs)
if isinstance(result, tuple):
result, n_iter = result
else:
n_iter = -1 # Number of iterations is undefined/unavailable.
elapsed = timer.stop()
if status is not None:
status['time'] = elapsed
status['n_iter'] = n_iter
return result
return _standard_call
def petsc_call(call):
"""
Decorator handling argument preparation and timing for PETSc-based linear
solvers.
"""
def _petsc_call(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, comm=None,
context=None, **kwargs):
timer = Timer(start=True)
conf = get_default(conf, self.conf)
mtx = get_default(mtx, self.mtx)
status = get_default(status, self.status)
context = get_default(context, self.context)
comm = get_default(comm, self.comm)
mshape = mtx.size if isinstance(mtx, self.petsc.Mat) else mtx.shape
rshape = [rhs.size] if isinstance(rhs, self.petsc.Vec) else rhs.shape
assert_(mshape[0] == mshape[1] == rshape[0])
if x0 is not None:
xshape = [x0.size] if isinstance(x0, self.petsc.Vec) else x0.shape
assert_(xshape[0] == rshape[0])
result = call(self, rhs, x0, conf, eps_a, eps_r, i_max, mtx, status,
comm, context=context, **kwargs)
elapsed = timer.stop()
if status is not None:
status['time'] = elapsed
status['n_iter'] = self.ksp.getIterationNumber()
return result
return _petsc_call
class ScipyDirect(LinearSolver):
"""
Direct sparse solver from SciPy.
"""
name = 'ls.scipy_direct'
_parameters = [
('method', "{'auto', 'umfpack', 'superlu'}", 'auto', False,
'The actual solver to use.'),
('use_presolve', 'bool', False, False,
'If True, pre-factorize the matrix.'),
]
def __init__(self, conf, method=None, **kwargs):
LinearSolver.__init__(self, conf, solve=None, **kwargs)
um = self.sls = None
if method is None:
method = self.conf.method
aux = try_imports(['import scipy.linsolve as sls',
'import scipy.splinalg.dsolve as sls',
'import scipy.sparse.linalg.dsolve as sls'],
'cannot import scipy sparse direct solvers!')
if 'sls' in aux:
self.sls = aux['sls']
else:
raise ValueError('SuperLU not available!')
if method in ['auto', 'umfpack']:
aux = try_imports([
'import scipy.linsolve.umfpack as um',
'import scipy.splinalg.dsolve.umfpack as um',
'import scipy.sparse.linalg.dsolve.umfpack as um',
'import scikits.umfpack as um'])
is_umfpack = True if 'um' in aux\
and hasattr(aux['um'], 'UMFPACK_OK') else False
if method == 'umfpack' and not is_umfpack:
raise ValueError('UMFPACK not available!')
elif method == 'superlu':
is_umfpack = False
else:
            raise ValueError('unknown solution method! (%s)' % method)
if is_umfpack:
self.sls.use_solver(useUmfpack=True,
assumeSortedIndices=True)
else:
self.sls.use_solver(useUmfpack=False)
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
if conf.use_presolve:
self.presolve(mtx)
if self.solve is not None:
# Matrix is already prefactorized.
return self.solve(rhs)
else:
return self.sls.spsolve(mtx, rhs)
def presolve(self, mtx):
is_new, mtx_digest = _is_new_matrix(mtx, self.mtx_digest)
if is_new:
self.solve = self.sls.factorized(mtx)
self.mtx_digest = mtx_digest
class ScipySuperLU(ScipyDirect):
"""
SuperLU - direct sparse solver from SciPy.
"""
name = 'ls.scipy_superlu'
_parameters = [
('use_presolve', 'bool', False, False,
'If True, pre-factorize the matrix.'),
]
def __init__(self, conf, **kwargs):
ScipyDirect.__init__(self, conf, method='superlu', **kwargs)
class ScipyUmfpack(ScipyDirect):
"""
UMFPACK - direct sparse solver from SciPy.
"""
name = 'ls.scipy_umfpack'
_parameters = [
('use_presolve', 'bool', False, False,
'If True, pre-factorize the matrix.'),
]
def __init__(self, conf, **kwargs):
ScipyDirect.__init__(self, conf, method='umfpack', **kwargs)
class ScipyIterative(LinearSolver):
"""
Interface to SciPy iterative solvers.
The `eps_r` tolerance is both absolute and relative - the solvers
stop when either the relative or the absolute residual is below it.
"""
name = 'ls.scipy_iterative'
_parameters = [
('method', 'str', 'cg', False,
'The actual solver to use.'),
('setup_precond', 'callable', lambda mtx, context: None, False,
"""User-supplied function for the preconditioner initialization/setup.
It is called as setup_precond(mtx, context), where mtx is the
matrix, context is a user-supplied context, and should return one
of {sparse matrix, dense matrix, LinearOperator}.
"""),
('callback', 'callable', None, False,
"""User-supplied function to call after each iteration. It is called
as callback(xk), where xk is the current solution vector, except
the gmres method, where the argument is the residual.
"""),
('i_max', 'int', 100, False,
'The maximum number of iterations.'),
('eps_a', 'float', 1e-8, False,
'The absolute tolerance for the residual.'),
('eps_r', 'float', 1e-8, False,
'The relative tolerance for the residual.'),
('*', '*', None, False,
'Additional parameters supported by the method.'),
]
# All iterative solvers in scipy.sparse.linalg pass a solution vector into
    # a callback except those below, which take a residual vector.
_callbacks_res = ['gmres']
def __init__(self, conf, context=None, **kwargs):
import scipy.sparse.linalg.isolve as la
LinearSolver.__init__(self, conf, context=context, **kwargs)
try:
solver = getattr(la, self.conf.method)
except AttributeError:
output('scipy solver %s does not exist!' % self.conf.method)
output('using cg instead')
solver = la.cg
self.solver = solver
self.converged_reasons = {
0 : 'successful exit',
1 : 'number of iterations',
-1 : 'illegal input or breakdown',
}
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, context=None, **kwargs):
solver_kwargs = self.build_solver_kwargs(conf)
eps_a = get_default(eps_a, self.conf.eps_a)
eps_r = get_default(eps_r, self.conf.eps_r)
i_max = get_default(i_max, self.conf.i_max)
setup_precond = get_default(kwargs.get('setup_precond', None),
self.conf.setup_precond)
callback = get_default(kwargs.get('callback', lambda sol: None),
self.conf.callback)
self.iter = 0
def iter_callback(sol):
self.iter += 1
msg = '%s: iteration %d' % (self.conf.name, self.iter)
if conf.verbose > 2:
if conf.method not in self._callbacks_res:
res = mtx * sol - rhs
else:
res = sol
rnorm = nm.linalg.norm(res)
msg += ': |Ax-b| = %e' % rnorm
output(msg, verbose=conf.verbose > 1)
# Call an optional user-defined callback.
callback(sol)
precond = setup_precond(mtx, context)
if conf.method == 'qmr':
prec_args = {'M1' : precond, 'M2' : precond}
else:
prec_args = {'M' : precond}
solver_kwargs.update(prec_args)
try:
sol, info = self.solver(mtx, rhs, x0=x0, atol=eps_a, tol=eps_r,
maxiter=i_max, callback=iter_callback,
**solver_kwargs)
except TypeError:
sol, info = self.solver(mtx, rhs, x0=x0, tol=eps_r,
maxiter=i_max, callback=iter_callback,
**solver_kwargs)
output('%s: %s convergence: %s (%s, %d iterations)'
% (self.conf.name, self.conf.method,
info, self.converged_reasons[nm.sign(info)], self.iter),
verbose=conf.verbose)
return sol, self.iter
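# Example problem-description configuration for this solver (values are
# illustrative):
# solvers = {
#     'ls': ('ls.scipy_iterative',
#            {'method': 'cg', 'i_max': 1000, 'eps_r': 1e-12}),
# }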
class PyAMGSolver(LinearSolver):
"""
Interface to PyAMG solvers.
The `method` parameter can be one of: 'smoothed_aggregation_solver',
'ruge_stuben_solver'. The `accel` parameter specifies the Krylov
solver name, that is used as an accelerator for the multigrid solver.
"""
name = 'ls.pyamg'
_parameters = [
('method', 'str', 'smoothed_aggregation_solver', False,
'The actual solver to use.'),
('accel', 'str', None, False,
'The accelerator.'),
('callback', 'callable', None, False,
"""User-supplied function to call after each iteration. It is called
as callback(xk), where xk is the current solution vector, except
the gmres accelerator, where the argument is the residual norm.
"""),
('i_max', 'int', 100, False,
'The maximum number of iterations.'),
('eps_r', 'float', 1e-8, False,
'The relative tolerance for the residual.'),
('force_reuse', 'bool', False, False,
"""If True, skip the check whether the MG solver object corresponds
to the `mtx` argument: it is always reused."""),
('*', '*', None, False,
"""Additional parameters supported by the method. Use the 'method:'
prefix for arguments of the method construction function
(e.g. 'method:max_levels' : 5), and the 'solve:' prefix for
the subsequent solver call."""),
]
# All iterative solvers in pyamg.krylov pass a solution vector into
    # a callback except those below, which take a residual vector norm.
_callbacks_res = ['gmres']
def __init__(self, conf, **kwargs):
try:
import pyamg
except ImportError:
msg = 'cannot import pyamg!'
raise ImportError(msg)
LinearSolver.__init__(self, conf, mg=None, **kwargs)
try:
solver = getattr(pyamg, self.conf.method)
except AttributeError:
output('pyamg.%s does not exist!' % self.conf.method)
output('using pyamg.smoothed_aggregation_solver instead')
solver = pyamg.smoothed_aggregation_solver
self.solver = solver
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
solver_kwargs = self.build_solver_kwargs(conf)
eps_r = get_default(eps_r, self.conf.eps_r)
i_max = get_default(i_max, self.conf.i_max)
callback = get_default(kwargs.get('callback', lambda sol: None),
self.conf.callback)
self.iter = 0
def iter_callback(sol):
self.iter += 1
msg = '%s: iteration %d' % (self.conf.name, self.iter)
if conf.verbose > 2:
if conf.accel not in self._callbacks_res:
res = mtx * sol - rhs
else:
res = sol
rnorm = nm.linalg.norm(res)
msg += ': |Ax-b| = %e' % rnorm
output(msg, verbose=conf.verbose > 1)
# Call an optional user-defined callback.
callback(sol)
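        # Rebuild the multigrid hierarchy only when the matrix has changed (or
        # reuse is forced); 'method:'-prefixed options go to the construction
        # call, 'solve:'-prefixed ones to the solve call.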
is_new, mtx_digest = _is_new_matrix(mtx, self.mtx_digest,
force_reuse=conf.force_reuse)
if is_new or (self.mg is None):
_kwargs = {key[7:] : val
for key, val in six.iteritems(solver_kwargs)
if key.startswith('method:')}
self.mg = self.solver(mtx, **_kwargs)
self.mtx_digest = mtx_digest
_kwargs = {key[6:] : val
for key, val in six.iteritems(solver_kwargs)
if key.startswith('solve:')}
sol = self.mg.solve(rhs, x0=x0, accel=conf.accel, tol=eps_r,
maxiter=i_max, callback=iter_callback,
**_kwargs)
return sol, self.iter
class PyAMGKrylovSolver(LinearSolver):
"""
Interface to PyAMG Krylov solvers.
"""
name = 'ls.pyamg_krylov'
_parameters = [
('method', 'str', 'cg', False,
'The actual solver to use.'),
('setup_precond', 'callable', lambda mtx, context: None, False,
"""User-supplied function for the preconditioner initialization/setup.
It is called as setup_precond(mtx, context), where mtx is the
matrix, context is a user-supplied context, and should return one
of {sparse matrix, dense matrix, LinearOperator}.
"""),
('callback', 'callable', None, False,
"""User-supplied function to call after each iteration. It is called
as callback(xk), where xk is the current solution vector, except
       for the gmres method, where the argument is the residual norm.
"""),
('i_max', 'int', 100, False,
'The maximum number of iterations.'),
('eps_r', 'float', 1e-8, False,
'The relative tolerance for the residual.'),
('*', '*', None, False,
'Additional parameters supported by the method.'),
]
# All iterative solvers in pyamg.krylov pass a solution vector into
    # a callback except those below, which take a residual vector norm.
_callbacks_res = ['gmres']
def __init__(self, conf, context=None, **kwargs):
try:
import pyamg.krylov as krylov
except ImportError:
msg = 'cannot import pyamg.krylov!'
raise ImportError(msg)
LinearSolver.__init__(self, conf, mg=None,
context=context, **kwargs)
try:
solver = getattr(krylov, self.conf.method)
except AttributeError:
output('pyamg.krylov.%s does not exist!' % self.conf.method)
raise
self.solver = solver
self.converged_reasons = {
0 : 'successful exit',
1 : 'number of iterations',
-1 : 'illegal input or breakdown',
}
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, context=None, **kwargs):
solver_kwargs = self.build_solver_kwargs(conf)
eps_r = get_default(eps_r, self.conf.eps_r)
i_max = get_default(i_max, self.conf.i_max)
setup_precond = get_default(kwargs.get('setup_precond', None),
self.conf.setup_precond)
callback = get_default(kwargs.get('callback', lambda sol: None),
self.conf.callback)
self.iter = 0
def iter_callback(sol):
self.iter += 1
msg = '%s: iteration %d' % (self.conf.name, self.iter)
if conf.verbose > 2:
if conf.method not in self._callbacks_res:
res = mtx * sol - rhs
else:
res = sol
rnorm = nm.linalg.norm(res)
msg += ': |Ax-b| = %e' % rnorm
output(msg, verbose=conf.verbose > 1)
# Call an optional user-defined callback.
callback(sol)
precond = setup_precond(mtx, context)
sol, info = self.solver(mtx, rhs, x0=x0, tol=eps_r, maxiter=i_max,
M=precond, callback=iter_callback,
**solver_kwargs)
output('%s: %s convergence: %s (%s, %d iterations)'
% (self.conf.name, self.conf.method,
info, self.converged_reasons[nm.sign(info)], self.iter),
verbose=conf.verbose)
return sol, self.iter
class PETScKrylovSolver(LinearSolver):
"""
PETSc Krylov subspace solver.
The solver supports parallel use with a given MPI communicator (see `comm`
argument of :func:`PETScKrylovSolver.__init__()`) and allows passing in
PETSc matrices and vectors. Returns a (global) PETSc solution vector
instead of a (local) numpy array, when given a PETSc right-hand side
vector.
The solver and preconditioner types are set upon the solver object
creation. Tolerances can be overridden when called by passing a `conf`
object.
Convergence is reached when `rnorm < max(eps_r * rnorm_0, eps_a)`,
    where, in PETSc, `rnorm` is by default the norm of the *preconditioned*
    residual.
"""
name = 'ls.petsc'
_parameters = [
('method', 'str', 'cg', False,
'The actual solver to use.'),
('setup_precond', 'callable', None, False,
"""User-supplied function for the preconditioner initialization/setup.
It is called as setup_precond(mtx, context), where mtx is the
matrix, context is a user-supplied context, and should return an
object with `setUp(self, pc)` and `apply(self, pc, x, y)` methods.
Has precedence over the `precond`/`sub_precond` parameters.
"""),
('precond', 'str', 'icc', False,
'The preconditioner.'),
('sub_precond', 'str', 'none', False,
'The preconditioner for matrix blocks (in parallel runs).'),
('precond_side', "{'left', 'right', 'symmetric', None}", None, False,
'The preconditioner side.'),
('i_max', 'int', 100, False,
'The maximum number of iterations.'),
('eps_a', 'float', 1e-8, False,
'The absolute tolerance for the residual.'),
('eps_r', 'float', 1e-8, False,
'The relative tolerance for the residual.'),
('eps_d', 'float', 1e5, False,
'The divergence tolerance for the residual.'),
('force_reuse', 'bool', False, False,
"""If True, skip the check whether the KSP solver object corresponds
to the `mtx` argument: it is always reused."""),
('*', '*', None, False,
"""Additional parameters supported by the method. Can be used to pass
all PETSc options supported by :func:`petsc.Options()`."""),
]
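    # Map the user-facing side names to petsc4py PC side values
    # (left = 0, right = 1, symmetric = 2).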
_precond_sides = {None : None, 'left' : 0, 'right' : 1, 'symmetric' : 2}
def __init__(self, conf, comm=None, context=None, **kwargs):
if comm is None:
from sfepy.parallel.parallel import init_petsc_args; init_petsc_args
from petsc4py import PETSc as petsc
converged_reasons = {}
for key, val in six.iteritems(petsc.KSP.ConvergedReason.__dict__):
if isinstance(val, int):
converged_reasons[val] = key
LinearSolver.__init__(self, conf, petsc=petsc, comm=comm,
converged_reasons=converged_reasons,
fields=None, ksp=None, pmtx=None,
context=context, **kwargs)
def set_field_split(self, field_ranges, comm=None):
"""
        Set up local PETSc ranges for fields to be used with 'fieldsplit'
preconditioner.
This function must be called before solving the linear system.
"""
comm = get_default(comm, self.comm)
self.fields = []
for key, rng in six.iteritems(field_ranges):
if isinstance(rng, slice):
rng = rng.start, rng.stop
size = rng[1] - rng[0]
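            # Create a PETSc index set covering this field's contiguous
            # DOF range.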
field_is = self.petsc.IS().createStride(size, first=rng[0], step=1,
comm=comm)
self.fields.append((key, field_is))
def create_ksp(self, options=None, comm=None):
optDB = self.petsc.Options()
optDB['sub_pc_type'] = self.conf.sub_precond
if options is not None:
for key, val in six.iteritems(options):
optDB[key] = val
ksp = self.petsc.KSP()
ksp.create(comm)
ksp.setType(self.conf.method)
pc = ksp.getPC()
if self.conf.setup_precond is None:
pc.setType(self.conf.precond)
else:
pc.setType(pc.Type.PYTHON)
ksp.setFromOptions()
if (pc.type == 'fieldsplit'):
if self.fields is not None:
pc.setFieldSplitIS(*self.fields)
else:
msg = 'PETScKrylovSolver.set_field_split() has to be called!'
raise ValueError(msg)
side = self._precond_sides[self.conf.precond_side]
if side is not None:
ksp.setPCSide(side)
return ksp
def create_petsc_matrix(self, mtx, comm=None):
if isinstance(mtx, self.petsc.Mat):
pmtx = mtx
else:
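            # Convert to CSR and wrap its arrays in a PETSc AIJ matrix.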
mtx = sps.csr_matrix(mtx)
pmtx = self.petsc.Mat()
pmtx.createAIJ(mtx.shape, csr=(mtx.indptr, mtx.indices, mtx.data),
comm=comm)
return pmtx
@petsc_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, comm=None, context=None,
**kwargs):
solver_kwargs = self.build_solver_kwargs(conf)
eps_a = get_default(eps_a, self.conf.eps_a)
eps_r = get_default(eps_r, self.conf.eps_r)
i_max = get_default(i_max, self.conf.i_max)
eps_d = self.conf.eps_d
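        # Reuse the cached KSP solver while the matrix digest is unchanged;
        # otherwise build a new PETSc matrix and KSP object.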
is_new, mtx_digest = _is_new_matrix(mtx, self.mtx_digest,
force_reuse=conf.force_reuse)
if (not is_new) and self.ksp is not None:
ksp = self.ksp
pmtx = self.pmtx
else:
pmtx = self.create_petsc_matrix(mtx, comm=comm)
ksp = self.create_ksp(options=solver_kwargs, comm=comm)
ksp.setOperators(pmtx)
ksp.setTolerances(atol=eps_a, rtol=eps_r, divtol=eps_d,
max_it=i_max)
setup_precond = self.conf.setup_precond
if setup_precond is not None:
ksp.pc.setPythonContext(setup_precond(mtx, context))
ksp.setFromOptions()
self.mtx_digest = mtx_digest
self.ksp = ksp
self.pmtx = pmtx
if isinstance(rhs, self.petsc.Vec):
prhs = rhs
else:
prhs = pmtx.getVecLeft()
prhs[...] = rhs
if x0 is not None:
if isinstance(x0, self.petsc.Vec):
psol = x0
else:
psol = pmtx.getVecRight()
psol[...] = x0
ksp.setInitialGuessNonzero(True)
else:
psol = pmtx.getVecRight()
ksp.setInitialGuessNonzero(False)
ksp.solve(prhs, psol)
output('%s(%s, %s/proc) convergence: %s (%s, %d iterations)'
% (ksp.getType(), ksp.getPC().getType(), self.conf.sub_precond,
ksp.reason, self.converged_reasons[ksp.reason],
ksp.getIterationNumber()),
verbose=conf.verbose)
if isinstance(rhs, self.petsc.Vec):
sol = psol
else:
sol = psol[...].copy()
return sol
class MUMPSSolver(LinearSolver):
"""
Interface to MUMPS solver.
"""
name = 'ls.mumps'
_parameters = [
('use_presolve', 'bool', False, False,
'If True, pre-factorize the matrix.'),
('memory_relaxation', 'int', 20, False,
'The percentage increase in the estimated working space.'),
]
def __init__(self, conf, **kwargs):
import sfepy.solvers.ls_mumps as mumps
self.mumps_ls = None
if not mumps.use_mpi:
raise AttributeError('No mpi4py found! Required by MUMPS solver.')
mumps.load_mumps_libraries() # try to load MUMPS libraries
LinearSolver.__init__(self, conf, mumps=mumps, mumps_ls=None,
mumps_presolved=False, **kwargs)
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
if not self.mumps_presolved:
self.presolve(mtx, presolve_flag=conf.use_presolve)
out = rhs.copy()
self.mumps_ls.set_rhs(out)
self.mumps_ls(3) # solve
return out
def presolve(self, mtx, presolve_flag=False):
is_new, mtx_digest = _is_new_matrix(mtx, self.mtx_digest)
if not isinstance(mtx, sps.coo_matrix):
mtx = mtx.tocoo()
if self.mumps_ls is None:
system = 'complex' if mtx.dtype.name.startswith('complex')\
else 'real'
is_sym = self.mumps.coo_is_symmetric(mtx)
mem_relax = self.conf.memory_relaxation
self.mumps_ls = self.mumps.MumpsSolver(system=system,
is_sym=is_sym,
mem_relax=mem_relax)
if is_new:
if self.conf.verbose:
self.mumps_ls.set_verbose()
self.mumps_ls.set_mtx_centralized(mtx)
self.mumps_ls(4) # analyze + factorize
if presolve_flag:
self.mumps_presolved = True
self.mtx_digest = mtx_digest
def __del__(self):
if self.mumps_ls is not None:
            del self.mumps_ls
class MUMPSParallelSolver(LinearSolver):
"""
Interface to MUMPS parallel solver.
"""
name = 'ls.mumps_par'
_parameters = [
('memory_relaxation', 'int', 20, False,
'The percentage increase in the estimated working space.'),
]
def __init__(self, conf, **kwargs):
import multiprocessing
import sfepy.solvers.ls_mumps as mumps
mumps.load_mumps_libraries() # try to load MUMPS libraries
LinearSolver.__init__(self, conf, mumps=mumps, mumps_ls=None,
number_of_cpu=multiprocessing.cpu_count(),
mumps_presolved=False, **kwargs)
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
from mpi4py import MPI
import sys
from sfepy import data_dir
import os.path as op
from tempfile import gettempdir
def tmpfile(fname):
return op.join(gettempdir(), fname)
if not isinstance(mtx, sps.coo_matrix):
mtx = mtx.tocoo()
is_sym = self.mumps.coo_is_symmetric(mtx)
rr, cc, data = mtx.row + 1, mtx.col + 1, mtx.data
if is_sym:
idxs = nm.where(cc >= rr)[0] # upper triangular matrix
rr, cc, data = rr[idxs], cc[idxs], data[idxs]
n = mtx.shape[0]
nz = rr.shape[0]
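        # The matrix, right-hand side and solver flags are handed to the
        # spawned MPI workers through memory-mapped files in the temp directory.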
flags = nm.memmap(tmpfile('vals_flags.array'), dtype='int32',
mode='w+', shape=(4,))
flags[0] = n
flags[1] = 1 if data.dtype.name.startswith('complex') else 0
flags[2] = int(is_sym)
flags[3] = int(self.conf.verbose)
idxs = nm.memmap(tmpfile('idxs.array'), dtype='int32',
mode='w+', shape=(2, nz))
idxs[0, :] = rr
idxs[1, :] = cc
dtype = {0: 'float64', 1: 'complex128'}[flags[1]]
vals_mtx = nm.memmap(tmpfile('vals_mtx.array'), dtype=dtype,
mode='w+', shape=(nz,))
vals_rhs = nm.memmap(tmpfile('vals_rhs.array'), dtype=dtype,
mode='w+', shape=(n,))
vals_mtx[:] = data
vals_rhs[:] = rhs
mumps_call = op.join(data_dir, 'sfepy', 'solvers',
'ls_mumps_parallel.py')
comm = MPI.COMM_SELF.Spawn(sys.executable, args=[mumps_call],
maxprocs=self.number_of_cpu)
comm.Disconnect()
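        # The workers write the solution back through another memory-mapped
        # file.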
out = nm.memmap(tmpfile('vals_x.array'), dtype=dtype, mode='r')
return out
class SchurMumps(MUMPSSolver):
r"""
Mumps Schur complement solver.
"""
name = 'ls.schur_mumps'
_parameters = MUMPSSolver._parameters + [
('schur_variables', 'list', None, True,
'The list of Schur variables.'),
]
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
import scipy.linalg as sla
if not isinstance(mtx, sps.coo_matrix):
mtx = mtx.tocoo()
system = 'complex' if mtx.dtype.name.startswith('complex') else 'real'
self.mumps_ls = self.mumps.MumpsSolver(system=system)
if self.conf.verbose:
self.mumps_ls.set_verbose()
schur_list = []
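        # Gather the global DOF indices of all Schur variables.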
for schur_var in conf.schur_variables:
slc = self.context.equations.variables.adi.indx[schur_var]
schur_list.append(nm.arange(slc.start, slc.stop, slc.step, dtype='i'))
self.mumps_ls.set_mtx_centralized(mtx)
out = rhs.copy()
self.mumps_ls.set_rhs(out)
S, y2 = self.mumps_ls.get_schur(nm.hstack(schur_list))
x2 = sla.solve(S.T, y2) # solve the dense Schur system using scipy.linalg
return self.mumps_ls.expand_schur(x2)
class MultiProblem(ScipyDirect):
r"""
    Conjugate multiple problems.
    Allows one to define and solve several coupled problems at once.
"""
name = 'ls.cm_pb'
_parameters = ScipyDirect._parameters + [
('others', 'list', None, True,
'The list of auxiliary problem definition files.'),
('coupling_variables', 'list', None, True,
'The list of coupling variables.'),
]
def __init__(self, conf, context=None, **kwargs):
ScipyDirect.__init__(self, conf, context=context, **kwargs)
def init_subproblems(self, conf, **kwargs):
from sfepy.discrete.state import State
from sfepy.discrete import Problem
from sfepy.base.conf import ProblemConf, get_standard_keywords
from scipy.spatial import cKDTree as KDTree
# init subproblems
problem = self.context
pb_vars = problem.get_variables()
# get "master" DofInfo and last index
pb_adi_indx = problem.equations.variables.adi.indx
self.adi_indx = pb_adi_indx.copy()
last_indx = -1
for ii in six.itervalues(self.adi_indx):
last_indx = nm.max([last_indx, ii.stop])
# coupling variables
self.cvars_to_pb = {}
for jj in conf.coupling_variables:
self.cvars_to_pb[jj] = [None, None]
if jj in pb_vars.names:
if pb_vars[jj].dual_var_name is not None:
self.cvars_to_pb[jj][0] = -1
else:
self.cvars_to_pb[jj][1] = -1
# init subproblems
self.subpb = []
required, other = get_standard_keywords()
master_prefix = output.get_output_prefix()
for ii, ifname in enumerate(conf.others):
sub_prefix = master_prefix[:-1] + '-sub%d:' % (ii + 1)
output.set_output_prefix(sub_prefix)
kwargs['master_problem'] = problem
confi = ProblemConf.from_file(ifname, required, other,
define_args=kwargs)
pbi = Problem.from_conf(confi, init_equations=True)
sti = State(pbi.equations.variables)
pbi.equations.set_data(None, ignore_unknown=True)
pbi.time_update()
pbi.update_materials()
sti.apply_ebc()
pbi_vars = pbi.get_variables()
output.set_output_prefix(master_prefix)
self.subpb.append([pbi, sti, None])
# append "slave" DofInfo
for jj in pbi_vars.names:
if not(pbi_vars[jj].is_state()):
continue
didx = pbi.equations.variables.adi.indx[jj]
ndof = didx.stop - didx.start
if jj in self.adi_indx:
if ndof != \
(self.adi_indx[jj].stop - self.adi_indx[jj].start):
raise ValueError('DOFs do not match!')
else:
self.adi_indx.update({
jj: slice(last_indx, last_indx + ndof, None)})
last_indx += ndof
for jj in conf.coupling_variables:
if jj in pbi_vars.names:
if pbi_vars[jj].dual_var_name is not None:
self.cvars_to_pb[jj][0] = ii
else:
self.cvars_to_pb[jj][1] = ii
self.subpb.append([problem, None, None])
self.cvars_to_pb_map = {}
for varname, pbs in six.iteritems(self.cvars_to_pb):
# match field nodes
coors = []
for ii in pbs:
pbi = self.subpb[ii][0]
pbi_vars = pbi.get_variables()
fcoors = pbi_vars[varname].field.coors
dc = nm.abs(nm.max(fcoors, axis=0)\
- nm.min(fcoors, axis=0))
ax = nm.where(dc > 1e-9)[0]
coors.append(fcoors[:,ax])
if len(coors[0]) != len(coors[1]):
raise ValueError('number of nodes does not match!')
kdtree = KDTree(coors[0])
map_12 = kdtree.query(coors[1])[1]
pbi1 = self.subpb[pbs[0]][0]
pbi1_vars = pbi1.get_variables()
eq_map_1 = pbi1_vars[varname].eq_map
pbi2 = self.subpb[pbs[1]][0]
pbi2_vars = pbi2.get_variables()
eq_map_2 = pbi2_vars[varname].eq_map
dpn = eq_map_2.dpn
nnd = map_12.shape[0]
map_12_nd = nm.zeros((nnd * dpn,), dtype=nm.int32)
if dpn > 1:
for ii in range(dpn):
map_12_nd[ii::dpn] = map_12 * dpn + ii
else:
map_12_nd = map_12
idx = nm.where(eq_map_2.eq >= 0)[0]
self.cvars_to_pb_map[varname] = eq_map_1.eq[map_12[idx]]
def sparse_submat(self, Ad, Ar, Ac, gr, gc, S):
"""
A[gr,gc] = S
"""
if type(gr) is slice:
gr = nm.arange(gr.start, gr.stop)
if type(gc) is slice:
gc = nm.arange(gc.start, gc.stop)
for ii, lrow in enumerate(S):
m = lrow.indices.shape[0]
idxrow = nm.ones((m,), dtype=nm.int32) * gr[ii]
Ar = nm.hstack([Ar, idxrow])
Ac = nm.hstack([Ac, gc[lrow.indices]])
Ad = nm.hstack([Ad, lrow.data])
return Ad, Ar, Ac
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
self.init_subproblems(self.conf, **kwargs)
max_indx = 0
hst = nm.hstack
for ii in six.itervalues(self.adi_indx):
max_indx = nm.max([max_indx, ii.stop])
new_rhs = nm.zeros((max_indx,), dtype=rhs.dtype)
new_rhs[:rhs.shape[0]] = rhs
# copy "master" matrices
pbi = self.subpb[-1][0]
adi_indxi = pbi.equations.variables.adi.indx
mtxc = mtx.tocsc()
aux_data = nm.array([], dtype=mtxc.dtype)
aux_rows = nm.array([], dtype=nm.int32)
aux_cols = nm.array([], dtype=nm.int32)
for jk, jv in six.iteritems(adi_indxi):
if jk in self.cvars_to_pb:
if not(self.cvars_to_pb[jk][0] == -1):
continue
gjv = self.adi_indx[jk]
ii = gjv.start
for jj in nm.arange(jv.start, jv.stop):
ptr = mtxc.indptr[jj]
nn = mtxc.indptr[jj + 1] - ptr
sl = slice(ptr, ptr + nn, None)
aux_data = hst([aux_data, mtxc.data[sl]])
aux_rows = hst([aux_rows, mtxc.indices[sl]])
aux_cols = hst([aux_cols, nm.ones((nn,), dtype=nm.int32) * ii])
ii += 1
# copy "slave" (sub)matricies
mtxs = []
for kk, (pbi, sti0, _) in enumerate(self.subpb[:-1]):
x0i = sti0.get_reduced()
evi = pbi.get_evaluator()
mtxi = evi.eval_tangent_matrix(x0i, mtx=pbi.mtx_a)
rhsi = evi.eval_residual(x0i)
mtxs.append(mtxi)
adi_indxi = pbi.equations.variables.adi.indx
for ik, iv in six.iteritems(adi_indxi):
if ik in self.cvars_to_pb:
if not(self.cvars_to_pb[ik][0] == kk):
continue
giv = self.adi_indx[ik]
for jk, jv in six.iteritems(adi_indxi):
gjv = self.adi_indx[jk]
if jk in self.cvars_to_pb:
if not(self.cvars_to_pb[jk][0] == kk):
continue
aux_data, aux_rows, aux_cols =\
self.sparse_submat(aux_data, aux_rows, aux_cols,
giv, gjv, mtxi[iv, jv])
new_rhs[giv] = rhsi[iv]
mtxs.append(mtx)
# copy "coupling" (sub)matricies
for varname, pbs in six.iteritems(self.cvars_to_pb):
idx = pbs[1]
pbi = self.subpb[idx][0]
mtxi = mtxs[idx]
gjv = self.adi_indx[varname]
jv = pbi.equations.variables.adi.indx[varname]
adi_indxi = pbi.equations.variables.adi.indx
for ik, iv in six.iteritems(adi_indxi):
if ik == varname:
continue
giv = self.adi_indx[ik]
aux_mtx = mtxi[iv,:].tocsc()
for ll, jj in enumerate(nm.arange(jv.start, jv.stop)):
ptr = aux_mtx.indptr[jj]
nn = aux_mtx.indptr[jj + 1] - ptr
if nn < 1:
continue
sl = slice(ptr, ptr + nn, None)
aux_data = hst([aux_data, aux_mtx.data[sl]])
aux_rows = hst([aux_rows, aux_mtx.indices[sl] + giv.start])
jjr = gjv.start + self.cvars_to_pb_map[varname][ll]
aux_cols = hst([aux_cols,
nm.ones((nn,), dtype=nm.int32) * jjr])
# create new matrix
new_mtx = sps.coo_matrix((aux_data, (aux_rows, aux_cols))).tocsr()
res0 = ScipyDirect.__call__(self, new_rhs, mtx=new_mtx)
res = []
for kk, (pbi, sti0, _) in enumerate(self.subpb):
adi_indxi = pbi.equations.variables.adi.indx
max_indx = 0
for ii in six.itervalues(adi_indxi):
max_indx = nm.max([max_indx, ii.stop])
resi = nm.zeros((max_indx,), dtype=res0.dtype)
for ik, iv in six.iteritems(adi_indxi):
giv = self.adi_indx[ik]
if ik in self.cvars_to_pb:
if pbi is self.subpb[self.cvars_to_pb[ik][1]][0]:
giv = self.cvars_to_pb_map[ik] + giv.start
resi[iv] = res0[giv]
if sti0 is not None:
sti = sti0.copy()
sti.set_reduced(-resi)
pbi.setup_default_output()
pbi.save_state(pbi.get_output_name(), sti)
self.subpb[kk][-1] = sti
res.append(resi)
return res[-1]
|
[
"sfepy.solvers.solvers.LinearSolver.__init__",
"sfepy.solvers.ls_mumps.load_mumps_libraries",
"sfepy.base.conf.get_standard_keywords",
"sfepy.base.base.output",
"sfepy.base.base.assert_",
"sfepy.discrete.Problem.from_conf",
"sfepy.base.base.get_default",
"sfepy.base.base.output.set_output_prefix",
"sfepy.base.base.output.get_output_prefix",
"sfepy.base.conf.ProblemConf.from_file",
"sfepy.discrete.state.State",
"sfepy.base.timing.Timer",
"sfepy.base.base.try_imports"
] |
[((158, 218), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""', 'sps.SparseEfficiencyWarning'], {}), "('ignore', sps.SparseEfficiencyWarning)\n", (179, 218), False, 'import warnings\n'), ((617, 655), 'sfepy.base.base.get_default', 'get_default', (['solver_class', 'ScipyDirect'], {}), '(solver_class, ScipyDirect)\n', (628, 655), False, 'from sfepy.base.base import output, get_default, assert_, try_imports\n'), ((674, 702), 'sfepy.base.base.get_default', 'get_default', (['solver_conf', '{}'], {}), '(solver_conf, {})\n', (685, 702), False, 'from sfepy.base.base import output, get_default, assert_, try_imports\n'), ((1017, 1031), 'hashlib.sha1', 'hashlib.sha1', ([], {}), '()\n', (1029, 1031), False, 'import hashlib\n'), ((2013, 2030), 'sfepy.base.timing.Timer', 'Timer', ([], {'start': '(True)'}), '(start=True)\n', (2018, 2030), False, 'from sfepy.base.timing import Timer\n'), ((2047, 2075), 'sfepy.base.base.get_default', 'get_default', (['conf', 'self.conf'], {}), '(conf, self.conf)\n', (2058, 2075), False, 'from sfepy.base.base import output, get_default, assert_, try_imports\n'), ((2090, 2116), 'sfepy.base.base.get_default', 'get_default', (['mtx', 'self.mtx'], {}), '(mtx, self.mtx)\n', (2101, 2116), False, 'from sfepy.base.base import output, get_default, assert_, try_imports\n'), ((2134, 2166), 'sfepy.base.base.get_default', 'get_default', (['status', 'self.status'], {}), '(status, self.status)\n', (2145, 2166), False, 'from sfepy.base.base import output, get_default, assert_, try_imports\n'), ((2185, 2219), 'sfepy.base.base.get_default', 'get_default', (['context', 'self.context'], {}), '(context, self.context)\n', (2196, 2219), False, 'from sfepy.base.base import output, get_default, assert_, try_imports\n'), ((2229, 2282), 'sfepy.base.base.assert_', 'assert_', (['(mtx.shape[0] == mtx.shape[1] == rhs.shape[0])'], {}), '(mtx.shape[0] == mtx.shape[1] == rhs.shape[0])\n', (2236, 2282), False, 'from sfepy.base.base import output, get_default, assert_, try_imports\n'), ((3168, 3185), 'sfepy.base.timing.Timer', 'Timer', ([], {'start': '(True)'}), '(start=True)\n', (3173, 3185), False, 'from sfepy.base.timing import Timer\n'), ((3202, 3230), 'sfepy.base.base.get_default', 'get_default', (['conf', 'self.conf'], {}), '(conf, self.conf)\n', (3213, 3230), False, 'from sfepy.base.base import output, get_default, assert_, try_imports\n'), ((3245, 3271), 'sfepy.base.base.get_default', 'get_default', (['mtx', 'self.mtx'], {}), '(mtx, self.mtx)\n', (3256, 3271), False, 'from sfepy.base.base import output, get_default, assert_, try_imports\n'), ((3289, 3321), 'sfepy.base.base.get_default', 'get_default', (['status', 'self.status'], {}), '(status, self.status)\n', (3300, 3321), False, 'from sfepy.base.base import output, get_default, assert_, try_imports\n'), ((3340, 3374), 'sfepy.base.base.get_default', 'get_default', (['context', 'self.context'], {}), '(context, self.context)\n', (3351, 3374), False, 'from sfepy.base.base import output, get_default, assert_, try_imports\n'), ((3390, 3418), 'sfepy.base.base.get_default', 'get_default', (['comm', 'self.comm'], {}), '(comm, self.comm)\n', (3401, 3418), False, 'from sfepy.base.base import output, get_default, assert_, try_imports\n'), ((3583, 3627), 'sfepy.base.base.assert_', 'assert_', (['(mshape[0] == mshape[1] == rshape[0])'], {}), '(mshape[0] == mshape[1] == rshape[0])\n', (3590, 3627), False, 'from sfepy.base.base import output, get_default, assert_, try_imports\n'), ((4527, 4582), 'sfepy.solvers.solvers.LinearSolver.__init__', 
'LinearSolver.__init__', (['self', 'conf'], {'solve': 'None'}), '(self, conf, solve=None, **kwargs)\n', (4548, 4582), False, 'from sfepy.solvers.solvers import LinearSolver\n'), ((4692, 4878), 'sfepy.base.base.try_imports', 'try_imports', (["['import scipy.linsolve as sls', 'import scipy.splinalg.dsolve as sls',\n 'import scipy.sparse.linalg.dsolve as sls']", '"""cannot import scipy sparse direct solvers!"""'], {}), "(['import scipy.linsolve as sls',\n 'import scipy.splinalg.dsolve as sls',\n 'import scipy.sparse.linalg.dsolve as sls'],\n 'cannot import scipy sparse direct solvers!')\n", (4703, 4878), False, 'from sfepy.base.base import output, get_default, assert_, try_imports\n'), ((8965, 9025), 'sfepy.solvers.solvers.LinearSolver.__init__', 'LinearSolver.__init__', (['self', 'conf'], {'context': 'context'}), '(self, conf, context=context, **kwargs)\n', (8986, 9025), False, 'from sfepy.solvers.solvers import LinearSolver\n'), ((9698, 9733), 'sfepy.base.base.get_default', 'get_default', (['eps_a', 'self.conf.eps_a'], {}), '(eps_a, self.conf.eps_a)\n', (9709, 9733), False, 'from sfepy.base.base import output, get_default, assert_, try_imports\n'), ((9750, 9785), 'sfepy.base.base.get_default', 'get_default', (['eps_r', 'self.conf.eps_r'], {}), '(eps_r, self.conf.eps_r)\n', (9761, 9785), False, 'from sfepy.base.base import output, get_default, assert_, try_imports\n'), ((9802, 9837), 'sfepy.base.base.get_default', 'get_default', (['i_max', 'self.conf.i_max'], {}), '(i_max, self.conf.i_max)\n', (9813, 9837), False, 'from sfepy.base.base import output, get_default, assert_, try_imports\n'), ((13416, 13468), 'sfepy.solvers.solvers.LinearSolver.__init__', 'LinearSolver.__init__', (['self', 'conf'], {'mg': 'None'}), '(self, conf, mg=None, **kwargs)\n', (13437, 13468), False, 'from sfepy.solvers.solvers import LinearSolver\n'), ((14015, 14050), 'sfepy.base.base.get_default', 'get_default', (['eps_r', 'self.conf.eps_r'], {}), '(eps_r, self.conf.eps_r)\n', (14026, 14050), False, 'from sfepy.base.base import output, get_default, assert_, try_imports\n'), ((14067, 14102), 'sfepy.base.base.get_default', 'get_default', (['i_max', 'self.conf.i_max'], {}), '(i_max, self.conf.i_max)\n', (14078, 14102), False, 'from sfepy.base.base import output, get_default, assert_, try_imports\n'), ((17141, 17210), 'sfepy.solvers.solvers.LinearSolver.__init__', 'LinearSolver.__init__', (['self', 'conf'], {'mg': 'None', 'context': 'context'}), '(self, conf, mg=None, context=context, **kwargs)\n', (17162, 17210), False, 'from sfepy.solvers.solvers import LinearSolver\n'), ((17870, 17905), 'sfepy.base.base.get_default', 'get_default', (['eps_r', 'self.conf.eps_r'], {}), '(eps_r, self.conf.eps_r)\n', (17881, 17905), False, 'from sfepy.base.base import output, get_default, assert_, try_imports\n'), ((17922, 17957), 'sfepy.base.base.get_default', 'get_default', (['i_max', 'self.conf.i_max'], {}), '(i_max, self.conf.i_max)\n', (17933, 17957), False, 'from sfepy.base.base import output, get_default, assert_, try_imports\n'), ((21959, 22008), 'six.iteritems', 'six.iteritems', (['petsc.KSP.ConvergedReason.__dict__'], {}), '(petsc.KSP.ConvergedReason.__dict__)\n', (21972, 22008), False, 'import six\n'), ((22101, 22265), 'sfepy.solvers.solvers.LinearSolver.__init__', 'LinearSolver.__init__', (['self', 'conf'], {'petsc': 'petsc', 'comm': 'comm', 'converged_reasons': 'converged_reasons', 'fields': 'None', 'ksp': 'None', 'pmtx': 'None', 'context': 'context'}), '(self, conf, petsc=petsc, comm=comm, converged_reasons\n 
=converged_reasons, fields=None, ksp=None, pmtx=None, context=context,\n **kwargs)\n', (22122, 22265), False, 'from sfepy.solvers.solvers import LinearSolver\n'), ((22612, 22640), 'sfepy.base.base.get_default', 'get_default', (['comm', 'self.comm'], {}), '(comm, self.comm)\n', (22623, 22640), False, 'from sfepy.base.base import output, get_default, assert_, try_imports\n'), ((22691, 22718), 'six.iteritems', 'six.iteritems', (['field_ranges'], {}), '(field_ranges)\n', (22704, 22718), False, 'import six\n'), ((24595, 24630), 'sfepy.base.base.get_default', 'get_default', (['eps_a', 'self.conf.eps_a'], {}), '(eps_a, self.conf.eps_a)\n', (24606, 24630), False, 'from sfepy.base.base import output, get_default, assert_, try_imports\n'), ((24647, 24682), 'sfepy.base.base.get_default', 'get_default', (['eps_r', 'self.conf.eps_r'], {}), '(eps_r, self.conf.eps_r)\n', (24658, 24682), False, 'from sfepy.base.base import output, get_default, assert_, try_imports\n'), ((24699, 24734), 'sfepy.base.base.get_default', 'get_default', (['i_max', 'self.conf.i_max'], {}), '(i_max, self.conf.i_max)\n', (24710, 24734), False, 'from sfepy.base.base import output, get_default, assert_, try_imports\n'), ((27131, 27159), 'sfepy.solvers.ls_mumps.load_mumps_libraries', 'mumps.load_mumps_libraries', ([], {}), '()\n', (27157, 27159), True, 'import sfepy.solvers.ls_mumps as mumps\n'), ((27200, 27298), 'sfepy.solvers.solvers.LinearSolver.__init__', 'LinearSolver.__init__', (['self', 'conf'], {'mumps': 'mumps', 'mumps_ls': 'None', 'mumps_presolved': '(False)'}), '(self, conf, mumps=mumps, mumps_ls=None,\n mumps_presolved=False, **kwargs)\n', (27221, 27298), False, 'from sfepy.solvers.solvers import LinearSolver\n'), ((29142, 29170), 'sfepy.solvers.ls_mumps.load_mumps_libraries', 'mumps.load_mumps_libraries', ([], {}), '()\n', (29168, 29170), True, 'import sfepy.solvers.ls_mumps as mumps\n'), ((31001, 31062), 'os.path.join', 'op.join', (['data_dir', '"""sfepy"""', '"""solvers"""', '"""ls_mumps_parallel.py"""'], {}), "(data_dir, 'sfepy', 'solvers', 'ls_mumps_parallel.py')\n", (31008, 31062), True, 'import os.path as op\n'), ((31107, 31195), 'mpi4py.MPI.COMM_SELF.Spawn', 'MPI.COMM_SELF.Spawn', (['sys.executable'], {'args': '[mumps_call]', 'maxprocs': 'self.number_of_cpu'}), '(sys.executable, args=[mumps_call], maxprocs=self.\n number_of_cpu)\n', (31126, 31195), False, 'from mpi4py import MPI\n'), ((32494, 32512), 'scipy.linalg.solve', 'sla.solve', (['S.T', 'y2'], {}), '(S.T, y2)\n', (32503, 32512), True, 'import scipy.linalg as sla\n'), ((33693, 33722), 'six.itervalues', 'six.itervalues', (['self.adi_indx'], {}), '(self.adi_indx)\n', (33707, 33722), False, 'import six\n'), ((34221, 34244), 'sfepy.base.conf.get_standard_keywords', 'get_standard_keywords', ([], {}), '()\n', (34242, 34244), False, 'from sfepy.base.conf import ProblemConf, get_standard_keywords\n'), ((34269, 34295), 'sfepy.base.base.output.get_output_prefix', 'output.get_output_prefix', ([], {}), '()\n', (34293, 34295), False, 'from sfepy.base.base import output, get_default, assert_, try_imports\n'), ((36093, 36124), 'six.iteritems', 'six.iteritems', (['self.cvars_to_pb'], {}), '(self.cvars_to_pb)\n', (36106, 36124), False, 'import six\n'), ((38261, 38290), 'six.itervalues', 'six.itervalues', (['self.adi_indx'], {}), '(self.adi_indx)\n', (38275, 38290), False, 'import six\n'), ((38362, 38400), 'numpy.zeros', 'nm.zeros', (['(max_indx,)'], {'dtype': 'rhs.dtype'}), '((max_indx,), dtype=rhs.dtype)\n', (38370, 38400), True, 'import numpy as nm\n'), ((38603, 38633), 
'numpy.array', 'nm.array', (['[]'], {'dtype': 'mtxc.dtype'}), '([], dtype=mtxc.dtype)\n', (38611, 38633), True, 'import numpy as nm\n'), ((38653, 38681), 'numpy.array', 'nm.array', (['[]'], {'dtype': 'nm.int32'}), '([], dtype=nm.int32)\n', (38661, 38681), True, 'import numpy as nm\n'), ((38701, 38729), 'numpy.array', 'nm.array', (['[]'], {'dtype': 'nm.int32'}), '([], dtype=nm.int32)\n', (38709, 38729), True, 'import numpy as nm\n'), ((38753, 38777), 'six.iteritems', 'six.iteritems', (['adi_indxi'], {}), '(adi_indxi)\n', (38766, 38777), False, 'import six\n'), ((40565, 40596), 'six.iteritems', 'six.iteritems', (['self.cvars_to_pb'], {}), '(self.cvars_to_pb)\n', (40578, 40596), False, 'import six\n'), ((2322, 2358), 'sfepy.base.base.assert_', 'assert_', (['(x0.shape[0] == rhs.shape[0])'], {}), '(x0.shape[0] == rhs.shape[0])\n', (2329, 2358), False, 'from sfepy.base.base import output, get_default, assert_, try_imports\n'), ((3746, 3777), 'sfepy.base.base.assert_', 'assert_', (['(xshape[0] == rshape[0])'], {}), '(xshape[0] == rshape[0])\n', (3753, 3777), False, 'from sfepy.base.base import output, get_default, assert_, try_imports\n'), ((5136, 5329), 'sfepy.base.base.try_imports', 'try_imports', (["['import scipy.linsolve.umfpack as um',\n 'import scipy.splinalg.dsolve.umfpack as um',\n 'import scipy.sparse.linalg.dsolve.umfpack as um',\n 'import scikits.umfpack as um']"], {}), "(['import scipy.linsolve.umfpack as um',\n 'import scipy.splinalg.dsolve.umfpack as um',\n 'import scipy.sparse.linalg.dsolve.umfpack as um',\n 'import scikits.umfpack as um'])\n", (5147, 5329), False, 'from sfepy.base.base import output, get_default, assert_, try_imports\n'), ((10535, 10572), 'sfepy.base.base.output', 'output', (['msg'], {'verbose': '(conf.verbose > 1)'}), '(msg, verbose=conf.verbose > 1)\n', (10541, 10572), False, 'from sfepy.base.base import output, get_default, assert_, try_imports\n'), ((14667, 14704), 'sfepy.base.base.output', 'output', (['msg'], {'verbose': '(conf.verbose > 1)'}), '(msg, verbose=conf.verbose > 1)\n', (14673, 14704), False, 'from sfepy.base.base import output, get_default, assert_, try_imports\n'), ((18655, 18692), 'sfepy.base.base.output', 'output', (['msg'], {'verbose': '(conf.verbose > 1)'}), '(msg, verbose=conf.verbose > 1)\n', (18661, 18692), False, 'from sfepy.base.base import output, get_default, assert_, try_imports\n'), ((23231, 23253), 'six.iteritems', 'six.iteritems', (['options'], {}), '(options)\n', (23244, 23253), False, 'import six\n'), ((24134, 24153), 'scipy.sparse.csr_matrix', 'sps.csr_matrix', (['mtx'], {}), '(mtx)\n', (24148, 24153), True, 'import scipy.sparse as sps\n'), ((32458, 32479), 'numpy.hstack', 'nm.hstack', (['schur_list'], {}), '(schur_list)\n', (32467, 32479), True, 'import numpy as nm\n'), ((33748, 33776), 'numpy.max', 'nm.max', (['[last_indx, ii.stop]'], {}), '([last_indx, ii.stop])\n', (33754, 33776), True, 'import numpy as nm\n'), ((34425, 34461), 'sfepy.base.base.output.set_output_prefix', 'output.set_output_prefix', (['sub_prefix'], {}), '(sub_prefix)\n', (34449, 34461), False, 'from sfepy.base.base import output, get_default, assert_, try_imports\n'), ((34529, 34595), 'sfepy.base.conf.ProblemConf.from_file', 'ProblemConf.from_file', (['ifname', 'required', 'other'], {'define_args': 'kwargs'}), '(ifname, required, other, define_args=kwargs)\n', (34550, 34595), False, 'from sfepy.base.conf import ProblemConf, get_standard_keywords\n'), ((34656, 34701), 'sfepy.discrete.Problem.from_conf', 'Problem.from_conf', (['confi'], {'init_equations': 
'(True)'}), '(confi, init_equations=True)\n', (34673, 34701), False, 'from sfepy.discrete import Problem\n'), ((34720, 34750), 'sfepy.discrete.state.State', 'State', (['pbi.equations.variables'], {}), '(pbi.equations.variables)\n', (34725, 34750), False, 'from sfepy.discrete.state import State\n'), ((34961, 35000), 'sfepy.base.base.output.set_output_prefix', 'output.set_output_prefix', (['master_prefix'], {}), '(master_prefix)\n', (34985, 35000), False, 'from sfepy.base.base import output, get_default, assert_, try_imports\n'), ((36681, 36697), 'scipy.spatial.cKDTree', 'KDTree', (['coors[0]'], {}), '(coors[0])\n', (36687, 36697), True, 'from scipy.spatial import cKDTree as KDTree\n'), ((37108, 37146), 'numpy.zeros', 'nm.zeros', (['(nnd * dpn,)'], {'dtype': 'nm.int32'}), '((nnd * dpn,), dtype=nm.int32)\n', (37116, 37146), True, 'import numpy as nm\n'), ((37585, 37613), 'numpy.arange', 'nm.arange', (['gr.start', 'gr.stop'], {}), '(gr.start, gr.stop)\n', (37594, 37613), True, 'import numpy as nm\n'), ((37662, 37690), 'numpy.arange', 'nm.arange', (['gc.start', 'gc.stop'], {}), '(gc.start, gc.stop)\n', (37671, 37690), True, 'import numpy as nm\n'), ((37845, 37868), 'numpy.hstack', 'nm.hstack', (['[Ar, idxrow]'], {}), '([Ar, idxrow])\n', (37854, 37868), True, 'import numpy as nm\n'), ((37886, 37919), 'numpy.hstack', 'nm.hstack', (['[Ac, gc[lrow.indices]]'], {}), '([Ac, gc[lrow.indices]])\n', (37895, 37919), True, 'import numpy as nm\n'), ((37937, 37963), 'numpy.hstack', 'nm.hstack', (['[Ad, lrow.data]'], {}), '([Ad, lrow.data])\n', (37946, 37963), True, 'import numpy as nm\n'), ((38315, 38342), 'numpy.max', 'nm.max', (['[max_indx, ii.stop]'], {}), '([max_indx, ii.stop])\n', (38321, 38342), True, 'import numpy as nm\n'), ((38988, 39016), 'numpy.arange', 'nm.arange', (['jv.start', 'jv.stop'], {}), '(jv.start, jv.stop)\n', (38997, 39016), True, 'import numpy as nm\n'), ((39787, 39811), 'six.iteritems', 'six.iteritems', (['adi_indxi'], {}), '(adi_indxi)\n', (39800, 39811), False, 'import six\n'), ((40872, 40896), 'six.iteritems', 'six.iteritems', (['adi_indxi'], {}), '(adi_indxi)\n', (40885, 40896), False, 'import six\n'), ((42019, 42044), 'six.itervalues', 'six.itervalues', (['adi_indxi'], {}), '(adi_indxi)\n', (42033, 42044), False, 'import six\n'), ((42121, 42160), 'numpy.zeros', 'nm.zeros', (['(max_indx,)'], {'dtype': 'res0.dtype'}), '((max_indx,), dtype=res0.dtype)\n', (42129, 42160), True, 'import numpy as nm\n'), ((42187, 42211), 'six.iteritems', 'six.iteritems', (['adi_indxi'], {}), '(adi_indxi)\n', (42200, 42211), False, 'import six\n'), ((9134, 9194), 'sfepy.base.base.output', 'output', (["('scipy solver %s does not exist!' % self.conf.method)"], {}), "('scipy solver %s does not exist!' % self.conf.method)\n", (9140, 9194), False, 'from sfepy.base.base import output, get_default, assert_, try_imports\n'), ((9207, 9233), 'sfepy.base.base.output', 'output', (['"""using cg instead"""'], {}), "('using cg instead')\n", (9213, 9233), False, 'from sfepy.base.base import output, get_default, assert_, try_imports\n'), ((10456, 10475), 'numpy.linalg.norm', 'nm.linalg.norm', (['res'], {}), '(res)\n', (10470, 10475), True, 'import numpy as nm\n'), ((13580, 13633), 'sfepy.base.base.output', 'output', (["('pyamg.%s does not exist!' % self.conf.method)"], {}), "('pyamg.%s does not exist!' 
% self.conf.method)\n", (13586, 13633), False, 'from sfepy.base.base import output, get_default, assert_, try_imports\n'), ((13646, 13703), 'sfepy.base.base.output', 'output', (['"""using pyamg.smoothed_aggregation_solver instead"""'], {}), "('using pyamg.smoothed_aggregation_solver instead')\n", (13652, 13703), False, 'from sfepy.base.base import output, get_default, assert_, try_imports\n'), ((14588, 14607), 'numpy.linalg.norm', 'nm.linalg.norm', (['res'], {}), '(res)\n', (14602, 14607), True, 'import numpy as nm\n'), ((15285, 15313), 'six.iteritems', 'six.iteritems', (['solver_kwargs'], {}), '(solver_kwargs)\n', (15298, 15313), False, 'import six\n'), ((17353, 17413), 'sfepy.base.base.output', 'output', (["('pyamg.krylov.%s does not exist!' % self.conf.method)"], {}), "('pyamg.krylov.%s does not exist!' % self.conf.method)\n", (17359, 17413), False, 'from sfepy.base.base import output, get_default, assert_, try_imports\n'), ((18576, 18595), 'numpy.linalg.norm', 'nm.linalg.norm', (['res'], {}), '(res)\n', (18590, 18595), True, 'import numpy as nm\n'), ((29317, 29344), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (29342, 29344), False, 'import multiprocessing\n'), ((29774, 29786), 'tempfile.gettempdir', 'gettempdir', ([], {}), '()\n', (29784, 29786), False, 'from tempfile import gettempdir\n'), ((30021, 30039), 'numpy.where', 'nm.where', (['(cc >= rr)'], {}), '(cc >= rr)\n', (30029, 30039), True, 'import numpy as nm\n'), ((32256, 32307), 'numpy.arange', 'nm.arange', (['slc.start', 'slc.stop', 'slc.step'], {'dtype': '"""i"""'}), "(slc.start, slc.stop, slc.step, dtype='i')\n", (32265, 32307), True, 'import numpy as nm\n'), ((37197, 37207), 'six.moves.range', 'range', (['dpn'], {}), '(dpn)\n', (37202, 37207), False, 'from six.moves import range\n'), ((37340, 37366), 'numpy.where', 'nm.where', (['(eq_map_2.eq >= 0)'], {}), '(eq_map_2.eq >= 0)\n', (37348, 37366), True, 'import numpy as nm\n'), ((37789, 37818), 'numpy.ones', 'nm.ones', (['(m,)'], {'dtype': 'nm.int32'}), '((m,), dtype=nm.int32)\n', (37796, 37818), True, 'import numpy as nm\n'), ((40019, 40043), 'six.iteritems', 'six.iteritems', (['adi_indxi'], {}), '(adi_indxi)\n', (40032, 40043), False, 'import six\n'), ((41718, 41766), 'scipy.sparse.coo_matrix', 'sps.coo_matrix', (['(aux_data, (aux_rows, aux_cols))'], {}), '((aux_data, (aux_rows, aux_cols)))\n', (41732, 41766), True, 'import scipy.sparse as sps\n'), ((42073, 42100), 'numpy.max', 'nm.max', (['[max_indx, ii.stop]'], {}), '([max_indx, ii.stop])\n', (42079, 42100), True, 'import numpy as nm\n'), ((15043, 15071), 'six.iteritems', 'six.iteritems', (['solver_kwargs'], {}), '(solver_kwargs)\n', (15056, 15071), False, 'import six\n'), ((36477, 36497), 'numpy.where', 'nm.where', (['(dc > 1e-09)'], {}), '(dc > 1e-09)\n', (36485, 36497), True, 'import numpy as nm\n'), ((41087, 41115), 'numpy.arange', 'nm.arange', (['jv.start', 'jv.stop'], {}), '(jv.start, jv.stop)\n', (41096, 41115), True, 'import numpy as nm\n'), ((11484, 11497), 'numpy.sign', 'nm.sign', (['info'], {}), '(info)\n', (11491, 11497), True, 'import numpy as nm\n'), ((19173, 19186), 'numpy.sign', 'nm.sign', (['info'], {}), '(info)\n', (19180, 19186), True, 'import numpy as nm\n'), ((36378, 36400), 'numpy.max', 'nm.max', (['fcoors'], {'axis': '(0)'}), '(fcoors, axis=0)\n', (36384, 36400), True, 'import numpy as nm\n'), ((36432, 36454), 'numpy.min', 'nm.min', (['fcoors'], {'axis': '(0)'}), '(fcoors, axis=0)\n', (36438, 36454), True, 'import numpy as nm\n'), ((39312, 39342), 'numpy.ones', 
'nm.ones', (['(nn,)'], {'dtype': 'nm.int32'}), '((nn,), dtype=nm.int32)\n', (39319, 39342), True, 'import numpy as nm\n'), ((41632, 41662), 'numpy.ones', 'nm.ones', (['(nn,)'], {'dtype': 'nm.int32'}), '((nn,), dtype=nm.int32)\n', (41639, 41662), True, 'import numpy as nm\n')]
|
# -*- coding: utf-8 -*-
import megengine as mge
import megengine.random as rand
import megengine.functional as F
import numpy as np
from config import config
from det_opr.bbox_opr import box_overlap_opr, bbox_transform_opr, box_overlap_ignore_opr
import pdb
def fpn_roi_target(rpn_rois, im_info, gt_boxes, fg_threshold=config.fg_threshold, top_k=1):
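    """Assign RPN proposals to ground-truth boxes and sample foreground and
    background RoIs, returning the sampled rois, their class labels and box
    regression targets (per image, concatenated over the batch).
    """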
return_rois, return_labels = [], []
return_bbox_targets = []
# get per image proposals and gt_boxes
batch_per_gpu = im_info.shape[0]
sampling = True
# is_sample = True if top_k < 2 else False
for bid in range(batch_per_gpu):
gt_boxes_perimg = gt_boxes[bid, :im_info[bid, 5].astype(np.int32), :]
dummy_gt = F.ones([1, gt_boxes_perimg.shape[1]])
batch_inds = F.ones((gt_boxes_perimg.shape[0], 1)) * bid
#if config.proposal_append_gt:
gt_rois = F.concat([batch_inds, gt_boxes_perimg[:, :4]], axis=1)
batch_rois_mask = F.equal(rpn_rois[:, 0], bid) > 0
_, batch_rois_index = F.cond_take(batch_rois_mask, batch_rois_mask)
# batch_roi_mask = rpn_rois[:, 0] == bid
# batch_roi_inds = mask_to_inds(batch_roi_mask)
all_rois= F.concat([rpn_rois[batch_rois_index], gt_rois], axis=0) if sampling \
else rpn_rois[batch_rois_index]
# all_rois = F.concat([rpn_rois.ai[batch_roi_inds], gt_rois], axis=0)
        gt_boxes_perimg = F.concat([gt_boxes_perimg, dummy_gt], axis=0)
overlaps_normal, overlaps_ignore = box_overlap_ignore_opr(
all_rois[:, 1:5], gt_boxes_perimg)
# overlaps_normal, overlaps_normal_indices = F.argsort(overlaps_normal, descending=True)
# overlaps_ignore, overlaps_ignore_indices = F.argsort(overlaps_ignore, descending=True)
overlaps_normal_indices = F.argsort(overlaps_normal, descending=True)
overlaps_normal = F.gather(overlaps_normal, 1, overlaps_normal_indices)
# overlaps_normal = F.nn.indexing_one_hot(overlaps_normal, overlaps_normal_indices, 1)
        overlaps_ignore_indices = F.argsort(overlaps_ignore, descending=True)
overlaps_ignore = F.gather(overlaps_ignore, 1, overlaps_ignore_indices)
# overlaps_ignore = F.nn.indexing_one_hot(overlaps_ignore, overlaps_ignore_indices, 1)
# gt max and indices, ignore max and indices
max_overlaps_normal = overlaps_normal[:, :top_k].flatten()
gt_assignment_normal = overlaps_normal_indices[:, :top_k].flatten()
max_overlaps_ignore = overlaps_ignore[:, :top_k].flatten()
gt_assignment_ignore = overlaps_ignore_indices[:, :top_k].flatten()
        # construct the assignment masks
ignore_assign_mask = (max_overlaps_normal < fg_threshold).astype(np.float32) * (
max_overlaps_ignore > max_overlaps_normal).astype(np.float32)
max_overlaps = max_overlaps_normal * (1 - ignore_assign_mask).astype(np.float32) + \
max_overlaps_ignore * ignore_assign_mask
        gt_assignment = gt_assignment_normal * (1 - ignore_assign_mask) + \
gt_assignment_ignore * ignore_assign_mask
gt_assignment = gt_assignment.astype(np.int32)
labels = gt_boxes_perimg[gt_assignment, 4]
fg_mask = (max_overlaps >= fg_threshold).astype(np.float32) * (1 - F.equal(labels, config.ignore_label))
bg_mask = (max_overlaps < config.bg_threshold_high).astype(np.float32) * (
max_overlaps >= config.bg_threshold_low).astype(np.float32)
fg_mask = fg_mask.reshape(-1, top_k)
bg_mask = bg_mask.reshape(-1, top_k)
pos_max = config.num_rois * config.fg_ratio
fg_inds_mask = _bernoulli_sample_masks(fg_mask[:, 0], pos_max, 1) if sampling else F.equal(fg_mask[:, 0], 0)
neg_max = config.num_rois - fg_inds_mask.sum()
bg_inds_mask = _bernoulli_sample_masks(bg_mask[:, 0], neg_max, 1) if sampling else F.equal(bg_mask[:, 0], 0)
labels = labels * fg_mask.reshape(-1)
keep_mask = fg_inds_mask + bg_inds_mask
keep_mask = keep_mask + F.equal(keep_mask.sum(), 0)
# keep_inds = mask_to_inds(keep_mask)
_, keep_inds = F.cond_take(keep_mask > 0, keep_mask)
#keep_inds = keep_inds[:F.minimum(config.num_rois, keep_inds.shapeof()[0])]
# labels
labels = labels.reshape(-1, top_k)[keep_inds]
gt_assignment = gt_assignment.reshape(-1, top_k)[keep_inds].reshape(-1).astype(np.int32)
target_boxes = gt_boxes_perimg[gt_assignment, :4]
# rois = all_rois.ai[keep_inds]
rois = all_rois[keep_inds]
# target_shape = (rois.shapeof()[0], top_k, rois.shapeof()[-1])
n, c = rois.shape[0], rois.shape[1]
target_rois = F.broadcast_to(F.expand_dims(rois, 1), (n, top_k, c)).reshape(-1, c)
# target_rois = F.add_axis(rois, 1).broadcast(target_shape).reshape(-1, rois.shapeof()[-1])
bbox_targets = bbox_transform_opr(target_rois[:, 1:5], target_boxes[:, :4])
if config.rcnn_bbox_normalize_targets:
std_opr = mge.tensor(config.bbox_normalize_stds[None, :]).to(rois.device)
mean_opr = mge.tensor(config.bbox_normalize_means[None, :]).to(rois.device)
minus_opr = mean_opr / std_opr
bbox_targets = bbox_targets / std_opr - minus_opr
bbox_targets = bbox_targets.reshape(-1, top_k * 4)
return_rois.append(rois)
return_labels.append(labels)
return_bbox_targets.append(bbox_targets)
if config.batch_per_gpu == 1:
rois, labels, bbox_targets = rois.detach(), labels.detach(), bbox_targets.detach()
return rois, labels, bbox_targets
# return F.zero_grad(rois), F.zero_grad(labels), F.zero_grad(bbox_targets)
else:
return_rois = F.concat(return_rois, axis=0)
return_labels = F.concat(return_labels, axis=0)
return_bbox_targets = F.concat(return_bbox_targets, axis=0)
return_rois = return_rois.detach()
return_labels = return_labels.detach()
return_bbox_targets = return_bbox_targets.detach()
return return_rois, return_labels, return_bbox_targets
# rois, labels, bbox_targets = return_rois.detach(), return_labels.detach(), return_bbox_targets.detach()
# return rois, labels, bbox_targets
# return F.zero_grad(return_rois), F.zero_grad(return_labels), F.zero_grad(return_bbox_targets)
def _bernoulli_sample_masks(masks, num_samples, sample_value):
""" Using the bernoulli sampling method"""
sample_mask = F.equal(masks, sample_value)
num_mask = sample_mask.sum()
num_final_samples = F.minimum(num_mask, num_samples)
# here, we use the bernoulli probability to sample the anchors
sample_prob = num_final_samples / num_mask
# uniform_rng = rand.uniform(sample_mask.shapeof()[0])
uniform_rng = rand.uniform(0, 1, sample_mask.shape)
after_sampled_mask = (uniform_rng <= sample_prob) * sample_mask
return after_sampled_mask
|
[
"megengine.functional.gather",
"megengine.functional.minimum",
"megengine.functional.argsort",
"megengine.tensor",
"megengine.functional.cond_take",
"megengine.random.uniform",
"megengine.functional.equal",
"megengine.functional.expand_dims",
"megengine.functional.concat",
"megengine.functional.ones"
] |
[((6507, 6535), 'megengine.functional.equal', 'F.equal', (['masks', 'sample_value'], {}), '(masks, sample_value)\n', (6514, 6535), True, 'import megengine.functional as F\n'), ((6593, 6625), 'megengine.functional.minimum', 'F.minimum', (['num_mask', 'num_samples'], {}), '(num_mask, num_samples)\n', (6602, 6625), True, 'import megengine.functional as F\n'), ((6817, 6854), 'megengine.random.uniform', 'rand.uniform', (['(0)', '(1)', 'sample_mask.shape'], {}), '(0, 1, sample_mask.shape)\n', (6829, 6854), True, 'import megengine.random as rand\n'), ((706, 743), 'megengine.functional.ones', 'F.ones', (['[1, gt_boxes_perimg.shape[1]]'], {}), '([1, gt_boxes_perimg.shape[1]])\n', (712, 743), True, 'import megengine.functional as F\n'), ((867, 921), 'megengine.functional.concat', 'F.concat', (['[batch_inds, gt_boxes_perimg[:, :4]]'], {'axis': '(1)'}), '([batch_inds, gt_boxes_perimg[:, :4]], axis=1)\n', (875, 921), True, 'import megengine.functional as F\n'), ((1011, 1056), 'megengine.functional.cond_take', 'F.cond_take', (['batch_rois_mask', 'batch_rois_mask'], {}), '(batch_rois_mask, batch_rois_mask)\n', (1022, 1056), True, 'import megengine.functional as F\n'), ((1408, 1453), 'megengine.functional.concat', 'F.concat', (['[gt_boxes_perimg, dummy_gt]'], {'axis': '(0)'}), '([gt_boxes_perimg, dummy_gt], axis=0)\n', (1416, 1453), True, 'import megengine.functional as F\n'), ((1496, 1553), 'det_opr.bbox_opr.box_overlap_ignore_opr', 'box_overlap_ignore_opr', (['all_rois[:, 1:5]', 'gt_boxes_perimg'], {}), '(all_rois[:, 1:5], gt_boxes_perimg)\n', (1518, 1553), False, 'from det_opr.bbox_opr import box_overlap_opr, bbox_transform_opr, box_overlap_ignore_opr\n'), ((1800, 1843), 'megengine.functional.argsort', 'F.argsort', (['overlaps_normal'], {'descending': '(True)'}), '(overlaps_normal, descending=True)\n', (1809, 1843), True, 'import megengine.functional as F\n'), ((1870, 1923), 'megengine.functional.gather', 'F.gather', (['overlaps_normal', '(1)', 'overlaps_normal_indices'], {}), '(overlaps_normal, 1, overlaps_normal_indices)\n', (1878, 1923), True, 'import megengine.functional as F\n'), ((2053, 2096), 'megengine.functional.argsort', 'F.argsort', (['overlaps_ignore'], {'descending': '(True)'}), '(overlaps_ignore, descending=True)\n', (2062, 2096), True, 'import megengine.functional as F\n'), ((2125, 2178), 'megengine.functional.gather', 'F.gather', (['overlaps_ignore', '(1)', 'overlaps_ignore_indices'], {}), '(overlaps_ignore, 1, overlaps_ignore_indices)\n', (2133, 2178), True, 'import megengine.functional as F\n'), ((4149, 4186), 'megengine.functional.cond_take', 'F.cond_take', (['(keep_mask > 0)', 'keep_mask'], {}), '(keep_mask > 0, keep_mask)\n', (4160, 4186), True, 'import megengine.functional as F\n'), ((4902, 4962), 'det_opr.bbox_opr.bbox_transform_opr', 'bbox_transform_opr', (['target_rois[:, 1:5]', 'target_boxes[:, :4]'], {}), '(target_rois[:, 1:5], target_boxes[:, :4])\n', (4920, 4962), False, 'from det_opr.bbox_opr import box_overlap_opr, bbox_transform_opr, box_overlap_ignore_opr\n'), ((5749, 5778), 'megengine.functional.concat', 'F.concat', (['return_rois'], {'axis': '(0)'}), '(return_rois, axis=0)\n', (5757, 5778), True, 'import megengine.functional as F\n'), ((5803, 5834), 'megengine.functional.concat', 'F.concat', (['return_labels'], {'axis': '(0)'}), '(return_labels, axis=0)\n', (5811, 5834), True, 'import megengine.functional as F\n'), ((5865, 5902), 'megengine.functional.concat', 'F.concat', (['return_bbox_targets'], {'axis': '(0)'}), '(return_bbox_targets, axis=0)\n', (5873, 5902), 
True, 'import megengine.functional as F\n'), ((766, 803), 'megengine.functional.ones', 'F.ones', (['(gt_boxes_perimg.shape[0], 1)'], {}), '((gt_boxes_perimg.shape[0], 1))\n', (772, 803), True, 'import megengine.functional as F\n'), ((948, 976), 'megengine.functional.equal', 'F.equal', (['rpn_rois[:, 0]', 'bid'], {}), '(rpn_rois[:, 0], bid)\n', (955, 976), True, 'import megengine.functional as F\n'), ((1189, 1244), 'megengine.functional.concat', 'F.concat', (['[rpn_rois[batch_rois_index], gt_rois]'], {'axis': '(0)'}), '([rpn_rois[batch_rois_index], gt_rois], axis=0)\n', (1197, 1244), True, 'import megengine.functional as F\n'), ((3727, 3752), 'megengine.functional.equal', 'F.equal', (['fg_mask[:, 0]', '(0)'], {}), '(fg_mask[:, 0], 0)\n', (3734, 3752), True, 'import megengine.functional as F\n'), ((3899, 3924), 'megengine.functional.equal', 'F.equal', (['bg_mask[:, 0]', '(0)'], {}), '(bg_mask[:, 0], 0)\n', (3906, 3924), True, 'import megengine.functional as F\n'), ((3296, 3332), 'megengine.functional.equal', 'F.equal', (['labels', 'config.ignore_label'], {}), '(labels, config.ignore_label)\n', (3303, 3332), True, 'import megengine.functional as F\n'), ((4725, 4747), 'megengine.functional.expand_dims', 'F.expand_dims', (['rois', '(1)'], {}), '(rois, 1)\n', (4738, 4747), True, 'import megengine.functional as F\n'), ((5032, 5079), 'megengine.tensor', 'mge.tensor', (['config.bbox_normalize_stds[None, :]'], {}), '(config.bbox_normalize_stds[None, :])\n', (5042, 5079), True, 'import megengine as mge\n'), ((5119, 5167), 'megengine.tensor', 'mge.tensor', (['config.bbox_normalize_means[None, :]'], {}), '(config.bbox_normalize_means[None, :])\n', (5129, 5167), True, 'import megengine as mge\n')]
|
"""Arquivo para fixtures"""
from unittest.mock import patch
from pytest import fixture
from sqlmodel import create_engine, SQLModel
from tests.mocks import mock_user
from mitmirror.infra.entities import * # pylint: disable=W0614, W0401
user = mock_user()
@fixture(scope="module")
def fake_user():
"""Mock de usuario"""
return user
@fixture(autouse=True, scope="function")
def separate_database(request):
"""
    Create a mock of the database so that each test uses a separate database.
"""
tmpdir = request.getfixturevalue("tmpdir")
test_db = tmpdir.join("mitmirror.test.db")
engine = create_engine(f"sqlite:///{test_db}")
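    # Create all SQLModel tables in the fresh per-test database file.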
SQLModel.metadata.create_all(engine)
with patch("mitmirror.infra.config.database_config.engine", engine):
yield
|
[
"sqlmodel.SQLModel.metadata.create_all",
"sqlmodel.create_engine"
] |
[((246, 257), 'tests.mocks.mock_user', 'mock_user', ([], {}), '()\n', (255, 257), False, 'from tests.mocks import mock_user\n'), ((261, 284), 'pytest.fixture', 'fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (268, 284), False, 'from pytest import fixture\n'), ((348, 387), 'pytest.fixture', 'fixture', ([], {'autouse': '(True)', 'scope': '"""function"""'}), "(autouse=True, scope='function')\n", (355, 387), False, 'from pytest import fixture\n'), ((622, 659), 'sqlmodel.create_engine', 'create_engine', (['f"""sqlite:///{test_db}"""'], {}), "(f'sqlite:///{test_db}')\n", (635, 659), False, 'from sqlmodel import create_engine, SQLModel\n'), ((664, 700), 'sqlmodel.SQLModel.metadata.create_all', 'SQLModel.metadata.create_all', (['engine'], {}), '(engine)\n', (692, 700), False, 'from sqlmodel import create_engine, SQLModel\n'), ((710, 772), 'unittest.mock.patch', 'patch', (['"""mitmirror.infra.config.database_config.engine"""', 'engine'], {}), "('mitmirror.infra.config.database_config.engine', engine)\n", (715, 772), False, 'from unittest.mock import patch\n')]
|
from typing import Optional
from fastapi import FastAPI
from sqlmodel import (
SQLModel,
Field,
create_engine,
select,
Session
)
engine = create_engine('sqlite:///database.db')
class Pessoa(SQLModel, table=True):
    id: Optional[int] = Field(default=None, primary_key=True)
nome: str
idade: str
SQLModel.metadata.create_all(engine)
app = FastAPI()
@app.get('/')
def home():
    return {'message': 'It worked!!!'}
@app.get('/pessoa')
def pessoa():
query = select(Pessoa)
with Session(engine) as session:
result = session.execute(query).scalars().all()
return result
@app.get('/pessoas-nome')
def pessoas_nome():
query = select(Pessoa.nome)
with Session(engine) as session:
result = session.execute(query).scalars().all()
return result
|
[
"sqlmodel.create_engine",
"sqlmodel.select",
"sqlmodel.Session",
"sqlmodel.SQLModel.metadata.create_all",
"sqlmodel.Field"
] |
[((204, 242), 'sqlmodel.create_engine', 'create_engine', (['"""sqlite:///database.db"""'], {}), "('sqlite:///database.db')\n", (217, 242), False, 'from sqlmodel import SQLModel, Field, create_engine, select, Session\n'), ((373, 409), 'sqlmodel.SQLModel.metadata.create_all', 'SQLModel.metadata.create_all', (['engine'], {}), '(engine)\n', (401, 409), False, 'from sqlmodel import SQLModel, Field, create_engine, select, Session\n'), ((417, 426), 'fastapi.FastAPI', 'FastAPI', ([], {}), '()\n', (424, 426), False, 'from fastapi import FastAPI\n'), ((305, 342), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (310, 342), False, 'from sqlmodel import SQLModel, Field, create_engine, select, Session\n'), ((539, 553), 'sqlmodel.select', 'select', (['Pessoa'], {}), '(Pessoa)\n', (545, 553), False, 'from sqlmodel import SQLModel, Field, create_engine, select, Session\n'), ((718, 737), 'sqlmodel.select', 'select', (['Pessoa.nome'], {}), '(Pessoa.nome)\n', (724, 737), False, 'from sqlmodel import SQLModel, Field, create_engine, select, Session\n'), ((563, 578), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (570, 578), False, 'from sqlmodel import SQLModel, Field, create_engine, select, Session\n'), ((747, 762), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (754, 762), False, 'from sqlmodel import SQLModel, Field, create_engine, select, Session\n')]
|
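For reference, a hedged usage sketch of the service above (assuming the module is saved as app.py; the seeded row values are examples) that exercises both routes with FastAPI's TestClient:
# Assumes the code above lives in app.py.
from fastapi.testclient import TestClient
from sqlmodel import Session
from app import Pessoa, app, engine

with Session(engine) as session:
    session.add(Pessoa(nome="Ana", idade="30"))
    session.commit()

client = TestClient(app)
print(client.get("/pessoa").json())        # e.g. [{"id": 1, "nome": "Ana", "idade": "30"}]
print(client.get("/pessoas-nome").json())  # e.g. ["Ana"]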
# Copyright (c) 2020 <NAME>
# This code is licensed under MIT license
# (https://github.com/kwotsin/mimicry/blob/master/LICENSE)
# ------------------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2019 Megvii Inc. All rights reserved.
# ------------------------------------------------------------------------------
import megengine.functional as F
import megengine.jit as jit
from .. import gan
from ..blocks import DBlock, DBlockOptimized
class WGANBaseGenerator(gan.BaseGenerator):
r"""
ResNet backbone generator for ResNet WGAN.
Attributes:
nz (int): Noise dimension for upsampling.
ngf (int): Variable controlling generator feature map sizes.
bottom_width (int): Starting width for upsampling generator output to an image.
loss_type (str): Name of loss to use for GAN loss.
"""
def __init__(self, nz, ngf, bottom_width, **kwargs):
super().__init__(nz=nz,
ngf=ngf,
bottom_width=bottom_width,
loss_type="wasserstein",
**kwargs)
class WGANBaseDiscriminator(gan.BaseDiscriminator):
r"""
ResNet backbone discriminator for ResNet WGAN.
Attributes:
ndf (int): Variable controlling discriminator feature map sizes.
loss_type (str): Name of loss to use for GAN loss.
"""
def __init__(self, ndf, **kwargs):
super().__init__(ndf=ndf, loss_type="wasserstein", **kwargs)
def _reset_jit_graph(self, impl: callable):
"""We override this func to attach weight clipping after default training step"""
traced_obj = jit.trace(impl)
def _(*args, **kwargs):
ret = traced_obj(*args, **kwargs)
if self.training:
self._apply_lipshitz_constraint() # dynamically apply weight clipping
return ret
return _
def _apply_lipshitz_constraint(self):
"""Weight clipping described in [Wasserstein GAN](https://arxiv.org/abs/1701.07875)"""
for p in self.parameters():
F.add_update(p, F.clamp(p, lower=-3e-2, upper=3e-2), alpha=0)
def layernorm(x):
original_shape = x.shape
x = x.reshape(original_shape[0], -1)
m = F.mean(x, axis=1, keepdims=True)
v = F.mean((x - m) ** 2, axis=1, keepdims=True)
x = (x - m) / F.maximum(F.sqrt(v), 1e-6)
x = x.reshape(original_shape)
return x
class WGANDBlockWithLayerNorm(DBlock):
def _residual(self, x):
h = x
h = layernorm(h)
h = self.activation(h)
h = self.c1(h)
h = layernorm(h)
h = self.activation(h)
h = self.c2(h)
if self.downsample:
h = F.avg_pool2d(h, 2)
return h
class WGANDBlockOptimized(DBlockOptimized):
pass
|
[
"megengine.functional.sqrt",
"megengine.functional.mean",
"megengine.jit.trace",
"megengine.functional.clamp",
"megengine.functional.avg_pool2d"
] |
[((2715, 2747), 'megengine.functional.mean', 'F.mean', (['x'], {'axis': '(1)', 'keepdims': '(True)'}), '(x, axis=1, keepdims=True)\n', (2721, 2747), True, 'import megengine.functional as F\n'), ((2756, 2799), 'megengine.functional.mean', 'F.mean', (['((x - m) ** 2)'], {'axis': '(1)', 'keepdims': '(True)'}), '((x - m) ** 2, axis=1, keepdims=True)\n', (2762, 2799), True, 'import megengine.functional as F\n'), ((2118, 2133), 'megengine.jit.trace', 'jit.trace', (['impl'], {}), '(impl)\n', (2127, 2133), True, 'import megengine.jit as jit\n'), ((2828, 2837), 'megengine.functional.sqrt', 'F.sqrt', (['v'], {}), '(v)\n', (2834, 2837), True, 'import megengine.functional as F\n'), ((3177, 3195), 'megengine.functional.avg_pool2d', 'F.avg_pool2d', (['h', '(2)'], {}), '(h, 2)\n', (3189, 3195), True, 'import megengine.functional as F\n'), ((2571, 2606), 'megengine.functional.clamp', 'F.clamp', (['p'], {'lower': '(-0.03)', 'upper': '(0.03)'}), '(p, lower=-0.03, upper=0.03)\n', (2578, 2606), True, 'import megengine.functional as F\n')]
|
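The `_apply_lipshitz_constraint` hook above is the weight clipping from the WGAN paper; a framework-agnostic NumPy sketch of the same idea (parameter values are examples):
# Plain-NumPy restatement of WGAN weight clipping (arXiv:1701.07875):
# after every optimizer step, force each parameter into [-c, c].
import numpy as np

def clip_weights(params, c=3e-2):
    return [np.clip(p, -c, c) for p in params]

params = [np.array([0.1, -0.5]), np.array([[0.01, 0.2]])]
print(clip_weights(params))  # values outside [-0.03, 0.03] are clamped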
from datetime import datetime
from sqlmodel import Field, SQLModel, Relationship, Column, DateTime
from app.models.links import LinkGroupUser
from typing import List, Optional
from pydantic import EmailStr
from app.models.base_uuid_model import BaseUUIDModel
from uuid import UUID
class UserBase(SQLModel):
first_name: str
last_name: str
email: EmailStr = Field(nullable=True, index=True, sa_column_kwargs={"unique": True})
is_active: bool = Field(default=True)
is_superuser: bool = Field(default=False)
    birthdate: Optional[datetime] = Field(sa_column=Column(DateTime(timezone=True), nullable=True))  # birthday with timezone
phone: Optional[str]
state: Optional[str]
country: Optional[str]
address: Optional[str]
class User(BaseUUIDModel, UserBase, table=True):
hashed_password: str = Field(
nullable=False, index=True
)
role_id: Optional[UUID] = Field(default=None, foreign_key="role.id")
role: Optional["Role"] = Relationship(back_populates="users", sa_relationship_kwargs={"lazy": "selectin"})
groups: List["Group"] = Relationship(back_populates="users", link_model=LinkGroupUser, sa_relationship_kwargs={"lazy": "selectin"})
|
[
"sqlmodel.Field",
"sqlmodel.DateTime",
"sqlmodel.Relationship"
] |
[((369, 436), 'sqlmodel.Field', 'Field', ([], {'nullable': '(True)', 'index': '(True)', 'sa_column_kwargs': "{'unique': True}"}), "(nullable=True, index=True, sa_column_kwargs={'unique': True})\n", (374, 436), False, 'from sqlmodel import Field, SQLModel, Relationship, Column, DateTime\n'), ((463, 482), 'sqlmodel.Field', 'Field', ([], {'default': '(True)'}), '(default=True)\n', (468, 482), False, 'from sqlmodel import Field, SQLModel, Relationship, Column, DateTime\n'), ((508, 528), 'sqlmodel.Field', 'Field', ([], {'default': '(False)'}), '(default=False)\n', (513, 528), False, 'from sqlmodel import Field, SQLModel, Relationship, Column, DateTime\n'), ((842, 875), 'sqlmodel.Field', 'Field', ([], {'nullable': '(False)', 'index': '(True)'}), '(nullable=False, index=True)\n', (847, 875), False, 'from sqlmodel import Field, SQLModel, Relationship, Column, DateTime\n'), ((920, 962), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'foreign_key': '"""role.id"""'}), "(default=None, foreign_key='role.id')\n", (925, 962), False, 'from sqlmodel import Field, SQLModel, Relationship, Column, DateTime\n'), ((992, 1077), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""users"""', 'sa_relationship_kwargs': "{'lazy': 'selectin'}"}), "(back_populates='users', sa_relationship_kwargs={'lazy':\n 'selectin'})\n", (1004, 1077), False, 'from sqlmodel import Field, SQLModel, Relationship, Column, DateTime\n'), ((1102, 1213), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""users"""', 'link_model': 'LinkGroupUser', 'sa_relationship_kwargs': "{'lazy': 'selectin'}"}), "(back_populates='users', link_model=LinkGroupUser,\n sa_relationship_kwargs={'lazy': 'selectin'})\n", (1114, 1213), False, 'from sqlmodel import Field, SQLModel, Relationship, Column, DateTime\n'), ((588, 611), 'sqlmodel.DateTime', 'DateTime', ([], {'timezone': '(True)'}), '(timezone=True)\n', (596, 611), False, 'from sqlmodel import Field, SQLModel, Relationship, Column, DateTime\n')]
|
from typing import TYPE_CHECKING, Any, Dict, Optional, Union
from uuid import UUID, uuid4
from pydantic import PositiveFloat, validator
from sqlmodel import Column, Field, Relationship, SQLModel
from sqlmodel.sql.sqltypes import GUID
if TYPE_CHECKING:
from .user import User
class BaseItem(SQLModel):
code: str = Field(description="Item code", min_length=1)
name: str = Field(description="Item Name", min_length=1)
cost: Optional[float] = Field(description="Production/Buy cost of the item", ge=0)
    value: PositiveFloat = Field(description="Suggested sell value of item")
    amount: int = Field(default=0, description="Quantity of items available", ge=0)
class CreateItem(BaseItem):
@validator("value")
def validate_value(cls, value: Union[str, float], values: Dict[str, Any]) -> float:
if isinstance(value, str):
if "," in value and "." not in value:
value = value.replace(",", ".")
value = float(value)
if (values.get("cost") or 0) >= value:
raise ValueError("The sugested sell value must be higher then buy value!")
return value
class UpdateItem(BaseItem):
    id: UUID = Field(description="Item ID")
@validator("value")
def validate_value(cls, value: Union[str, float], values: Dict[str, Any]) -> float:
if isinstance(value, str):
value = float(value)
if (values.get("cost") or 0) >= value:
raise ValueError("The sugested sell value must be higher then buy value!")
return value
class QueryItem(SQLModel):
name: Optional[str] = Field(description="Name of the item for query")
code: Optional[str] = Field(description="Code of the item for query")
    avaliable: Optional[bool] = Field(description="Flag to identify if the item is available")
class Item(BaseItem, table=True):
__tablename__ = "items"
    id: UUID = Field(default_factory=uuid4, description="Item ID", sa_column=Column("id", GUID(), primary_key=True))
owner_id: UUID = Field(description="User ID that owns the file", foreign_key="users.id")
owner: "User" = Relationship()
@property
def avaliable(self) -> bool:
return self.amount > 0
|
[
"sqlmodel.sql.sqltypes.GUID",
"sqlmodel.Relationship",
"sqlmodel.Field"
] |
[((325, 369), 'sqlmodel.Field', 'Field', ([], {'description': '"""Item code"""', 'min_length': '(1)'}), "(description='Item code', min_length=1)\n", (330, 369), False, 'from sqlmodel import Column, Field, Relationship, SQLModel\n'), ((386, 430), 'sqlmodel.Field', 'Field', ([], {'description': '"""Item Name"""', 'min_length': '(1)'}), "(description='Item Name', min_length=1)\n", (391, 430), False, 'from sqlmodel import Column, Field, Relationship, SQLModel\n'), ((459, 517), 'sqlmodel.Field', 'Field', ([], {'description': '"""Production/Buy cost of the item"""', 'ge': '(0)'}), "(description='Production/Buy cost of the item', ge=0)\n", (464, 517), False, 'from sqlmodel import Column, Field, Relationship, SQLModel\n'), ((545, 593), 'sqlmodel.Field', 'Field', ([], {'description': '"""Suggested sell value of item"""'}), "(description='Suggested sell value of item')\n", (550, 593), False, 'from sqlmodel import Column, Field, Relationship, SQLModel\n'), ((612, 677), 'sqlmodel.Field', 'Field', ([], {'default': '(0)', 'description': '"""Quantity of items available"""', 'ge': '(0)'}), "(default=0, description='Quantity of items available', ge=0)\n", (617, 677), False, 'from sqlmodel import Column, Field, Relationship, SQLModel\n'), ((713, 731), 'pydantic.validator', 'validator', (['"""value"""'], {}), "('value')\n", (722, 731), False, 'from pydantic import PositiveFloat, validator\n'), ((1189, 1220), 'sqlmodel.Field', 'Field', ([], {'description': '"""Item ID"""'}), "(description='Item ID')\n", (1194, 1220), False, 'from sqlmodel import Column, Field, Relationship, SQLModel\n'), ((1227, 1245), 'pydantic.validator', 'validator', (['"""value"""'], {}), "('value')\n", (1236, 1245), False, 'from pydantic import PositiveFloat, validator\n'), ((1614, 1661), 'sqlmodel.Field', 'Field', ([], {'description': '"""Name of the item for query"""'}), "(description='Name of the item for query')\n", (1619, 1661), False, 'from sqlmodel import Column, Field, Relationship, SQLModel\n'), ((1688, 1735), 'sqlmodel.Field', 'Field', ([], {'description': '"""Code of the item for query"""'}), "(description='Code of the item for query')\n", (1693, 1735), False, 'from sqlmodel import Column, Field, Relationship, SQLModel\n'), ((1768, 1830), 'sqlmodel.Field', 'Field', ([], {'description': '"""Flag to identify if the item is available"""'}), "(description='Flag to identify if the item is available')\n", (1773, 1830), False, 'from sqlmodel import Column, Field, Relationship, SQLModel\n'), ((2037, 2108), 'sqlmodel.Field', 'Field', ([], {'description': '"""User ID that owns the file"""', 'foreign_key': '"""users.id"""'}), "(description='User ID that owns the file', foreign_key='users.id')\n", (2042, 2108), False, 'from sqlmodel import Column, Field, Relationship, SQLModel\n'), ((2129, 2143), 'sqlmodel.Relationship', 'Relationship', ([], {}), '()\n', (2141, 2143), False, 'from sqlmodel import Column, Field, Relationship, SQLModel\n'), ((1989, 1995), 'sqlmodel.sql.sqltypes.GUID', 'GUID', ([], {}), '()\n', (1993, 1995), False, 'from sqlmodel.sql.sqltypes import GUID\n')]
|
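One subtlety worth noting about the `CreateItem` validator above: in pydantic v1 a plain `@validator` runs after the field's own float coercion, so the string branch ("2,50" handling) only ever fires when the validator is declared with `pre=True`. A self-contained sketch of that variant (the class name is illustrative):
# Minimal reproduction of the comma-decimal handling; pre=True makes the
# validator see the raw string before float coercion (pydantic v1 semantics).
from typing import Any, Dict, Optional, Union
from pydantic import BaseModel, validator

class PriceCheck(BaseModel):
    cost: Optional[float] = None
    value: float

    @validator("value", pre=True)
    def validate_value(cls, value: Union[str, float], values: Dict[str, Any]) -> float:
        if isinstance(value, str) and "," in value and "." not in value:
            value = value.replace(",", ".")
        value = float(value)
        if (values.get("cost") or 0) >= value:
            raise ValueError("The suggested sell value must be higher than the buy value!")
        return value

print(PriceCheck(cost=2.0, value="2,50").value)  # 2.5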
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import json
import os
from tqdm import tqdm
import megengine as mge
import megengine.distributed as dist
from megengine.data import DataLoader
from official.vision.detection.tools.utils import (
DetEvaluator,
InferenceSampler,
PseudoDetectionDataset,
import_from_file
)
logger = mge.get_logger(__name__)
logger.setLevel("INFO")
def make_parser():
parser = argparse.ArgumentParser()
parser.add_argument(
"-f", "--file", default="net.py", type=str, help="net description file"
)
parser.add_argument(
"-w", "--weight_file", default=None, type=str, help="weights file",
)
parser.add_argument(
"-n", "--devices", default=1, type=int, help="total number of gpus for testing",
)
parser.add_argument(
"-d", "--dataset_dir", default="/data/datasets", type=str,
)
parser.add_argument("-se", "--start_epoch", default=-1, type=int)
parser.add_argument("-ee", "--end_epoch", default=-1, type=int)
return parser
def main():
# pylint: disable=import-outside-toplevel,too-many-branches,too-many-statements
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
parser = make_parser()
args = parser.parse_args()
current_network = import_from_file(args.file)
cfg = current_network.Cfg()
if args.weight_file:
args.start_epoch = args.end_epoch = -1
else:
if args.start_epoch == -1:
args.start_epoch = cfg.max_epoch - 1
if args.end_epoch == -1:
args.end_epoch = args.start_epoch
assert 0 <= args.start_epoch <= args.end_epoch < cfg.max_epoch
for epoch_num in range(args.start_epoch, args.end_epoch + 1):
if args.weight_file:
weight_file = args.weight_file
else:
weight_file = "log-of-{}/epoch_{}.pkl".format(
os.path.basename(args.file).split(".")[0], epoch_num
)
if args.devices > 1:
dist_worker = dist.launcher(n_gpus=args.devices)(worker)
result_list = dist_worker(current_network, weight_file, args.dataset_dir)
result_list = sum(result_list, [])
else:
result_list = worker(current_network, weight_file, args.dataset_dir)
all_results = DetEvaluator.format(result_list, cfg)
if args.weight_file:
json_path = "{}_{}.json".format(
os.path.basename(args.file).split(".")[0],
os.path.basename(args.weight_file).split(".")[0],
)
else:
json_path = "log-of-{}/epoch_{}.json".format(
os.path.basename(args.file).split(".")[0], epoch_num
)
all_results = json.dumps(all_results)
with open(json_path, "w") as fo:
fo.write(all_results)
logger.info("Save results to %s, start evaluation!", json_path)
eval_gt = COCO(
os.path.join(
args.dataset_dir, cfg.test_dataset["name"], cfg.test_dataset["ann_file"]
)
)
eval_dt = eval_gt.loadRes(json_path)
cocoEval = COCOeval(eval_gt, eval_dt, iouType="bbox")
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
metrics = [
"AP",
"[email protected]",
"[email protected]",
"APs",
"APm",
"APl",
"AR@1",
"AR@10",
"AR@100",
"ARs",
"ARm",
"ARl",
]
logger.info("mmAP".center(32, "-"))
for i, m in enumerate(metrics):
logger.info("|\t%s\t|\t%.03f\t|", m, cocoEval.stats[i])
logger.info("-" * 32)
def worker(current_network, weight_file, dataset_dir):
cfg = current_network.Cfg()
cfg.backbone_pretrained = False
model = current_network.Net(cfg)
model.eval()
state_dict = mge.load(weight_file)
if "state_dict" in state_dict:
state_dict = state_dict["state_dict"]
model.load_state_dict(state_dict)
evaluator = DetEvaluator(model)
test_loader = build_dataloader(dataset_dir, model.cfg)
if dist.get_rank() == 0:
test_loader = tqdm(test_loader)
result_list = []
for data in test_loader:
image, im_info = DetEvaluator.process_inputs(
data[0][0],
model.cfg.test_image_short_size,
model.cfg.test_image_max_size,
)
pred_res = evaluator.predict(
image=mge.tensor(image),
im_info=mge.tensor(im_info)
)
result = {
"pred_boxes": pred_res,
"image_id": int(data[1][2][0].split(".")[0].split("_")[-1]),
}
result_list.append(result)
return result_list
# pylint: disable=unused-argument
def build_dataloader(dataset_dir, cfg):
val_dataset = PseudoDetectionDataset(length=5000, order=["image", "info"])
val_sampler = InferenceSampler(val_dataset, 1)
val_dataloader = DataLoader(val_dataset, sampler=val_sampler, num_workers=2)
return val_dataloader
if __name__ == "__main__":
main()
|
[
"megengine.get_logger",
"megengine.distributed.get_rank",
"megengine.data.DataLoader",
"megengine.tensor",
"megengine.distributed.launcher",
"megengine.load"
] |
[((691, 715), 'megengine.get_logger', 'mge.get_logger', (['__name__'], {}), '(__name__)\n', (705, 715), True, 'import megengine as mge\n'), ((774, 799), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (797, 799), False, 'import argparse\n'), ((1656, 1683), 'official.vision.detection.tools.utils.import_from_file', 'import_from_file', (['args.file'], {}), '(args.file)\n', (1672, 1683), False, 'from official.vision.detection.tools.utils import DetEvaluator, InferenceSampler, PseudoDetectionDataset, import_from_file\n'), ((4286, 4307), 'megengine.load', 'mge.load', (['weight_file'], {}), '(weight_file)\n', (4294, 4307), True, 'import megengine as mge\n'), ((4444, 4463), 'official.vision.detection.tools.utils.DetEvaluator', 'DetEvaluator', (['model'], {}), '(model)\n', (4456, 4463), False, 'from official.vision.detection.tools.utils import DetEvaluator, InferenceSampler, PseudoDetectionDataset, import_from_file\n'), ((5235, 5295), 'official.vision.detection.tools.utils.PseudoDetectionDataset', 'PseudoDetectionDataset', ([], {'length': '(5000)', 'order': "['image', 'info']"}), "(length=5000, order=['image', 'info'])\n", (5257, 5295), False, 'from official.vision.detection.tools.utils import DetEvaluator, InferenceSampler, PseudoDetectionDataset, import_from_file\n'), ((5314, 5346), 'official.vision.detection.tools.utils.InferenceSampler', 'InferenceSampler', (['val_dataset', '(1)'], {}), '(val_dataset, 1)\n', (5330, 5346), False, 'from official.vision.detection.tools.utils import DetEvaluator, InferenceSampler, PseudoDetectionDataset, import_from_file\n'), ((5368, 5427), 'megengine.data.DataLoader', 'DataLoader', (['val_dataset'], {'sampler': 'val_sampler', 'num_workers': '(2)'}), '(val_dataset, sampler=val_sampler, num_workers=2)\n', (5378, 5427), False, 'from megengine.data import DataLoader\n'), ((2678, 2715), 'official.vision.detection.tools.utils.DetEvaluator.format', 'DetEvaluator.format', (['result_list', 'cfg'], {}), '(result_list, cfg)\n', (2697, 2715), False, 'from official.vision.detection.tools.utils import DetEvaluator, InferenceSampler, PseudoDetectionDataset, import_from_file\n'), ((3106, 3129), 'json.dumps', 'json.dumps', (['all_results'], {}), '(all_results)\n', (3116, 3129), False, 'import json\n'), ((3506, 3548), 'pycocotools.cocoeval.COCOeval', 'COCOeval', (['eval_gt', 'eval_dt'], {'iouType': '"""bbox"""'}), "(eval_gt, eval_dt, iouType='bbox')\n", (3514, 3548), False, 'from pycocotools.cocoeval import COCOeval\n'), ((4531, 4546), 'megengine.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (4544, 4546), True, 'import megengine.distributed as dist\n'), ((4575, 4592), 'tqdm.tqdm', 'tqdm', (['test_loader'], {}), '(test_loader)\n', (4579, 4592), False, 'from tqdm import tqdm\n'), ((4669, 4776), 'official.vision.detection.tools.utils.DetEvaluator.process_inputs', 'DetEvaluator.process_inputs', (['data[0][0]', 'model.cfg.test_image_short_size', 'model.cfg.test_image_max_size'], {}), '(data[0][0], model.cfg.test_image_short_size,\n model.cfg.test_image_max_size)\n', (4696, 4776), False, 'from official.vision.detection.tools.utils import DetEvaluator, InferenceSampler, PseudoDetectionDataset, import_from_file\n'), ((3315, 3406), 'os.path.join', 'os.path.join', (['args.dataset_dir', "cfg.test_dataset['name']", "cfg.test_dataset['ann_file']"], {}), "(args.dataset_dir, cfg.test_dataset['name'], cfg.test_dataset[\n 'ann_file'])\n", (3327, 3406), False, 'import os\n'), ((2384, 2418), 'megengine.distributed.launcher', 'dist.launcher', ([], {'n_gpus': 'args.devices'}), '(n_gpus=args.devices)\n', (2397, 2418), True, 'import megengine.distributed as dist\n'), ((4876, 4893), 'megengine.tensor', 'mge.tensor', (['image'], {}), '(image)\n', (4886, 4893), True, 'import megengine as mge\n'), ((4915, 4934), 'megengine.tensor', 'mge.tensor', (['im_info'], {}), '(im_info)\n', (4925, 4934), True, 'import megengine as mge\n'), ((2261, 2288), 'os.path.basename', 'os.path.basename', (['args.file'], {}), '(args.file)\n', (2277, 2288), False, 'import os\n'), ((2806, 2833), 'os.path.basename', 'os.path.basename', (['args.file'], {}), '(args.file)\n', (2822, 2833), False, 'import os\n'), ((2865, 2899), 'os.path.basename', 'os.path.basename', (['args.weight_file'], {}), '(args.weight_file)\n', (2881, 2899), False, 'import os\n'), ((3017, 3044), 'os.path.basename', 'os.path.basename', (['args.file'], {}), '(args.file)\n', (3033, 3044), False, 'import os\n')]
|
from sfepy.base.base import Struct
from sfepy.solvers import Solver
class MassOperator(Struct):
"""
Encapsulation of action and inverse action of a mass matrix operator
:math:`M`.
"""
def __init__(self, problem, options):
self.mtx_mass = problem.evaluate(options.mass, mode='weak',
auto_init=True, dw_mode='matrix')
if options.lumped:
raise NotImplementedError
else:
# Initialize solvers (and possibly presolve the matrix).
self.ls = Solver.any_from_conf(problem.ls_conf, mtx=self.mtx_mass,
presolve=True)
def action(self, vec):
"""
Action of mass matrix operator on a vector: :math:`M x`.
"""
return self.mtx_mass * vec
def inverse_action(self, vec):
"""
Inverse action of mass matrix operator on a vector: :math:`M^{-1} x`.
"""
return self.ls(vec)
|
[
"sfepy.solvers.Solver.any_from_conf"
] |
[((559, 630), 'sfepy.solvers.Solver.any_from_conf', 'Solver.any_from_conf', (['problem.ls_conf'], {'mtx': 'self.mtx_mass', 'presolve': '(True)'}), '(problem.ls_conf, mtx=self.mtx_mass, presolve=True)\n', (579, 630), False, 'from sfepy.solvers import Solver\n')]
|
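The two methods above reduce to a matrix-vector product and a linear solve; a toy SciPy illustration of the same pair of operations (the diagonal matrix here is only a stand-in for an assembled mass matrix):
# Toy stand-in for the assembled mass matrix M: action vs. inverse action.
import numpy as np
from scipy.sparse import identity
from scipy.sparse.linalg import spsolve

M = 2.0 * identity(4, format="csr")
x = np.ones(4)
print(M @ x)         # action M x          -> [2. 2. 2. 2.]
print(spsolve(M, x))    # inverse action M^-1 x -> [0.5 0.5 0.5 0.5]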
import typing as t
from sqlmodel import Field, SQLModel
class Quotes(SQLModel, table=True):
id: t.Optional[int] = Field(default=None, primary_key=True)
quote: str
|
[
"sqlmodel.Field"
] |
[((121, 158), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (126, 158), False, 'from sqlmodel import Field, SQLModel\n')]
|
#!/usr/bin/env python
"""
Convert a mesh file from one SfePy-supported format to another.
Examples::
$ ./script/convert_mesh.py meshes/3d/cylinder.mesh new.vtk
$ ./script/convert_mesh.py meshes/3d/cylinder.mesh new.vtk -s2.5
$ ./script/convert_mesh.py meshes/3d/cylinder.mesh new.vtk -s0.5,2,1
$ ./script/convert_mesh.py meshes/3d/cylinder.mesh new.vtk -s0.5,2,1 -c 0
$ ./script/convert_mesh.py meshes/3d/cylinder.mesh new.mesh --remesh='q2/0 a1e-8 O9/7 V'
$ ./script/convert_mesh.py meshes/3d/cylinder.mesh new2.mesh --remesh='rq2/0 a1e-8 O9/7 V'
"""
from __future__ import absolute_import
import sys
import os.path as op
from six.moves import range
sys.path.append('.')
from argparse import ArgumentParser, RawDescriptionHelpFormatter
from sfepy.base.base import nm, output
from sfepy.base.ioutils import remove_files
from sfepy.discrete.fem import Mesh, FEDomain
from sfepy.discrete.fem.meshio import (output_mesh_formats, MeshIO,
supported_cell_types)
from sfepy.discrete.fem.mesh import fix_double_nodes
from sfepy.mesh.mesh_tools import elems_q2t
helps = {
'scale' : 'scale factor (float or comma-separated list for each axis)'
' [default: %(default)s]',
'center' : 'center of the output mesh (0 for origin or'
' comma-separated list for each axis) applied after scaling'
' [default: %(default)s]',
'refine' : 'uniform refinement level [default: %(default)s]',
'format' : 'output mesh format (overrides filename_out extension)',
'list' : 'list supported readable/writable output mesh formats',
'merge' : 'remove duplicate vertices',
'tri-tetra' : 'convert elements: quad->tri, hexa->tetra',
'2d' : 'force a 2D mesh by removing the z coordinates - assumes a 3D mesh'
' in the xy plane',
'save-per-mat': 'extract cells by material id and save them into'
' separate mesh files with a name based on filename_out and the id'
' numbers (preserves original mesh vertices)',
'remesh' : """when given, remesh the given mesh using tetgen.
The options can be the following, separated by spaces, in this order: 1.
"r" causes remeshing of the mesh volume - if not present the mesh surface
is extracted and used for the volume mesh generation. 2.
"q[<float>/<float>]" (required) - the two numbers after "q" are a maximum
radius-edge ratio bound and a minimum dihedral angle bound. 3. "a<float>"
(optional) - the number imposes a maximum volume constraint on all
tetrahedra. 4. O[<0-9>/<0-7>] - the two numbers correspond to a mesh
optimization level and a choice of optimizing operations. 5. "V"
(optional) - if present, mesh statistics are printed. Consult the tetgen
documentation for details.""",
}
def _parse_val_or_vec(option, name, parser):
if option is not None:
try:
try:
option = float(option)
except ValueError:
option = [float(ii) for ii in option.split(',')]
option = nm.array(option, dtype=nm.float64, ndmin=1)
        except Exception:
output('bad %s! (%s)' % (name, option))
parser.print_help()
sys.exit(1)
return option
def main():
parser = ArgumentParser(description=__doc__,
formatter_class=RawDescriptionHelpFormatter)
parser.add_argument('-s', '--scale', metavar='scale',
action='store', dest='scale',
default=None, help=helps['scale'])
parser.add_argument('-c', '--center', metavar='center',
action='store', dest='center',
default=None, help=helps['center'])
parser.add_argument('-r', '--refine', metavar='level',
action='store', type=int, dest='refine',
default=0, help=helps['refine'])
parser.add_argument('-f', '--format', metavar='format',
action='store', type=str, dest='format',
default=None, help=helps['format'])
parser.add_argument('-l', '--list', action='store_true',
dest='list', help=helps['list'])
parser.add_argument('-m', '--merge', action='store_true',
dest='merge', help=helps['merge'])
parser.add_argument('-t', '--tri-tetra', action='store_true',
dest='tri_tetra', help=helps['tri-tetra'])
parser.add_argument('-2', '--2d', action='store_true',
dest='force_2d', help=helps['2d'])
parser.add_argument('--save-per-mat', action='store_true',
dest='save_per_mat', help=helps['save-per-mat'])
parser.add_argument('--remesh', metavar='options',
action='store', dest='remesh',
default=None, help=helps['remesh'])
parser.add_argument('filename_in')
parser.add_argument('filename_out')
options = parser.parse_args()
if options.list:
output('Supported readable mesh formats:')
output('--------------------------------')
output_mesh_formats('r')
output('')
output('Supported writable mesh formats:')
output('--------------------------------')
output_mesh_formats('w')
sys.exit(0)
scale = _parse_val_or_vec(options.scale, 'scale', parser)
center = _parse_val_or_vec(options.center, 'center', parser)
filename_in = options.filename_in
filename_out = options.filename_out
if options.remesh:
import tempfile
import shlex
import subprocess
dirname = tempfile.mkdtemp()
is_surface = options.remesh.startswith('q')
if is_surface:
mesh = Mesh.from_file(filename_in)
domain = FEDomain(mesh.name, mesh)
region = domain.create_region('surf', 'vertices of surface',
'facet')
surf_mesh = Mesh.from_region(region, mesh,
localize=True, is_surface=True)
filename = op.join(dirname, 'surf.mesh')
surf_mesh.write(filename, io='auto')
else:
import shutil
shutil.copy(filename_in, dirname)
filename = op.join(dirname, op.basename(filename_in))
qopts = ''.join(options.remesh.split()) # Remove spaces.
command = 'tetgen -BFENkACp%s %s' % (qopts, filename)
args = shlex.split(command)
subprocess.call(args)
root, ext = op.splitext(filename)
mesh = Mesh.from_file(root + '.1.vtk')
remove_files(dirname)
else:
mesh = Mesh.from_file(filename_in)
if options.force_2d:
data = list(mesh._get_io_data())
data[0] = data[0][:, :2]
mesh = Mesh.from_data(mesh.name, *data)
if scale is not None:
if len(scale) == 1:
tr = nm.eye(mesh.dim, dtype=nm.float64) * scale
elif len(scale) == mesh.dim:
tr = nm.diag(scale)
else:
raise ValueError('bad scale! (%s)' % scale)
mesh.transform_coors(tr)
if center is not None:
cc = 0.5 * mesh.get_bounding_box().sum(0)
shift = center - cc
tr = nm.c_[nm.eye(mesh.dim, dtype=nm.float64), shift[:, None]]
mesh.transform_coors(tr)
if options.refine > 0:
domain = FEDomain(mesh.name, mesh)
output('initial mesh: %d nodes %d elements'
% (domain.shape.n_nod, domain.shape.n_el))
for ii in range(options.refine):
output('refine %d...' % ii)
domain = domain.refine()
output('... %d nodes %d elements'
% (domain.shape.n_nod, domain.shape.n_el))
mesh = domain.mesh
    if options.tri_tetra:
conns = None
for k, new_desc in [('3_8', '3_4'), ('2_4', '2_3')]:
if k in mesh.descs:
conns = mesh.get_conn(k)
break
if conns is not None:
nelo = conns.shape[0]
output('initial mesh: %d elements' % nelo)
new_conns = elems_q2t(conns)
nn = new_conns.shape[0] // nelo
new_cgroups = nm.repeat(mesh.cmesh.cell_groups, nn)
output('new mesh: %d elements' % new_conns.shape[0])
mesh = Mesh.from_data(mesh.name, mesh.coors,
mesh.cmesh.vertex_groups,
[new_conns], [new_cgroups], [new_desc])
if options.merge:
desc = mesh.descs[0]
coor, ngroups, conns = fix_double_nodes(mesh.coors,
mesh.cmesh.vertex_groups,
mesh.get_conn(desc), 1e-9)
mesh = Mesh.from_data(mesh.name + '_merged',
coor, ngroups,
[conns], [mesh.cmesh.cell_groups], [desc])
if options.save_per_mat:
desc = mesh.descs[0]
conns, cgroups = mesh.get_conn(desc), mesh.cmesh.cell_groups
coors, ngroups = mesh.coors, mesh.cmesh.vertex_groups
mat_ids = nm.unique(cgroups)
for mat_id in mat_ids:
idxs = nm.where(cgroups == mat_id)[0]
imesh = Mesh.from_data(mesh.name + '_matid_%d' % mat_id,
coors, ngroups,
[conns[idxs]], [cgroups[idxs]], [desc])
fbase, fext = op.splitext(filename_out)
ifilename_out = '%s_matid_%d%s' % (fbase, mat_id, fext)
io = MeshIO.for_format(ifilename_out, format=options.format,
writable=True)
output('writing %s...' % ifilename_out)
imesh.write(ifilename_out, io=io)
output('...done')
io = MeshIO.for_format(filename_out, format=options.format,
writable=True)
cell_types = ', '.join(supported_cell_types[io.format])
output('writing [%s] %s...' % (cell_types, filename_out))
mesh.write(filename_out, io=io)
output('...done')
if __name__ == '__main__':
main()
|
[
"sfepy.base.base.nm.eye",
"sfepy.base.base.nm.repeat",
"sfepy.discrete.fem.meshio.output_mesh_formats",
"sfepy.discrete.fem.Mesh.from_region",
"sfepy.discrete.fem.Mesh.from_file",
"sfepy.discrete.fem.FEDomain",
"sfepy.base.base.output",
"sfepy.discrete.fem.Mesh.from_data",
"sfepy.base.base.nm.array",
"sfepy.discrete.fem.meshio.MeshIO.for_format",
"sfepy.base.ioutils.remove_files",
"sfepy.base.base.nm.unique",
"sfepy.mesh.mesh_tools.elems_q2t",
"sfepy.base.base.nm.diag",
"sfepy.base.base.nm.where"
] |
[((665, 685), 'sys.path.append', 'sys.path.append', (['"""."""'], {}), "('.')\n", (680, 685), False, 'import sys\n'), ((3246, 3331), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '__doc__', 'formatter_class': 'RawDescriptionHelpFormatter'}), '(description=__doc__, formatter_class=RawDescriptionHelpFormatter\n )\n', (3260, 3331), False, 'from argparse import ArgumentParser, RawDescriptionHelpFormatter\n'), ((9829, 9898), 'sfepy.discrete.fem.meshio.MeshIO.for_format', 'MeshIO.for_format', (['filename_out'], {'format': 'options.format', 'writable': '(True)'}), '(filename_out, format=options.format, writable=True)\n', (9846, 9898), False, 'from sfepy.discrete.fem.meshio import output_mesh_formats, MeshIO, supported_cell_types\n'), ((9991, 10048), 'sfepy.base.base.output', 'output', (["('writing [%s] %s...' % (cell_types, filename_out))"], {}), "('writing [%s] %s...' % (cell_types, filename_out))\n", (9997, 10048), False, 'from sfepy.base.base import nm, output\n'), ((10089, 10106), 'sfepy.base.base.output', 'output', (['"""...done"""'], {}), "('...done')\n", (10095, 10106), False, 'from sfepy.base.base import nm, output\n'), ((5006, 5048), 'sfepy.base.base.output', 'output', (['"""Supported readable mesh formats:"""'], {}), "('Supported readable mesh formats:')\n", (5012, 5048), False, 'from sfepy.base.base import nm, output\n'), ((5057, 5099), 'sfepy.base.base.output', 'output', (['"""--------------------------------"""'], {}), "('--------------------------------')\n", (5063, 5099), False, 'from sfepy.base.base import nm, output\n'), ((5108, 5132), 'sfepy.discrete.fem.meshio.output_mesh_formats', 'output_mesh_formats', (['"""r"""'], {}), "('r')\n", (5127, 5132), False, 'from sfepy.discrete.fem.meshio import output_mesh_formats, MeshIO, supported_cell_types\n'), ((5141, 5151), 'sfepy.base.base.output', 'output', (['""""""'], {}), "('')\n", (5147, 5151), False, 'from sfepy.base.base import nm, output\n'), ((5160, 5202), 'sfepy.base.base.output', 'output', (['"""Supported writable mesh formats:"""'], {}), "('Supported writable mesh formats:')\n", (5166, 5202), False, 'from sfepy.base.base import nm, output\n'), ((5211, 5253), 'sfepy.base.base.output', 'output', (['"""--------------------------------"""'], {}), "('--------------------------------')\n", (5217, 5253), False, 'from sfepy.base.base import nm, output\n'), ((5262, 5286), 'sfepy.discrete.fem.meshio.output_mesh_formats', 'output_mesh_formats', (['"""w"""'], {}), "('w')\n", (5281, 5286), False, 'from sfepy.discrete.fem.meshio import output_mesh_formats, MeshIO, supported_cell_types\n'), ((5295, 5306), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (5303, 5306), False, 'import sys\n'), ((5628, 5646), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (5644, 5646), False, 'import tempfile\n'), ((6469, 6489), 'shlex.split', 'shlex.split', (['command'], {}), '(command)\n', (6480, 6489), False, 'import shlex\n'), ((6498, 6519), 'subprocess.call', 'subprocess.call', (['args'], {}), '(args)\n', (6513, 6519), False, 'import subprocess\n'), ((6541, 6562), 'os.path.splitext', 'op.splitext', (['filename'], {}), '(filename)\n', (6552, 6562), True, 'import os.path as op\n'), ((6578, 6609), 'sfepy.discrete.fem.Mesh.from_file', 'Mesh.from_file', (["(root + '.1.vtk')"], {}), "(root + '.1.vtk')\n", (6592, 6609), False, 'from sfepy.discrete.fem import Mesh, FEDomain\n'), ((6619, 6640), 'sfepy.base.ioutils.remove_files', 'remove_files', (['dirname'], {}), '(dirname)\n', (6631, 6640), False, 'from sfepy.base.ioutils import remove_files\n'), ((6667, 6694), 'sfepy.discrete.fem.Mesh.from_file', 'Mesh.from_file', (['filename_in'], {}), '(filename_in)\n', (6681, 6694), False, 'from sfepy.discrete.fem import Mesh, FEDomain\n'), ((6810, 6842), 'sfepy.discrete.fem.Mesh.from_data', 'Mesh.from_data', (['mesh.name', '*data'], {}), '(mesh.name, *data)\n', (6824, 6842), False, 'from sfepy.discrete.fem import Mesh, FEDomain\n'), ((7385, 7410), 'sfepy.discrete.fem.FEDomain', 'FEDomain', (['mesh.name', 'mesh'], {}), '(mesh.name, mesh)\n', (7393, 7410), False, 'from sfepy.discrete.fem import Mesh, FEDomain\n'), ((7419, 7510), 'sfepy.base.base.output', 'output', (["('initial mesh: %d nodes %d elements' % (domain.shape.n_nod, domain.shape.n_el)\n )"], {}), "('initial mesh: %d nodes %d elements' % (domain.shape.n_nod, domain.\n shape.n_el))\n", (7425, 7510), False, 'from sfepy.base.base import nm, output\n'), ((7540, 7561), 'six.moves.range', 'range', (['options.refine'], {}), '(options.refine)\n', (7545, 7561), False, 'from six.moves import range\n'), ((8787, 8887), 'sfepy.discrete.fem.Mesh.from_data', 'Mesh.from_data', (["(mesh.name + '_merged')", 'coor', 'ngroups', '[conns]', '[mesh.cmesh.cell_groups]', '[desc]'], {}), "(mesh.name + '_merged', coor, ngroups, [conns], [mesh.cmesh.\n cell_groups], [desc])\n", (8801, 8887), False, 'from sfepy.discrete.fem import Mesh, FEDomain\n'), ((9151, 9169), 'sfepy.base.base.nm.unique', 'nm.unique', (['cgroups'], {}), '(cgroups)\n', (9160, 9169), False, 'from sfepy.base.base import nm, output\n'), ((3033, 3076), 'sfepy.base.base.nm.array', 'nm.array', (['option'], {'dtype': 'nm.float64', 'ndmin': '(1)'}), '(option, dtype=nm.float64, ndmin=1)\n', (3041, 3076), False, 'from sfepy.base.base import nm, output\n'), ((5742, 5769), 'sfepy.discrete.fem.Mesh.from_file', 'Mesh.from_file', (['filename_in'], {}), '(filename_in)\n', (5756, 5769), False, 'from sfepy.discrete.fem import Mesh, FEDomain\n'), ((5791, 5816), 'sfepy.discrete.fem.FEDomain', 'FEDomain', (['mesh.name', 'mesh'], {}), '(mesh.name, mesh)\n', (5799, 5816), False, 'from sfepy.discrete.fem import Mesh, FEDomain\n'), ((5965, 6027), 'sfepy.discrete.fem.Mesh.from_region', 'Mesh.from_region', (['region', 'mesh'], {'localize': '(True)', 'is_surface': '(True)'}), '(region, mesh, localize=True, is_surface=True)\n', (5981, 6027), False, 'from sfepy.discrete.fem import Mesh, FEDomain\n'), ((6093, 6122), 'os.path.join', 'op.join', (['dirname', '"""surf.mesh"""'], {}), "(dirname, 'surf.mesh')\n", (6100, 6122), True, 'import os.path as op\n'), ((6226, 6259), 'shutil.copy', 'shutil.copy', (['filename_in', 'dirname'], {}), '(filename_in, dirname)\n', (6237, 6259), False, 'import shutil\n'), ((7575, 7602), 'sfepy.base.base.output', 'output', (["('refine %d...' % ii)"], {}), "('refine %d...' % ii)\n", (7581, 7602), False, 'from sfepy.base.base import nm, output\n'), ((7652, 7728), 'sfepy.base.base.output', 'output', (["('... %d nodes %d elements' % (domain.shape.n_nod, domain.shape.n_el))"], {}), "('... %d nodes %d elements' % (domain.shape.n_nod, domain.shape.n_el))\n", (7658, 7728), False, 'from sfepy.base.base import nm, output\n'), ((8061, 8103), 'sfepy.base.base.output', 'output', (["('initial mesh: %d elements' % nelo)"], {}), "('initial mesh: %d elements' % nelo)\n", (8067, 8103), False, 'from sfepy.base.base import nm, output\n'), ((8129, 8145), 'sfepy.mesh.mesh_tools.elems_q2t', 'elems_q2t', (['conns'], {}), '(conns)\n', (8138, 8145), False, 'from sfepy.mesh.mesh_tools import elems_q2t\n'), ((8216, 8253), 'sfepy.base.base.nm.repeat', 'nm.repeat', (['mesh.cmesh.cell_groups', 'nn'], {}), '(mesh.cmesh.cell_groups, nn)\n', (8225, 8253), False, 'from sfepy.base.base import nm, output\n'), ((8267, 8319), 'sfepy.base.base.output', 'output', (["('new mesh: %d elements' % new_conns.shape[0])"], {}), "('new mesh: %d elements' % new_conns.shape[0])\n", (8273, 8319), False, 'from sfepy.base.base import nm, output\n'), ((8339, 8446), 'sfepy.discrete.fem.Mesh.from_data', 'Mesh.from_data', (['mesh.name', 'mesh.coors', 'mesh.cmesh.vertex_groups', '[new_conns]', '[new_cgroups]', '[new_desc]'], {}), '(mesh.name, mesh.coors, mesh.cmesh.vertex_groups, [new_conns],\n [new_cgroups], [new_desc])\n', (8353, 8446), False, 'from sfepy.discrete.fem import Mesh, FEDomain\n'), ((9272, 9381), 'sfepy.discrete.fem.Mesh.from_data', 'Mesh.from_data', (["(mesh.name + '_matid_%d' % mat_id)", 'coors', 'ngroups', '[conns[idxs]]', '[cgroups[idxs]]', '[desc]'], {}), "(mesh.name + '_matid_%d' % mat_id, coors, ngroups, [conns[\n idxs]], [cgroups[idxs]], [desc])\n", (9286, 9381), False, 'from sfepy.discrete.fem import Mesh, FEDomain\n'), ((9474, 9499), 'os.path.splitext', 'op.splitext', (['filename_out'], {}), '(filename_out)\n', (9485, 9499), True, 'import os.path as op\n'), ((9585, 9655), 'sfepy.discrete.fem.meshio.MeshIO.for_format', 'MeshIO.for_format', (['ifilename_out'], {'format': 'options.format', 'writable': '(True)'}), '(ifilename_out, format=options.format, writable=True)\n', (9602, 9655), False, 'from sfepy.discrete.fem.meshio import output_mesh_formats, MeshIO, supported_cell_types\n'), ((9703, 9742), 'sfepy.base.base.output', 'output', (["('writing %s...' % ifilename_out)"], {}), "('writing %s...' % ifilename_out)\n", (9709, 9742), False, 'from sfepy.base.base import nm, output\n'), ((9801, 9818), 'sfepy.base.base.output', 'output', (['"""...done"""'], {}), "('...done')\n", (9807, 9818), False, 'from sfepy.base.base import nm, output\n'), ((3105, 3144), 'sfepy.base.base.output', 'output', (["('bad %s! (%s)' % (name, option))"], {}), "('bad %s! (%s)' % (name, option))\n", (3111, 3144), False, 'from sfepy.base.base import nm, output\n'), ((3189, 3200), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3197, 3200), False, 'import sys\n'), ((6300, 6324), 'os.path.basename', 'op.basename', (['filename_in'], {}), '(filename_in)\n', (6311, 6324), True, 'import os.path as op\n'), ((6915, 6949), 'sfepy.base.base.nm.eye', 'nm.eye', (['mesh.dim'], {'dtype': 'nm.float64'}), '(mesh.dim, dtype=nm.float64)\n', (6921, 6949), False, 'from sfepy.base.base import nm, output\n'), ((7012, 7026), 'sfepy.base.base.nm.diag', 'nm.diag', (['scale'], {}), '(scale)\n', (7019, 7026), False, 'from sfepy.base.base import nm, output\n'), ((7255, 7289), 'sfepy.base.base.nm.eye', 'nm.eye', (['mesh.dim'], {'dtype': 'nm.float64'}), '(mesh.dim, dtype=nm.float64)\n', (7261, 7289), False, 'from sfepy.base.base import nm, output\n'), ((9221, 9248), 'sfepy.base.base.nm.where', 'nm.where', (['(cgroups == mat_id)'], {}), '(cgroups == mat_id)\n', (9229, 9248), False, 'from sfepy.base.base import nm, output\n')]
|
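The `-s` handling above accepts either a single factor or one factor per axis; a small NumPy sketch of the transform it builds (values are examples):
# Reconstruction of the scale logic: one factor -> isotropic eye*scale,
# dim factors -> diagonal per-axis scaling.
import numpy as nm

def make_scale_tr(scale, dim):
    if len(scale) == 1:
        return nm.eye(dim, dtype=nm.float64) * scale
    if len(scale) == dim:
        return nm.diag(scale)
    raise ValueError('bad scale! (%s)' % scale)

print(make_scale_tr(nm.array([2.5]), 3))            # 2.5 * identity
print(make_scale_tr(nm.array([0.5, 2.0, 1.0]), 3))  # per-axis diagonal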
from sqlmodel import SQLModel, create_engine
from sqlalchemy.orm import sessionmaker
from opencensus.ext.azure.log_exporter import AzureLogHandler
from sfm.logger import create_logger
from sfm.config import get_settings
import psycopg2
# def generate_db_string(ENV: str, DBHOST: str, DBNAME: str, DBUSER: str, DBPASS: str):
# """Take in env variables and generate correct db string."""
# # if ENV == "test":
# # return "sqlite://" # in-memory database for unit tests
# if ENV == "local":
# return "postgres+asyncpg://postgres:postgres@db:5432/sfm" # local sqlite for local development
# if ENV == "development" or "production":
# # need all four parameters available
# if "unset" in [DBNAME, DBPASS]:
# raise ValueError(
# "Missing database parameter in the environment. Please specify DBHOST, DBNAME, DBUSER, and DBPASS"
# )
# conn = "host={0} user={1} dbname={2} password={3} sslmode={4}".format(
# DBHOST, DBUSER, DBNAME, DBPASS, "require"
# )
# conn = f"postgresql+asyncpg://{DBUSER}:{DBPASS}@{DBHOST}/{DBNAME}"
# # return conn
# return conn
app_settings = get_settings()
# CONN_STR = generate_db_string(
# app_settings.ENV,
# app_settings.DBHOST,
# app_settings.DBNAME,
# app_settings.DBUSER,
# app_settings.DBPASS,
# )
# check_same_thread = false only works in sqlite, not postgres or others
# if "sqlite" in CONN_STR:
# # print("Using a sqlite database")
# connect_args = {"check_same_thread": False}
# engine = create_engine(CONN_STR, connect_args=connect_args)
# else:
logger = create_logger(__name__)
engine = create_engine(app_settings.DATABASE_URL, echo=False)
# async def init_db():
# async with engine.begin() as conn:
# # await conn.run_sync(SQLModel.metadata.drop_all)
# await conn.run_sync(SQLModel.metadata.create_all)
# logger.info("Database tables have been created")
# async def get_session() -> AsyncSession:
# async_session = sessionmaker(
# engine, class_=AsyncSession, expire_on_commit=False
# )
# async with async_session() as session:
# yield session
# def create_db_and_tables():
# SQLModel.metadata.create_all(engine)
|
[
"sqlmodel.create_engine"
] |
[((1212, 1226), 'sfm.config.get_settings', 'get_settings', ([], {}), '()\n', (1224, 1226), False, 'from sfm.config import get_settings\n'), ((1671, 1694), 'sfm.logger.create_logger', 'create_logger', (['__name__'], {}), '(__name__)\n', (1684, 1694), False, 'from sfm.logger import create_logger\n'), ((1705, 1757), 'sqlmodel.create_engine', 'create_engine', (['app_settings.DATABASE_URL'], {'echo': '(False)'}), '(app_settings.DATABASE_URL, echo=False)\n', (1718, 1757), False, 'from sqlmodel import SQLModel, create_engine\n')]
|
from sfepy.terms.terms import *
from sfepy.linalg import dot_sequences
class NonPenetrationTerm(Term):
r"""
:Description:
Non-penetration condition in the weak sense.
:Definition:
.. math::
\int_{\Gamma} \lambda \ul{n} \cdot \ul{v} \mbox{ , }
\int_{\Gamma} \hat\lambda \ul{n} \cdot \ul{u}
:Arguments 1:
virtual : :math:`\ul{v}`,
state : :math:`\lambda`
:Arguments 2:
state : :math:`\ul{u}`,
virtual : :math:`\hat\lambda`
"""
name = 'dw_non_penetration'
arg_types = (('virtual', 'state'),
('state', 'virtual'))
modes = ('grad', 'div')
integration = 'surface'
use_caches = {'state_in_surface_qp' : [['state']]}
def __call__(self, diff_var=None, **kwargs):
if self.mode == 'grad':
virtual, state = self.get_args(**kwargs)
ap, sg = self.get_approximation(virtual)
cache = self.get_cache('state_in_surface_qp', 0)
else:
state, virtual = self.get_args(**kwargs)
ap, sg = self.get_approximation(state)
cache = self.get_cache('state_in_surface_qp', 0)
n_fa, n_qp, dim, n_fp = ap.get_s_data_shape(self.integral,
self.region.name)
rdim, cdim = virtual.n_components, state.n_components
if diff_var is None:
shape = (n_fa, 1, rdim * n_fp, 1)
elif diff_var == self.get_arg_name('state'):
shape = (n_fa, 1, rdim * n_fp, cdim * n_fp)
else:
raise StopIteration
sd = ap.surface_data[self.region.name]
# ap corresponds to \ul{u} field.
bf = ap.get_base(sd.face_type, 0, integral=self.integral)
ebf = nm.zeros((bf.shape[0], dim, n_fp * dim),
dtype=nm.float64)
for ir in xrange(dim):
ebf[:, ir, ir*n_fp:(ir+1)*n_fp] = bf[:,0,:]
normals = sg.variable(0)
out = nm.zeros(shape, dtype=nm.float64)
lchunk = nm.arange(n_fa, dtype=nm.int32)
if diff_var is None:
vec_qp = cache('state', self, 0,
state=state, get_vector=self.get_vector)
if self.mode == 'grad':
ebf_t = nm.tile(ebf.transpose((0, 2, 1)), (n_fa, 1, 1, 1))
nl = normals * vec_qp
eftnl = dot_sequences(ebf_t, nl, use_rows=True)
sg.integrate_chunk(out, eftnl, lchunk, 1)
else:
bf_t = nm.tile(bf.transpose((0, 2, 1)), (n_fa, 1, 1, 1))
ntu = (normals * vec_qp).sum(axis=-2)[...,None]
ftntu = (bf_t * ntu)
sg.integrate_chunk(out, ftntu, lchunk, 1)
else:
ebf_t = nm.tile(ebf.transpose((0, 2, 1)), (n_fa, 1, 1, 1))
bf_ = nm.tile(bf, (n_fa, 1, 1, 1))
eftn = dot_sequences(ebf_t, normals, use_rows=True)
eftnf = eftn * bf_
if self.mode == 'grad':
sg.integrate_chunk(out, eftnf, lchunk, 1)
else:
ftntef = nm.ascontiguousarray(eftnf.transpose((0, 1, 3, 2)))
sg.integrate_chunk(out, ftntef, lchunk, 1)
yield out, lchunk, 0
|
[
"sfepy.linalg.dot_sequences"
] |
[((2897, 2941), 'sfepy.linalg.dot_sequences', 'dot_sequences', (['ebf_t', 'normals'], {'use_rows': '(True)'}), '(ebf_t, normals, use_rows=True)\n', (2910, 2941), False, 'from sfepy.linalg import dot_sequences\n'), ((2393, 2432), 'sfepy.linalg.dot_sequences', 'dot_sequences', (['ebf_t', 'nl'], {'use_rows': '(True)'}), '(ebf_t, nl, use_rows=True)\n', (2406, 2432), False, 'from sfepy.linalg import dot_sequences\n')]
|
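`dot_sequences` above performs per-cell, per-quadrature-point matrix products; a rough NumPy analogue of the `ebf_t`-times-`normals` contraction (shapes below are illustrative, not sfepy's exact conventions):
# Batched matrix product: for every (cell, qp) pair, multiply the transposed
# basis block by the unit normal, as in eftn = dot_sequences(ebf_t, normals).
import numpy as np

n_fa, n_qp, n_dof, dim = 5, 4, 8, 3
ebf_t = np.random.rand(n_fa, n_qp, n_dof, dim)
normals = np.random.rand(n_fa, n_qp, dim, 1)
eftn = ebf_t @ normals  # matmul broadcasts over the leading (cell, qp) axes
print(eftn.shape)        # (5, 4, 8, 1)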
from typing import List
from uuid import UUID
import inject
from sqlmodel import Session, select
from src.core.events import EventDescription
from src.core.helpers.exceptions import DatabaseError, NotAuthorizedError, NotFoundError
from src.core.models import Client, Context, CreateClient, QueryClient, UpdateClient
from src.core.services import Streamer
@inject.params(streamer=Streamer)
def create(session: Session, schema: CreateClient, context: Context, streamer: Streamer) -> Client:
if session.exec(select(Client).where(Client.email == schema.email)).first():
raise DatabaseError("Já existe um cliente cadastrado com o email: %s" % schema.email)
if session.exec(select(Client).where(Client.phone == schema.phone)).first():
raise DatabaseError("Já existe um cliente cadastrado com o telefone: %s" % schema.phone)
client = Client(**schema.dict(), owner_id=context.user_id)
session.add(client)
session.commit()
streamer.send_event(EventDescription.CREATE_USER, context=context, client=client.dict())
return client
def get_all(session: Session, query_schema: QueryClient, context: Context) -> List[Client]:
args = []
if not context.user_is_super_user:
args.append(Client.owner_id == context.user_id)
return session.exec(select(Client).where(*args)).all()
def get_by_id(session: Session, client_id: UUID, context: Context) -> Client:
client = session.exec(select(Client).where(Client.id == client_id)).first()
if not client:
raise NotFoundError(f"Não foi possível localizar o Client com ID: {client_id}")
if not context.user_is_super_user and client.owner_id != context.user_id:
raise NotAuthorizedError(f"Você não possui permissão para consultar os dados do cliente com ID {client_id}!")
return client
@inject.params(streamer=Streamer)
def delete(session: Session, client_id: UUID, context: Context, streamer: Streamer) -> Client:
client = session.exec(select(Client).where(Client.id == client_id)).first()
if not client:
raise NotFoundError(f"Não foi possível localizar o Cliente com ID: {client_id}")
if not context.user_is_super_user and client.owner_id != context.user_id:
raise NotAuthorizedError(f"Você não possui permissão para excluir o Cliente com ID: {client_id}")
session.delete(client)
session.commit()
streamer.send_event(description=EventDescription.DELETE_CLIENT, context=context, client=client.dict())
return client
@inject.params(streamer=Streamer)
def update(session: Session, data: UpdateClient, context: Context, streamer: Streamer) -> Client:
client = session.exec(select(Client).where(Client.id == data.id)).first()
if not client:
raise NotFoundError(f"Não foi possível localizar o Client com ID: {data.id}")
if not context.user_is_super_user and client.owner_id != context.user_id:
raise NotAuthorizedError(f"Você não possui permissão para excluir o Cliente com ID: {data.id}")
columns = client.__table__.columns.keys()
for key, value in data:
if key not in columns:
continue
setattr(client, key, value)
session.add(client)
session.commit()
streamer.send_event(
description=EventDescription.UPDATE_CLIENT,
context=context,
data={"client_data": client.dict(), "update_schema": data.dict()},
)
return client
|
[
"sqlmodel.select"
] |
[((360, 392), 'inject.params', 'inject.params', ([], {'streamer': 'Streamer'}), '(streamer=Streamer)\n', (373, 392), False, 'import inject\n'), ((1819, 1851), 'inject.params', 'inject.params', ([], {'streamer': 'Streamer'}), '(streamer=Streamer)\n', (1832, 1851), False, 'import inject\n'), ((2500, 2532), 'inject.params', 'inject.params', ([], {'streamer': 'Streamer'}), '(streamer=Streamer)\n', (2513, 2532), False, 'import inject\n'), ((588, 667), 'src.core.helpers.exceptions.DatabaseError', 'DatabaseError', (["('There is already a client registered with the email: %s' % schema.email)"], {}), "('There is already a client registered with the email: %s' % schema.email)\n", (601, 667), False, 'from src.core.helpers.exceptions import DatabaseError, NotAuthorizedError, NotFoundError\n'), ((764, 851), 'src.core.helpers.exceptions.DatabaseError', 'DatabaseError', (["('There is already a client registered with the phone: %s' % schema.phone)"], {}), "('There is already a client registered with the phone: %s' % schema.phone)\n", (777, 851), False, 'from src.core.helpers.exceptions import DatabaseError, NotAuthorizedError, NotFoundError\n'), ((1526, 1599), 'src.core.helpers.exceptions.NotFoundError', 'NotFoundError', (['f"""Could not find the Client with ID: {client_id}"""'], {}), "(f'Could not find the Client with ID: {client_id}')\n", (1539, 1599), False, 'from src.core.helpers.exceptions import DatabaseError, NotAuthorizedError, NotFoundError\n'), ((1693, 1806), 'src.core.helpers.exceptions.NotAuthorizedError', 'NotAuthorizedError', (['f"""You do not have permission to view the data of the client with ID {client_id}!"""'], {}), "(f'You do not have permission to view the data of the client with ID {client_id}!')\n", (1711, 1806), False, 'from src.core.helpers.exceptions import DatabaseError, NotAuthorizedError, NotFoundError\n'), ((2061, 2135), 'src.core.helpers.exceptions.NotFoundError', 'NotFoundError', (['f"""Could not find the Client with ID: {client_id}"""'], {}), "(f'Could not find the Client with ID: {client_id}')\n", (2074, 2135), False, 'from src.core.helpers.exceptions import DatabaseError, NotAuthorizedError, NotFoundError\n'), ((2229, 2325), 'src.core.helpers.exceptions.NotAuthorizedError', 'NotAuthorizedError', (['f"""You do not have permission to delete the Client with ID: {client_id}"""'], {}), "(f'You do not have permission to delete the Client with ID: {client_id}')\n", (2247, 2325), False, 'from src.core.helpers.exceptions import DatabaseError, NotAuthorizedError, NotFoundError\n'), ((2743, 2814), 'src.core.helpers.exceptions.NotFoundError', 'NotFoundError', (['f"""Could not find the Client with ID: {data.id}"""'], {}), "(f'Could not find the Client with ID: {data.id}')\n", (2756, 2814), False, 'from src.core.helpers.exceptions import DatabaseError, NotAuthorizedError, NotFoundError\n'), ((2908, 3002), 'src.core.helpers.exceptions.NotAuthorizedError', 'NotAuthorizedError', (['f"""You do not have permission to update the Client with ID: {data.id}"""'], {}), "(f'You do not have permission to update the Client with ID: {data.id}')\n", (2926, 3002), False, 'from src.core.helpers.exceptions import DatabaseError, NotAuthorizedError, NotFoundError\n'), ((513, 527), 'sqlmodel.select', 'select', (['Client'], {}), '(Client)\n', (519, 527), False, 'from sqlmodel import Session, select\n'), ((689, 703), 'sqlmodel.select', 'select', (['Client'], {}), '(Client)\n', (695, 703), False, 'from sqlmodel import Session, select\n'), ((1297, 1311), 'sqlmodel.select', 'select', (['Client'], {}), '(Client)\n', (1303, 1311), False, 'from sqlmodel import Session, select\n'), ((1438, 1452), 'sqlmodel.select', 'select', (['Client'], {}), '(Client)\n', (1444, 1452), False, 'from sqlmodel import Session, select\n'), ((1973, 1987), 'sqlmodel.select', 'select', (['Client'], {}), '(Client)\n', (1979, 1987), False, 'from sqlmodel import Session, select\n'), ((2657, 2671), 'sqlmodel.select', 'select', (['Client'], {}), '(Client)\n', (2663, 2671), False, 'from sqlmodel import Session, select\n')]
|
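The `@inject.params(streamer=Streamer)` decorators above resolve the streamer from the configured binder whenever the caller omits the argument; a self-contained sketch of that python-inject pattern (the `Streamer` stub below is illustrative, not the real src.core.services.Streamer):
# Stand-alone demo of inject.params: the argument is filled from the
# binder when not supplied explicitly.
import inject

class Streamer:  # stub standing in for the real service
    def send_event(self, description, **kwargs):
        print("event:", description, kwargs)

inject.configure(lambda binder: binder.bind(Streamer, Streamer()))

@inject.params(streamer=Streamer)
def create(name: str, streamer: Streamer = None):
    streamer.send_event("CREATE_CLIENT", client=name)

create("Ana")  # streamer injected automatically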
from sqlmodel import Field, SQLModel
from pydantic import EmailStr
from fastapi_utils.guid_type import GUID, GUID_DEFAULT_SQLITE
import uuid
class User(SQLModel, table=True):
id: str = Field(default=str(GUID_DEFAULT_SQLITE()))
email: EmailStr = Field(primary_key=True)
firstName: str
lastName: str
disabled: bool = True
class UserResult:
id: str
class UserInDB(User):
hashed_password: str
|
[
"sqlmodel.Field"
] |
[((254, 277), 'sqlmodel.Field', 'Field', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (259, 277), False, 'from sqlmodel import Field, SQLModel\n'), ((208, 229), 'fastapi_utils.guid_type.GUID_DEFAULT_SQLITE', 'GUID_DEFAULT_SQLITE', ([], {}), '()\n', (227, 229), False, 'from fastapi_utils.guid_type import GUID, GUID_DEFAULT_SQLITE\n')]
|
import importlib
import pytest
from fastapi.testclient import TestClient
from sqlalchemy import inspect
from sqlalchemy.engine.reflection import Inspector
from sqlmodel import Session, create_engine
from docs_src.tutorial.fastapi.app_testing.tutorial001 import main as app_mod
from docs_src.tutorial.fastapi.app_testing.tutorial001 import test_main as test_mod
from docs_src.tutorial.fastapi.app_testing.tutorial001.test_main import (
client_fixture,
session_fixture,
)
assert session_fixture, "This keeps the session fixture used below"
assert client_fixture, "This keeps the client fixture used below"
@pytest.fixture(name="prepare", autouse=True)
def prepare_fixture(clear_sqlmodel):
# Trigger side effects of registering table models in SQLModel
# This has to be called after clear_sqlmodel, but before the session_fixture
# That's why the extra custom fixture here
importlib.reload(app_mod)
importlib.reload(test_mod)
def test_create_hero(session: Session, client: TestClient):
test_mod.test_create_hero(client)
def test_create_hero_incomplete(session: Session, client: TestClient):
test_mod.test_create_hero_incomplete(client)
def test_create_hero_invalid(session: Session, client: TestClient):
test_mod.test_create_hero_invalid(client)
def test_read_heroes(session: Session, client: TestClient):
test_mod.test_read_heroes(session=session, client=client)
def test_read_hero(session: Session, client: TestClient):
test_mod.test_read_hero(session=session, client=client)
def test_update_hero(session: Session, client: TestClient):
test_mod.test_update_hero(session=session, client=client)
def test_delete_hero(session: Session, client: TestClient):
test_mod.test_delete_hero(session=session, client=client)
def test_startup():
app_mod.engine = create_engine("sqlite://")
app_mod.on_startup()
insp: Inspector = inspect(app_mod.engine)
assert insp.has_table(str(app_mod.Hero.__tablename__))
def test_get_session():
app_mod.engine = create_engine("sqlite://")
for session in app_mod.get_session():
assert isinstance(session, Session)
assert session.bind == app_mod.engine
def test_read_hero_not_found(client: TestClient):
response = client.get("/heroes/9000")
assert response.status_code == 404
def test_update_hero_not_found(client: TestClient):
response = client.patch("/heroes/9000", json={"name": "Very-Rusty-Man"})
assert response.status_code == 404
def test_delete_hero_not_found(client: TestClient):
response = client.delete("/heroes/9000")
assert response.status_code == 404
|
[
"sqlmodel.create_engine"
] |
[((618, 662), 'pytest.fixture', 'pytest.fixture', ([], {'name': '"""prepare"""', 'autouse': '(True)'}), "(name='prepare', autouse=True)\n", (632, 662), False, 'import pytest\n'), ((899, 924), 'importlib.reload', 'importlib.reload', (['app_mod'], {}), '(app_mod)\n', (915, 924), False, 'import importlib\n'), ((929, 955), 'importlib.reload', 'importlib.reload', (['test_mod'], {}), '(test_mod)\n', (945, 955), False, 'import importlib\n'), ((1022, 1055), 'docs_src.tutorial.fastapi.app_testing.tutorial001.test_main.test_create_hero', 'test_mod.test_create_hero', (['client'], {}), '(client)\n', (1047, 1055), True, 'from docs_src.tutorial.fastapi.app_testing.tutorial001 import test_main as test_mod\n'), ((1133, 1177), 'docs_src.tutorial.fastapi.app_testing.tutorial001.test_main.test_create_hero_incomplete', 'test_mod.test_create_hero_incomplete', (['client'], {}), '(client)\n', (1169, 1177), True, 'from docs_src.tutorial.fastapi.app_testing.tutorial001 import test_main as test_mod\n'), ((1252, 1293), 'docs_src.tutorial.fastapi.app_testing.tutorial001.test_main.test_create_hero_invalid', 'test_mod.test_create_hero_invalid', (['client'], {}), '(client)\n', (1285, 1293), True, 'from docs_src.tutorial.fastapi.app_testing.tutorial001 import test_main as test_mod\n'), ((1360, 1417), 'docs_src.tutorial.fastapi.app_testing.tutorial001.test_main.test_read_heroes', 'test_mod.test_read_heroes', ([], {'session': 'session', 'client': 'client'}), '(session=session, client=client)\n', (1385, 1417), True, 'from docs_src.tutorial.fastapi.app_testing.tutorial001 import test_main as test_mod\n'), ((1482, 1537), 'docs_src.tutorial.fastapi.app_testing.tutorial001.test_main.test_read_hero', 'test_mod.test_read_hero', ([], {'session': 'session', 'client': 'client'}), '(session=session, client=client)\n', (1505, 1537), True, 'from docs_src.tutorial.fastapi.app_testing.tutorial001 import test_main as test_mod\n'), ((1604, 1661), 'docs_src.tutorial.fastapi.app_testing.tutorial001.test_main.test_update_hero', 'test_mod.test_update_hero', ([], {'session': 'session', 'client': 'client'}), '(session=session, client=client)\n', (1629, 1661), True, 'from docs_src.tutorial.fastapi.app_testing.tutorial001 import test_main as test_mod\n'), ((1728, 1785), 'docs_src.tutorial.fastapi.app_testing.tutorial001.test_main.test_delete_hero', 'test_mod.test_delete_hero', ([], {'session': 'session', 'client': 'client'}), '(session=session, client=client)\n', (1753, 1785), True, 'from docs_src.tutorial.fastapi.app_testing.tutorial001 import test_main as test_mod\n'), ((1829, 1855), 'sqlmodel.create_engine', 'create_engine', (['"""sqlite://"""'], {}), "('sqlite://')\n", (1842, 1855), False, 'from sqlmodel import Session, create_engine\n'), ((1860, 1880), 'docs_src.tutorial.fastapi.app_testing.tutorial001.main.on_startup', 'app_mod.on_startup', ([], {}), '()\n', (1878, 1880), True, 'from docs_src.tutorial.fastapi.app_testing.tutorial001 import main as app_mod\n'), ((1903, 1926), 'sqlalchemy.inspect', 'inspect', (['app_mod.engine'], {}), '(app_mod.engine)\n', (1910, 1926), False, 'from sqlalchemy import inspect\n'), ((2033, 2059), 'sqlmodel.create_engine', 'create_engine', (['"""sqlite://"""'], {}), "('sqlite://')\n", (2046, 2059), False, 'from sqlmodel import Session, create_engine\n'), ((2079, 2100), 'docs_src.tutorial.fastapi.app_testing.tutorial001.main.get_session', 'app_mod.get_session', ([], {}), '()\n', (2098, 2100), True, 'from docs_src.tutorial.fastapi.app_testing.tutorial001 import main as app_mod\n')]
|
from pydantic import BaseModel
from sqlmodel import Field, SQLModel, Relationship
from typing import Optional, List
class UserBase(SQLModel):
username: str
desc: str
class User(UserBase, table=True):
id: Optional[int] = Field(index=True, default=None, primary_key=True)
password: str
tasks: List["Task"] = Relationship(back_populates="owner")
class UserQuery(SQLModel):
id: int
class UserCreate(UserBase):
pass
class UserRead(UserBase, UserQuery):
pass
class TaskBase(SQLModel):
title: str
desc: str
owner_id: Optional[int] = Field(default=None, foreign_key="user.id")
class Task(TaskBase, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
owner: Optional[User] = Relationship(back_populates="tasks")
class TaskCreate(TaskBase):
pass
class TaskRead(TaskBase):
id: int
class TaskQuery(SQLModel):
id: int
owner_id: int
class StandardResponse(BaseModel):
success: str = "Success"
message: str = "Task completed successfully"
code: int = 200
|
[
"sqlmodel.Relationship",
"sqlmodel.Field"
] |
[((235, 284), 'sqlmodel.Field', 'Field', ([], {'index': '(True)', 'default': 'None', 'primary_key': '(True)'}), '(index=True, default=None, primary_key=True)\n', (240, 284), False, 'from sqlmodel import Field, SQLModel, Relationship\n'), ((329, 365), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""owner"""'}), "(back_populates='owner')\n", (341, 365), False, 'from sqlmodel import Field, SQLModel, Relationship\n'), ((578, 620), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'foreign_key': '"""user.id"""'}), "(default=None, foreign_key='user.id')\n", (583, 620), False, 'from sqlmodel import Field, SQLModel, Relationship\n'), ((680, 717), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (685, 717), False, 'from sqlmodel import Field, SQLModel, Relationship\n'), ((746, 782), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""tasks"""'}), "(back_populates='tasks')\n", (758, 782), False, 'from sqlmodel import Field, SQLModel, Relationship\n')]
|
# -*- coding: utf-8 -*-
import megengine.module as M
import megengine.functional as F
class LeNet32x32(M.Module):
def __init__(self):
super().__init__()
# single channel image, two 5x5 Conv + ReLU + Pool
# Conv2d(1, 6, kernel_size=(5, 5))
self.conv1 = M.Conv2d(1, 6, 5)
self.relu1 = M.ReLU()
# MaxPool2d(kernel_size=2, stride=2, padding=0)
self.pool1 = M.MaxPool2d(2, 2)
self.conv2 = M.Conv2d(6, 16, 5)
self.relu2 = M.ReLU()
self.pool2 = M.MaxPool2d(2, 2)
# two FC + ReLU
self.fc1 = M.Linear(16 * 5 * 5, 120)
self.relu3 = M.ReLU()
self.fc2 = M.Linear(120, 84)
self.relu4 = M.ReLU()
# classifier
self.classifier = M.Linear(84, 10)
def forward(self, x):
x = self.pool1(self.relu1(self.conv1(x)))
x = self.pool2(self.relu2(self.conv2(x)))
        # F.flatten reshapes the tensor x with (N, C, H, W) into shape of (N, C*H*W)
# i.e., x = x.reshape(x.shape[0], -1)
# x.shape: (256, 16, 5, 5)
x = F.flatten(x, 1)
# x.shape: (256, 400)
x = self.relu3(self.fc1(x))
x = self.relu4(self.fc2(x))
x = self.classifier(x)
return x
class LeNet224x224(M.Module):
def __init__(self):
super().__init__()
# single channel image, two 5x5 Conv + ReLU + Pool
self.conv1 = M.Conv2d(1, 6, 5)
self.relu1 = M.ReLU()
self.pool1 = M.MaxPool2d(2, 2)
self.conv2 = M.Conv2d(6, 16, 5)
self.relu2 = M.ReLU()
self.pool2 = M.MaxPool2d(2, 2)
# two FC + ReLU
self.fc1 = M.Linear(16 * 53 * 53, 120)
self.relu3 = M.ReLU()
self.fc2 = M.Linear(120, 84)
self.relu4 = M.ReLU()
# classifier
self.classifier = M.Linear(84, 10)
def forward(self, x):
x = self.pool1(self.relu1(self.conv1(x)))
x = self.pool2(self.relu2(self.conv2(x)))
        # F.flatten(x, 1) reshapes the tensor x with (N, C, H, W) along
# the 1st dimension into shape of (N, C*H*W),
# i.e., x = x.reshape(x.shape[0], -1)
# x.shape: (256, 16, 53, 53)
x = F.flatten(x, 1)
# x.shape: (256, 16*53*53)
x = self.relu3(self.fc1(x))
x = self.relu4(self.fc2(x))
x = self.classifier(x)
return x
|
[
"megengine.module.MaxPool2d",
"megengine.module.Linear",
"megengine.module.ReLU",
"megengine.module.Conv2d",
"megengine.functional.flatten"
] |
[((289, 306), 'megengine.module.Conv2d', 'M.Conv2d', (['(1)', '(6)', '(5)'], {}), '(1, 6, 5)\n', (297, 306), True, 'import megengine.module as M\n'), ((328, 336), 'megengine.module.ReLU', 'M.ReLU', ([], {}), '()\n', (334, 336), True, 'import megengine.module as M\n'), ((414, 431), 'megengine.module.MaxPool2d', 'M.MaxPool2d', (['(2)', '(2)'], {}), '(2, 2)\n', (425, 431), True, 'import megengine.module as M\n'), ((453, 471), 'megengine.module.Conv2d', 'M.Conv2d', (['(6)', '(16)', '(5)'], {}), '(6, 16, 5)\n', (461, 471), True, 'import megengine.module as M\n'), ((493, 501), 'megengine.module.ReLU', 'M.ReLU', ([], {}), '()\n', (499, 501), True, 'import megengine.module as M\n'), ((523, 540), 'megengine.module.MaxPool2d', 'M.MaxPool2d', (['(2)', '(2)'], {}), '(2, 2)\n', (534, 540), True, 'import megengine.module as M\n'), ((584, 609), 'megengine.module.Linear', 'M.Linear', (['(16 * 5 * 5)', '(120)'], {}), '(16 * 5 * 5, 120)\n', (592, 609), True, 'import megengine.module as M\n'), ((631, 639), 'megengine.module.ReLU', 'M.ReLU', ([], {}), '()\n', (637, 639), True, 'import megengine.module as M\n'), ((659, 676), 'megengine.module.Linear', 'M.Linear', (['(120)', '(84)'], {}), '(120, 84)\n', (667, 676), True, 'import megengine.module as M\n'), ((698, 706), 'megengine.module.ReLU', 'M.ReLU', ([], {}), '()\n', (704, 706), True, 'import megengine.module as M\n'), ((754, 770), 'megengine.module.Linear', 'M.Linear', (['(84)', '(10)'], {}), '(84, 10)\n', (762, 770), True, 'import megengine.module as M\n'), ((1075, 1090), 'megengine.functional.flatten', 'F.flatten', (['x', '(1)'], {}), '(x, 1)\n', (1084, 1090), True, 'import megengine.functional as F\n'), ((1403, 1420), 'megengine.module.Conv2d', 'M.Conv2d', (['(1)', '(6)', '(5)'], {}), '(1, 6, 5)\n', (1411, 1420), True, 'import megengine.module as M\n'), ((1442, 1450), 'megengine.module.ReLU', 'M.ReLU', ([], {}), '()\n', (1448, 1450), True, 'import megengine.module as M\n'), ((1472, 1489), 'megengine.module.MaxPool2d', 'M.MaxPool2d', (['(2)', '(2)'], {}), '(2, 2)\n', (1483, 1489), True, 'import megengine.module as M\n'), ((1511, 1529), 'megengine.module.Conv2d', 'M.Conv2d', (['(6)', '(16)', '(5)'], {}), '(6, 16, 5)\n', (1519, 1529), True, 'import megengine.module as M\n'), ((1551, 1559), 'megengine.module.ReLU', 'M.ReLU', ([], {}), '()\n', (1557, 1559), True, 'import megengine.module as M\n'), ((1581, 1598), 'megengine.module.MaxPool2d', 'M.MaxPool2d', (['(2)', '(2)'], {}), '(2, 2)\n', (1592, 1598), True, 'import megengine.module as M\n'), ((1642, 1669), 'megengine.module.Linear', 'M.Linear', (['(16 * 53 * 53)', '(120)'], {}), '(16 * 53 * 53, 120)\n', (1650, 1669), True, 'import megengine.module as M\n'), ((1691, 1699), 'megengine.module.ReLU', 'M.ReLU', ([], {}), '()\n', (1697, 1699), True, 'import megengine.module as M\n'), ((1719, 1736), 'megengine.module.Linear', 'M.Linear', (['(120)', '(84)'], {}), '(120, 84)\n', (1727, 1736), True, 'import megengine.module as M\n'), ((1758, 1766), 'megengine.module.ReLU', 'M.ReLU', ([], {}), '()\n', (1764, 1766), True, 'import megengine.module as M\n'), ((1814, 1830), 'megengine.module.Linear', 'M.Linear', (['(84)', '(10)'], {}), '(84, 10)\n', (1822, 1830), True, 'import megengine.module as M\n'), ((2178, 2193), 'megengine.functional.flatten', 'F.flatten', (['x', '(1)'], {}), '(x, 1)\n', (2187, 2193), True, 'import megengine.functional as F\n')]
|
from sqlmodel import SQLModel, Field
import datetime
from typing import Optional
class Orders(SQLModel, table=True):
invoice_no: Optional[int] = Field(default=None, primary_key=True)
stock_code: str
description: str
quantity: int
invoice_date: datetime.datetime
unit_price: float
cust_id: int
|
[
"sqlmodel.Field"
] |
[((151, 188), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (156, 188), False, 'from sqlmodel import SQLModel, Field\n')]
|
import contextlib
import os
import pathlib
import hypothesis.strategies as st
import pytest
import strawberry
from hypothesis.strategies import SearchStrategy
from sqlalchemy.pool import StaticPool
from sqlmodel import Session, SQLModel, create_engine
from starlette.testclient import TestClient
from fastapi_server.database.database import get_session
from fastapi_server.main import app
from fastapi_server.routes.graphql import schema
TEST_DB_FILE_PATH = 'test.db'
TEST_DB_URL = f'sqlite:///{TEST_DB_FILE_PATH}'
TEST_DB_MEMORY_PATH = ':memory:'
TEST_DB_MEMORY_URL = f'sqlite:///{TEST_DB_MEMORY_PATH}'
class BaseTest:
method_client: TestClient = None # type: ignore
method_session: Session = None # type: ignore
example_client: TestClient = None # type: ignore
example_session: Session = None # type: ignore
def setup_method(self, _method):
        BaseTest.method_session = BaseTest.create_memory_session()
        # BaseTest.method_session = BaseTest.create_file_session()
BaseTest.method_client = TestClient(app)
BaseTest.method_client.app.dependency_overrides[get_session] = BaseTest.method_get_session
def teardown_method(self, _method):
if BaseTest.method_session is not None:
db_path = pathlib.Path(TEST_DB_FILE_PATH)
            # Remove file if it wasn't a memory database
if BaseTest.method_session.bind.url.database != TEST_DB_MEMORY_PATH and db_path.is_file():
os.remove(db_path)
BaseTest.method_session.close()
BaseTest.method_session = None
app.dependency_overrides.clear()
BaseTest.method_client = None
@classmethod
    def create_file_session(cls):
engine = create_engine(TEST_DB_URL, connect_args={'check_same_thread': False}, poolclass=StaticPool)
SQLModel.metadata.create_all(engine)
with Session(engine, autoflush=False, autocommit=False) as session:
return session
@classmethod
    def create_memory_session(cls):
engine = create_engine(TEST_DB_MEMORY_URL, connect_args={'check_same_thread': False}, poolclass=StaticPool)
SQLModel.metadata.create_all(engine)
with Session(engine, autoflush=False, autocommit=False) as session:
# Can this be "yield" instead?
return session
@classmethod
@contextlib.contextmanager
def example_session_context(cls):
"""
Used together with hypothesis: create a class-variable to be used in hypothesis. Unset once the test is over.
        Session strategy doesn't seem to work as expected, nor do setup_example and teardown_example with SQL.
"""
assert not isinstance(cls.example_session, Session)
try:
            # cls.example_session = cls.create_file_session()
            cls.example_session = cls.create_memory_session()
yield cls.example_session
finally:
if cls.example_session is not None:
db_path = pathlib.Path(TEST_DB_FILE_PATH)
                # Remove file if it wasn't a memory database
if cls.example_session.bind.url.database != TEST_DB_MEMORY_PATH and db_path.is_file():
os.remove(db_path)
cls.example_session.close()
cls.example_session = None
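    # A minimal usage sketch (hypothetical test body, not part of the original
    # module): hypothesis examples share one session through the class
    # variable, e.g.
    #
    #   with BaseTest.example_session_context() as session:
    #       ...  # run the generated example against `session`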
@classmethod
@contextlib.contextmanager
def method_client_context(cls):
""" Same reasoning as above. """
# See https://sqlmodel.tiangolo.com/tutorial/fastapi/tests/#pytest-fixtures
# https://strawberry.rocks/docs/integrations/fastapi#context_getter
# app.dependency_overrides.clear()
app.dependency_overrides[get_session] = cls.method_get_session
cls.method_client = TestClient(app)
try:
yield cls.method_client
finally:
cls.method_client = None
app.dependency_overrides.clear()
@classmethod
@contextlib.contextmanager
def example_client_context(cls):
""" Same reasoning as above. """
# See https://sqlmodel.tiangolo.com/tutorial/fastapi/tests/#pytest-fixtures
# https://strawberry.rocks/docs/integrations/fastapi#context_getter
with cls.example_session_context() as _session:
app.dependency_overrides[get_session] = cls.example_get_session
cls.example_client = TestClient(app)
try:
yield cls.example_client
finally:
cls.example_client = None
app.dependency_overrides.clear()
@classmethod
def method_get_session(cls) -> Session: # type: ignore
assert isinstance(cls.method_session, Session)
assert cls.method_session.bind.url.database in {TEST_DB_FILE_PATH, TEST_DB_MEMORY_PATH} # type: ignore
yield cls.method_session
@classmethod
def example_get_session(cls) -> Session: # type: ignore
assert isinstance(cls.example_session, Session)
assert cls.example_session.bind.url.database in {TEST_DB_FILE_PATH, TEST_DB_MEMORY_PATH} # type: ignore
yield cls.example_session
@classmethod
def example_get_client(cls) -> TestClient: # type: ignore
yield cls.example_client
@pytest.fixture(name='method_client_fixture')
def method_client_fixture(self) -> TestClient: # type: ignore
with BaseTest.method_client_context() as client:
assert isinstance(client, TestClient)
yield client
@pytest.fixture(name='example_client_fixture')
def example_client_fixture(self) -> TestClient: # type: ignore
assert isinstance(BaseTest.example_client, TestClient)
yield self.example_client
@pytest.fixture(name='method_session_fixture')
def method_session_fixture(self) -> Session: # type: ignore
assert isinstance(BaseTest.method_session, Session)
yield BaseTest.method_session
@pytest.fixture(name='example_session_fixture')
def example_session_fixture(self) -> Session: # type: ignore
assert isinstance(BaseTest.example_session, Session)
yield BaseTest.example_session
@classmethod
def get_schema(cls) -> strawberry.Schema:
return schema
@pytest.fixture(name='schema_fixture')
def schema_fixture(self):
return BaseTest.get_schema()
@classmethod
def schema_strategy(cls) -> SearchStrategy:
""" Deprecated? """
return st.builds(cls.get_schema)
|
[
"sqlmodel.create_engine",
"sqlmodel.Session",
"sqlmodel.SQLModel.metadata.create_all"
] |
[((5233, 5277), 'pytest.fixture', 'pytest.fixture', ([], {'name': '"""method_client_fixture"""'}), "(name='method_client_fixture')\n", (5247, 5277), False, 'import pytest\n'), ((5483, 5528), 'pytest.fixture', 'pytest.fixture', ([], {'name': '"""example_client_fixture"""'}), "(name='example_client_fixture')\n", (5497, 5528), False, 'import pytest\n'), ((5700, 5745), 'pytest.fixture', 'pytest.fixture', ([], {'name': '"""method_session_fixture"""'}), "(name='method_session_fixture')\n", (5714, 5745), False, 'import pytest\n'), ((5915, 5961), 'pytest.fixture', 'pytest.fixture', ([], {'name': '"""example_session_fixture"""'}), "(name='example_session_fixture')\n", (5929, 5961), False, 'import pytest\n'), ((6220, 6257), 'pytest.fixture', 'pytest.fixture', ([], {'name': '"""schema_fixture"""'}), "(name='schema_fixture')\n", (6234, 6257), False, 'import pytest\n'), ((1042, 1057), 'starlette.testclient.TestClient', 'TestClient', (['app'], {}), '(app)\n', (1052, 1057), False, 'from starlette.testclient import TestClient\n'), ((1589, 1621), 'fastapi_server.main.app.dependency_overrides.clear', 'app.dependency_overrides.clear', ([], {}), '()\n', (1619, 1621), False, 'from fastapi_server.main import app\n'), ((1730, 1825), 'sqlmodel.create_engine', 'create_engine', (['TEST_DB_URL'], {'connect_args': "{'check_same_thread': False}", 'poolclass': 'StaticPool'}), "(TEST_DB_URL, connect_args={'check_same_thread': False},\n poolclass=StaticPool)\n", (1743, 1825), False, 'from sqlmodel import Session, SQLModel, create_engine\n'), ((1830, 1866), 'sqlmodel.SQLModel.metadata.create_all', 'SQLModel.metadata.create_all', (['engine'], {}), '(engine)\n', (1858, 1866), False, 'from sqlmodel import Session, SQLModel, create_engine\n'), ((2042, 2144), 'sqlmodel.create_engine', 'create_engine', (['TEST_DB_MEMORY_URL'], {'connect_args': "{'check_same_thread': False}", 'poolclass': 'StaticPool'}), "(TEST_DB_MEMORY_URL, connect_args={'check_same_thread': False},\n poolclass=StaticPool)\n", (2055, 2144), False, 'from sqlmodel import Session, SQLModel, create_engine\n'), ((2149, 2185), 'sqlmodel.SQLModel.metadata.create_all', 'SQLModel.metadata.create_all', (['engine'], {}), '(engine)\n', (2177, 2185), False, 'from sqlmodel import Session, SQLModel, create_engine\n'), ((3751, 3766), 'starlette.testclient.TestClient', 'TestClient', (['app'], {}), '(app)\n', (3761, 3766), False, 'from starlette.testclient import TestClient\n'), ((6434, 6459), 'hypothesis.strategies.builds', 'st.builds', (['cls.get_schema'], {}), '(cls.get_schema)\n', (6443, 6459), True, 'import hypothesis.strategies as st\n'), ((1268, 1299), 'pathlib.Path', 'pathlib.Path', (['TEST_DB_FILE_PATH'], {}), '(TEST_DB_FILE_PATH)\n', (1280, 1299), False, 'import pathlib\n'), ((1880, 1930), 'sqlmodel.Session', 'Session', (['engine'], {'autoflush': '(False)', 'autocommit': '(False)'}), '(engine, autoflush=False, autocommit=False)\n', (1887, 1930), False, 'from sqlmodel import Session, SQLModel, create_engine\n'), ((2199, 2249), 'sqlmodel.Session', 'Session', (['engine'], {'autoflush': '(False)', 'autocommit': '(False)'}), '(engine, autoflush=False, autocommit=False)\n', (2206, 2249), False, 'from sqlmodel import Session, SQLModel, create_engine\n'), ((3882, 3914), 'fastapi_server.main.app.dependency_overrides.clear', 'app.dependency_overrides.clear', ([], {}), '()\n', (3912, 3914), False, 'from fastapi_server.main import app\n'), ((4367, 4382), 'starlette.testclient.TestClient', 'TestClient', (['app'], {}), '(app)\n', (4377, 4382), False, 'from starlette.testclient import TestClient\n'), ((1475, 1493), 'os.remove', 'os.remove', (['db_path'], {}), '(db_path)\n', (1484, 1493), False, 'import os\n'), ((3002, 3033), 'pathlib.Path', 'pathlib.Path', (['TEST_DB_FILE_PATH'], {}), '(TEST_DB_FILE_PATH)\n', (3014, 3033), False, 'import pathlib\n'), ((4520, 4552), 'fastapi_server.main.app.dependency_overrides.clear', 'app.dependency_overrides.clear', ([], {}), '()\n', (4550, 4552), False, 'from fastapi_server.main import app\n'), ((3217, 3235), 'os.remove', 'os.remove', (['db_path'], {}), '(db_path)\n', (3226, 3235), False, 'import os\n')]
|
from datetime import datetime
from typing import TYPE_CHECKING, List, Optional
from uuid import UUID, uuid4
from pydantic import EmailStr, constr, validator
from sqlmodel import Column, Field, Relationship, SQLModel
from sqlmodel.sql.sqltypes import GUID
from ...utils.date import now_datetime
if TYPE_CHECKING:
from .order import Order
from .user import User
class BaseClient(SQLModel):
name: str = Field(description="Client name")
email: Optional[EmailStr] = Field(description="Client email", nullable=True)
phone: Optional[constr(regex=r"^\d{2}9\d{8}$")] = Field(description="Client cellphone", nullable=True) # noqa
zip_code: Optional[str] = Field(description="Postal code", nullable=True)
address: Optional[str] = Field(description="Address of Client", nullable=True)
@validator("name")
def validate_name(cls, value: str) -> str:
return value.title()
class CreateClient(BaseClient):
pass
class UpdateClient(BaseClient):
id: UUID = Field(description="Client ID")
class QueryClient(SQLModel):
name: Optional[str] = Field(description="Name of client for query")
class Client(BaseClient, table=True):
__tablename__ = "clients"
id: UUID = Field(default_factory=uuid4, description="Client ID", sa_column=Column("id", GUID(), primary_key=True))
owner_id: UUID = Field(description="User ID that owns the client", foreign_key="users.id")
created_at: datetime = Field(default_factory=now_datetime)
owner: "User" = Relationship()
orders: List["Order"] = Relationship(
back_populates="client",
sa_relationship_kwargs={"cascade": "all,delete", "lazy": "selectin", "passive_deletes": True},
)
|
[
"sqlmodel.Field",
"sqlmodel.sql.sqltypes.GUID",
"sqlmodel.Relationship"
] |
[((417, 449), 'sqlmodel.Field', 'Field', ([], {'description': '"""Client name"""'}), "(description='Client name')\n", (422, 449), False, 'from sqlmodel import Column, Field, Relationship, SQLModel\n'), ((482, 530), 'sqlmodel.Field', 'Field', ([], {'description': '"""Client email"""', 'nullable': '(True)'}), "(description='Client email', nullable=True)\n", (487, 530), False, 'from sqlmodel import Column, Field, Relationship, SQLModel\n'), ((585, 637), 'sqlmodel.Field', 'Field', ([], {'description': '"""Client cellphone"""', 'nullable': '(True)'}), "(description='Client cellphone', nullable=True)\n", (590, 637), False, 'from sqlmodel import Column, Field, Relationship, SQLModel\n'), ((676, 723), 'sqlmodel.Field', 'Field', ([], {'description': '"""Postal code"""', 'nullable': '(True)'}), "(description='Postal code', nullable=True)\n", (681, 723), False, 'from sqlmodel import Column, Field, Relationship, SQLModel\n'), ((753, 806), 'sqlmodel.Field', 'Field', ([], {'description': '"""Address of Client"""', 'nullable': '(True)'}), "(description='Address of Client', nullable=True)\n", (758, 806), False, 'from sqlmodel import Column, Field, Relationship, SQLModel\n'), ((813, 830), 'pydantic.validator', 'validator', (['"""name"""'], {}), "('name')\n", (822, 830), False, 'from pydantic import EmailStr, constr, validator\n'), ((999, 1029), 'sqlmodel.Field', 'Field', ([], {'description': '"""Client ID"""'}), "(description='Client ID')\n", (1004, 1029), False, 'from sqlmodel import Column, Field, Relationship, SQLModel\n'), ((1087, 1132), 'sqlmodel.Field', 'Field', ([], {'description': '"""Name of client for query"""'}), "(description='Name of client for query')\n", (1092, 1132), False, 'from sqlmodel import Column, Field, Relationship, SQLModel\n'), ((1344, 1417), 'sqlmodel.Field', 'Field', ([], {'description': '"""User ID that owns the client"""', 'foreign_key': '"""users.id"""'}), "(description='User ID that owns the client', foreign_key='users.id')\n", (1349, 1417), False, 'from sqlmodel import Column, Field, Relationship, SQLModel\n'), ((1445, 1480), 'sqlmodel.Field', 'Field', ([], {'default_factory': 'now_datetime'}), '(default_factory=now_datetime)\n', (1450, 1480), False, 'from sqlmodel import Column, Field, Relationship, SQLModel\n'), ((1502, 1516), 'sqlmodel.Relationship', 'Relationship', ([], {}), '()\n', (1514, 1516), False, 'from sqlmodel import Column, Field, Relationship, SQLModel\n'), ((1545, 1681), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""client"""', 'sa_relationship_kwargs': "{'cascade': 'all,delete', 'lazy': 'selectin', 'passive_deletes': True}"}), "(back_populates='client', sa_relationship_kwargs={'cascade':\n 'all,delete', 'lazy': 'selectin', 'passive_deletes': True})\n", (1557, 1681), False, 'from sqlmodel import Column, Field, Relationship, SQLModel\n'), ((551, 582), 'pydantic.constr', 'constr', ([], {'regex': '"""^\\\\d{2}9\\\\d{8}$"""'}), "(regex='^\\\\d{2}9\\\\d{8}$')\n", (557, 582), False, 'from pydantic import EmailStr, constr, validator\n'), ((1296, 1302), 'sqlmodel.sql.sqltypes.GUID', 'GUID', ([], {}), '()\n', (1300, 1302), False, 'from sqlmodel.sql.sqltypes import GUID\n')]
|
from typing import List, Optional
from sqlmodel import Field, Session, SQLModel, create_engine
class Zi(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str = Field(...)
stroke_count: int = Field(...)
strokes: str = Field(...)
heng_count: int = Field(...)
shu_count: int = Field(...)
pie_count: int = Field(...)
dian_count: int = Field(...)
zhe_count: int = Field(...)
engine = create_engine('sqlite:///Data/strokes.db', connect_args={'check_same_thread': False})
SQLModel.metadata.create_all(engine)
session = Session(engine)
|
[
"sqlmodel.Session",
"sqlmodel.SQLModel.metadata.create_all",
"sqlmodel.create_engine",
"sqlmodel.Field"
] |
[((474, 564), 'sqlmodel.create_engine', 'create_engine', (['"""sqlite:///Data/strokes.db"""'], {'connect_args': "{'check_same_thread': False}"}), "('sqlite:///Data/strokes.db', connect_args={\n 'check_same_thread': False})\n", (487, 564), False, 'from sqlmodel import Field, Session, SQLModel, create_engine\n'), ((561, 597), 'sqlmodel.SQLModel.metadata.create_all', 'SQLModel.metadata.create_all', (['engine'], {}), '(engine)\n', (589, 597), False, 'from sqlmodel import Field, Session, SQLModel, create_engine\n'), ((609, 624), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (616, 624), False, 'from sqlmodel import Field, Session, SQLModel, create_engine\n'), ((160, 197), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (165, 197), False, 'from sqlmodel import Field, Session, SQLModel, create_engine\n'), ((215, 225), 'sqlmodel.Field', 'Field', (['...'], {}), '(...)\n', (220, 225), False, 'from sqlmodel import Field, Session, SQLModel, create_engine\n'), ((251, 261), 'sqlmodel.Field', 'Field', (['...'], {}), '(...)\n', (256, 261), False, 'from sqlmodel import Field, Session, SQLModel, create_engine\n'), ((282, 292), 'sqlmodel.Field', 'Field', (['...'], {}), '(...)\n', (287, 292), False, 'from sqlmodel import Field, Session, SQLModel, create_engine\n'), ((316, 326), 'sqlmodel.Field', 'Field', (['...'], {}), '(...)\n', (321, 326), False, 'from sqlmodel import Field, Session, SQLModel, create_engine\n'), ((349, 359), 'sqlmodel.Field', 'Field', (['...'], {}), '(...)\n', (354, 359), False, 'from sqlmodel import Field, Session, SQLModel, create_engine\n'), ((382, 392), 'sqlmodel.Field', 'Field', (['...'], {}), '(...)\n', (387, 392), False, 'from sqlmodel import Field, Session, SQLModel, create_engine\n'), ((416, 426), 'sqlmodel.Field', 'Field', (['...'], {}), '(...)\n', (421, 426), False, 'from sqlmodel import Field, Session, SQLModel, create_engine\n'), ((449, 459), 'sqlmodel.Field', 'Field', (['...'], {}), '(...)\n', (454, 459), False, 'from sqlmodel import Field, Session, SQLModel, create_engine\n')]
|
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import megengine._internal as mgb
from ... import functional as F
from ...core import Parameter
from ..qat import linear as QAT
from .module import QuantizedModule
class Linear(QuantizedModule):
r"""quantized version of :class:`~.qat.linear.Linear`."""
def __init__(
self, dtype: np.dtype = None,
):
super().__init__()
self.weight = None
self.bias = None
self.output_dtype = dtype
def forward(self, inp):
if self.training:
raise ValueError("quantized module only support inference.")
inp_scale = mgb.dtype.get_scale(inp.dtype)
w_scale = mgb.dtype.get_scale(self.weight.dtype)
bias_dtype = mgb.dtype.qint32(inp_scale * w_scale)
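        # The int8 x int8 products are accumulated at the combined scale
        # inp_scale * w_scale, so the bias is cast to a qint32 dtype carrying
        # that scale before it enters the addition below.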
return F.linear(
inp,
self.weight,
None if self.bias is None else self.bias.astype(bias_dtype),
).astype(self.output_dtype)
@classmethod
def from_qat_module(cls, qat_module: QAT.Linear):
r"""
return a :class:`~.QuantizedModule` instance converted from a
:class:`~.QATModule` instance.
"""
output_dtype = qat_module.get_activation_dtype()
qmod = cls(dtype=output_dtype)
weight = qat_module.weight.astype(qat_module.get_weight_dtype())
qmod.weight = Parameter(weight.numpy())
if qat_module.bias is not None:
qmod.bias = Parameter(qat_module.bias.numpy())
return qmod
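    # A minimal usage sketch (hypothetical objects, not part of the original
    # module): conversion follows the docstring above, e.g.
    #
    #   qat_linear = ...  # a trained QAT.Linear instance
    #   qlinear = Linear.from_qat_module(qat_linear)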
|
[
"megengine._internal.dtype.qint32",
"megengine._internal.dtype.get_scale"
] |
[((958, 988), 'megengine._internal.dtype.get_scale', 'mgb.dtype.get_scale', (['inp.dtype'], {}), '(inp.dtype)\n', (977, 988), True, 'import megengine._internal as mgb\n'), ((1007, 1045), 'megengine._internal.dtype.get_scale', 'mgb.dtype.get_scale', (['self.weight.dtype'], {}), '(self.weight.dtype)\n', (1026, 1045), True, 'import megengine._internal as mgb\n'), ((1067, 1104), 'megengine._internal.dtype.qint32', 'mgb.dtype.qint32', (['(inp_scale * w_scale)'], {}), '(inp_scale * w_scale)\n', (1083, 1104), True, 'import megengine._internal as mgb\n')]
|
from sqlmodel import SQLModel, Relationship
from typing import List
from app.models.base_uuid_model import BaseUUIDModel
class RoleBase(SQLModel):
name: str
description: str
class Role(BaseUUIDModel, RoleBase, table=True):
users: List["User"] = Relationship(back_populates="role", sa_relationship_kwargs={"lazy": "selectin"})
|
[
"sqlmodel.Relationship"
] |
[((263, 348), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""role"""', 'sa_relationship_kwargs': "{'lazy': 'selectin'}"}), "(back_populates='role', sa_relationship_kwargs={'lazy': 'selectin'}\n )\n", (275, 348), False, 'from sqlmodel import SQLModel, Relationship\n')]
|
from enum import Enum
from typing import TYPE_CHECKING, Optional
from sqlalchemy import Column
from sqlalchemy import Enum as SQLEnum
from sqlalchemy import ForeignKey, Integer
from sqlmodel import Field, Relationship, SQLModel
if TYPE_CHECKING:
from .message import Message, MessageList
class MessageTriggerType(Enum):
SIGN_UP = "Sign Up"
APPLICATION_SUBMITTED = "Application - Submitted"
APPLICATION_ACCEPTED = "Application - Accepted"
APPLICATION_REJECTED = "Application - Rejected"
INCOMPLETE_APPLICATION_24H = "Incomplete Application - 24hr"
INCOMPLETE_APPLICATION_7D = "Incomplete Application - 7 day"
class MessageTriggerBase(SQLModel):
trigger: MessageTriggerType = Field(
sa_column=Column(
SQLEnum(MessageTriggerType),
nullable=False,
primary_key=True,
)
)
class MessageTrigger(MessageTriggerBase, table=True):
__tablename__ = "message_triggers"
message_id: Optional[int] = Field(
sa_column=Column(
Integer(),
ForeignKey("messages.id", ondelete="CASCADE"),
nullable=True,
)
)
message: Optional["Message"] = Relationship()
class MessageTriggerRead(MessageTriggerBase):
message: Optional["MessageList"]
class MessageTriggerUpdate(SQLModel):
message_id: Optional[int]
|
[
"sqlmodel.Relationship"
] |
[((1182, 1196), 'sqlmodel.Relationship', 'Relationship', ([], {}), '()\n', (1194, 1196), False, 'from sqlmodel import Field, Relationship, SQLModel\n'), ((757, 784), 'sqlalchemy.Enum', 'SQLEnum', (['MessageTriggerType'], {}), '(MessageTriggerType)\n', (764, 784), True, 'from sqlalchemy import Enum as SQLEnum\n'), ((1033, 1042), 'sqlalchemy.Integer', 'Integer', ([], {}), '()\n', (1040, 1042), False, 'from sqlalchemy import ForeignKey, Integer\n'), ((1056, 1101), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""messages.id"""'], {'ondelete': '"""CASCADE"""'}), "('messages.id', ondelete='CASCADE')\n", (1066, 1101), False, 'from sqlalchemy import ForeignKey, Integer\n')]
|
from datetime import datetime, timezone
from typing import List, Optional
from sqlmodel import Field, Relationship, SQLModel, Session, select
from sqlalchemy.engine import Engine
class Post(SQLModel, table=True):
__tablename__ = "ig_posts"
id: Optional[int] = Field(default=None, primary_key=True)
ig_account_id: int = Field(foreign_key="ig_accounts.id")
ig_pk: int
webhook_message_id: Optional[int] = None
ig_account: "IGAccount" = Relationship(back_populates="ig_posts")
class IGAccount(SQLModel, table=True):
__tablename__ = "ig_accounts"
id: Optional[int] = Field(default=None, primary_key=True)
ig_pk: int
ig_hint_username: Optional[str] = None
webhook_id: int
min_time: datetime = Field(default_factory=datetime.utcnow)
@property
def aware_min_time(self) -> datetime:
return self.min_time.replace(tzinfo=timezone.utc)
@aware_min_time.setter
def aware_min_time(self, value: datetime) -> None:
assert value.tzinfo is not None
delta = value.tzinfo.utcoffset(value)
assert delta is not None
self.min_time = value - delta
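    # Note: assigning an aware datetime shifts it to UTC wall time by
    # subtracting its utcoffset(); reading it back through aware_min_time
    # re-attaches timezone.utc.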
ig_posts: List["Post"] = Relationship(back_populates="ig_account")
@classmethod
def get(cls, session: Session, pk: int | str, webhook_id: int):
pk_i = int(pk)
query = select(cls).where(cls.ig_pk == pk_i, cls.webhook_id == webhook_id)
acc = session.exec(query).one_or_none()
if acc is None:
acc = cls(ig_pk=pk_i, webhook_id=webhook_id)
session.add(acc)
return acc
def make_post(self, session: Session, pk: int | str) -> Post:
pk_i = int(pk)
# use ig_account_id=-1 to remove type errors (overwritten by ig_account=self)
post = Post(ig_account_id=-1, ig_account=self, ig_pk=pk_i)
session.add(post)
return post
class DB:
engine: Engine
def __init__(self, engine: Engine) -> None:
self.engine = engine
SQLModel.metadata.create_all(engine)
def session(self) -> Session:
return Session(self.engine)
def get_ig_account(
self, session: Session, pk: int | str, webhook_id: int
) -> IGAccount:
return IGAccount.get(session, pk=pk, webhook_id=webhook_id)
def make_ig_post(self, session: Session, pk: int | str, account: IGAccount) -> Post:
return account.make_post(session, pk)
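# A minimal usage sketch (hypothetical engine URL and IDs, not part of the
# original module):
#
#   from sqlmodel import create_engine
#   db = DB(create_engine("sqlite://"))
#   with db.session() as session:
#       acc = db.get_ig_account(session, pk=1, webhook_id=42)
#       post = db.make_ig_post(session, pk=99, account=acc)
#       session.commit()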
|
[
"sqlmodel.Relationship",
"sqlmodel.Session",
"sqlmodel.SQLModel.metadata.create_all",
"sqlmodel.Field",
"sqlmodel.select"
] |
[((271, 308), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (276, 308), False, 'from sqlmodel import Field, Relationship, SQLModel, Session, select\n'), ((335, 370), 'sqlmodel.Field', 'Field', ([], {'foreign_key': '"""ig_accounts.id"""'}), "(foreign_key='ig_accounts.id')\n", (340, 370), False, 'from sqlmodel import Field, Relationship, SQLModel, Session, select\n'), ((462, 501), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""ig_posts"""'}), "(back_populates='ig_posts')\n", (474, 501), False, 'from sqlmodel import Field, Relationship, SQLModel, Session, select\n'), ((602, 639), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (607, 639), False, 'from sqlmodel import Field, Relationship, SQLModel, Session, select\n'), ((744, 782), 'sqlmodel.Field', 'Field', ([], {'default_factory': 'datetime.utcnow'}), '(default_factory=datetime.utcnow)\n', (749, 782), False, 'from sqlmodel import Field, Relationship, SQLModel, Session, select\n'), ((1168, 1209), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""ig_account"""'}), "(back_populates='ig_account')\n", (1180, 1209), False, 'from sqlmodel import Field, Relationship, SQLModel, Session, select\n'), ((1985, 2021), 'sqlmodel.SQLModel.metadata.create_all', 'SQLModel.metadata.create_all', (['engine'], {}), '(engine)\n', (2013, 2021), False, 'from sqlmodel import Field, Relationship, SQLModel, Session, select\n'), ((2072, 2092), 'sqlmodel.Session', 'Session', (['self.engine'], {}), '(self.engine)\n', (2079, 2092), False, 'from sqlmodel import Field, Relationship, SQLModel, Session, select\n'), ((1335, 1346), 'sqlmodel.select', 'select', (['cls'], {}), '(cls)\n', (1341, 1346), False, 'from sqlmodel import Field, Relationship, SQLModel, Session, select\n')]
|
import re
from copy import copy
from functools import reduce  # not a builtin in Python 3; used in get_conn_info()
import numpy as nm
from sfepy.base.base import (as_float_or_complex, get_default, assert_,
Container, Struct, basestr, goptions)
from sfepy.base.compat import in1d
# Used for imports in term files.
from sfepy.terms.extmods import terms
from sfepy.linalg import split_range
_match_args = re.compile('^([^\(\}]*)\((.*)\)$').match
_match_virtual = re.compile('^virtual$').match
_match_state = re.compile('^state(_[_a-zA-Z0-9]+)?$').match
_match_parameter = re.compile('^parameter(_[_a-zA-Z0-9]+)?$').match
_match_material = re.compile('^material(_[_a-zA-Z0-9]+)?$').match
_match_material_opt = re.compile('^opt_material(_[_a-zA-Z0-9]+)?$').match
_match_material_root = re.compile('(.+)\.(.*)').match
def get_arg_kinds(arg_types):
"""
Translate `arg_types` of a Term to a canonical form.
Parameters
----------
arg_types : tuple of strings
The term argument types, as given in the `arg_types` attribute.
Returns
-------
arg_kinds : list of strings
The argument kinds - one of 'virtual_variable', 'state_variable',
'parameter_variable', 'opt_material', 'user'.
"""
arg_kinds = []
for ii, arg_type in enumerate(arg_types):
if _match_virtual(arg_type):
arg_kinds.append('virtual_variable')
elif _match_state(arg_type):
arg_kinds.append('state_variable')
elif _match_parameter(arg_type):
arg_kinds.append('parameter_variable')
elif _match_material(arg_type):
arg_kinds.append('material')
elif _match_material_opt(arg_type):
arg_kinds.append('opt_material')
if ii > 0:
msg = 'opt_material at position %d, must be at 0!' % ii
raise ValueError(msg)
else:
arg_kinds.append('user')
return arg_kinds
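# A minimal usage sketch (hypothetical argument tuple): for a term taking a
# material, a virtual and a state variable, the canonical kinds would be
#
#   get_arg_kinds(('material', 'virtual', 'state'))
#   # -> ['material', 'virtual_variable', 'state_variable']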
def get_shape_kind(integration):
"""
Get data shape kind for given integration type.
"""
if integration == 'surface':
shape_kind = 'surface'
elif integration in ('volume', 'plate', 'surface_extra'):
shape_kind = 'volume'
elif integration == 'point':
shape_kind = 'point'
else:
raise NotImplementedError('unsupported term integration! (%s)'
% integration)
return shape_kind
def split_complex_args(args):
"""
Split complex arguments to real and imaginary parts.
Returns
-------
newargs : dictionary
Dictionary with lists corresponding to `args` such that each
argument of numpy.complex128 data type is split to its real and
imaginary part. The output depends on the number of complex
arguments in 'args':
- 0: list (key 'r') identical to input one
- 1: two lists with keys 'r', 'i' corresponding to real
and imaginary parts
- 2: output dictionary contains four lists:
- 'r' - real(arg1), real(arg2)
- 'i' - imag(arg1), imag(arg2)
- 'ri' - real(arg1), imag(arg2)
- 'ir' - imag(arg1), real(arg2)
"""
newargs = {}
cai = []
for ii, arg in enumerate(args):
if isinstance(arg, nm.ndarray) and (arg.dtype == nm.complex128):
cai.append(ii)
if len(cai) > 0:
newargs['r'] = list(args[:])
newargs['i'] = list(args[:])
arg1 = cai[0]
newargs['r'][arg1] = args[arg1].real.copy()
newargs['i'][arg1] = args[arg1].imag.copy()
if len(cai) == 2:
arg2 = cai[1]
newargs['r'][arg2] = args[arg2].real.copy()
newargs['i'][arg2] = args[arg2].imag.copy()
newargs['ri'] = list(args[:])
newargs['ir'] = list(args[:])
newargs['ri'][arg1] = newargs['r'][arg1]
newargs['ri'][arg2] = newargs['i'][arg2]
newargs['ir'][arg1] = newargs['i'][arg1]
newargs['ir'][arg2] = newargs['r'][arg2]
elif len(cai) > 2:
raise NotImplementedError('more than 2 complex arguments! (%d)'
% len(cai))
else:
newargs['r'] = args[:]
return newargs
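# A minimal usage sketch (hypothetical complex128 inputs): with two complex
# arrays, the four key combinations described in the docstring are produced:
#
#   a, b = nm.array([1 + 2j]), nm.array([3 + 4j])
#   out = split_complex_args([a, b])
#   # out['r']  == [a.real, b.real], out['i']  == [a.imag, b.imag]
#   # out['ri'] == [a.real, b.imag], out['ir'] == [a.imag, b.real]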
def vector_chunk_generator(total_size, chunk_size, shape_in,
zero=False, set_shape=True, dtype=nm.float64):
if not chunk_size:
chunk_size = total_size
shape = list(shape_in)
sizes = split_range(total_size, chunk_size)
ii = nm.array(0, dtype=nm.int32)
for size in sizes:
chunk = nm.arange(size, dtype=nm.int32) + ii
if set_shape:
shape[0] = size
if zero:
out = nm.zeros(shape, dtype=dtype)
else:
out = nm.empty(shape, dtype=dtype)
yield out, chunk
ii += size
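# A minimal usage sketch (assuming split_range(10, 4) splits the range into
# sizes [4, 4, 2]): buffers shaped (4, 2), (4, 2), (2, 2) are yielded
# together with the index ranges 0-3, 4-7 and 8-9:
#
#   for out, chunk in vector_chunk_generator(10, 4, (0, 2), zero=True):
#       assert out.shape[0] == chunk.shape[0]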
def create_arg_parser():
from pyparsing import Literal, Word, delimitedList, Group, \
StringStart, StringEnd, Optional, nums, alphas, alphanums
inumber = Word("+-" + nums, nums)
history = Optional(Literal('[').suppress() + inumber
+ Literal(']').suppress(), default=0)("history")
history.setParseAction(lambda str, loc, toks: int(toks[0]))
variable = Group(Word(alphas, alphanums + '._') + history)
derivative = Group(Literal('d') + variable\
+ Literal('/').suppress() + Literal('dt'))
trace = Group(Literal('tr') + Literal('(').suppress() + variable \
+ Literal(')').suppress())
generalized_var = derivative | trace | variable
args = StringStart() + delimitedList(generalized_var) + StringEnd()
return args
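# A minimal usage sketch (hypothetical argument string): the grammar accepts
# comma-separated materials, variables, time derivatives and traces, e.g.
#
#   parser = create_arg_parser()
#   parser.parseString('m.D, v, du/dt, tr(u)')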
# 22.01.2006, c
class CharacteristicFunction(Struct):
def __init__(self, region):
self.igs = region.igs
self.region = region
self.local_chunk = None
self.ig = None
def __call__(self, chunk_size, shape_in, zero=False, set_shape=True,
ret_local_chunk=False, dtype=nm.float64):
els = self.region.get_cells(self.ig)
for out, chunk in vector_chunk_generator(els.shape[0], chunk_size,
shape_in, zero, set_shape,
dtype):
self.local_chunk = chunk
if ret_local_chunk:
yield out, chunk
else:
yield out, els[chunk]
self.local_chunk = None
def set_current_group(self, ig):
self.ig = ig
def get_local_chunk(self):
return self.local_chunk
class ConnInfo(Struct):
def get_region(self, can_trace=True):
if self.is_trace and can_trace:
return self.region.get_mirror_region()[0]
else:
return self.region
def get_region_name(self, can_trace=True):
if self.is_trace and can_trace:
reg = self.region.get_mirror_region()[0]
else:
reg = self.region
if reg is not None:
return reg.name
else:
return None
def iter_igs(self):
if self.region is not None:
for ig in self.region.igs:
if self.virtual_igs is not None:
ir = self.virtual_igs.tolist().index(ig)
rig = self.virtual_igs[ir]
else:
rig = None
if not self.is_trace:
ii = ig
else:
ig_map_i = self.region.get_mirror_region()[2]
ii = ig_map_i[ig]
if self.state_igs is not None:
ic = self.state_igs.tolist().index(ii)
cig = self.state_igs[ic]
else:
cig = None
yield rig, cig
else:
yield None, None
class Terms(Container):
@staticmethod
def from_desc(term_descs, regions, integrals=None):
"""
Create terms, assign each term its region.
"""
from sfepy.terms import term_table
terms = Terms()
for td in term_descs:
try:
constructor = term_table[td.name]
except:
msg = "term '%s' is not in %s" % (td.name,
sorted(term_table.keys()))
raise ValueError(msg)
try:
region = regions[td.region]
except IndexError:
raise KeyError('region "%s" does not exist!' % td.region)
term = Term.from_desc(constructor, td, region, integrals=integrals)
terms.append(term)
return terms
def __init__(self, objs=None):
Container.__init__(self, objs=objs)
self.update_expression()
def insert(self, ii, obj):
Container.insert(self, ii, obj)
self.update_expression()
def append(self, obj):
Container.append(self, obj)
self.update_expression()
def update_expression(self):
self.expression = []
for term in self:
aux = [term.sign, term.name, term.arg_str,
term.integral_name, term.region.name]
self.expression.append(aux)
def __mul__(self, other):
out = Terms()
for name, term in self.iteritems():
out.append(term * other)
return out
def __rmul__(self, other):
return self * other
def __add__(self, other):
if isinstance(other, Term):
out = self.copy()
out.append(other)
elif isinstance(other, Terms):
out = Terms(self._objs + other._objs)
else:
raise ValueError('cannot add Terms with %s!' % other)
return out
def __radd__(self, other):
return self + other
def __sub__(self, other):
if isinstance(other, Term):
out = self + (-other)
elif isinstance(other, Terms):
out = self + (-other)
else:
raise ValueError('cannot subtract Terms with %s!' % other)
return out
def __rsub__(self, other):
return -self + other
def __pos__(self):
return self
def __neg__(self):
return -1.0 * self
def setup(self):
for term in self:
term.setup()
def assign_args(self, variables, materials, user=None):
"""
Assign all term arguments.
"""
for term in self:
term.assign_args(variables, materials, user)
def get_variable_names(self):
out = []
for term in self:
out.extend(term.get_variable_names())
return list(set(out))
def get_material_names(self):
out = []
for term in self:
out.extend(term.get_material_names())
return list(set(out))
def get_user_names(self):
out = []
for term in self:
out.extend(term.get_user_names())
return list(set(out))
def set_current_group(self, ig):
for term in self:
term.char_fun.set_current_group(ig)
class Term(Struct):
name = ''
arg_types = ()
arg_shapes = {}
integration = 'volume'
geometries = ['2_3', '2_4', '3_4', '3_8']
@staticmethod
def new(name, integral, region, **kwargs):
from sfepy.terms import term_table
arg_str = _match_args(name)
if arg_str is not None:
name, arg_str = arg_str.groups()
else:
raise ValueError('bad term syntax! (%s)' % name)
if name in term_table:
constructor = term_table[name]
else:
msg = "term '%s' is not in %s" % (name, sorted(term_table.keys()))
raise ValueError(msg)
obj = constructor(name, arg_str, integral, region, **kwargs)
return obj
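    # A minimal usage sketch (hypothetical names, following the sfepy docs):
    #
    #   t = Term.new('dw_laplace(coef.val, v, u)', integral, omega,
    #                coef=coef, v=v, u=u)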
@staticmethod
def from_desc(constructor, desc, region, integrals=None):
from sfepy.discrete import Integrals
if integrals is None:
integrals = Integrals()
if desc.name == 'intFE':
obj = constructor(desc.name, desc.args, None, region, desc=desc)
else:
obj = constructor(desc.name, desc.args, None, region)
obj.set_integral(integrals.get(desc.integral))
obj.sign = desc.sign
return obj
def __init__(self, name, arg_str, integral, region, **kwargs):
self.name = name
self.arg_str = arg_str
self.region = region
self._kwargs = kwargs
self._integration = self.integration
self.sign = 1.0
self.set_integral(integral)
def __mul__(self, other):
try:
mul = as_float_or_complex(other)
except ValueError:
raise ValueError('cannot multiply Term with %s!' % other)
out = self.copy(name=self.name)
out.sign = mul * self.sign
return out
def __rmul__(self, other):
return self * other
def __add__(self, other):
if isinstance(other, Term):
out = Terms([self, other])
else:
out = NotImplemented
return out
def __sub__(self, other):
if isinstance(other, Term):
out = Terms([self, -1.0 * other])
else:
out = NotImplemented
return out
def __pos__(self):
return self
def __neg__(self):
out = -1.0 * self
return out
def set_integral(self, integral):
"""
Set the term integral.
"""
self.integral = integral
if self.integral is not None:
self.integral_name = self.integral.name
def setup(self):
self.char_fun = CharacteristicFunction(self.region)
self.function = Struct.get(self, 'function', None)
self.step = 0
self.dt = 1.0
self.is_quasistatic = False
self.has_region = True
self.setup_formal_args()
if self._kwargs:
self.setup_args(**self._kwargs)
else:
self.args = []
def setup_formal_args(self):
self.arg_names = []
self.arg_steps = {}
self.arg_derivatives = {}
self.arg_traces = {}
parser = create_arg_parser()
self.arg_desc = parser.parseString(self.arg_str)
for arg in self.arg_desc:
trace = False
derivative = None
if isinstance(arg[1], int):
name, step = arg
else:
kind = arg[0]
name, step = arg[1]
if kind == 'd':
derivative = arg[2]
elif kind == 'tr':
trace = True
match = _match_material_root(name)
if match:
name = (match.group(1), match.group(2))
self.arg_names.append(name)
self.arg_steps[name] = step
self.arg_derivatives[name] = derivative
self.arg_traces[name] = trace
def setup_args(self, **kwargs):
self._kwargs = kwargs
self.args = []
for arg_name in self.arg_names:
if isinstance(arg_name, basestr):
self.args.append(self._kwargs[arg_name])
else:
self.args.append((self._kwargs[arg_name[0]], arg_name[1]))
self.classify_args()
self.check_args()
def __call__(self, diff_var=None, chunk_size=None, **kwargs):
"""
Subclasses either implement __call__ or plug in a proper _call().
"""
return self._call(diff_var, chunk_size, **kwargs)
def _call(self, diff_var=None, chunk_size=None, **kwargs):
msg = 'base class method "_call" called for %s' \
% self.__class__.__name__
raise RuntimeError(msg)
def assign_args(self, variables, materials, user=None):
"""
Check term argument existence in variables, materials, user data
and assign the arguments to terms. Also check compatibility of
field and term subdomain lists (igs).
"""
if user is None:
user = {}
kwargs = {}
for arg_name in self.arg_names:
if isinstance(arg_name, basestr):
if arg_name in variables.names:
kwargs[arg_name] = variables[arg_name]
elif arg_name in user:
kwargs[arg_name] = user[arg_name]
else:
raise ValueError('argument %s not found!' % arg_name)
else:
arg_name = arg_name[0]
if arg_name in materials.names:
kwargs[arg_name] = materials[arg_name]
else:
raise ValueError('material argument %s not found!'
% arg_name)
self.setup_args(**kwargs)
def classify_args(self):
"""
Classify types of the term arguments and find matching call
signature.
A state variable can be in place of a parameter variable and
vice versa.
"""
self.names = Struct(name='arg_names',
material=[], variable=[], user=[],
state=[], virtual=[], parameter=[])
# Prepare for 'opt_material' - just prepend a None argument if needed.
if isinstance(self.arg_types[0], tuple):
arg_types = self.arg_types[0]
else:
arg_types = self.arg_types
if len(arg_types) == (len(self.args) + 1):
self.args.insert(0, (None, None))
self.arg_names.insert(0, (None, None))
if isinstance(self.arg_types[0], tuple):
assert_(len(self.modes) == len(self.arg_types))
# Find matching call signature using variable arguments - material
# and user arguments are ignored!
matched = []
for it, arg_types in enumerate(self.arg_types):
arg_kinds = get_arg_kinds(arg_types)
if self._check_variables(arg_kinds):
matched.append((it, arg_kinds))
if len(matched) == 1:
i_match, arg_kinds = matched[0]
arg_types = self.arg_types[i_match]
self.mode = self.modes[i_match]
elif len(matched) == 0:
msg = 'cannot match arguments! (%s)' % self.arg_names
raise ValueError(msg)
else:
msg = 'ambiguous arguments! (%s)' % self.arg_names
raise ValueError(msg)
else:
arg_types = self.arg_types
arg_kinds = get_arg_kinds(self.arg_types)
self.mode = Struct.get(self, 'mode', None)
if not self._check_variables(arg_kinds):
raise ValueError('cannot match variables! (%s)'
% self.arg_names)
# Set actual argument types.
self.ats = list(arg_types)
for ii, arg_kind in enumerate(arg_kinds):
name = self.arg_names[ii]
if arg_kind.endswith('variable'):
names = self.names.variable
if arg_kind == 'virtual_variable':
self.names.virtual.append(name)
elif arg_kind == 'state_variable':
self.names.state.append(name)
elif arg_kind == 'parameter_variable':
self.names.parameter.append(name)
elif arg_kind.endswith('material'):
names = self.names.material
else:
names = self.names.user
names.append(name)
self.n_virtual = len(self.names.virtual)
if self.n_virtual > 1:
raise ValueError('at most one virtual variable is allowed! (%d)'
% self.n_virtual)
self.set_arg_types()
self.setup_integration()
def _check_variables(self, arg_kinds):
for ii, arg_kind in enumerate(arg_kinds):
if arg_kind.endswith('variable'):
var = self.args[ii]
check = {'virtual_variable' : var.is_virtual,
'state_variable' : var.is_state_or_parameter,
'parameter_variable' : var.is_state_or_parameter}
if not check[arg_kind]():
return False
else:
return True
def set_arg_types(self):
pass
def check_args(self):
"""
Common checking to all terms.
Check compatibility of field and term subdomain lists (igs).
"""
vns = self.get_variable_names()
for name in vns:
field = self._kwargs[name].get_field()
if field is None:
continue
if not nm.all(in1d(self.region.vertices,
field.region.vertices)):
msg = ('%s: incompatible regions: (self, field %s)'
+ '(%s in %s)') %\
(self.name, field.name,
self.region.vertices, field.region.vertices)
raise ValueError(msg)
def get_variable_names(self):
return self.names.variable
def get_material_names(self):
out = []
for aux in self.names.material:
if aux[0] is not None:
out.append(aux[0])
return out
def get_user_names(self):
return self.names.user
def get_virtual_name(self):
if not self.names.virtual:
return None
var = self.get_virtual_variable()
return var.name
def get_state_names(self):
"""
If variables are given, return only true unknowns whose data are of
the current time step (0).
"""
variables = self.get_state_variables()
return [var.name for var in variables]
def get_parameter_names(self):
return copy(self.names.parameter)
def get_conn_key(self):
"""The key to be used in DOF connectivity information."""
key = (self.name,) + tuple(self.arg_names)
key += (self.integral_name, self.region.name)
return key
def get_conn_info(self):
vvar = self.get_virtual_variable()
svars = self.get_state_variables()
pvars = self.get_parameter_variables()
all_vars = self.get_variables()
dc_type = self.get_dof_conn_type()
tgs = self.get_geometry_types()
v_igs = v_tg = None
if vvar is not None:
field = vvar.get_field()
if field is not None:
v_igs = field.igs
if vvar.name in tgs:
v_tg = tgs[vvar.name]
else:
v_tg = None
else:
# No virtual variable -> all unknowns are in fact known parameters.
pvars += svars
svars = []
region = self.get_region()
if region is not None:
is_any_trace = reduce(lambda x, y: x or y,
self.arg_traces.values())
if is_any_trace:
region.setup_mirror_region()
self.char_fun.igs = region.igs
vals = []
aux_pvars = []
for svar in svars:
# Allow only true state variables.
if not svar.is_state():
aux_pvars.append(svar)
continue
field = svar.get_field()
if field is not None:
s_igs = field.igs
else:
s_igs = None
is_trace = self.arg_traces[svar.name]
if svar.name in tgs:
ps_tg = tgs[svar.name]
else:
ps_tg = v_tg
val = ConnInfo(virtual=vvar, virtual_igs=v_igs,
state=svar, state_igs=s_igs,
primary=svar, primary_igs=s_igs,
has_virtual=True,
has_state=True,
is_trace=is_trace,
dc_type=dc_type,
v_tg=v_tg,
ps_tg=ps_tg,
region=region,
all_vars=all_vars)
vals.append(val)
pvars += aux_pvars
for pvar in pvars:
field = pvar.get_field()
if field is not None:
p_igs = field.igs
else:
p_igs = None
is_trace = self.arg_traces[pvar.name]
if pvar.name in tgs:
ps_tg = tgs[pvar.name]
else:
ps_tg = v_tg
val = ConnInfo(virtual=vvar, virtual_igs=v_igs,
state=None, state_igs=[],
primary=pvar.get_primary(), primary_igs=p_igs,
has_virtual=vvar is not None,
has_state=False,
is_trace=is_trace,
dc_type=dc_type,
v_tg=v_tg,
ps_tg=ps_tg,
region=region,
all_vars=all_vars)
vals.append(val)
if vvar and (len(vals) == 0):
# No state, parameter variables, just the virtual one.
val = ConnInfo(virtual=vvar, virtual_igs=v_igs,
state=vvar.get_primary(), state_igs=v_igs,
primary=vvar.get_primary(), primary_igs=v_igs,
has_virtual=True,
has_state=False,
is_trace=False,
dc_type=dc_type,
v_tg=v_tg,
ps_tg=v_tg,
region=region,
all_vars=all_vars)
vals.append(val)
return vals
def get_args_by_name(self, arg_names):
"""
Return arguments by name.
"""
out = []
for name in arg_names:
try:
ii = self.arg_names.index(name)
except ValueError:
raise ValueError('non-existing argument! (%s)' % name)
out.append(self.args[ii])
return out
def get_args(self, arg_types=None, **kwargs):
"""
Return arguments by type as specified in arg_types (or
self.ats). Arguments in **kwargs can override the ones assigned
at the term construction - this is useful for passing user data.
"""
ats = self.ats
if arg_types is None:
arg_types = ats
args = []
iname, region_name, ig = self.get_current_group()
for at in arg_types:
ii = ats.index(at)
arg_name = self.arg_names[ii]
if isinstance(arg_name, basestr):
if arg_name in kwargs:
args.append(kwargs[arg_name])
else:
args.append(self.args[ii])
else:
mat, par_name = self.args[ii]
if mat is not None:
mat_data = mat.get_data((region_name, self.integral_name),
ig, par_name)
else:
mat_data = None
args.append(mat_data)
return args
def get_kwargs(self, keys, **kwargs):
"""Extract arguments from **kwargs listed in keys (default is
None)."""
return [kwargs.get(name) for name in keys]
def get_arg_name(self, arg_type, full=False, join=None):
"""
        Get the name of the argument specified by `arg_type`.
Parameters
----------
arg_type : str
The argument type string.
full : bool
If True, return the full name. For example, if the name of a
variable argument is 'u' and its time derivative is
requested, the full name is 'du/dt'.
join : str, optional
            Optionally, the material argument name tuple can be joined
            into a single string using the `join` string.
Returns
-------
name : str
The argument name.
"""
try:
ii = self.ats.index(arg_type)
except ValueError:
return None
name = self.arg_names[ii]
if full:
# Include derivatives.
if self.arg_derivatives[name]:
name = 'd%s/%s' % (name, self.arg_derivatives[name])
if (join is not None) and isinstance(name, tuple):
name = join.join(name)
return name
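    # A usage sketch (hypothetical term arguments): for a term created from
    # 'dw_volume_dot.i1.Omega(m.rho, s, du/dt)', get_arg_name('state') returns
    # 'u', get_arg_name('state', full=True) returns 'du/dt' and
    # get_arg_name('material', join='.') returns 'm.rho'.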
def setup_integration(self):
self.has_geometry = True
self.geometry_types = {}
if isinstance(self.integration, basestr):
for var in self.get_variables():
self.geometry_types[var.name] = self.integration
else:
if self.mode is not None:
self.integration = self._integration[self.mode]
if self.integration is not None:
for arg_type, gtype in self.integration.iteritems():
var = self.get_args(arg_types=[arg_type])[0]
self.geometry_types[var.name] = gtype
gtypes = list(set(self.geometry_types.itervalues()))
if 'surface_extra' in gtypes:
self.dof_conn_type = 'volume'
elif len(gtypes):
self.dof_conn_type = gtypes[0]
def get_region(self):
return self.region
def get_geometry_types(self):
"""
Returns
-------
out : dict
The required geometry types for each variable argument.
"""
return self.geometry_types
def get_current_group(self):
return (self.integral_name, self.region.name, self.char_fun.ig)
def get_dof_conn_type(self):
return Struct(name='dof_conn_info', type=self.dof_conn_type,
region_name=self.region.name)
def set_current_group(self, ig):
self.char_fun.set_current_group(ig)
def igs(self):
return self.char_fun.igs
def get_assembling_cells(self, shape=None):
"""
Return the assembling cell indices into a DOF connectivity.
"""
cells = nm.arange(shape[0], dtype=nm.int32)
return cells
def iter_groups(self):
if self.dof_conn_type == 'point':
igs = self.igs()[0:1]
else:
igs = self.igs()
for ig in igs:
if self.integration in ('volume', 'plate'):
if not len(self.region.get_cells(ig)): continue
self.set_current_group(ig)
yield ig
def time_update(self, ts):
if ts is not None:
self.step = ts.step
self.dt = ts.dt
self.is_quasistatic = ts.is_quasistatic
def advance(self, ts):
"""
Advance to the next time step. Implemented in subclasses.
"""
def get_vector(self, variable):
"""Get the vector stored in `variable` according to self.arg_steps
and self.arg_derivatives. Supports only the backward difference w.r.t.
time."""
name = variable.name
return variable(step=self.arg_steps[name],
derivative=self.arg_derivatives[name])
def get_approximation(self, variable, get_saved=False):
"""
Return approximation corresponding to `variable`. Also return
the corresponding geometry (actual or saved, according to
`get_saved`).
"""
geo, _, key = self.get_mapping(variable, get_saved=get_saved,
return_key=True)
ig = key[2]
ap = variable.get_approximation(ig)
return ap, geo
def get_variables(self, as_list=True):
if as_list:
variables = self.get_args_by_name(self.names.variable)
else:
variables = {}
for var in self.get_args_by_name(self.names.variable):
variables[var.name] = var
return variables
def get_virtual_variable(self):
aux = self.get_args_by_name(self.names.virtual)
if len(aux) == 1:
var = aux[0]
else:
var = None
return var
def get_state_variables(self, unknown_only=False):
variables = self.get_args_by_name(self.names.state)
if unknown_only:
variables = [var for var in variables
if (var.kind == 'unknown') and
(self.arg_steps[var.name] == 0)]
return variables
def get_parameter_variables(self):
return self.get_args_by_name(self.names.parameter)
def get_materials(self, join=False):
materials = self.get_args_by_name(self.names.material)
for mat in materials:
if mat[0] is None:
materials.remove(mat)
if join:
materials = list(set(mat[0] for mat in materials))
return materials
def get_qp_key(self):
"""
        Return a key that uniquely identifies the term quadrature points.
"""
return (self.region.name, self.integral.name)
def get_physical_qps(self):
"""
Get physical quadrature points corresponding to the term region
and integral.
"""
from sfepy.discrete.common.mappings import get_physical_qps, PhysicalQPs
if self.integration == 'point':
phys_qps = PhysicalQPs(self.region.igs)
elif self.integration == 'plate':
phys_qps = get_physical_qps(self.region, self.integral,
map_kind='v')
else:
phys_qps = get_physical_qps(self.region, self.integral)
return phys_qps
def get_mapping(self, variable, get_saved=False, return_key=False):
"""
Get the reference mapping from a variable.
Notes
-----
This is a convenience wrapper of Field.get_mapping() that
initializes the arguments using the term data.
"""
integration = self.geometry_types[variable.name]
is_trace = self.arg_traces[variable.name]
if is_trace:
region, ig_map, ig_map_i = self.region.get_mirror_region()
ig = ig_map_i[self.char_fun.ig]
else:
region = self.region
ig = self.char_fun.ig
out = variable.field.get_mapping(ig, region,
self.integral, integration,
get_saved=get_saved,
return_key=return_key)
return out
def get_data_shape(self, variable):
"""
Get data shape information from variable.
Notes
-----
This is a convenience wrapper of FieldVariable.get_data_shape() that
initializes the arguments using the term data.
"""
integration = self.geometry_types[variable.name]
is_trace = self.arg_traces[variable.name]
if is_trace:
region, ig_map, ig_map_i = self.region.get_mirror_region()
ig = ig_map_i[self.char_fun.ig]
else:
region = self.region
ig = self.char_fun.ig
out = variable.get_data_shape(ig, self.integral,
integration, region.name)
return out
def get(self, variable, quantity_name, bf=None, integration=None,
step=None, time_derivative=None):
"""
Get the named quantity related to the variable.
Notes
-----
This is a convenience wrapper of Variable.evaluate() that
initializes the arguments using the term data.
"""
name = variable.name
step = get_default(step, self.arg_steps[name])
time_derivative = get_default(time_derivative,
self.arg_derivatives[name])
integration = get_default(integration, self.geometry_types[name])
data = variable.evaluate(self.char_fun.ig, mode=quantity_name,
region=self.region, integral=self.integral,
integration=integration,
step=step, time_derivative=time_derivative,
is_trace=self.arg_traces[name], bf=bf)
return data
def check_shapes(self, *args, **kwargs):
"""
Default implementation of function to check term argument shapes
at run-time.
"""
pass
def standalone_setup(self):
from sfepy.discrete import create_adof_conns, Variables
conn_info = {'aux' : self.get_conn_info()}
adcs = create_adof_conns(conn_info, None)
variables = Variables(self.get_variables())
variables.set_adof_conns(adcs)
materials = self.get_materials(join=True)
for mat in materials:
mat.time_update(None, [Struct(terms=[self])])
def call_get_fargs(self, args, kwargs):
try:
fargs = self.get_fargs(*args, **kwargs)
except RuntimeError:
terms.errclear()
raise ValueError
return fargs
def call_function(self, out, fargs):
try:
status = self.function(out, *fargs)
except RuntimeError:
terms.errclear()
raise ValueError
if status:
terms.errclear()
raise ValueError('term evaluation failed! (%s)' % self.name)
return status
def eval_real(self, shape, fargs, mode='eval', term_mode=None,
diff_var=None, **kwargs):
out = nm.empty(shape, dtype=nm.float64)
if mode == 'eval':
status = self.call_function(out, fargs)
# Sum over elements but not over components.
out1 = nm.sum(out, 0).squeeze()
return out1, status
else:
status = self.call_function(out, fargs)
return out, status
def eval_complex(self, shape, fargs, mode='eval', term_mode=None,
diff_var=None, **kwargs):
rout = nm.empty(shape, dtype=nm.float64)
fargsd = split_complex_args(fargs)
# Assuming linear forms. Then the matrix is the
# same both for real and imaginary part.
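        # The recombination below is the complex product rule: the real part
        # is rout - iout, the imaginary part is riout + irout.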
rstatus = self.call_function(rout, fargsd['r'])
if (diff_var is None) and len(fargsd) >= 2:
iout = nm.empty(shape, dtype=nm.float64)
istatus = self.call_function(iout, fargsd['i'])
if mode == 'eval' and len(fargsd) >= 4:
irout = nm.empty(shape, dtype=nm.float64)
irstatus = self.call_function(irout, fargsd['ir'])
riout = nm.empty(shape, dtype=nm.float64)
ristatus = self.call_function(riout, fargsd['ri'])
out = (rout - iout) + (riout + irout) * 1j
status = rstatus or istatus or ristatus or irstatus
else:
out = rout + 1j * iout
status = rstatus or istatus
else:
out, status = rout, rstatus
if mode == 'eval':
out1 = nm.sum(out, 0).squeeze()
return out1, status
else:
return out, status
def evaluate(self, mode='eval', diff_var=None,
standalone=True, ret_status=False, **kwargs):
"""
Evaluate the term.
Parameters
----------
mode : 'eval' (default), or 'weak'
The term evaluation mode.
Returns
-------
val : float or array
            In 'eval' mode, the term returns a single value (the
            integral; it need not be a scalar), while in 'weak'
            mode it returns an array for each element.
status : int, optional
The flag indicating evaluation success (0) or failure
(nonzero). Only provided if `ret_status` is True.
iels : array of ints, optional
The local elements indices in 'weak' mode. Only provided in
non-'eval' modes.
"""
if standalone:
self.standalone_setup()
kwargs = kwargs.copy()
term_mode = kwargs.pop('term_mode', None)
if mode == 'eval':
val = 0.0
status = 0
for ig in self.iter_groups():
args = self.get_args(**kwargs)
self.check_shapes(*args)
_args = tuple(args) + (mode, term_mode, diff_var)
fargs = self.call_get_fargs(_args, kwargs)
shape, dtype = self.get_eval_shape(*_args, **kwargs)
if dtype == nm.float64:
_v, stat = self.eval_real(shape, fargs, mode, term_mode,
**kwargs)
elif dtype == nm.complex128:
_v, stat = self.eval_complex(shape, fargs, mode, term_mode,
**kwargs)
else:
raise ValueError('unsupported term dtype! (%s)' % dtype)
val += _v
status += stat
val *= self.sign
elif mode in ('el_avg', 'el', 'qp'):
vals = None
iels = nm.empty((0, 2), dtype=nm.int32)
status = 0
for ig in self.iter_groups():
args = self.get_args(**kwargs)
self.check_shapes(*args)
_args = tuple(args) + (mode, term_mode, diff_var)
fargs = self.call_get_fargs(_args, kwargs)
shape, dtype = self.get_eval_shape(*_args, **kwargs)
if dtype == nm.float64:
val, stat = self.eval_real(shape, fargs, mode,
term_mode, **kwargs)
elif dtype == nm.complex128:
val, stat = self.eval_complex(shape, fargs, mode,
term_mode, **kwargs)
if vals is None:
vals = val
else:
vals = nm.r_[vals, val]
_iels = self.get_assembling_cells(val.shape)
aux = nm.c_[nm.repeat(ig, _iels.shape[0])[:, None],
_iels[:, None]]
iels = nm.r_[iels, aux]
status += stat
vals *= self.sign
elif mode == 'weak':
vals = []
iels = []
status = 0
varr = self.get_virtual_variable()
if diff_var is not None:
varc = self.get_variables(as_list=False)[diff_var]
for ig in self.iter_groups():
args = self.get_args(**kwargs)
self.check_shapes(*args)
_args = tuple(args) + (mode, term_mode, diff_var)
fargs = self.call_get_fargs(_args, kwargs)
n_elr, n_qpr, dim, n_enr, n_cr = self.get_data_shape(varr)
n_row = n_cr * n_enr
if diff_var is None:
shape = (n_elr, 1, n_row, 1)
else:
n_elc, n_qpc, dim, n_enc, n_cc = self.get_data_shape(varc)
n_col = n_cc * n_enc
shape = (n_elr, 1, n_row, n_col)
if varr.dtype == nm.float64:
val, stat = self.eval_real(shape, fargs, mode, term_mode,
diff_var, **kwargs)
elif varr.dtype == nm.complex128:
val, stat = self.eval_complex(shape, fargs, mode, term_mode,
diff_var, **kwargs)
else:
raise ValueError('unsupported term dtype! (%s)'
% varr.dtype)
vals.append(self.sign * val)
iels.append((ig, self.get_assembling_cells(val.shape)))
status += stat
# Setup return value.
if mode == 'eval':
out = (val,)
else:
out = (vals, iels)
if goptions['check_term_finiteness']:
assert_(nm.isfinite(out[0]).all(),
msg='%+.2e * %s.%d.%s(%s) term values not finite!'
% (self.sign, self.name, self.integral.order,
self.region.name, self.arg_str))
if ret_status:
out = out + (status,)
if len(out) == 1:
out = out[0]
return out
def assemble_to(self, asm_obj, val, iels, mode='vector', diff_var=None):
import sfepy.discrete.fem.extmods.assemble as asm
vvar = self.get_virtual_variable()
dc_type = self.get_dof_conn_type()
if mode == 'vector':
if asm_obj.dtype == nm.float64:
assemble = asm.assemble_vector
else:
assert_(asm_obj.dtype == nm.complex128)
assemble = asm.assemble_vector_complex
for ii in range(len(val)):
if not(val[ii].dtype == nm.complex128):
val[ii] = nm.complex128(val[ii])
for ii, (ig, _iels) in enumerate(iels):
vec_in_els = val[ii]
dc = vvar.get_dof_conn(dc_type, ig)
assert_(vec_in_els.shape[2] == dc.shape[1])
assemble(asm_obj, vec_in_els, _iels, 1.0, dc)
elif mode == 'matrix':
if asm_obj.dtype == nm.float64:
assemble = asm.assemble_matrix
else:
assert_(asm_obj.dtype == nm.complex128)
assemble = asm.assemble_matrix_complex
svar = diff_var
tmd = (asm_obj.data, asm_obj.indptr, asm_obj.indices)
for ii, (ig, _iels) in enumerate(iels):
mtx_in_els = val[ii]
if ((asm_obj.dtype == nm.complex128)
and (mtx_in_els.dtype == nm.float64)):
mtx_in_els = mtx_in_els.astype(nm.complex128)
rdc = vvar.get_dof_conn(dc_type, ig)
is_trace = self.arg_traces[svar.name]
cdc = svar.get_dof_conn(dc_type, ig, is_trace=is_trace)
assert_(mtx_in_els.shape[2:] == (rdc.shape[1], cdc.shape[1]))
sign = 1.0
if self.arg_derivatives[svar.name]:
if not self.is_quasistatic or (self.step > 0):
sign *= 1.0 / self.dt
else:
sign = 0.0
assemble(tmd[0], tmd[1], tmd[2], mtx_in_els,
_iels, sign, rdc, cdc)
else:
raise ValueError('unknown assembling mode! (%s)' % mode)
|
[
"sfepy.base.base.Container.append",
"sfepy.base.base.Struct",
"sfepy.terms.extmods.terms.append",
"sfepy.terms.term_table.keys",
"sfepy.base.base.get_default",
"sfepy.discrete.Integrals",
"sfepy.terms.extmods.terms.errclear",
"sfepy.base.base.Container.insert",
"sfepy.linalg.split_range",
"sfepy.discrete.common.mappings.get_physical_qps",
"sfepy.base.compat.in1d",
"sfepy.discrete.create_adof_conns",
"sfepy.discrete.common.mappings.PhysicalQPs",
"sfepy.base.base.assert_",
"sfepy.base.base.as_float_or_complex",
"sfepy.base.base.Container.__init__",
"sfepy.base.base.Struct.get"
] |
[((353, 391), 're.compile', 're.compile', (['"""^([^\\\\(\\\\}]*)\\\\((.*)\\\\)$"""'], {}), "('^([^\\\\(\\\\}]*)\\\\((.*)\\\\)$')\n", (363, 391), False, 'import re\n'), ((411, 434), 're.compile', 're.compile', (['"""^virtual$"""'], {}), "('^virtual$')\n", (421, 434), False, 'import re\n'), ((456, 494), 're.compile', 're.compile', (['"""^state(_[_a-zA-Z0-9]+)?$"""'], {}), "('^state(_[_a-zA-Z0-9]+)?$')\n", (466, 494), False, 'import re\n'), ((520, 562), 're.compile', 're.compile', (['"""^parameter(_[_a-zA-Z0-9]+)?$"""'], {}), "('^parameter(_[_a-zA-Z0-9]+)?$')\n", (530, 562), False, 'import re\n'), ((587, 628), 're.compile', 're.compile', (['"""^material(_[_a-zA-Z0-9]+)?$"""'], {}), "('^material(_[_a-zA-Z0-9]+)?$')\n", (597, 628), False, 'import re\n'), ((657, 702), 're.compile', 're.compile', (['"""^opt_material(_[_a-zA-Z0-9]+)?$"""'], {}), "('^opt_material(_[_a-zA-Z0-9]+)?$')\n", (667, 702), False, 'import re\n'), ((732, 757), 're.compile', 're.compile', (['"""(.+)\\\\.(.*)"""'], {}), "('(.+)\\\\.(.*)')\n", (742, 757), False, 'import re\n'), ((4445, 4480), 'sfepy.linalg.split_range', 'split_range', (['total_size', 'chunk_size'], {}), '(total_size, chunk_size)\n', (4456, 4480), False, 'from sfepy.linalg import split_range\n'), ((4490, 4517), 'numpy.array', 'nm.array', (['(0)'], {'dtype': 'nm.int32'}), '(0, dtype=nm.int32)\n', (4498, 4517), True, 'import numpy as nm\n'), ((4986, 5009), 'pyparsing.Word', 'Word', (["('+-' + nums)", 'nums'], {}), "('+-' + nums, nums)\n", (4990, 5009), False, 'from pyparsing import Literal, Word, delimitedList, Group, StringStart, StringEnd, Optional, nums, alphas, alphanums\n'), ((5614, 5625), 'pyparsing.StringEnd', 'StringEnd', ([], {}), '()\n', (5623, 5625), False, 'from pyparsing import Literal, Word, delimitedList, Group, StringStart, StringEnd, Optional, nums, alphas, alphanums\n'), ((8696, 8731), 'sfepy.base.base.Container.__init__', 'Container.__init__', (['self'], {'objs': 'objs'}), '(self, objs=objs)\n', (8714, 8731), False, 'from sfepy.base.base import as_float_or_complex, get_default, assert_, Container, Struct, basestr, goptions\n'), ((8806, 8837), 'sfepy.base.base.Container.insert', 'Container.insert', (['self', 'ii', 'obj'], {}), '(self, ii, obj)\n', (8822, 8837), False, 'from sfepy.base.base import as_float_or_complex, get_default, assert_, Container, Struct, basestr, goptions\n'), ((8907, 8934), 'sfepy.base.base.Container.append', 'Container.append', (['self', 'obj'], {}), '(self, obj)\n', (8923, 8934), False, 'from sfepy.base.base import as_float_or_complex, get_default, assert_, Container, Struct, basestr, goptions\n'), ((13742, 13776), 'sfepy.base.base.Struct.get', 'Struct.get', (['self', '"""function"""', 'None'], {}), "(self, 'function', None)\n", (13752, 13776), False, 'from sfepy.base.base import as_float_or_complex, get_default, assert_, Container, Struct, basestr, goptions\n'), ((17097, 17196), 'sfepy.base.base.Struct', 'Struct', ([], {'name': '"""arg_names"""', 'material': '[]', 'variable': '[]', 'user': '[]', 'state': '[]', 'virtual': '[]', 'parameter': '[]'}), "(name='arg_names', material=[], variable=[], user=[], state=[],\n virtual=[], parameter=[])\n", (17103, 17196), False, 'from sfepy.base.base import as_float_or_complex, get_default, assert_, Container, Struct, basestr, goptions\n'), ((21935, 21961), 'copy.copy', 'copy', (['self.names.parameter'], {}), '(self.names.parameter)\n', (21939, 21961), False, 'from copy import copy\n'), ((29985, 30073), 'sfepy.base.base.Struct', 'Struct', ([], {'name': '"""dof_conn_info"""', 'type': 'self.dof_conn_type', 'region_name': 'self.region.name'}), "(name='dof_conn_info', type=self.dof_conn_type, region_name=self.\n region.name)\n", (29991, 30073), False, 'from sfepy.base.base import as_float_or_complex, get_default, assert_, Container, Struct, basestr, goptions\n'), ((30383, 30418), 'numpy.arange', 'nm.arange', (['shape[0]'], {'dtype': 'nm.int32'}), '(shape[0], dtype=nm.int32)\n', (30392, 30418), True, 'import numpy as nm\n'), ((35945, 35984), 'sfepy.base.base.get_default', 'get_default', (['step', 'self.arg_steps[name]'], {}), '(step, self.arg_steps[name])\n', (35956, 35984), False, 'from sfepy.base.base import as_float_or_complex, get_default, assert_, Container, Struct, basestr, goptions\n'), ((36011, 36067), 'sfepy.base.base.get_default', 'get_default', (['time_derivative', 'self.arg_derivatives[name]'], {}), '(time_derivative, self.arg_derivatives[name])\n', (36022, 36067), False, 'from sfepy.base.base import as_float_or_complex, get_default, assert_, Container, Struct, basestr, goptions\n'), ((36128, 36179), 'sfepy.base.base.get_default', 'get_default', (['integration', 'self.geometry_types[name]'], {}), '(integration, self.geometry_types[name])\n', (36139, 36179), False, 'from sfepy.base.base import as_float_or_complex, get_default, assert_, Container, Struct, basestr, goptions\n'), ((36897, 36931), 'sfepy.discrete.create_adof_conns', 'create_adof_conns', (['conn_info', 'None'], {}), '(conn_info, None)\n', (36914, 36931), False, 'from sfepy.discrete import create_adof_conns, Variables\n'), ((37846, 37879), 'numpy.empty', 'nm.empty', (['shape'], {'dtype': 'nm.float64'}), '(shape, dtype=nm.float64)\n', (37854, 37879), True, 'import numpy as nm\n'), ((38325, 38358), 'numpy.empty', 'nm.empty', (['shape'], {'dtype': 'nm.float64'}), '(shape, dtype=nm.float64)\n', (38333, 38358), True, 'import numpy as nm\n'), ((4557, 4588), 'numpy.arange', 'nm.arange', (['size'], {'dtype': 'nm.int32'}), '(size, dtype=nm.int32)\n', (4566, 4588), True, 'import numpy as nm\n'), ((4679, 4707), 'numpy.zeros', 'nm.zeros', (['shape'], {'dtype': 'dtype'}), '(shape, dtype=dtype)\n', (4687, 4707), True, 'import numpy as nm\n'), ((4740, 4768), 'numpy.empty', 'nm.empty', (['shape'], {'dtype': 'dtype'}), '(shape, dtype=dtype)\n', (4748, 4768), True, 'import numpy as nm\n'), ((5226, 5256), 'pyparsing.Word', 'Word', (['alphas', "(alphanums + '._')"], {}), "(alphas, alphanums + '._')\n", (5230, 5256), False, 'from pyparsing import Literal, Word, delimitedList, Group, StringStart, StringEnd, Optional, nums, alphas, alphanums\n'), ((5368, 5381), 'pyparsing.Literal', 'Literal', (['"""dt"""'], {}), "('dt')\n", (5375, 5381), False, 'from pyparsing import Literal, Word, delimitedList, Group, StringStart, StringEnd, Optional, nums, alphas, alphanums\n'), ((5565, 5578), 'pyparsing.StringStart', 'StringStart', ([], {}), '()\n', (5576, 5578), False, 'from pyparsing import Literal, Word, delimitedList, Group, StringStart, StringEnd, Optional, nums, alphas, alphanums\n'), ((5581, 5611), 'pyparsing.delimitedList', 'delimitedList', (['generalized_var'], {}), '(generalized_var)\n', (5594, 5611), False, 'from pyparsing import Literal, Word, delimitedList, Group, StringStart, StringEnd, Optional, nums, alphas, alphanums\n'), ((8611, 8629), 'sfepy.terms.extmods.terms.append', 'terms.append', (['term'], {}), '(term)\n', (8623, 8629), False, 'from sfepy.terms.extmods import terms\n'), ((12011, 12022), 'sfepy.discrete.Integrals', 'Integrals', ([], {}), '()\n', (12020, 12022), False, 'from sfepy.discrete import Integrals\n'), ((12669, 12695), 'sfepy.base.base.as_float_or_complex', 'as_float_or_complex', (['other'], {}), '(other)\n', (12688, 12695), False, 'from sfepy.base.base import as_float_or_complex, get_default, assert_, Container, Struct, basestr, goptions\n'), ((18685, 18715), 'sfepy.base.base.Struct.get', 'Struct.get', (['self', '"""mode"""', 'None'], {}), "(self, 'mode', None)\n", (18695, 18715), False, 'from sfepy.base.base import as_float_or_complex, get_default, assert_, Container, Struct, basestr, goptions\n'), ((33615, 33643), 'sfepy.discrete.common.mappings.PhysicalQPs', 'PhysicalQPs', (['self.region.igs'], {}), '(self.region.igs)\n', (33626, 33643), False, 'from sfepy.discrete.common.mappings import get_physical_qps, PhysicalQPs\n'), ((37607, 37623), 'sfepy.terms.extmods.terms.errclear', 'terms.errclear', ([], {}), '()\n', (37621, 37623), False, 'from sfepy.terms.extmods import terms\n'), ((38636, 38669), 'numpy.empty', 'nm.empty', (['shape'], {'dtype': 'nm.float64'}), '(shape, dtype=nm.float64)\n', (38644, 38669), True, 'import numpy as nm\n'), ((33710, 33768), 'sfepy.discrete.common.mappings.get_physical_qps', 'get_physical_qps', (['self.region', 'self.integral'], {'map_kind': '"""v"""'}), "(self.region, self.integral, map_kind='v')\n", (33726, 33768), False, 'from sfepy.discrete.common.mappings import get_physical_qps, PhysicalQPs\n'), ((33847, 33891), 'sfepy.discrete.common.mappings.get_physical_qps', 'get_physical_qps', (['self.region', 'self.integral'], {}), '(self.region, self.integral)\n', (33863, 33891), False, 'from sfepy.discrete.common.mappings import get_physical_qps, PhysicalQPs\n'), ((37316, 37332), 'sfepy.terms.extmods.terms.errclear', 'terms.errclear', ([], {}), '()\n', (37330, 37332), False, 'from sfepy.terms.extmods import terms\n'), ((37529, 37545), 'sfepy.terms.extmods.terms.errclear', 'terms.errclear', ([], {}), '()\n', (37543, 37545), False, 'from sfepy.terms.extmods import terms\n'), ((38807, 38840), 'numpy.empty', 'nm.empty', (['shape'], {'dtype': 'nm.float64'}), '(shape, dtype=nm.float64)\n', (38815, 38840), True, 'import numpy as nm\n'), ((38932, 38965), 'numpy.empty', 'nm.empty', (['shape'], {'dtype': 'nm.float64'}), '(shape, dtype=nm.float64)\n', (38940, 38965), True, 'import numpy as nm\n'), ((41485, 41517), 'numpy.empty', 'nm.empty', (['(0, 2)'], {'dtype': 'nm.int32'}), '((0, 2), dtype=nm.int32)\n', (41493, 41517), True, 'import numpy as nm\n'), ((45150, 45189), 'sfepy.base.base.assert_', 'assert_', (['(asm_obj.dtype == nm.complex128)'], {}), '(asm_obj.dtype == nm.complex128)\n', (45157, 45189), False, 'from sfepy.base.base import as_float_or_complex, get_default, assert_, Container, Struct, basestr, goptions\n'), ((45563, 45606), 'sfepy.base.base.assert_', 'assert_', (['(vec_in_els.shape[2] == dc.shape[1])'], {}), '(vec_in_els.shape[2] == dc.shape[1])\n', (45570, 45606), False, 'from sfepy.base.base import as_float_or_complex, get_default, assert_, Container, Struct, basestr, goptions\n'), ((5292, 5304), 'pyparsing.Literal', 'Literal', (['"""d"""'], {}), "('d')\n", (5299, 5304), False, 'from pyparsing import Literal, Word, delimitedList, Group, StringStart, StringEnd, Optional, nums, alphas, alphanums\n'), ((5402, 5415), 'pyparsing.Literal', 'Literal', (['"""tr"""'], {}), "('tr')\n", (5409, 5415), False, 'from pyparsing import Literal, Word, delimitedList, Group, StringStart, StringEnd, Optional, nums, alphas, alphanums\n'), ((5475, 5487), 'pyparsing.Literal', 'Literal', (['""")"""'], {}), "(')')\n", (5482, 5487), False, 'from pyparsing import Literal, Word, delimitedList, Group, StringStart, StringEnd, Optional, nums, alphas, alphanums\n'), ((20804, 20853), 'sfepy.base.compat.in1d', 'in1d', (['self.region.vertices', 'field.region.vertices'], {}), '(self.region.vertices, field.region.vertices)\n', (20808, 20853), False, 'from sfepy.base.compat import in1d\n'), ((37141, 37161), 'sfepy.base.base.Struct', 'Struct', ([], {'terms': '[self]'}), '(terms=[self])\n', (37147, 37161), False, 'from sfepy.base.base import as_float_or_complex, get_default, assert_, Container, Struct, basestr, goptions\n'), ((38036, 38050), 'numpy.sum', 'nm.sum', (['out', '(0)'], {}), '(out, 0)\n', (38042, 38050), True, 'import numpy as nm\n'), ((39365, 39379), 'numpy.sum', 'nm.sum', (['out', '(0)'], {}), '(out, 0)\n', (39371, 39379), True, 'import numpy as nm\n'), ((45828, 45867), 'sfepy.base.base.assert_', 'assert_', (['(asm_obj.dtype == nm.complex128)'], {}), '(asm_obj.dtype == nm.complex128)\n', (45835, 45867), False, 'from sfepy.base.base import as_float_or_complex, get_default, assert_, Container, Struct, basestr, goptions\n'), ((46482, 46543), 'sfepy.base.base.assert_', 'assert_', (['(mtx_in_els.shape[2:] == (rdc.shape[1], cdc.shape[1]))'], {}), '(mtx_in_els.shape[2:] == (rdc.shape[1], cdc.shape[1]))\n', (46489, 46543), False, 'from sfepy.base.base import as_float_or_complex, get_default, assert_, Container, Struct, basestr, goptions\n'), ((5093, 5105), 'pyparsing.Literal', 'Literal', (['"""]"""'], {}), "(']')\n", (5100, 5105), False, 'from pyparsing import Literal, Word, delimitedList, Group, StringStart, StringEnd, Optional, nums, alphas, alphanums\n'), ((5342, 5354), 'pyparsing.Literal', 'Literal', (['"""/"""'], {}), "('/')\n", (5349, 5354), False, 'from pyparsing import Literal, Word, delimitedList, Group, StringStart, StringEnd, Optional, nums, alphas, alphanums\n'), ((11687, 11704), 'sfepy.terms.term_table.keys', 'term_table.keys', ([], {}), '()\n', (11702, 11704), False, 'from sfepy.terms import term_table\n'), ((44421, 44440), 'numpy.isfinite', 'nm.isfinite', (['out[0]'], {}), '(out[0])\n', (44432, 44440), True, 'import numpy as nm\n'), ((45382, 45404), 'numpy.complex128', 'nm.complex128', (['val[ii]'], {}), '(val[ii])\n', (45395, 45404), True, 'import numpy as nm\n'), ((5034, 5046), 'pyparsing.Literal', 'Literal', (['"""["""'], {}), "('[')\n", (5041, 5046), False, 'from pyparsing import Literal, Word, delimitedList, Group, StringStart, StringEnd, Optional, nums, alphas, alphanums\n'), ((5418, 5430), 'pyparsing.Literal', 'Literal', (['"""("""'], {}), "('(')\n", (5425, 5430), False, 'from pyparsing import Literal, Word, delimitedList, Group, StringStart, StringEnd, Optional, nums, alphas, alphanums\n'), ((8293, 8310), 'sfepy.terms.term_table.keys', 'term_table.keys', ([], {}), '()\n', (8308, 8310), False, 'from sfepy.terms import term_table\n'), ((42452, 42481), 'numpy.repeat', 'nm.repeat', (['ig', '_iels.shape[0]'], {}), '(ig, _iels.shape[0])\n', (42461, 42481), True, 'import numpy as nm\n')]
|
from sfepy.base.testing import TestCommon, assert_, debug
class Test(TestCommon):
@staticmethod
def from_conf(conf, options):
return Test(conf=conf, options=options)
def test_tensors(self):
import numpy as nm
from sfepy.mechanics.matcoefs import ElasticConstants
ok = True
names = ['bulk', 'lam', 'mu', 'young', 'poisson', 'p_wave']
ec = ElasticConstants(lam=1.0, mu=1.5)
vals = ec.get(names)
self.report('using values:', vals)
for i1 in range(len(names)):
for i2 in range(i1+1, len(names)):
kwargs = {names[i1] : vals[i1], names[i2] : vals[i2]}
try:
ec.init(**kwargs)
except:
_ok = False
else:
_ok = True
ec_vals = ec.get(names)
_ok = _ok and nm.allclose(ec_vals, vals)
self.report(names[i1], names[i2], '->', _ok)
if not _ok:
self.report('correct:', vals)
self.report(' got:', ec_vals)
ok = ok and _ok
return ok
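# For reference, the isotropic relations exercised above include (standard
# Lamé-parameter identities, stated here for orientation only):
#   young   E  = mu * (3.0 * lam + 2.0 * mu) / (lam + mu)
#   poisson nu = lam / (2.0 * (lam + mu))
#   bulk    K  = lam + 2.0 * mu / 3.0
#   p_wave  M  = lam + 2.0 * mu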
|
[
"sfepy.mechanics.matcoefs.ElasticConstants"
] |
[((404, 437), 'sfepy.mechanics.matcoefs.ElasticConstants', 'ElasticConstants', ([], {'lam': '(1.0)', 'mu': '(1.5)'}), '(lam=1.0, mu=1.5)\n', (420, 437), False, 'from sfepy.mechanics.matcoefs import ElasticConstants\n'), ((908, 934), 'numpy.allclose', 'nm.allclose', (['ec_vals', 'vals'], {}), '(ec_vals, vals)\n', (919, 934), True, 'import numpy as nm\n')]
|
# 28.05.2009, c
from sfepy import data_dir
filename_mesh = data_dir + '/meshes/3d/elbow.mesh'
fields = {
'scalar' : ('real', 'scalar', 'Omega', 1),
'vector' : ('real', 'vector', 'Omega', 1),
}
integrals = {
'i1' : ('v', 'gauss_o2_d3'),
'i2' : ('s', 'gauss_o2_d2'),
}
regions = {
'Omega' : ('all', {}),
'Gamma' : ('nodes of surface', {'can_cells' : True}),
}
expressions = {
'volume_p' : 'd_volume.i1.Omega( p )',
'volume_u' : 'd_volume.i1.Omega( u )',
'surface_p' : 'd_volume_surface.i2.Gamma( p )',
'surface_u' : 'd_volume_surface.i2.Gamma( u )',
}
fe = {
'chunk_size' : 1000
}
import numpy as nm
from sfepy.base.testing import TestCommon
from sfepy.base.base import debug, pause
##
# 10.07.2007, c
class Test( TestCommon ):
tests = ['test_volume']
def from_conf( conf, options ):
from sfepy.fem import ProblemDefinition
problem = ProblemDefinition.from_conf(conf, init_equations=False)
test = Test( problem = problem,
conf = conf, options = options )
return test
from_conf = staticmethod( from_conf )
def test_volume( self ):
from sfepy.fem import FieldVariable
ok = True
field_map = {'u' : 'vector', 'p' : 'scalar'}
volumes = {}
avg = 0.0
for key, term in expressions.items():
var_name = key[-1]
field = self.problem.fields[field_map[var_name]]
var = FieldVariable(var_name, 'parameter', field, 1,
primary_var_name='(set-to-None)')
val = self.problem.evaluate(term, **{var_name : var})
volumes[key] = val
avg += val
avg /= len(volumes)
for key, val in volumes.items():
err = nm.abs( avg - val ) / nm.abs( avg )
_ok = err < 1e-12
self.report('"'"%s"'" - volume: %e' % (key, val))
self.report('"'"%s"'" - relative volume difference: %e -> %s'\
% (key, err, _ok) )
ok = ok and _ok
return ok
|
[
"sfepy.fem.FieldVariable",
"sfepy.fem.ProblemDefinition.from_conf"
] |
[((910, 965), 'sfepy.fem.ProblemDefinition.from_conf', 'ProblemDefinition.from_conf', (['conf'], {'init_equations': '(False)'}), '(conf, init_equations=False)\n', (937, 965), False, 'from sfepy.fem import ProblemDefinition\n'), ((1465, 1550), 'sfepy.fem.FieldVariable', 'FieldVariable', (['var_name', '"""parameter"""', 'field', '(1)'], {'primary_var_name': '"""(set-to-None)"""'}), "(var_name, 'parameter', field, 1, primary_var_name='(set-to-None)'\n )\n", (1478, 1550), False, 'from sfepy.fem import FieldVariable\n'), ((1789, 1806), 'numpy.abs', 'nm.abs', (['(avg - val)'], {}), '(avg - val)\n', (1795, 1806), True, 'import numpy as nm\n'), ((1811, 1822), 'numpy.abs', 'nm.abs', (['avg'], {}), '(avg)\n', (1817, 1822), True, 'import numpy as nm\n')]
|
from flask import Blueprint
from flask import request
from flask import jsonify
from flask import session
from flask import current_app
from flask import render_template
from app.utils import render_markdown
from sqlmodel import Session as SQLSession
from sqlmodel import select
from app.models.server import Catagory, Organization
from app.utils.decorators import admin_required
bp = Blueprint("admin", __name__)
@bp.route("/")
@admin_required
def index():
return render_markdown(
"page.html",
file="admin.md",
session=session,
)
@bp.route("/organizations", methods=["GET"])
def get_organizations():
with SQLSession(current_app.engine) as s:
orgs = select(Organization)
results = s.exec(orgs).all()
return render_template(
"admin/organizations.html", session=session, organizations=results
)
@bp.route("/server/organization", methods=["POST", "DELETE"])
def post_server():
if request.method == "POST":
data = request.form
org_id = data.get("id").strip()
title = data.get("title").strip()
return jsonify({}), 200
@bp.route("/catagory", methods=["POST"])
@admin_required
def get_catagory():
data = request.form
cat_id = data.get("id")
title = data.get("title")
color = data.get("color")
with SQLSession(current_app.engine) as s:
if cat_id:
_catagory = s.exec(select(Catagory).where(Catagory.id == cat_id)).one()
_catagory.title = data.get("title")
_catagory.meta_ref = data.get("title").lower().replace(" ", "-")
_catagory.color = data.get("color")
s.add(_catagory)
s.commit()
else:
_catagory = Catagory(
title=data.get("title"),
meta_ref=data.get("title").lower().replace(" ", "-"),
color=data.get("color"),
)
s.add(_catagory)
s.commit()
return jsonify({"result": "Operate successfully"})
@bp.route("/catagories", methods=["GET", "POST"])
def get_post_catagories():
if request.method == "GET":
with SQLSession(current_app.engine) as s:
results = s.exec(select(Catagory)).all()
return render_template(
"admin/catagories.html", sesssion=session, catagories=results
)
else:
data = request.get_json()
|
[
"sqlmodel.select",
"sqlmodel.Session"
] |
[((413, 441), 'flask.Blueprint', 'Blueprint', (['"""admin"""', '__name__'], {}), "('admin', __name__)\n", (422, 441), False, 'from flask import Blueprint\n'), ((499, 561), 'app.utils.render_markdown', 'render_markdown', (['"""page.html"""'], {'file': '"""admin.md"""', 'session': 'session'}), "('page.html', file='admin.md', session=session)\n", (514, 561), False, 'from app.utils import render_markdown\n'), ((2011, 2054), 'flask.jsonify', 'jsonify', (["{'result': 'Operate successfully'}"], {}), "({'result': 'Operate successfully'})\n", (2018, 2054), False, 'from flask import jsonify\n'), ((674, 704), 'sqlmodel.Session', 'SQLSession', (['current_app.engine'], {}), '(current_app.engine)\n', (684, 704), True, 'from sqlmodel import Session as SQLSession\n'), ((726, 746), 'sqlmodel.select', 'select', (['Organization'], {}), '(Organization)\n', (732, 746), False, 'from sqlmodel import select\n'), ((800, 888), 'flask.render_template', 'render_template', (['"""admin/organizations.html"""'], {'session': 'session', 'organizations': 'results'}), "('admin/organizations.html', session=session, organizations=\n results)\n", (815, 888), False, 'from flask import render_template\n'), ((1367, 1397), 'sqlmodel.Session', 'SQLSession', (['current_app.engine'], {}), '(current_app.engine)\n', (1377, 1397), True, 'from sqlmodel import Session as SQLSession\n'), ((2423, 2441), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (2439, 2441), False, 'from flask import request\n'), ((1148, 1159), 'flask.jsonify', 'jsonify', (['{}'], {}), '({})\n', (1155, 1159), False, 'from flask import jsonify\n'), ((2179, 2209), 'sqlmodel.Session', 'SQLSession', (['current_app.engine'], {}), '(current_app.engine)\n', (2189, 2209), True, 'from sqlmodel import Session as SQLSession\n'), ((2289, 2367), 'flask.render_template', 'render_template', (['"""admin/catagories.html"""'], {'sesssion': 'session', 'catagories': 'results'}), "('admin/catagories.html', sesssion=session, catagories=results)\n", (2304, 2367), False, 'from flask import render_template\n'), ((2245, 2261), 'sqlmodel.select', 'select', (['Catagory'], {}), '(Catagory)\n', (2251, 2261), False, 'from sqlmodel import select\n'), ((1454, 1470), 'sqlmodel.select', 'select', (['Catagory'], {}), '(Catagory)\n', (1460, 1470), False, 'from sqlmodel import select\n')]
|
"""add remoteuser
Revision ID: 5c6d07e2a9c1
Revises: <KEY>
Create Date: 2022-02-13 01:54:01.310088
"""
from alembic import op
import sqlalchemy as sa
import sqlmodel
# revision identifiers, used by Alembic.
revision = '5c6d07e2a9c1'
down_revision = '<KEY>'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('remoteuser',
sa.Column('username', sa.VARCHAR(), nullable=True),
sa.Column('id', sa.Integer(), nullable=True),
sa.Column('inbox', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('public_key', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('username')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('remoteuser')
# ### end Alembic commands ###
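# A usage sketch: apply or roll back this revision with the standard Alembic
# CLI (run from the directory containing alembic.ini):
#   alembic upgrade 5c6d07e2a9c1
#   alembic downgrade -1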
|
[
"sqlmodel.sql.sqltypes.AutoString"
] |
[((880, 907), 'alembic.op.drop_table', 'op.drop_table', (['"""remoteuser"""'], {}), "('remoteuser')\n", (893, 907), False, 'from alembic import op\n'), ((683, 712), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""'], {}), "('id')\n", (706, 712), True, 'import sqlalchemy as sa\n'), ((718, 749), 'sqlalchemy.UniqueConstraint', 'sa.UniqueConstraint', (['"""username"""'], {}), "('username')\n", (737, 749), True, 'import sqlalchemy as sa\n'), ((443, 455), 'sqlalchemy.VARCHAR', 'sa.VARCHAR', ([], {}), '()\n', (453, 455), True, 'import sqlalchemy as sa\n'), ((493, 505), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (503, 505), True, 'import sqlalchemy as sa\n'), ((546, 580), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (578, 580), False, 'import sqlmodel\n'), ((627, 661), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (659, 661), False, 'import sqlmodel\n')]
|
#!/usr/bin/env python
"""
First solve the stationary electric conduction problem. Then use its
results to solve the evolutionary heat conduction problem.
Run this example as on a command line::
$ python <path_to_this_file>/thermal_electric.py
"""
from __future__ import absolute_import
import sys
sys.path.append( '.' )
import os
from sfepy import data_dir
filename_mesh = data_dir + '/meshes/2d/special/circle_in_square.mesh'
# Time stepping for the heat conduction problem.
t0 = 0.0
t1 = 0.5
n_step = 11
# Material parameters.
specific_heat = 1.2
##########
cwd = os.path.split(os.path.join(os.getcwd(), __file__))[0]
options = {
'absolute_mesh_path' : True,
'output_dir' : os.path.join(cwd, 'output')
}
regions = {
'Omega' : 'all',
'Omega1' : 'cells of group 1',
'Omega2' : 'cells of group 2',
'Omega2_Surface': ('r.Omega1 *v r.Omega2', 'facet'),
'Left' : ('vertices in (x < %f)' % -0.4999, 'facet'),
'Right' : ('vertices in (x > %f)' % 0.4999, 'facet'),
}
materials = {
'm' : ({
'thermal_conductivity' : 2.0,
'electric_conductivity' : 1.5,
},),
}
# The fields use the same approximation, so a single field could be used
# instead.
fields = {
'temperature': ('real', 1, 'Omega', 1),
'potential' : ('real', 1, 'Omega', 1),
}
variables = {
'T' : ('unknown field', 'temperature', 0, 1),
's' : ('test field', 'temperature', 'T'),
'phi' : ('unknown field', 'potential', 1),
'psi' : ('test field', 'potential', 'phi'),
'phi_known' : ('parameter field', 'potential', '(set-to-None)'),
}
ics = {
'ic' : ('Omega', {'T.0' : 0.0}),
}
ebcs = {
'left' : ('Left', {'T.0' : 0.0, 'phi.0' : 0.0}),
'right' : ('Right', {'T.0' : 2.0, 'phi.0' : 0.0}),
'inside' : ('Omega2_Surface', {'phi.0' : 'set_electric_bc'}),
}
def set_electric_bc(coor):
y = coor[:,1]
ymin, ymax = y.min(), y.max()
val = 2.0 * (((y - ymin) / (ymax - ymin)) - 0.5)
return val
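# Note: set_electric_bc() maps y linearly from [ymin, ymax] onto [-1.0, 1.0].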
functions = {
'set_electric_bc' : (lambda ts, coor, bc, problem, **kwargs:
set_electric_bc(coor),),
}
equations = {
'2' : """%.12e * dw_volume_dot.2.Omega( s, dT/dt )
+ dw_laplace.2.Omega( m.thermal_conductivity, s, T )
= dw_electric_source.2.Omega( m.electric_conductivity,
s, phi_known ) """ % specific_heat,
'1' : """dw_laplace.2.Omega( m.electric_conductivity, psi, phi ) = 0""",
}
solvers = {
'ls' : ('ls.scipy_direct', {}),
'newton' : ('nls.newton', {
'i_max' : 1,
'eps_a' : 1e-10,
'problem' : 'nonlinear',
}),
'ts' : ('ts.simple', {
't0' : t0,
't1' : t1,
'dt' : None,
'n_step' : n_step, # has precedence over dt!
}),
}
def main():
from sfepy.base.base import output
from sfepy.base.conf import ProblemConf, get_standard_keywords
from sfepy.discrete import Problem
output.prefix = 'therel:'
required, other = get_standard_keywords()
conf = ProblemConf.from_file(__file__, required, other)
problem = Problem.from_conf(conf, init_equations=False)
# Setup output directory according to options above.
problem.setup_default_output()
# First solve the stationary electric conduction problem.
problem.set_equations({'eq' : conf.equations['1']})
problem.time_update()
state_el = problem.solve()
problem.save_state(problem.get_output_name(suffix = 'el'), state_el)
# Then solve the evolutionary heat conduction problem, using state_el.
problem.set_equations({'eq' : conf.equations['2']})
phi_var = problem.get_variables()['phi_known']
phi_var.set_data(state_el())
time_solver = problem.get_time_solver()
time_solver.init_time()
for _ in time_solver():
pass
output('results saved in %s' % problem.get_output_name(suffix = '*'))
if __name__ == '__main__':
main()
|
[
"sfepy.discrete.Problem.from_conf",
"sfepy.base.conf.ProblemConf.from_file",
"sfepy.base.conf.get_standard_keywords"
] |
[((303, 323), 'sys.path.append', 'sys.path.append', (['"""."""'], {}), "('.')\n", (318, 323), False, 'import sys\n'), ((697, 724), 'os.path.join', 'os.path.join', (['cwd', '"""output"""'], {}), "(cwd, 'output')\n", (709, 724), False, 'import os\n'), ((3013, 3036), 'sfepy.base.conf.get_standard_keywords', 'get_standard_keywords', ([], {}), '()\n', (3034, 3036), False, 'from sfepy.base.conf import ProblemConf, get_standard_keywords\n'), ((3048, 3096), 'sfepy.base.conf.ProblemConf.from_file', 'ProblemConf.from_file', (['__file__', 'required', 'other'], {}), '(__file__, required, other)\n', (3069, 3096), False, 'from sfepy.base.conf import ProblemConf, get_standard_keywords\n'), ((3112, 3157), 'sfepy.discrete.Problem.from_conf', 'Problem.from_conf', (['conf'], {'init_equations': '(False)'}), '(conf, init_equations=False)\n', (3129, 3157), False, 'from sfepy.discrete import Problem\n'), ((605, 616), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (614, 616), False, 'import os\n')]
|
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
import os
from typing import List
import megengine.distributed as dist
from basecore.config import ConfigDict
from basecore.engine import BaseHook
from basecore.utils import str_timestamp
from basecls.utils import registers
from .hooks import (
CheckpointHook,
EvalHook,
LoggerHook,
LRSchedulerHook,
PreciseBNHook,
ResumeHook,
TensorboardHook,
)
__all__ = ["DefaultHooks"]
@registers.hooks.register()
class DefaultHooks:
"""The default hooks factory.
It combines :py:class:`~basecls.engine.LRSchedulerHook` ->
:py:class:`~basecls.engine.PreciseBNHook` -> :py:class:`~basecls.engine.ResumeHook` ->
:py:class:`~basecls.engine.TensorboardHook` -> :py:class:`~basecls.engine.LoggerHook` ->
:py:class:`~basecls.engine.CheckpointHook` -> :py:class:`~basecls.engine.EvalHook`.
"""
@classmethod
def build(cls, cfg: ConfigDict) -> List[BaseHook]:
"""Build function with a simple strategy.
Args:
cfg: config for setting hooks.
Returns:
A hook list.
"""
output_dir = cfg.output_dir
hook_list = [
LRSchedulerHook(),
PreciseBNHook(cfg.bn.precise_every_n_epoch),
ResumeHook(output_dir, cfg.resume),
]
if dist.get_rank() == 0:
            # Since LoggerHook will reset values, TensorboardHook should be added before LoggerHook
hook_list.append(
TensorboardHook(
os.path.join(output_dir, "tensorboard", str_timestamp()), cfg.tb_every_n_iter
)
)
hook_list.append(LoggerHook(cfg.log_every_n_iter))
hook_list.append(CheckpointHook(output_dir, cfg.save_every_n_epoch))
# Hooks better work after CheckpointHook
hook_list.append(EvalHook(output_dir, cfg.eval_every_n_epoch))
return hook_list
|
[
"megengine.distributed.get_rank"
] |
[((490, 516), 'basecls.utils.registers.hooks.register', 'registers.hooks.register', ([], {}), '()\n', (514, 516), False, 'from basecls.utils import registers\n'), ((1367, 1382), 'megengine.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (1380, 1382), True, 'import megengine.distributed as dist\n'), ((1611, 1626), 'basecore.utils.str_timestamp', 'str_timestamp', ([], {}), '()\n', (1624, 1626), False, 'from basecore.utils import str_timestamp\n')]
|
from sqlmodel import SQLModel, Field
import uuid as uuid_pkg
from typing import Optional
class FilesBase(SQLModel):
name: str
class Files(FilesBase, table=True):
id: int = Field(default=None, primary_key=True)
uuid: uuid_pkg.UUID = Field(
default_factory=uuid_pkg.uuid4,
index=True,
nullable=False,
)
count_download: int = Field(default=0)
class FilesCreate(FilesBase):
pass
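# A minimal usage sketch (assumes an in-memory SQLite engine; the names below
# are illustrative only):
#
#   from sqlmodel import Session, create_engine
#
#   engine = create_engine("sqlite://")
#   SQLModel.metadata.create_all(engine)
#   with Session(engine) as session:
#       f = Files(name="report.pdf")  # uuid and count_download get defaults
#       session.add(f)
#       session.commit()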
|
[
"sqlmodel.Field"
] |
[((184, 221), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (189, 221), False, 'from sqlmodel import SQLModel, Field\n'), ((248, 313), 'sqlmodel.Field', 'Field', ([], {'default_factory': 'uuid_pkg.uuid4', 'index': '(True)', 'nullable': '(False)'}), '(default_factory=uuid_pkg.uuid4, index=True, nullable=False)\n', (253, 313), False, 'from sqlmodel import SQLModel, Field\n'), ((371, 387), 'sqlmodel.Field', 'Field', ([], {'default': '(0)'}), '(default=0)\n', (376, 387), False, 'from sqlmodel import SQLModel, Field\n')]
|
from datetime import date
from typing import List, Optional
from api.ecoindex.models.responses import ApiEcoindex
from api.models.enums import Version
from sqlalchemy.ext.asyncio.session import AsyncSession
from sqlmodel import select
from db.helper import date_filter
async def get_host_list_db(
session: AsyncSession,
version: Optional[Version] = Version.v1,
q: Optional[str] = None,
date_from: Optional[date] = None,
date_to: Optional[date] = None,
) -> List[str]:
statement = select(ApiEcoindex.host).where(
ApiEcoindex.version == version.get_version_number()
)
if q:
statement = statement.filter(ApiEcoindex.host.like(f"%{q}%"))
statement = date_filter(statement=statement, date_from=date_from, date_to=date_to)
statement = statement.group_by(ApiEcoindex.host).order_by(ApiEcoindex.host)
hosts = await session.execute(statement)
return hosts.scalars().all()
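# Roughly the SQL this builds (illustrative; actual table and column names
# depend on the ApiEcoindex model):
#   SELECT host FROM apiecoindex WHERE version = :version
#     [AND host LIKE :q] [AND date BETWEEN :date_from AND :date_to]
#   GROUP BY host ORDER BY host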
|
[
"sqlmodel.select"
] |
[((704, 774), 'db.helper.date_filter', 'date_filter', ([], {'statement': 'statement', 'date_from': 'date_from', 'date_to': 'date_to'}), '(statement=statement, date_from=date_from, date_to=date_to)\n', (715, 774), False, 'from db.helper import date_filter\n'), ((508, 532), 'sqlmodel.select', 'select', (['ApiEcoindex.host'], {}), '(ApiEcoindex.host)\n', (514, 532), False, 'from sqlmodel import select\n'), ((654, 685), 'api.ecoindex.models.responses.ApiEcoindex.host.like', 'ApiEcoindex.host.like', (['f"""%{q}%"""'], {}), "(f'%{q}%')\n", (675, 685), False, 'from api.ecoindex.models.responses import ApiEcoindex\n')]
|
import json
import requests
import base64, hashlib, hmac, time
import email.utils
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import padding
from urllib.parse import urlparse
from db.db import async_session
from models.users import RemoteUser
from sqlmodel import select
from activitypub.activitystreams import ActivityTypes
async def send_activity(private_key: str, user_id: str, target_id: str, body: dict):
# 1. Fetch target inbox
# 1.1 Check if we know the user's inbox
async with async_session() as s:
statement = select(RemoteUser).where(RemoteUser.remote_id == target_id)
results = await s.execute(statement)
user = results.first()
if user:
inbox = user[0].inbox
else:
# 1.2 If not, fetch from remote server
resp = requests.get(target_id, headers={'accept': 'application/activity+json'})
target = resp.json()
inbox = target['inbox']
remote_user = RemoteUser(remote_id=target_id, inbox=target['inbox'], public_key=target['publicKey']['publicKeyPem'])
async with async_session() as s:
s.add(remote_user)
await s.commit()
# 2. Send activity to target inbox
private_key = serialization.load_pem_private_key(private_key.encode('utf-8'), password=None)
urlparts = urlparse(target_id)
m = hashlib.sha256()
m.update(json.dumps(body).encode('utf-8'))
digestHash = base64.b64encode(m.digest()).decode()
created_timestamp = int(time.time())
header_time = email.utils.formatdate(created_timestamp, usegmt=True)
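    # The signing string below follows the draft-cavage HTTP Signatures
    # construction commonly used for ActivityPub: it covers (request-target),
    # Host, Date and the SHA-256 digest of the body.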
toSign = '(request-target): post {}\nhost: {}\ndate: {}\ndigest: SHA-256={}'.format(
urlparts.path,
urlparts.netloc,
header_time,
digestHash
)
signed = private_key.sign(
toSign.encode('utf-8'),
padding.PKCS1v15(),
hashes.SHA256()
)
signature = 'keyId="{}#main-key",headers="(request-target) host date digest",signature="{}"'.format(
user_id,
base64.b64encode(signed).decode(),
)
headers = {
'Content-type': 'application/ld+json',
'Host': urlparts.netloc,
'Date': header_time,
'Signature': signature,
'Digest': 'SHA-256={}'.format(digestHash)
}
resp = requests.post(inbox, json=body, headers=headers)
print(resp.text)
|
[
"sqlmodel.select"
] |
[((1382, 1401), 'urllib.parse.urlparse', 'urlparse', (['target_id'], {}), '(target_id)\n', (1390, 1401), False, 'from urllib.parse import urlparse\n'), ((1411, 1427), 'hashlib.sha256', 'hashlib.sha256', ([], {}), '()\n', (1425, 1427), False, 'import base64, hashlib, hmac, time\n'), ((2347, 2395), 'requests.post', 'requests.post', (['inbox'], {'json': 'body', 'headers': 'headers'}), '(inbox, json=body, headers=headers)\n', (2360, 2395), False, 'import requests\n'), ((567, 582), 'db.db.async_session', 'async_session', ([], {}), '()\n', (580, 582), False, 'from db.db import async_session\n'), ((865, 937), 'requests.get', 'requests.get', (['target_id'], {'headers': "{'accept': 'application/activity+json'}"}), "(target_id, headers={'accept': 'application/activity+json'})\n", (877, 937), False, 'import requests\n'), ((1022, 1129), 'models.users.RemoteUser', 'RemoteUser', ([], {'remote_id': 'target_id', 'inbox': "target['inbox']", 'public_key': "target['publicKey']['publicKeyPem']"}), "(remote_id=target_id, inbox=target['inbox'], public_key=target[\n 'publicKey']['publicKeyPem'])\n", (1032, 1129), False, 'from models.users import RemoteUser\n'), ((1559, 1570), 'time.time', 'time.time', ([], {}), '()\n', (1568, 1570), False, 'import base64, hashlib, hmac, time\n'), ((1901, 1919), 'cryptography.hazmat.primitives.asymmetric.padding.PKCS1v15', 'padding.PKCS1v15', ([], {}), '()\n', (1917, 1919), False, 'from cryptography.hazmat.primitives.asymmetric import padding\n'), ((1929, 1944), 'cryptography.hazmat.primitives.hashes.SHA256', 'hashes.SHA256', ([], {}), '()\n', (1942, 1944), False, 'from cryptography.hazmat.primitives import hashes, serialization\n'), ((1144, 1159), 'db.db.async_session', 'async_session', ([], {}), '()\n', (1157, 1159), False, 'from db.db import async_session\n'), ((609, 627), 'sqlmodel.select', 'select', (['RemoteUser'], {}), '(RemoteUser)\n', (615, 627), False, 'from sqlmodel import select\n'), ((1441, 1457), 'json.dumps', 'json.dumps', (['body'], {}), '(body)\n', (1451, 1457), False, 'import json\n'), ((2082, 2106), 'base64.b64encode', 'base64.b64encode', (['signed'], {}), '(signed)\n', (2098, 2106), False, 'import base64, hashlib, hmac, time\n')]
|
import argparse
import logging
import os
import dataset.data_loader as data_loader
import model.net as net
from common import utils
from loss.losses import compute_losses, compute_metrics
from common.manager import Manager
import megengine.distributed as dist
import megengine.functional as F
parser = argparse.ArgumentParser()
parser.add_argument("--model_dir", default="experiments/base_model", help="Directory containing params.json")
parser.add_argument("--restore_file", default="best", help="name of the file in --model_dir containing weights to load")
def evaluate(model, manager):
    """Evaluate the model on `num_steps` batches.
    Args:
        model: (torch.nn.Module) the neural network
        manager: a class instance that contains objects related to train and evaluate.
    """
    rank = dist.get_rank()
    world_size = dist.get_world_size()
# set model to evaluation mode
model.eval()
# compute metrics over the dataset
if manager.dataloaders["val"] is not None:
# loss status and val status initial
manager.reset_loss_status()
manager.reset_metric_status("val")
for data_batch in manager.dataloaders["val"]:
# compute the real batch size
bs = data_batch["points_src"].shape[0]
# move to GPU if available
data_batch = utils.tensor_mge(data_batch)
# compute model output
output_batch = model(data_batch)
# compute all loss on this batch
loss = compute_losses(output_batch, manager.params)
metrics = compute_metrics(output_batch, manager.params)
if world_size > 1:
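                # average across workers: sum each value over all ranks,
                # then divide by the world size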
for k, v in loss.items():
loss[k] = F.distributed.all_reduce_sum(v) / world_size
for k, v in metrics.items():
metrics[k] = F.distributed.all_reduce_sum(v) / world_size
manager.update_loss_status(loss, "val", bs)
# compute all metrics on this batch
manager.update_metric_status(metrics, "val", bs)
# update val data to tensorboard
if rank == 0:
# compute RMSE metrics
manager.summarize_metric_status(metrics, "val")
manager.writer.add_scalar("Loss/val", manager.loss_status["total"].avg, manager.epoch)
# manager.logger.info("Loss/valid epoch {}: {:.4f}".format(manager.epoch, manager.loss_status["total"].avg))
for k, v in manager.val_status.items():
manager.writer.add_scalar("Metric/val/{}".format(k), v.avg, manager.epoch)
# For each epoch, print the metric
manager.print_metrics("val", title="Val", color="green")
if manager.dataloaders["test"] is not None:
# loss status and val status initial
manager.reset_loss_status()
manager.reset_metric_status("test")
for data_batch in manager.dataloaders["test"]:
# compute the real batch size
bs = data_batch["points_src"].shape[0]
# move to GPU if available
data_batch = utils.tensor_mge(data_batch)
# compute model output
output_batch = model(data_batch)
# compute all loss on this batch
loss = compute_losses(output_batch, manager.params)
metrics = compute_metrics(output_batch, manager.params)
if world_size > 1:
for k, v in loss.items():
loss[k] = F.distributed.all_reduce_sum(v) / world_size
for k, v in metrics.items():
metrics[k] = F.distributed.all_reduce_sum(v) / world_size
manager.update_loss_status(loss, "test", bs)
# compute all metrics on this batch
manager.update_metric_status(metrics, "test", bs)
# update test data to tensorboard
if rank == 0:
# compute RMSE metrics
manager.summarize_metric_status(metrics, "test")
manager.writer.add_scalar("Loss/test", manager.loss_status["total"].avg, manager.epoch)
# manager.logger.info("Loss/test epoch {}: {:.4f}".format(manager.epoch, manager.loss_status["total"].avg))
        for k, v in manager.test_status.items():
manager.writer.add_scalar("Metric/test/{}".format(k), v.avg, manager.epoch)
# For each epoch, print the metric
manager.print_metrics("test", title="Test", color="red")
def test(model, manager):
"""Test the model with loading checkpoints.
Args:
model: (torch.nn.Module) the neural network
manager: a class instance that contains objects related to train and evaluate.
"""
# set model to evaluation mode
model.eval()
# compute metrics over the dataset
if manager.dataloaders["val"] is not None:
# loss status and val status initial
manager.reset_loss_status()
manager.reset_metric_status("val")
for data_batch in manager.dataloaders["val"]:
# compute the real batch size
bs = data_batch["points_src"].shape[0]
# move to GPU if available
data_batch = utils.tensor_mge(data_batch)
# compute model output
output_batch = model(data_batch)
# compute all loss on this batch
loss = compute_losses(output_batch, manager.params)
manager.update_loss_status(loss, "val", bs)
# compute all metrics on this batch
metrics = compute_metrics(output_batch, manager.params)
manager.update_metric_status(metrics, "val", bs)
# compute RMSE metrics
manager.summarize_metric_status(metrics, "val")
# For each epoch, update and print the metric
manager.print_metrics("val", title="Val", color="green")
if manager.dataloaders["test"] is not None:
# loss status and test status initial
manager.reset_loss_status()
manager.reset_metric_status("test")
for data_batch in manager.dataloaders["test"]:
# compute the real batch size
bs = data_batch["points_src"].shape[0]
# move to GPU if available
data_batch = utils.tensor_mge(data_batch)
# compute model output
output_batch = model(data_batch)
# compute all loss on this batch
loss = compute_losses(output_batch, manager.params)
manager.update_loss_status(loss, "test", bs)
# compute all metrics on this batch
metrics = compute_metrics(output_batch, manager.params)
manager.update_metric_status(metrics, "test", bs)
# compute RMSE metrics
manager.summarize_metric_status(metrics, "test")
# For each epoch, print the metric
manager.print_metrics("test", title="Test", color="red")
if __name__ == "__main__":
"""
Evaluate the model on the test set.
"""
# Load the parameters
args = parser.parse_args()
json_path = os.path.join(args.model_dir, "params.json")
assert os.path.isfile(json_path), "No json configuration file found at {}".format(json_path)
params = utils.Params(json_path)
# Only load model weights
params.only_weights = True
# Update args into params
params.update(vars(args))
# Get the logger
logger = utils.set_logger(os.path.join(args.model_dir, "evaluate.log"))
# Create the input data pipeline
logging.info("Creating the dataset...")
# Fetch dataloaders
dataloaders = data_loader.fetch_dataloader(params)
# Define the model and optimizer
model = net.fetch_net(params)
# Initial status for checkpoint manager
manager = Manager(model=model, optimizer=None, scheduler=None, params=params, dataloaders=dataloaders, writer=None, logger=logger)
# Reload weights from the saved file
manager.load_checkpoints()
# Test the model
logger.info("Starting test")
# Evaluate
test(model, manager)
|
[
"megengine.functional.distributed.all_reduce_sum",
"megengine.distributed.get_world_size",
"megengine.distributed.get_rank"
] |
[((306, 331), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (329, 331), False, 'import argparse\n'), ((606, 621), 'megengine.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (619, 621), True, 'import megengine.distributed as dist\n'), ((639, 660), 'megengine.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (658, 660), True, 'import megengine.distributed as dist\n'), ((7021, 7064), 'os.path.join', 'os.path.join', (['args.model_dir', '"""params.json"""'], {}), "(args.model_dir, 'params.json')\n", (7033, 7064), False, 'import os\n'), ((7076, 7101), 'os.path.isfile', 'os.path.isfile', (['json_path'], {}), '(json_path)\n', (7090, 7101), False, 'import os\n'), ((7175, 7198), 'common.utils.Params', 'utils.Params', (['json_path'], {}), '(json_path)\n', (7187, 7198), False, 'from common import utils\n'), ((7461, 7500), 'logging.info', 'logging.info', (['"""Creating the dataset..."""'], {}), "('Creating the dataset...')\n", (7473, 7500), False, 'import logging\n'), ((7544, 7580), 'dataset.data_loader.fetch_dataloader', 'data_loader.fetch_dataloader', (['params'], {}), '(params)\n', (7572, 7580), True, 'import dataset.data_loader as data_loader\n'), ((7631, 7652), 'model.net.fetch_net', 'net.fetch_net', (['params'], {}), '(params)\n', (7644, 7652), True, 'import model.net as net\n'), ((7712, 7836), 'common.manager.Manager', 'Manager', ([], {'model': 'model', 'optimizer': 'None', 'scheduler': 'None', 'params': 'params', 'dataloaders': 'dataloaders', 'writer': 'None', 'logger': 'logger'}), '(model=model, optimizer=None, scheduler=None, params=params,\n dataloaders=dataloaders, writer=None, logger=logger)\n', (7719, 7836), False, 'from common.manager import Manager\n'), ((7373, 7417), 'os.path.join', 'os.path.join', (['args.model_dir', '"""evaluate.log"""'], {}), "(args.model_dir, 'evaluate.log')\n", (7385, 7417), False, 'import os\n'), ((1343, 1371), 'common.utils.tensor_mge', 'utils.tensor_mge', (['data_batch'], {}), '(data_batch)\n', (1359, 1371), False, 'from common import utils\n'), ((1516, 1560), 'loss.losses.compute_losses', 'compute_losses', (['output_batch', 'manager.params'], {}), '(output_batch, manager.params)\n', (1530, 1560), False, 'from loss.losses import compute_losses, compute_metrics\n'), ((1583, 1628), 'loss.losses.compute_metrics', 'compute_metrics', (['output_batch', 'manager.params'], {}), '(output_batch, manager.params)\n', (1598, 1628), False, 'from loss.losses import compute_losses, compute_metrics\n'), ((3090, 3118), 'common.utils.tensor_mge', 'utils.tensor_mge', (['data_batch'], {}), '(data_batch)\n', (3106, 3118), False, 'from common import utils\n'), ((3263, 3307), 'loss.losses.compute_losses', 'compute_losses', (['output_batch', 'manager.params'], {}), '(output_batch, manager.params)\n', (3277, 3307), False, 'from loss.losses import compute_losses, compute_metrics\n'), ((3330, 3375), 'loss.losses.compute_metrics', 'compute_metrics', (['output_batch', 'manager.params'], {}), '(output_batch, manager.params)\n', (3345, 3375), False, 'from loss.losses import compute_losses, compute_metrics\n'), ((5164, 5192), 'common.utils.tensor_mge', 'utils.tensor_mge', (['data_batch'], {}), '(data_batch)\n', (5180, 5192), False, 'from common import utils\n'), ((5337, 5381), 'loss.losses.compute_losses', 'compute_losses', (['output_batch', 'manager.params'], {}), '(output_batch, manager.params)\n', (5351, 5381), False, 'from loss.losses import compute_losses, compute_metrics\n'), ((5508, 5553), 'loss.losses.compute_metrics', 
'compute_metrics', (['output_batch', 'manager.params'], {}), '(output_batch, manager.params)\n', (5523, 5553), False, 'from loss.losses import compute_losses, compute_metrics\n'), ((6209, 6237), 'common.utils.tensor_mge', 'utils.tensor_mge', (['data_batch'], {}), '(data_batch)\n', (6225, 6237), False, 'from common import utils\n'), ((6382, 6426), 'loss.losses.compute_losses', 'compute_losses', (['output_batch', 'manager.params'], {}), '(output_batch, manager.params)\n', (6396, 6426), False, 'from loss.losses import compute_losses, compute_metrics\n'), ((6554, 6599), 'loss.losses.compute_metrics', 'compute_metrics', (['output_batch', 'manager.params'], {}), '(output_batch, manager.params)\n', (6569, 6599), False, 'from loss.losses import compute_losses, compute_metrics\n'), ((1732, 1763), 'megengine.functional.distributed.all_reduce_sum', 'F.distributed.all_reduce_sum', (['v'], {}), '(v)\n', (1760, 1763), True, 'import megengine.functional as F\n'), ((1855, 1886), 'megengine.functional.distributed.all_reduce_sum', 'F.distributed.all_reduce_sum', (['v'], {}), '(v)\n', (1883, 1886), True, 'import megengine.functional as F\n'), ((3479, 3510), 'megengine.functional.distributed.all_reduce_sum', 'F.distributed.all_reduce_sum', (['v'], {}), '(v)\n', (3507, 3510), True, 'import megengine.functional as F\n'), ((3602, 3633), 'megengine.functional.distributed.all_reduce_sum', 'F.distributed.all_reduce_sum', (['v'], {}), '(v)\n', (3630, 3633), True, 'import megengine.functional as F\n')]
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine.functional as F
import megengine.functional.elemwise as elemwise
from megengine import tensor
from megengine.core.tensor import dtype
from megengine.functional.elemwise import Elemwise
from megengine.jit import trace
def test_abs():
np.testing.assert_allclose(
F.abs(tensor([-3.0, -4.0, -5.0])).numpy(),
np.abs(np.array([-3.0, -4.0, -5.0], dtype=np.float32)),
)
np.testing.assert_allclose(F.abs(-3.0).numpy(), np.abs(np.float32(-3.0)))
def test_elemwise_mode_string():
for key, mode in vars(Elemwise.Mode).items():
if isinstance(mode, Elemwise.Mode):
assert key == mode
assert Elemwise(mode=key) == Elemwise(mode=mode)
def test_multiply():
np.testing.assert_allclose(
F.mul(-3.0, -4.0).numpy(), np.multiply(np.float32(-3.0), np.float32(-4.0))
)
np.testing.assert_allclose(
F.mul(tensor([3.0, 4.0]), 4.0).numpy(),
np.multiply(np.array([3.0, 4.0], dtype=np.float32), 4.0),
)
np.testing.assert_allclose(
F.mul(4.0, tensor([3.0, 4.0])).numpy(),
np.multiply(4.0, np.array([3.0, 4.0], dtype=np.float32)),
)
np.testing.assert_allclose(
F.mul(tensor([3.0, 4.0]), tensor([3.0, 4.0])).numpy(),
np.multiply(
np.array([3.0, 4.0], dtype=np.float32),
np.array([3.0, 4.0], dtype=np.float32),
),
)
def test_div():
np.testing.assert_allclose(
F.div(tensor([3.0, 4.0]), 2).numpy(),
np.divide(np.array([3, 4], dtype=np.float32), 2),
)
np.testing.assert_allclose(
(tensor([3, 4]) / 2).numpy(), np.divide(np.array([3, 4], dtype=np.float32), 2),
)
np.testing.assert_allclose(
F.floor_div(tensor([-5.0, -7.0]), 2).numpy(),
np.floor_divide(np.array([-5.0, -7.0], dtype=np.float32), 2),
)
np.testing.assert_allclose(
(tensor([-5, -7]) // 2).numpy(),
np.floor_divide(np.array([-5, -7], dtype=np.int32), 2),
)
def test_clamp():
"""Fix an issue when `lower` or `upper` is 0, it will be recognized as `False` and
`F.clip` will fall into wrong conditions unexpectedly.
"""
x = np.linspace(-6, 6, dtype="float32")
np.testing.assert_allclose(
F.clip(tensor(x) + 3, 0, 6).numpy(), np.clip(x + 3, 0, 6)
)
np.testing.assert_allclose(
F.clip(tensor(x) - 3, -6, 0).numpy(), np.clip(x - 3, -6, 0)
)
def test_isnan():
for case in [[1, float("nan"), 0]]:
np.testing.assert_allclose(F.isnan(tensor(case)).numpy(), np.isnan(case))
def test_isinf():
for case in [[1, float("inf"), 0]]:
np.testing.assert_allclose(F.isinf(tensor(case)).numpy(), np.isinf(case))
def test_sign():
for case in [[1, -1, 0]]:
x = tensor(case)
np.testing.assert_allclose(F.sign(x).numpy(), np.sign(case).astype(x.dtype))
def test_cosh():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = np.cosh(x)
y_mge = F.cosh(tensor(x)).numpy()
np.testing.assert_allclose(y_np, y_mge, rtol=1e-5)
def test_sinh():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = np.sinh(x)
y_mge = F.sinh(tensor(x)).numpy()
np.testing.assert_allclose(y_np, y_mge, rtol=1e-5)
def test_asinh():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = np.arcsinh(x)
y_mge = F.asinh(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=5)
def test_acosh():
x = np.arange(0, 10000).astype("float32") / 100 + 1
y_np = np.arccosh(x)
y_mge = F.acosh(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=6)
def test_atanh():
np.random.seed(42)
x = np.random.rand(100).astype("float32") * 2 - 1
y_np = np.arctanh(x)
y_mge = F.atanh(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=5)
def test_hswish():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = x * np.minimum(np.maximum(x + 3, 0), 6) / 6
y_mge = F.hswish(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=6)
def test_silu():
x = np.array([-1.5, 0.0, 1.0, 1.5]).astype("float32")
y_np = x / (1 + np.exp(-x))
y_mge = F.silu(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=6)
def test_hsigmoid():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = np.minimum(np.maximum(x + 3, 0), 6) / 6
y_mge = F.hsigmoid(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=6)
def test_logical_oprs():
x = np.array([[True, False], [False, True]])
y = np.array([[True, True], [False, False]])
xx = tensor(x)
yy = tensor(y)
np.testing.assert_equal(~x, (F.logical_not(xx)).numpy())
np.testing.assert_equal(x & y, F.logical_and(xx, yy).numpy())
np.testing.assert_equal(x | y, F.logical_or(xx, yy).numpy())
np.testing.assert_equal(x ^ y, F.logical_xor(xx, yy).numpy())
def test_logaddexp():
x = np.random.randn(2, 100)
y = np.random.randn(2, 100)
xx = tensor(x)
yy = tensor(y)
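    # naive reference log(exp(x) + exp(y)); F.logaddexp should agree while
    # computing the same quantity in a numerically safer way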
out_np = np.log(np.exp(x) + np.exp(y))
out_mge = F.logaddexp(xx, yy)
np.testing.assert_almost_equal(out_np, out_mge.numpy(), decimal=6)
def test_qadd():
inp_scale = 0.5
outp_scale = 0.2
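    # a qint8 tensor stores int8 values plus a scale; "qadd" adds the
    # dequantized inputs and requantizes the sum with the output scale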
x = np.arange(6).reshape(2, 3).astype("float32")
y = np.arange(6).reshape(2, 3).astype("float32")
x = tensor(x, dtype=dtype.qint8(inp_scale))
y = tensor(y, dtype=dtype.qint8(inp_scale))
result_mge = F.elemwise._elemwise_multi_type(
x, y, mode="qadd", dtype=dtype.qint8(outp_scale)
)
result_mge = result_mge.astype("float32").numpy()
result_expect = x.astype("float32").numpy() + y.astype("float32").numpy()
np.testing.assert_almost_equal(result_mge, result_expect, decimal=6)
def test_int32_input():
x = tensor(np.array([1, 2, 3, 4, 5]), dtype="int32")
for op_name in elemwise.__all__:
op = getattr(elemwise, op_name)
nargs = op.__code__.co_argcount
if op_name == "clip":
inp = (x, 0, 1)
elif op_name.endswith("_shift"):
inp = (x, 1)
elif op_name.startswith("logical_"):
continue
else:
inp = (x,) * nargs
y = op(*inp)
y.numpy()
@pytest.mark.parametrize("is_trace", [True, False])
def test_empty_tensor(is_trace):
binary_func = []
unary_func = []
for op_name in elemwise.__all__:
op = getattr(elemwise, op_name)
nargs = op.__code__.co_argcount
if op_name == "clip":
unary_func.append(["clip", lambda x, f=op: f(x, lower=0, upper=1)])
elif op_name.endswith("_shift"):
unary_func.append(
[op_name, lambda x, f=op: f(tensor(x.numpy(), dtype="int32"), 1)]
)
elif op_name.startswith("logical_"): # logical_xxx op only accept boolean type
if nargs == 1:
unary_func.append(
[op_name, lambda x, f=op: f(tensor(x.numpy(), dtype="bool"))]
)
else:
assert nargs == 2
binary_func.append(
[
op_name,
lambda x, y, f=op: f(
tensor(x.numpy(), dtype="bool"),
tensor(y.numpy(), dtype="bool"),
),
]
)
elif nargs == 1:
unary_func.append([op_name, op])
elif nargs == 2:
binary_func.append([op_name, op])
else:
raise NotImplementedError("nargs {}".format(nargs))
def run_test(func, args, ref_shape, is_trace, sym=False):
args = [tensor(t, dtype="float32") for t in args]
if is_trace:
func = trace(symbolic=sym)(func)
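            # call several times so both the initial trace-compile and the
            # cached replay path are exercised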
for _ in range(3):
out = func(*args)
assert out.numpy().shape == ref_shape
else:
out = func(*args)
assert out.numpy().shape == ref_shape, out.numpy().shape
inps = [
np.array([]).astype("float32"),
np.random.randn(2, 0, 3).astype("float32"),
123,
]
for op_name, op in unary_func:
if is_trace:
for sym in [True, False]:
run_test(op, [inps[0],], inps[0].shape, True, sym)
run_test(op, [inps[1],], inps[1].shape, True, sym)
else:
run_test(op, [inps[0],], inps[0].shape, False)
run_test(op, [inps[1],], inps[1].shape, False)
for op_name, op in binary_func:
if is_trace:
for sym in [True, False]:
run_test(op, [inps[0], inps[0]], (inps[0] + inps[0]).shape, True, sym)
run_test(op, [inps[1], inps[1]], (inps[1] + inps[1]).shape, True, sym)
run_test(op, [inps[0], inps[2]], (inps[0] + inps[2]).shape, True, sym)
run_test(op, [inps[1], inps[2]], (inps[1] + inps[2]).shape, True, sym)
else:
run_test(op, [inps[0], inps[0]], (inps[0] + inps[0]).shape, False)
run_test(op, [inps[1], inps[1]], (inps[1] + inps[1]).shape, False)
run_test(op, [inps[0], inps[2]], (inps[0] + inps[2]).shape, False)
run_test(op, [inps[1], inps[2]], (inps[1] + inps[2]).shape, False)
|
[
"megengine.functional.sign",
"megengine.functional.elemwise.Elemwise",
"megengine.core.tensor.dtype.qint8",
"megengine.functional.logical_not",
"megengine.jit.trace",
"megengine.tensor",
"megengine.functional.logical_xor",
"megengine.functional.logical_or",
"megengine.functional.mul",
"megengine.functional.logaddexp",
"megengine.functional.logical_and",
"megengine.functional.abs"
] |
[((6757, 6807), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""is_trace"""', '[True, False]'], {}), "('is_trace', [True, False])\n", (6780, 6807), False, 'import pytest\n'), ((2580, 2615), 'numpy.linspace', 'np.linspace', (['(-6)', '(6)'], {'dtype': '"""float32"""'}), "(-6, 6, dtype='float32')\n", (2591, 2615), True, 'import numpy as np\n'), ((3292, 3310), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (3306, 3310), True, 'import numpy as np\n'), ((3369, 3379), 'numpy.cosh', 'np.cosh', (['x'], {}), '(x)\n', (3376, 3379), True, 'import numpy as np\n'), ((3422, 3473), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['y_np', 'y_mge'], {'rtol': '(1e-05)'}), '(y_np, y_mge, rtol=1e-05)\n', (3448, 3473), True, 'import numpy as np\n'), ((3496, 3514), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (3510, 3514), True, 'import numpy as np\n'), ((3573, 3583), 'numpy.sinh', 'np.sinh', (['x'], {}), '(x)\n', (3580, 3583), True, 'import numpy as np\n'), ((3626, 3677), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['y_np', 'y_mge'], {'rtol': '(1e-05)'}), '(y_np, y_mge, rtol=1e-05)\n', (3652, 3677), True, 'import numpy as np\n'), ((3701, 3719), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (3715, 3719), True, 'import numpy as np\n'), ((3778, 3791), 'numpy.arcsinh', 'np.arcsinh', (['x'], {}), '(x)\n', (3788, 3791), True, 'import numpy as np\n'), ((3835, 3889), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['y_np', 'y_mge'], {'decimal': '(5)'}), '(y_np, y_mge, decimal=5)\n', (3865, 3889), True, 'import numpy as np\n'), ((3977, 3990), 'numpy.arccosh', 'np.arccosh', (['x'], {}), '(x)\n', (3987, 3990), True, 'import numpy as np\n'), ((4034, 4088), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['y_np', 'y_mge'], {'decimal': '(6)'}), '(y_np, y_mge, decimal=6)\n', (4064, 4088), True, 'import numpy as np\n'), ((4113, 4131), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (4127, 4131), True, 'import numpy as np\n'), ((4197, 4210), 'numpy.arctanh', 'np.arctanh', (['x'], {}), '(x)\n', (4207, 4210), True, 'import numpy as np\n'), ((4254, 4308), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['y_np', 'y_mge'], {'decimal': '(5)'}), '(y_np, y_mge, decimal=5)\n', (4284, 4308), True, 'import numpy as np\n'), ((4334, 4352), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (4348, 4352), True, 'import numpy as np\n'), ((4499, 4553), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['y_np', 'y_mge'], {'decimal': '(6)'}), '(y_np, y_mge, decimal=6)\n', (4529, 4553), True, 'import numpy as np\n'), ((4705, 4759), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['y_np', 'y_mge'], {'decimal': '(6)'}), '(y_np, y_mge, decimal=6)\n', (4735, 4759), True, 'import numpy as np\n'), ((4787, 4805), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (4801, 4805), True, 'import numpy as np\n'), ((4950, 5004), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['y_np', 'y_mge'], {'decimal': '(6)'}), '(y_np, y_mge, decimal=6)\n', (4980, 5004), True, 'import numpy as np\n'), ((5040, 5080), 'numpy.array', 'np.array', (['[[True, False], [False, True]]'], {}), '([[True, False], [False, True]])\n', (5048, 5080), True, 'import numpy as np\n'), ((5089, 5129), 'numpy.array', 'np.array', (['[[True, True], [False, False]]'], {}), '([[True, True], [False, False]])\n', 
(5097, 5129), True, 'import numpy as np\n'), ((5139, 5148), 'megengine.tensor', 'tensor', (['x'], {}), '(x)\n', (5145, 5148), False, 'from megengine import tensor\n'), ((5158, 5167), 'megengine.tensor', 'tensor', (['y'], {}), '(y)\n', (5164, 5167), False, 'from megengine import tensor\n'), ((5458, 5481), 'numpy.random.randn', 'np.random.randn', (['(2)', '(100)'], {}), '(2, 100)\n', (5473, 5481), True, 'import numpy as np\n'), ((5490, 5513), 'numpy.random.randn', 'np.random.randn', (['(2)', '(100)'], {}), '(2, 100)\n', (5505, 5513), True, 'import numpy as np\n'), ((5523, 5532), 'megengine.tensor', 'tensor', (['x'], {}), '(x)\n', (5529, 5532), False, 'from megengine import tensor\n'), ((5542, 5551), 'megengine.tensor', 'tensor', (['y'], {}), '(y)\n', (5548, 5551), False, 'from megengine import tensor\n'), ((5609, 5628), 'megengine.functional.logaddexp', 'F.logaddexp', (['xx', 'yy'], {}), '(xx, yy)\n', (5620, 5628), True, 'import megengine.functional as F\n'), ((6211, 6279), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['result_mge', 'result_expect'], {'decimal': '(6)'}), '(result_mge, result_expect, decimal=6)\n', (6241, 6279), True, 'import numpy as np\n'), ((2693, 2713), 'numpy.clip', 'np.clip', (['(x + 3)', '(0)', '(6)'], {}), '(x + 3, 0, 6)\n', (2700, 2713), True, 'import numpy as np\n'), ((2798, 2819), 'numpy.clip', 'np.clip', (['(x - 3)', '(-6)', '(0)'], {}), '(x - 3, -6, 0)\n', (2805, 2819), True, 'import numpy as np\n'), ((3171, 3183), 'megengine.tensor', 'tensor', (['case'], {}), '(case)\n', (3177, 3183), False, 'from megengine import tensor\n'), ((6321, 6346), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5]'], {}), '([1, 2, 3, 4, 5])\n', (6329, 6346), True, 'import numpy as np\n'), ((761, 807), 'numpy.array', 'np.array', (['[-3.0, -4.0, -5.0]'], {'dtype': 'np.float32'}), '([-3.0, -4.0, -5.0], dtype=np.float32)\n', (769, 807), True, 'import numpy as np\n'), ((876, 892), 'numpy.float32', 'np.float32', (['(-3.0)'], {}), '(-3.0)\n', (886, 892), True, 'import numpy as np\n'), ((1218, 1234), 'numpy.float32', 'np.float32', (['(-3.0)'], {}), '(-3.0)\n', (1228, 1234), True, 'import numpy as np\n'), ((1236, 1252), 'numpy.float32', 'np.float32', (['(-4.0)'], {}), '(-4.0)\n', (1246, 1252), True, 'import numpy as np\n'), ((1361, 1399), 'numpy.array', 'np.array', (['[3.0, 4.0]'], {'dtype': 'np.float32'}), '([3.0, 4.0], dtype=np.float32)\n', (1369, 1399), True, 'import numpy as np\n'), ((1519, 1557), 'numpy.array', 'np.array', (['[3.0, 4.0]'], {'dtype': 'np.float32'}), '([3.0, 4.0], dtype=np.float32)\n', (1527, 1557), True, 'import numpy as np\n'), ((1695, 1733), 'numpy.array', 'np.array', (['[3.0, 4.0]'], {'dtype': 'np.float32'}), '([3.0, 4.0], dtype=np.float32)\n', (1703, 1733), True, 'import numpy as np\n'), ((1747, 1785), 'numpy.array', 'np.array', (['[3.0, 4.0]'], {'dtype': 'np.float32'}), '([3.0, 4.0], dtype=np.float32)\n', (1755, 1785), True, 'import numpy as np\n'), ((1918, 1952), 'numpy.array', 'np.array', (['[3, 4]'], {'dtype': 'np.float32'}), '([3, 4], dtype=np.float32)\n', (1926, 1952), True, 'import numpy as np\n'), ((2045, 2079), 'numpy.array', 'np.array', (['[3, 4]'], {'dtype': 'np.float32'}), '([3, 4], dtype=np.float32)\n', (2053, 2079), True, 'import numpy as np\n'), ((2202, 2242), 'numpy.array', 'np.array', (['[-5.0, -7.0]'], {'dtype': 'np.float32'}), '([-5.0, -7.0], dtype=np.float32)\n', (2210, 2242), True, 'import numpy as np\n'), ((2352, 2386), 'numpy.array', 'np.array', (['[-5, -7]'], {'dtype': 'np.int32'}), '([-5, -7], dtype=np.int32)\n', (2360, 
2386), True, 'import numpy as np\n'), ((2952, 2966), 'numpy.isnan', 'np.isnan', (['case'], {}), '(case)\n', (2960, 2966), True, 'import numpy as np\n'), ((3094, 3108), 'numpy.isinf', 'np.isinf', (['case'], {}), '(case)\n', (3102, 3108), True, 'import numpy as np\n'), ((3319, 3339), 'numpy.random.randn', 'np.random.randn', (['(100)'], {}), '(100)\n', (3334, 3339), True, 'import numpy as np\n'), ((3523, 3543), 'numpy.random.randn', 'np.random.randn', (['(100)'], {}), '(100)\n', (3538, 3543), True, 'import numpy as np\n'), ((3728, 3748), 'numpy.random.randn', 'np.random.randn', (['(100)'], {}), '(100)\n', (3743, 3748), True, 'import numpy as np\n'), ((4361, 4381), 'numpy.random.randn', 'np.random.randn', (['(100)'], {}), '(100)\n', (4376, 4381), True, 'import numpy as np\n'), ((4581, 4612), 'numpy.array', 'np.array', (['[-1.5, 0.0, 1.0, 1.5]'], {}), '([-1.5, 0.0, 1.0, 1.5])\n', (4589, 4612), True, 'import numpy as np\n'), ((4651, 4661), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (4657, 4661), True, 'import numpy as np\n'), ((4814, 4834), 'numpy.random.randn', 'np.random.randn', (['(100)'], {}), '(100)\n', (4829, 4834), True, 'import numpy as np\n'), ((4875, 4895), 'numpy.maximum', 'np.maximum', (['(x + 3)', '(0)'], {}), '(x + 3, 0)\n', (4885, 4895), True, 'import numpy as np\n'), ((5572, 5581), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (5578, 5581), True, 'import numpy as np\n'), ((5584, 5593), 'numpy.exp', 'np.exp', (['y'], {}), '(y)\n', (5590, 5593), True, 'import numpy as np\n'), ((5890, 5912), 'megengine.core.tensor.dtype.qint8', 'dtype.qint8', (['inp_scale'], {}), '(inp_scale)\n', (5901, 5912), False, 'from megengine.core.tensor import dtype\n'), ((5938, 5960), 'megengine.core.tensor.dtype.qint8', 'dtype.qint8', (['inp_scale'], {}), '(inp_scale)\n', (5949, 5960), False, 'from megengine.core.tensor import dtype\n'), ((6045, 6068), 'megengine.core.tensor.dtype.qint8', 'dtype.qint8', (['outp_scale'], {}), '(outp_scale)\n', (6056, 6068), False, 'from megengine.core.tensor import dtype\n'), ((8203, 8229), 'megengine.tensor', 'tensor', (['t'], {'dtype': '"""float32"""'}), "(t, dtype='float32')\n", (8209, 8229), False, 'from megengine import tensor\n'), ((848, 859), 'megengine.functional.abs', 'F.abs', (['(-3.0)'], {}), '(-3.0)\n', (853, 859), True, 'import megengine.functional as F\n'), ((1074, 1092), 'megengine.functional.elemwise.Elemwise', 'Elemwise', ([], {'mode': 'key'}), '(mode=key)\n', (1082, 1092), False, 'from megengine.functional.elemwise import Elemwise\n'), ((1096, 1115), 'megengine.functional.elemwise.Elemwise', 'Elemwise', ([], {'mode': 'mode'}), '(mode=mode)\n', (1104, 1115), False, 'from megengine.functional.elemwise import Elemwise\n'), ((1179, 1196), 'megengine.functional.mul', 'F.mul', (['(-3.0)', '(-4.0)'], {}), '(-3.0, -4.0)\n', (1184, 1196), True, 'import megengine.functional as F\n'), ((3399, 3408), 'megengine.tensor', 'tensor', (['x'], {}), '(x)\n', (3405, 3408), False, 'from megengine import tensor\n'), ((3603, 3612), 'megengine.tensor', 'tensor', (['x'], {}), '(x)\n', (3609, 3612), False, 'from megengine import tensor\n'), ((3812, 3821), 'megengine.tensor', 'tensor', (['x'], {}), '(x)\n', (3818, 3821), False, 'from megengine import tensor\n'), ((4011, 4020), 'megengine.tensor', 'tensor', (['x'], {}), '(x)\n', (4017, 4020), False, 'from megengine import tensor\n'), ((4231, 4240), 'megengine.tensor', 'tensor', (['x'], {}), '(x)\n', (4237, 4240), False, 'from megengine import tensor\n'), ((4426, 4446), 'numpy.maximum', 'np.maximum', (['(x + 3)', '(0)'], {}), '(x 
+ 3, 0)\n', (4436, 4446), True, 'import numpy as np\n'), ((4476, 4485), 'megengine.tensor', 'tensor', (['x'], {}), '(x)\n', (4482, 4485), False, 'from megengine import tensor\n'), ((4682, 4691), 'megengine.tensor', 'tensor', (['x'], {}), '(x)\n', (4688, 4691), False, 'from megengine import tensor\n'), ((4927, 4936), 'megengine.tensor', 'tensor', (['x'], {}), '(x)\n', (4933, 4936), False, 'from megengine import tensor\n'), ((5201, 5218), 'megengine.functional.logical_not', 'F.logical_not', (['xx'], {}), '(xx)\n', (5214, 5218), True, 'import megengine.functional as F\n'), ((5264, 5285), 'megengine.functional.logical_and', 'F.logical_and', (['xx', 'yy'], {}), '(xx, yy)\n', (5277, 5285), True, 'import megengine.functional as F\n'), ((5330, 5350), 'megengine.functional.logical_or', 'F.logical_or', (['xx', 'yy'], {}), '(xx, yy)\n', (5342, 5350), True, 'import megengine.functional as F\n'), ((5395, 5416), 'megengine.functional.logical_xor', 'F.logical_xor', (['xx', 'yy'], {}), '(xx, yy)\n', (5408, 5416), True, 'import megengine.functional as F\n'), ((8285, 8304), 'megengine.jit.trace', 'trace', ([], {'symbolic': 'sym'}), '(symbolic=sym)\n', (8290, 8304), False, 'from megengine.jit import trace\n'), ((8565, 8577), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (8573, 8577), True, 'import numpy as np\n'), ((8605, 8629), 'numpy.random.randn', 'np.random.randn', (['(2)', '(0)', '(3)'], {}), '(2, 0, 3)\n', (8620, 8629), True, 'import numpy as np\n'), ((709, 735), 'megengine.tensor', 'tensor', (['[-3.0, -4.0, -5.0]'], {}), '([-3.0, -4.0, -5.0])\n', (715, 735), False, 'from megengine import tensor\n'), ((1307, 1325), 'megengine.tensor', 'tensor', (['[3.0, 4.0]'], {}), '([3.0, 4.0])\n', (1313, 1325), False, 'from megengine import tensor\n'), ((1465, 1483), 'megengine.tensor', 'tensor', (['[3.0, 4.0]'], {}), '([3.0, 4.0])\n', (1471, 1483), False, 'from megengine import tensor\n'), ((1613, 1631), 'megengine.tensor', 'tensor', (['[3.0, 4.0]'], {}), '([3.0, 4.0])\n', (1619, 1631), False, 'from megengine import tensor\n'), ((1633, 1651), 'megengine.tensor', 'tensor', (['[3.0, 4.0]'], {}), '([3.0, 4.0])\n', (1639, 1651), False, 'from megengine import tensor\n'), ((1868, 1886), 'megengine.tensor', 'tensor', (['[3.0, 4.0]'], {}), '([3.0, 4.0])\n', (1874, 1886), False, 'from megengine import tensor\n'), ((2006, 2020), 'megengine.tensor', 'tensor', (['[3, 4]'], {}), '([3, 4])\n', (2012, 2020), False, 'from megengine import tensor\n'), ((2144, 2164), 'megengine.tensor', 'tensor', (['[-5.0, -7.0]'], {}), '([-5.0, -7.0])\n', (2150, 2164), False, 'from megengine import tensor\n'), ((2296, 2312), 'megengine.tensor', 'tensor', (['[-5, -7]'], {}), '([-5, -7])\n', (2302, 2312), False, 'from megengine import tensor\n'), ((3219, 3228), 'megengine.functional.sign', 'F.sign', (['x'], {}), '(x)\n', (3225, 3228), True, 'import megengine.functional as F\n'), ((3238, 3251), 'numpy.sign', 'np.sign', (['case'], {}), '(case)\n', (3245, 3251), True, 'import numpy as np\n'), ((3918, 3937), 'numpy.arange', 'np.arange', (['(0)', '(10000)'], {}), '(0, 10000)\n', (3927, 3937), True, 'import numpy as np\n'), ((4140, 4159), 'numpy.random.rand', 'np.random.rand', (['(100)'], {}), '(100)\n', (4154, 4159), True, 'import numpy as np\n'), ((5768, 5780), 'numpy.arange', 'np.arange', (['(6)'], {}), '(6)\n', (5777, 5780), True, 'import numpy as np\n'), ((5821, 5833), 'numpy.arange', 'np.arange', (['(6)'], {}), '(6)\n', (5830, 5833), True, 'import numpy as np\n'), ((2663, 2672), 'megengine.tensor', 'tensor', (['x'], {}), '(x)\n', (2669, 2672), 
False, 'from megengine import tensor\n'), ((2767, 2776), 'megengine.tensor', 'tensor', (['x'], {}), '(x)\n', (2773, 2776), False, 'from megengine import tensor\n'), ((2929, 2941), 'megengine.tensor', 'tensor', (['case'], {}), '(case)\n', (2935, 2941), False, 'from megengine import tensor\n'), ((3071, 3083), 'megengine.tensor', 'tensor', (['case'], {}), '(case)\n', (3077, 3083), False, 'from megengine import tensor\n')]
|
from fastapi import FastAPI
from sqlmodel import Session, select, SQLModel
from sqlalchemy.exc import OperationalError
from backend.models.timelog import TimeLog
from backend.models.calendar import Calendar
from backend.utils import (
engine,
sqlite3_engine,
create_db,
tags_metadata,
execute_sample_sql,
)
from backend.api import (
user,
timelog,
forecast,
epic,
epic_area,
client,
rate,
team,
role,
sponsor,
capacity,
demand,
)
import csv
app = FastAPI(title="timeflow app API", openapi_tags=tags_metadata)
session = Session(engine)
app.include_router(timelog.router)
app.include_router(forecast.router)
app.include_router(user.router)
app.include_router(epic.router)
app.include_router(epic_area.router)
app.include_router(client.router)
app.include_router(rate.router)
app.include_router(team.router)
app.include_router(role.router)
app.include_router(sponsor.router)
app.include_router(capacity.router)
app.include_router(demand.router)
@app.on_event("startup")
def on_startup():
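    # probe the database with a trivial query; if the schema is missing the
    # query raises OperationalError and we create and seed the tables instead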
try:
statement = select(TimeLog)
results = session.exec(statement)
except OperationalError:
create_db()
execute_sample_sql(session)
@app.on_event("startup")
def implement_calendar_table():
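    # if the calendar table already holds its first row, assume it is populated;
    # otherwise bulk-load it from the bundled CSV via a single INSERT statement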
try:
statement = select(Calendar.year_name).where(Calendar.id == 1)
result = session.exec(statement).one()
except Exception as e:
print(e)
values_sql = f"""INSERT INTO calendar (date, year_number, year_name, quarter_number, quarter_name
, month_number, month_name, week_number, week_name, week_day_number, week_day_name)
VALUES """
with open("backend/calendar.csv") as csvfile:
reader = csv.reader(csvfile, delimiter=",", quotechar="|")
            values_list = []
            for index, row in enumerate(reader):
                # skip the header row and blank lines
                if index > 0 and row[0] != "":
                    _row = [f"'{item}'" for item in row]
                    row_sql = ", ".join(_row)
                    values_list.append(f"({row_sql})")
            # join once so the last row is not inserted twice, then terminate the statement
            values_sql += ", ".join(values_list) + ";"
cur = sqlite3_engine.cursor()
cur.execute(values_sql)
sqlite3_engine.commit()
sqlite3_engine.close()
|
[
"sqlmodel.Session",
"sqlmodel.select"
] |
[((583, 598), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (590, 598), False, 'from sqlmodel import Session, select, SQLModel\n'), ((1080, 1095), 'sqlmodel.select', 'select', (['TimeLog'], {}), '(TimeLog)\n', (1086, 1095), False, 'from sqlmodel import Session, select, SQLModel\n'), ((1175, 1186), 'backend.utils.create_db', 'create_db', ([], {}), '()\n', (1184, 1186), False, 'from backend.utils import engine, sqlite3_engine, create_db, tags_metadata, execute_sample_sql\n'), ((1195, 1222), 'backend.utils.execute_sample_sql', 'execute_sample_sql', (['session'], {}), '(session)\n', (1213, 1222), False, 'from backend.utils import engine, sqlite3_engine, create_db, tags_metadata, execute_sample_sql\n'), ((1311, 1337), 'sqlmodel.select', 'select', (['Calendar.year_name'], {}), '(Calendar.year_name)\n', (1317, 1337), False, 'from sqlmodel import Session, select, SQLModel\n'), ((1769, 1818), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '""","""', 'quotechar': '"""|"""'}), "(csvfile, delimiter=',', quotechar='|')\n", (1779, 1818), False, 'import csv\n'), ((2193, 2216), 'backend.utils.sqlite3_engine.cursor', 'sqlite3_engine.cursor', ([], {}), '()\n', (2214, 2216), False, 'from backend.utils import engine, sqlite3_engine, create_db, tags_metadata, execute_sample_sql\n'), ((2265, 2288), 'backend.utils.sqlite3_engine.commit', 'sqlite3_engine.commit', ([], {}), '()\n', (2286, 2288), False, 'from backend.utils import engine, sqlite3_engine, create_db, tags_metadata, execute_sample_sql\n'), ((2301, 2323), 'backend.utils.sqlite3_engine.close', 'sqlite3_engine.close', ([], {}), '()\n', (2321, 2323), False, 'from backend.utils import engine, sqlite3_engine, create_db, tags_metadata, execute_sample_sql\n')]
|
import megengine as mge
import megengine.module as M
from megengine import functional as F
import numpy as np
from .transformer import MultiheadAttention
#from .utility import has_nan_or_inf
# mge.core.set_option('async_level', 0)
class DecoderWrapper(M.Module):
def __init__(self, cfg):
super().__init__()
channels = cfg.distiller.HIDDEN_DIM
heads = cfg.distiller.ATT_HEADS
# this is a local module derived from official implementation, we modify the last modules
self.matt = MultiheadAttention(channels, heads)
self.pos_projector = M.Linear(in_features=channels, out_features=channels)
self.use_pos = cfg.distiller.USE_POS_EMBEDDING
self.pos_on_v = cfg.distiller.DECODER_POSEMB_ON_V
    def with_pos_embed(self, tensor, pos):
        '''
        tensor: [S, N, C]
        pos: [S, N, C] or [S, 1, C]
        '''
        # bail out before projecting so a missing embedding never reaches Linear
        if not self.use_pos or pos is None:
            return tensor
        pos = self.pos_projector(pos)
        return tensor + pos
def forward(self, q, k, v, query_mask=None, key_padding_mask=None, pos_embedding=None, proj_only=False):
# q, v: [sequence_len, batch_size, channels]
k = self.with_pos_embed(k, pos_embedding)
if self.pos_on_v:
v = self.with_pos_embed(v, pos_embedding)
att, mask, values = self.matt(
q, k, v, key_padding_mask=key_padding_mask, proj_only=proj_only)
return att, mask, values
|
[
"megengine.module.Linear"
] |
[((590, 643), 'megengine.module.Linear', 'M.Linear', ([], {'in_features': 'channels', 'out_features': 'channels'}), '(in_features=channels, out_features=channels)\n', (598, 643), True, 'import megengine.module as M\n')]
|
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
import math
from config import config
from backbone.resnet50 import ResNet50
from module.generate_anchors import generate_anchors
from det_opr.bbox_opr import bbox_transform_inv_opr, box_overlap_opr
from det_opr.utils import get_padded_tensor
from rpn_anchor_target_opr import rpn_anchor_target_opr
from det_opr.loss_opr import sigmoid_cross_entropy_retina, smooth_l1_loss_retina, iou_l1_loss
import pdb
class RetinaNetAnchorV2(M.Module):
def __init__(self):
super().__init__()
def generate_anchors_opr(self, fm_3x3, fm_stride,
anchor_scales=(8, 16, 32, 64, 128),
anchor_ratios=(1, 2, 3), base_size = 4):
np_anchors = generate_anchors(
base_size=base_size,
ratios=np.array(anchor_ratios),
scales=np.array(anchor_scales))
device = fm_3x3.device
anchors = mge.tensor(np_anchors).to(device)
height, width = fm_3x3.shape[2], fm_3x3.shape[3]
shift_x = F.linspace(0, width-1, width).to(device) * fm_stride
shift_y = F.linspace(0, height -1, height).to(device) * fm_stride
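        # tile the per-location offsets over the grid, then add every base anchor
        # to every shifted position: the result is an (H*W*A, 4) set of boxes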
broad_shift_x = F.broadcast_to(shift_x.reshape(1, -1), (height, width)).flatten()
broad_shift_y = F.broadcast_to(shift_y.reshape(-1, 1), (height, width)).flatten()
shifts = F.stack([broad_shift_x, broad_shift_y, broad_shift_x, broad_shift_y], axis=1)
c = anchors.shape[1]
all_anchors = F.expand_dims(anchors, axis=0) + F.expand_dims(shifts, axis=1)
all_anchors = all_anchors.reshape(-1, c).detach()
return all_anchors
def forward(self, fpn_fms):
all_anchors_list = []
fm_stride = [8, 16, 32, 64, 128]
fm_stride.reverse()
for i, fm_3x3 in enumerate(fpn_fms):
anchor_scales = np.array(config.anchor_base_scale) * fm_stride[i]
all_anchors = self.generate_anchors_opr(fm_3x3, fm_stride[i], anchor_scales,
config.anchor_aspect_ratios, base_size = 4)
all_anchors_list.append(all_anchors)
return all_anchors_list
class Network(M.Module):
def __init__(self):
super().__init__()
# ----------------------- build the backbone ------------------------ #
self.resnet50 = ResNet50()
# ------------ freeze the weights of resnet stage1 and stage 2 ------ #
if config.backbone_freeze_at >= 1:
for p in self.resnet50.conv1.parameters():
# p.requires_grad = False
p = p.detach()
if config.backbone_freeze_at >= 2:
for p in self.resnet50.layer1.parameters():
# p.requires_grad = False
p = p.detach()
# -------------------------- build the FPN -------------------------- #
self.backbone = FPN(self.resnet50)
# -------------------------- build the RPN -------------------------- #
# self.RPN = RPN(config.rpn_channel)
self.head = RetinaNetHead()
        # -------------------------- build the anchor generator ------------- #
        self.anchor_generator = RetinaNetAnchorV2()
        # -------------------------- build the criteria --------------------- #
self.criteria = RetinaNetCriteriaV2()
# -------------------------- input Tensor --------------------------- #
self.inputs = {
"image": mge.tensor(
np.random.random([2, 3, 756, 1400]).astype(np.float32), dtype="float32",
),
"im_info": mge.tensor(
np.random.random([2, 6]).astype(np.float32), dtype="float32",
),
"gt_boxes": mge.tensor(
np.random.random([2, 500, 5]).astype(np.float32), dtype="float32",
),
}
def pre_process(self, images):
mean = config.image_mean.reshape(1, -1, 1, 1).astype(np.float32)
std = config.image_std.reshape(1, -1, 1, 1).astype(np.float32)
mean = mge.tensor(mean).to(images.device)
std = mge.tensor(std).to(images.device)
normed_images = (images - mean) / std
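        # pad H and W up to multiples of 64 so the FPN strides divide the input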
normed_images = get_padded_tensor(normed_images, 64)
return normed_images
def forward(self, inputs):
im_info = inputs['im_info']
# process the images
normed_images = self.pre_process(inputs['image'])
if self.training:
gt_boxes = inputs['gt_boxes']
return self._forward_train(normed_images, im_info, gt_boxes)
else:
return self._forward_test(normed_images, im_info)
def _forward_train(self, image, im_info, gt_boxes):
loss_dict = {}
# stride: 128,64,32,16,8, p6->p2
fpn_fms = self.backbone(image)
pred_cls_list, rpn_num_prob_list, pred_reg_list, rpn_iou_list = self.head(fpn_fms)
anchors_list = self.anchor_generator(fpn_fms)
loss_dict = self.criteria(
pred_cls_list, rpn_num_prob_list, pred_reg_list, anchors_list,
rpn_iou_list, gt_boxes, im_info)
return loss_dict
def _forward_test(self, image, im_info):
fpn_fms = self.backbone(image)
pred_cls_list, rpn_num_prob_list, pred_reg_list, rpn_iou_list = self.head(fpn_fms)
anchors_list = self.anchor_generator(fpn_fms)
pred_boxes = self._recover_dtboxes(anchors_list, pred_cls_list,
pred_reg_list, rpn_iou_list)
return pred_boxes
def _recover_dtboxes(self, anchors_list, rpn_cls_list, rpn_bbox_list, rpn_iou_list):
assert rpn_cls_list[0].shape[0] == 1
all_anchors = F.concat(anchors_list, axis = 0)
rpn_cls_scores_final = F.concat(rpn_cls_list, axis=1)[0]
rpn_bbox_offsets_final = F.concat(rpn_bbox_list,axis=1)[0]
rpn_iou_prob_final = F.concat(rpn_iou_list, axis=1)[0]
rpn_bbox_offsets = rpn_bbox_offsets_final.reshape(-1, 4)
rpn_cls_scores = rpn_cls_scores_final.reshape(-1, 1)
rpn_iou_prob = rpn_iou_prob_final.reshape(-1, 1)
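        # decode the offsets against their anchors and pack each detection row
        # as [x1, y1, x2, y2, cls_score, iou_score]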
n, c = all_anchors.shape[0], all_anchors.shape[1]
anchors = F.broadcast_to(F.expand_dims(all_anchors, 1), (n, 1, c)).reshape(-1, c)
rpn_bbox = bbox_transform_inv_opr(anchors, rpn_bbox_offsets)
pred_boxes = F.concat([rpn_bbox, rpn_cls_scores, rpn_iou_prob], axis=1)
return pred_boxes
class RetinaNetCriteriaV2(M.Module):
def __init__(self):
super().__init__()
def anchor_iou_target_opr(self, boxes, im_info, all_anchors,
rpn_bbox_offsets):
n = rpn_bbox_offsets.shape[0]
res = []
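        # per image: decode that image's predictions, measure IoU against its
        # non-ignored GT boxes, and keep each anchor's best overlap as the target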
for i in range(n):
gtboxes = boxes[i, :im_info[i, 5].astype(np.int32)]
offsets = rpn_bbox_offsets[i].reshape(-1, 4).detach()
m = offsets.shape[0]
an, ac = all_anchors.shape[0], all_anchors.shape[1]
anchors = F.broadcast_to(F.expand_dims(all_anchors, 1), (an, 1, ac)).reshape(-1, ac)
dtboxes = bbox_transform_inv_opr(anchors[:,:4], offsets[:, :4])
overlaps = box_overlap_opr(dtboxes, gtboxes[:, :4])
ignore_mask = 1 - F.equal(gtboxes[:, 4], config.anchor_ignore_label).astype(np.float32)
ignore_mask = F.expand_dims(ignore_mask, axis=0)
overlaps = overlaps * ignore_mask
index = F.argmax(overlaps, axis = 1)
value = F.nn.indexing_one_hot(overlaps, index, 1)
value = F.expand_dims(F.expand_dims(value, axis=1), axis=0)
res.append(value)
result = F.concat(res, 0)
return result
def forward(self, pred_cls_list, rpn_num_prob_list, pred_reg_list,
anchors_list, rpn_iou_list, boxes, im_info):
all_anchors_list = [F.concat([a, i*F.ones([a.shape[0], 1]).to(a.device)], axis=1)
for i, a in enumerate(anchors_list)]
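        # the comprehension above appends a column holding each anchor's FPN level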
all_anchors_final = F.concat(all_anchors_list, axis = 0)
rpn_bbox_offset_final = F.concat(pred_reg_list, axis = 1)
rpn_cls_prob_final = F.concat(pred_cls_list, axis = 1)
rpn_iou_prob_final = F.concat(rpn_iou_list, axis = 1)
rpn_num_per_points_final = F.concat(rpn_num_prob_list, axis = 1)
rpn_labels, rpn_target_boxes = rpn_anchor_target_opr(boxes, im_info, all_anchors_final)
ious_target = self.anchor_iou_target_opr(boxes, im_info, all_anchors_final,
rpn_bbox_offset_final)
n = rpn_labels.shape[0]
target_boxes = rpn_target_boxes.reshape(n, -1, 2, 4).transpose(2, 0, 1, 3)
offsets_final = rpn_bbox_offset_final
target_boxes = target_boxes[0]
rpn_labels = rpn_labels.transpose(2, 0, 1)
labels = rpn_labels[0]
cls_loss = sigmoid_cross_entropy_retina(rpn_cls_prob_final,
labels, alpha = config.focal_loss_alpha, gamma = config.focal_loss_gamma)
rpn_bbox_loss = smooth_l1_loss_retina(offsets_final, target_boxes, labels)
rpn_labels = F.expand_dims(labels, axis=2)
rpn_iou_loss = iou_l1_loss(rpn_iou_prob_final, ious_target, rpn_labels)
loss_dict = {}
loss_dict['rpn_cls_loss'] = cls_loss
loss_dict['rpn_bbox_loss'] = 2 * rpn_bbox_loss
loss_dict['rpn_iou_loss'] = 2 * rpn_iou_loss
return loss_dict
class RetinaNetHead(M.Module):
def __init__(self):
super().__init__()
num_convs = 4
in_channels = 256
cls_subnet, bbox_subnet = [], []
for _ in range(num_convs):
cls_subnet.append(
M.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1)
)
cls_subnet.append(M.ReLU())
bbox_subnet.append(
M.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1)
)
bbox_subnet.append(M.ReLU())
self.cls_subnet = M.Sequential(*cls_subnet)
self.bbox_subnet = M.Sequential(*bbox_subnet)
# predictor
self.cls_score = M.Conv2d(
in_channels, config.num_cell_anchors * (config.num_classes-1) * 1,
kernel_size=3, stride=1, padding=1)
self.bbox_pred = M.Conv2d(
in_channels, config.num_cell_anchors * 4 * 1,
kernel_size=3, stride=1, padding=1)
self.iou_pred = M.Conv2d(
in_channels, config.num_cell_anchors * 1,
kernel_size = 3, stride=1, padding = 1)
self.num_pred = M.Conv2d(in_channels,
config.num_cell_anchors * 1,
kernel_size = 3, stride=1, padding = 1)
self._init_weights()
def _init_weights(self):
# Initialization
for modules in [self.cls_subnet, self.bbox_subnet, self.num_pred,
self.cls_score, self.bbox_pred, self.iou_pred]:
for layer in modules.modules():
if isinstance(layer, M.Conv2d):
M.init.normal_(layer.weight, std=0.01)
M.init.fill_(layer.bias, 0)
prior_prob = 0.01
# Use prior in model initialization to improve stability
bias_value = -(math.log((1 - prior_prob) / prior_prob))
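        # sigmoid(bias_value) == prior_prob, so classification outputs start near
        # 0.01 and the loss is not dominated by the many easy negatives early on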
M.init.fill_(self.cls_score.bias, bias_value)
def forward(self, features):
cls_prob_list, rpn_num_prob_list, pred_bbox_list, rpn_iou_prob_list = [], [], [], []
for feature in features:
rpn_cls_conv = self.cls_subnet(feature)
cls_score = self.cls_score(rpn_cls_conv)
rpn_num_prob = self.num_pred(rpn_cls_conv)
cls_prob = F.sigmoid(cls_score)
rpn_box_conv = self.bbox_subnet(feature)
offsets = self.bbox_pred(rpn_box_conv)
rpn_iou_prob = self.iou_pred(rpn_box_conv)
cls_prob_list.append(cls_prob)
pred_bbox_list.append(offsets)
rpn_iou_prob_list.append(rpn_iou_prob)
rpn_num_prob_list.append(rpn_num_prob)
assert cls_prob_list[0].ndim == 4
pred_cls_list = [
_.transpose(0, 2, 3, 1).reshape(_.shape[0], -1, (config.num_classes-1))
for _ in cls_prob_list]
pred_reg_list = [
_.transpose(0, 2, 3, 1).reshape(_.shape[0], -1, 4)
for _ in pred_bbox_list]
rpn_iou_list = [
_.transpose(0, 2, 3, 1).reshape(_.shape[0], -1, (config.num_classes-1))
for _ in rpn_iou_prob_list]
rpn_num_prob_list = [
_.transpose(0, 2, 3, 1).reshape(_.shape[0], -1, (config.num_classes-1))
for _ in rpn_num_prob_list]
return pred_cls_list, rpn_num_prob_list, pred_reg_list, rpn_iou_list
class FPN(M.Module):
"""
This module implements Feature Pyramid Network.
It creates pyramid features built on top of some input feature maps.
"""
def __init__(self, bottom_up):
super(FPN, self).__init__()
in_channels = [512, 1024, 2048]
fpn_dim = 256
use_bias =True
lateral_convs, output_convs = [], []
for idx, in_channels in enumerate(in_channels):
lateral_conv = M.Conv2d(
in_channels, fpn_dim, kernel_size=1, bias=use_bias)
output_conv = M.Conv2d(
fpn_dim, fpn_dim, kernel_size=3, stride=1, padding=1, bias=use_bias)
M.init.msra_normal_(lateral_conv.weight, mode="fan_in")
M.init.msra_normal_(output_conv.weight, mode="fan_in")
if use_bias:
M.init.fill_(lateral_conv.bias, 0)
M.init.fill_(output_conv.bias, 0)
lateral_convs.append(lateral_conv)
output_convs.append(output_conv)
self.p6 = M.Conv2d(fpn_dim, fpn_dim, kernel_size=3, stride=2, padding=1, bias=use_bias)
self.p7 = M.Conv2d(fpn_dim, fpn_dim, kernel_size=3, stride=2, padding=1, bias=use_bias)
self.relu = M.ReLU()
lateral_convs.reverse()
output_convs.reverse()
self.lateral_convs = lateral_convs
self.output_convs = output_convs
self.bottom_up = bottom_up
def forward(self, x):
bottom_up_features = self.bottom_up(x)
# bottom_up_features = bottom_up_features[::-1]
bottom_up_features.reverse()
results = []
prev_features = self.lateral_convs[0](bottom_up_features[0])
results.append(self.output_convs[0](prev_features))
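        # top-down pathway: upsample the coarser map to the current resolution,
        # add the 1x1 lateral projection, then smooth with the 3x3 output conv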
for features, lateral_conv, output_conv in zip(
bottom_up_features[1:], self.lateral_convs[1:], self.output_convs[1:]
):
fh, fw = features.shape[2:]
top_down_features = F.nn.interpolate(
prev_features, size = (fh, fw), mode="BILINEAR")
lateral_features = lateral_conv(features)
prev_features = lateral_features + top_down_features
results.append(output_conv(prev_features))
# p6
p6 = self.p6(results[0])
results.insert(0, p6)
p7 = self.p7(self.relu(p6))
results.insert(0, p7)
return results
|
[
"megengine.module.init.fill_",
"megengine.module.ReLU",
"megengine.functional.sigmoid",
"megengine.functional.ones",
"megengine.module.Conv2d",
"megengine.functional.expand_dims",
"megengine.functional.nn.interpolate",
"megengine.functional.stack",
"megengine.functional.concat",
"megengine.functional.argmax",
"megengine.tensor",
"megengine.module.Sequential",
"megengine.functional.equal",
"megengine.module.init.normal_",
"megengine.functional.linspace",
"megengine.module.init.msra_normal_",
"megengine.functional.nn.indexing_one_hot"
] |
[((1393, 1470), 'megengine.functional.stack', 'F.stack', (['[broad_shift_x, broad_shift_y, broad_shift_x, broad_shift_y]'], {'axis': '(1)'}), '([broad_shift_x, broad_shift_y, broad_shift_x, broad_shift_y], axis=1)\n', (1400, 1470), True, 'import megengine.functional as F\n'), ((2352, 2362), 'backbone.resnet50.ResNet50', 'ResNet50', ([], {}), '()\n', (2360, 2362), False, 'from backbone.resnet50 import ResNet50\n'), ((4203, 4239), 'det_opr.utils.get_padded_tensor', 'get_padded_tensor', (['normed_images', '(64)'], {}), '(normed_images, 64)\n', (4220, 4239), False, 'from det_opr.utils import get_padded_tensor\n'), ((5673, 5703), 'megengine.functional.concat', 'F.concat', (['anchors_list'], {'axis': '(0)'}), '(anchors_list, axis=0)\n', (5681, 5703), True, 'import megengine.functional as F\n'), ((6253, 6302), 'det_opr.bbox_opr.bbox_transform_inv_opr', 'bbox_transform_inv_opr', (['anchors', 'rpn_bbox_offsets'], {}), '(anchors, rpn_bbox_offsets)\n', (6275, 6302), False, 'from det_opr.bbox_opr import bbox_transform_inv_opr, box_overlap_opr\n'), ((6324, 6382), 'megengine.functional.concat', 'F.concat', (['[rpn_bbox, rpn_cls_scores, rpn_iou_prob]'], {'axis': '(1)'}), '([rpn_bbox, rpn_cls_scores, rpn_iou_prob], axis=1)\n', (6332, 6382), True, 'import megengine.functional as F\n'), ((7613, 7629), 'megengine.functional.concat', 'F.concat', (['res', '(0)'], {}), '(res, 0)\n', (7621, 7629), True, 'import megengine.functional as F\n'), ((7947, 7981), 'megengine.functional.concat', 'F.concat', (['all_anchors_list'], {'axis': '(0)'}), '(all_anchors_list, axis=0)\n', (7955, 7981), True, 'import megengine.functional as F\n'), ((8025, 8056), 'megengine.functional.concat', 'F.concat', (['pred_reg_list'], {'axis': '(1)'}), '(pred_reg_list, axis=1)\n', (8033, 8056), True, 'import megengine.functional as F\n'), ((8088, 8119), 'megengine.functional.concat', 'F.concat', (['pred_cls_list'], {'axis': '(1)'}), '(pred_cls_list, axis=1)\n', (8096, 8119), True, 'import megengine.functional as F\n'), ((8151, 8181), 'megengine.functional.concat', 'F.concat', (['rpn_iou_list'], {'axis': '(1)'}), '(rpn_iou_list, axis=1)\n', (8159, 8181), True, 'import megengine.functional as F\n'), ((8219, 8254), 'megengine.functional.concat', 'F.concat', (['rpn_num_prob_list'], {'axis': '(1)'}), '(rpn_num_prob_list, axis=1)\n', (8227, 8254), True, 'import megengine.functional as F\n'), ((8298, 8354), 'rpn_anchor_target_opr.rpn_anchor_target_opr', 'rpn_anchor_target_opr', (['boxes', 'im_info', 'all_anchors_final'], {}), '(boxes, im_info, all_anchors_final)\n', (8319, 8354), False, 'from rpn_anchor_target_opr import rpn_anchor_target_opr\n'), ((8834, 8957), 'det_opr.loss_opr.sigmoid_cross_entropy_retina', 'sigmoid_cross_entropy_retina', (['rpn_cls_prob_final', 'labels'], {'alpha': 'config.focal_loss_alpha', 'gamma': 'config.focal_loss_gamma'}), '(rpn_cls_prob_final, labels, alpha=config.\n focal_loss_alpha, gamma=config.focal_loss_gamma)\n', (8862, 8957), False, 'from det_opr.loss_opr import sigmoid_cross_entropy_retina, smooth_l1_loss_retina, iou_l1_loss\n'), ((8998, 9056), 'det_opr.loss_opr.smooth_l1_loss_retina', 'smooth_l1_loss_retina', (['offsets_final', 'target_boxes', 'labels'], {}), '(offsets_final, target_boxes, labels)\n', (9019, 9056), False, 'from det_opr.loss_opr import sigmoid_cross_entropy_retina, smooth_l1_loss_retina, iou_l1_loss\n'), ((9079, 9108), 'megengine.functional.expand_dims', 'F.expand_dims', (['labels'], {'axis': '(2)'}), '(labels, axis=2)\n', (9092, 9108), True, 'import megengine.functional as F\n'), ((9132, 9188), 'det_opr.loss_opr.iou_l1_loss', 'iou_l1_loss', (['rpn_iou_prob_final', 'ious_target', 'rpn_labels'], {}), '(rpn_iou_prob_final, ious_target, rpn_labels)\n', (9143, 9188), False, 'from det_opr.loss_opr import sigmoid_cross_entropy_retina, smooth_l1_loss_retina, iou_l1_loss\n'), ((9983, 10008), 'megengine.module.Sequential', 'M.Sequential', (['*cls_subnet'], {}), '(*cls_subnet)\n', (9995, 10008), True, 'import megengine.module as M\n'), ((10036, 10062), 'megengine.module.Sequential', 'M.Sequential', (['*bbox_subnet'], {}), '(*bbox_subnet)\n', (10048, 10062), True, 'import megengine.module as M\n'), ((10108, 10226), 'megengine.module.Conv2d', 'M.Conv2d', (['in_channels', '(config.num_cell_anchors * (config.num_classes - 1) * 1)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(in_channels, config.num_cell_anchors * (config.num_classes - 1) * \n 1, kernel_size=3, stride=1, padding=1)\n', (10116, 10226), True, 'import megengine.module as M\n'), ((10270, 10364), 'megengine.module.Conv2d', 'M.Conv2d', (['in_channels', '(config.num_cell_anchors * 4 * 1)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(in_channels, config.num_cell_anchors * 4 * 1, kernel_size=3,\n stride=1, padding=1)\n', (10278, 10364), True, 'import megengine.module as M\n'), ((10411, 10501), 'megengine.module.Conv2d', 'M.Conv2d', (['in_channels', '(config.num_cell_anchors * 1)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(in_channels, config.num_cell_anchors * 1, kernel_size=3, stride=1,\n padding=1)\n', (10419, 10501), True, 'import megengine.module as M\n'), ((10552, 10642), 'megengine.module.Conv2d', 'M.Conv2d', (['in_channels', '(config.num_cell_anchors * 1)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(in_channels, config.num_cell_anchors * 1, kernel_size=3, stride=1,\n padding=1)\n', (10560, 10642), True, 'import megengine.module as M\n'), ((11253, 11298), 'megengine.module.init.fill_', 'M.init.fill_', (['self.cls_score.bias', 'bias_value'], {}), '(self.cls_score.bias, bias_value)\n', (11265, 11298), True, 'import megengine.module as M\n'), ((13742, 13819), 'megengine.module.Conv2d', 'M.Conv2d', (['fpn_dim', 'fpn_dim'], {'kernel_size': '(3)', 'stride': '(2)', 'padding': '(1)', 'bias': 'use_bias'}), '(fpn_dim, fpn_dim, kernel_size=3, stride=2, padding=1, bias=use_bias)\n', (13750, 13819), True, 'import megengine.module as M\n'), ((13838, 13915), 'megengine.module.Conv2d', 'M.Conv2d', (['fpn_dim', 'fpn_dim'], {'kernel_size': '(3)', 'stride': '(2)', 'padding': '(1)', 'bias': 'use_bias'}), '(fpn_dim, fpn_dim, kernel_size=3, stride=2, padding=1, bias=use_bias)\n', (13846, 13915), True, 'import megengine.module as M\n'), ((13936, 13944), 'megengine.module.ReLU', 'M.ReLU', ([], {}), '()\n', (13942, 13944), True, 'import megengine.module as M\n'), ((1523, 1553), 'megengine.functional.expand_dims', 'F.expand_dims', (['anchors'], {'axis': '(0)'}), '(anchors, axis=0)\n', (1536, 1553), True, 'import megengine.functional as F\n'), ((1556, 1585), 'megengine.functional.expand_dims', 'F.expand_dims', (['shifts'], {'axis': '(1)'}), '(shifts, axis=1)\n', (1569, 1585), True, 'import megengine.functional as F\n'), ((5737, 5767), 'megengine.functional.concat', 'F.concat', (['rpn_cls_list'], {'axis': '(1)'}), '(rpn_cls_list, axis=1)\n', (5745, 5767), True, 'import megengine.functional as F\n'), ((5804, 5835), 'megengine.functional.concat', 'F.concat', (['rpn_bbox_list'], {'axis': '(1)'}), '(rpn_bbox_list, axis=1)\n', (5812, 5835), True, 'import megengine.functional as F\n'), ((5867, 5897), 'megengine.functional.concat', 'F.concat', (['rpn_iou_list'], {'axis': '(1)'}), '(rpn_iou_list, axis=1)\n', (5875, 5897), True, 'import megengine.functional as F\n'), ((7044, 7098), 'det_opr.bbox_opr.bbox_transform_inv_opr', 'bbox_transform_inv_opr', (['anchors[:, :4]', 'offsets[:, :4]'], {}), '(anchors[:, :4], offsets[:, :4])\n', (7066, 7098), False, 'from det_opr.bbox_opr import bbox_transform_inv_opr, box_overlap_opr\n'), ((7121, 7161), 'det_opr.bbox_opr.box_overlap_opr', 'box_overlap_opr', (['dtboxes', 'gtboxes[:, :4]'], {}), '(dtboxes, gtboxes[:, :4])\n', (7136, 7161), False, 'from det_opr.bbox_opr import bbox_transform_inv_opr, box_overlap_opr\n'), ((7288, 7322), 'megengine.functional.expand_dims', 'F.expand_dims', (['ignore_mask'], {'axis': '(0)'}), '(ignore_mask, axis=0)\n', (7301, 7322), True, 'import megengine.functional as F\n'), ((7402, 7428), 'megengine.functional.argmax', 'F.argmax', (['overlaps'], {'axis': '(1)'}), '(overlaps, axis=1)\n', (7410, 7428), True, 'import megengine.functional as F\n'), ((7451, 7492), 'megengine.functional.nn.indexing_one_hot', 'F.nn.indexing_one_hot', (['overlaps', 'index', '(1)'], {}), '(overlaps, index, 1)\n', (7472, 7492), True, 'import megengine.functional as F\n'), ((11204, 11243), 'math.log', 'math.log', (['((1 - prior_prob) / prior_prob)'], {}), '((1 - prior_prob) / prior_prob)\n', (11212, 11243), False, 'import math\n'), ((11648, 11668), 'megengine.functional.sigmoid', 'F.sigmoid', (['cls_score'], {}), '(cls_score)\n', (11657, 11668), True, 'import megengine.functional as F\n'), ((13163, 13223), 'megengine.module.Conv2d', 'M.Conv2d', (['in_channels', 'fpn_dim'], {'kernel_size': '(1)', 'bias': 'use_bias'}), '(in_channels, fpn_dim, kernel_size=1, bias=use_bias)\n', (13171, 13223), True, 'import megengine.module as M\n'), ((13267, 13344), 'megengine.module.Conv2d', 'M.Conv2d', (['fpn_dim', 'fpn_dim'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': 'use_bias'}), '(fpn_dim, fpn_dim, kernel_size=3, stride=1, padding=1, bias=use_bias)\n', (13275, 13344), True, 'import megengine.module as M\n'), ((13374, 13429), 'megengine.module.init.msra_normal_', 'M.init.msra_normal_', (['lateral_conv.weight'], {'mode': '"""fan_in"""'}), "(lateral_conv.weight, mode='fan_in')\n", (13393, 13429), True, 'import megengine.module as M\n'), ((13442, 13496), 'megengine.module.init.msra_normal_', 'M.init.msra_normal_', (['output_conv.weight'], {'mode': '"""fan_in"""'}), "(output_conv.weight, mode='fan_in')\n", (13461, 13496), True, 'import megengine.module as M\n'), ((14669, 14732), 'megengine.functional.nn.interpolate', 'F.nn.interpolate', (['prev_features'], {'size': '(fh, fw)', 'mode': '"""BILINEAR"""'}), "(prev_features, size=(fh, fw), mode='BILINEAR')\n", (14685, 14732), True, 'import megengine.functional as F\n'), ((841, 864), 'numpy.array', 'np.array', (['anchor_ratios'], {}), '(anchor_ratios)\n', (849, 864), True, 'import numpy as np\n'), ((885, 908), 'numpy.array', 'np.array', (['anchor_scales'], {}), '(anchor_scales)\n', (893, 908), True, 'import numpy as np\n'), ((959, 981), 'megengine.tensor', 'mge.tensor', (['np_anchors'], {}), '(np_anchors)\n', (969, 981), True, 'import megengine as mge\n'), ((1891, 1925), 'numpy.array', 'np.array', (['config.anchor_base_scale'], {}), '(config.anchor_base_scale)\n', (1899, 1925), True, 'import numpy as np\n'), ((3906, 3944), 'config.config.image_mean.reshape', 'config.image_mean.reshape', (['(1)', '(-1)', '(1)', '(1)'], {}), '(1, -1, 1, 1)\n', (3931, 3944), False, 'from config import config\n'), ((3978, 4015), 'config.config.image_std.reshape', 'config.image_std.reshape', (['(1)', '(-1)', '(1)', '(1)'], {}), '(1, -1, 1, 1)\n', (4002, 4015), False, 'from config import config\n'), ((4050, 4066), 'megengine.tensor', 'mge.tensor', (['mean'], {}), '(mean)\n', (4060, 4066), True, 'import megengine as mge\n'), ((4099, 4114), 'megengine.tensor', 'mge.tensor', (['std'], {}), '(std)\n', (4109, 4114), True, 'import megengine as mge\n'), ((7527, 7555), 'megengine.functional.expand_dims', 'F.expand_dims', (['value'], {'axis': '(1)'}), '(value, axis=1)\n', (7540, 7555), True, 'import megengine.functional as F\n'), ((9658, 9728), 'megengine.module.Conv2d', 'M.Conv2d', (['in_channels', 'in_channels'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(in_channels, in_channels, kernel_size=3, stride=1, padding=1)\n', (9666, 9728), True, 'import megengine.module as M\n'), ((9773, 9781), 'megengine.module.ReLU', 'M.ReLU', ([], {}), '()\n', (9779, 9781), True, 'import megengine.module as M\n'), ((9831, 9901), 'megengine.module.Conv2d', 'M.Conv2d', (['in_channels', 'in_channels'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(in_channels, in_channels, kernel_size=3, stride=1, padding=1)\n', (9839, 9901), True, 'import megengine.module as M\n'), ((9947, 9955), 'megengine.module.ReLU', 'M.ReLU', ([], {}), '()\n', (9953, 9955), True, 'import megengine.module as M\n'), ((13538, 13572), 'megengine.module.init.fill_', 'M.init.fill_', (['lateral_conv.bias', '(0)'], {}), '(lateral_conv.bias, 0)\n', (13550, 13572), True, 'import megengine.module as M\n'), ((13589, 13622), 'megengine.module.init.fill_', 'M.init.fill_', (['output_conv.bias', '(0)'], {}), '(output_conv.bias, 0)\n', (13601, 13622), True, 'import megengine.module as M\n'), ((1068, 1099), 'megengine.functional.linspace', 'F.linspace', (['(0)', '(width - 1)', 'width'], {}), '(0, width - 1, width)\n', (1078, 1099), True, 'import megengine.functional as F\n'), ((1139, 1172), 'megengine.functional.linspace', 'F.linspace', (['(0)', '(height - 1)', 'height'], {}), '(0, height - 1, height)\n', (1149, 1172), True, 'import megengine.functional as F\n'), ((6177, 6206), 'megengine.functional.expand_dims', 'F.expand_dims', (['all_anchors', '(1)'], {}), '(all_anchors, 1)\n', (6190, 6206), True, 'import megengine.functional as F\n'), ((11002, 11040), 'megengine.module.init.normal_', 'M.init.normal_', (['layer.weight'], {'std': '(0.01)'}), '(layer.weight, std=0.01)\n', (11016, 11040), True, 'import megengine.module as M\n'), ((11061, 11088), 'megengine.module.init.fill_', 'M.init.fill_', (['layer.bias', '(0)'], {}), '(layer.bias, 0)\n', (11073, 11088), True, 'import megengine.module as M\n'), ((3490, 3525), 'numpy.random.random', 'np.random.random', (['[2, 3, 756, 1400]'], {}), '([2, 3, 756, 1400])\n', (3506, 3525), True, 'import numpy as np\n'), ((3629, 3653), 'numpy.random.random', 'np.random.random', (['[2, 6]'], {}), '([2, 6])\n', (3645, 3653), True, 'import numpy as np\n'), ((3758, 3787), 'numpy.random.random', 'np.random.random', (['[2, 500, 5]'], {}), '([2, 500, 5])\n', (3774, 3787), True, 'import numpy as np\n'), ((6962, 6991), 'megengine.functional.expand_dims', 'F.expand_dims', (['all_anchors', '(1)'], {}), '(all_anchors, 1)\n', (6975, 6991), True, 'import megengine.functional as F\n'), ((7192, 7242), 'megengine.functional.equal', 'F.equal', (['gtboxes[:, 4]', 'config.anchor_ignore_label'], {}), '(gtboxes[:, 4], config.anchor_ignore_label)\n', (7199, 7242), True, 'import megengine.functional as F\n'), ((7821, 7844), 'megengine.functional.ones', 'F.ones', (['[a.shape[0], 1]'], {}), '([a.shape[0], 1])\n', (7827, 7844), True, 'import megengine.functional as F\n')]
|
from typing import Optional
from sqlmodel import Field, Session, SQLModel, create_engine, select
class Team(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str = Field(index=True)
headquarters: str
class Hero(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str = Field(index=True)
secret_name: str
age: Optional[int] = Field(default=None, index=True)
team_id: Optional[int] = Field(default=None, foreign_key="team.id")
sqlite_file_name = "database.db"
sqlite_url = f"sqlite:///{sqlite_file_name}"
engine = create_engine(sqlite_url, echo=True)
def create_db_and_tables():
SQLModel.metadata.create_all(engine)
def create_heroes():
with Session(engine) as session:
team_preventers = Team(name="Preventers", headquarters="Sharp Tower")
team_z_force = Team(name="Z-Force", headquarters="Sister Margaret’s Bar")
session.add(team_preventers)
session.add(team_z_force)
session.commit()
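        # Committing assigns database-generated IDs to the teams; reading
        # team.id below triggers an automatic refresh from the database.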
hero_deadpond = Hero(
name="Deadpond", secret_name="<NAME>", team_id=team_z_force.id
)
hero_rusty_man = Hero(
name="Rusty-Man",
secret_name="<NAME>",
age=48,
team_id=team_preventers.id,
)
hero_spider_boy = Hero(name="Spider-Boy", secret_name="<NAME>")
session.add(hero_deadpond)
session.add(hero_rusty_man)
session.add(hero_spider_boy)
session.commit()
session.refresh(hero_deadpond)
session.refresh(hero_rusty_man)
session.refresh(hero_spider_boy)
print("Created hero:", hero_deadpond)
print("Created hero:", hero_rusty_man)
print("Created hero:", hero_spider_boy)
def select_heroes():
with Session(engine) as session:
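        # Selecting two models with a WHERE clause on the foreign key performs
        # an implicit join, yielding (hero, team) tuples.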
statement = select(Hero, Team).where(Hero.team_id == Team.id)
results = session.exec(statement)
for hero, team in results:
print("Hero:", hero, "Team:", team)
def main():
create_db_and_tables()
create_heroes()
select_heroes()
if __name__ == "__main__":
main()
|
[
"sqlmodel.SQLModel.metadata.create_all",
"sqlmodel.Session",
"sqlmodel.Field",
"sqlmodel.select",
"sqlmodel.create_engine"
] |
[((625, 661), 'sqlmodel.create_engine', 'create_engine', (['sqlite_url'], {'echo': '(True)'}), '(sqlite_url, echo=True)\n', (638, 661), False, 'from sqlmodel import Field, Session, SQLModel, create_engine, select\n'), ((158, 195), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (163, 195), False, 'from sqlmodel import Field, Session, SQLModel, create_engine, select\n'), ((212, 229), 'sqlmodel.Field', 'Field', ([], {'index': '(True)'}), '(index=True)\n', (217, 229), False, 'from sqlmodel import Field, Session, SQLModel, create_engine, select\n'), ((312, 349), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (317, 349), False, 'from sqlmodel import Field, Session, SQLModel, create_engine, select\n'), ((366, 383), 'sqlmodel.Field', 'Field', ([], {'index': '(True)'}), '(index=True)\n', (371, 383), False, 'from sqlmodel import Field, Session, SQLModel, create_engine, select\n'), ((430, 461), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'index': '(True)'}), '(default=None, index=True)\n', (435, 461), False, 'from sqlmodel import Field, Session, SQLModel, create_engine, select\n'), ((492, 534), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'foreign_key': '"""team.id"""'}), "(default=None, foreign_key='team.id')\n", (497, 534), False, 'from sqlmodel import Field, Session, SQLModel, create_engine, select\n'), ((696, 732), 'sqlmodel.SQLModel.metadata.create_all', 'SQLModel.metadata.create_all', (['engine'], {}), '(engine)\n', (724, 732), False, 'from sqlmodel import Field, Session, SQLModel, create_engine, select\n'), ((765, 780), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (772, 780), False, 'from sqlmodel import Field, Session, SQLModel, create_engine, select\n'), ((1830, 1845), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (1837, 1845), False, 'from sqlmodel import Field, Session, SQLModel, create_engine, select\n'), ((1878, 1896), 'sqlmodel.select', 'select', (['Hero', 'Team'], {}), '(Hero, Team)\n', (1884, 1896), False, 'from sqlmodel import Field, Session, SQLModel, create_engine, select\n')]
|
from typing import List
from uuid import UUID
import inject
from sqlalchemy.sql.expression import desc
from sqlmodel import Session, between, select
from src.core.events import EventDescription
from src.core.helpers.exceptions import DataValidationError, NotAuthorizedError, NotFoundError
from src.core.models import (
Client,
Context,
CreateBalance,
CreateOrder,
Item,
Order,
OrderDetail,
QueryOrder,
UpdateOrder,
UpdateOrderStatus,
)
from src.core.services import Streamer
from . import balance
def get_all(session: Session, query_schema: QueryOrder, context: Context) -> List[Order]:
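    # Build the WHERE clause incrementally from the optional query filters.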
args = []
if query_schema.status is not None:
args.append(Order.status == query_schema.status)
if not context.user_is_super_user:
args.append(Order.owner_id == context.user_id)
if query_schema.start_date is not None and query_schema.end_date is not None:
args.append(between(Order.date, query_schema.start_date, query_schema.end_date))
return session.exec(select(Order).where(*args).order_by(desc(Order.date))).all()
def get_by_id(session: Session, order_id: UUID, context: Context) -> Order:
order = session.exec(select(Order).where(Order.id == order_id)).first()
if not order:
raise NotFoundError("Não foi possível localizar a venda com ID: %s" % order_id)
if not context.user_is_super_user and order.owner_id != context.user_id:
raise NotAuthorizedError(f"Você não possui permissão para consultar a Venda {order_id}")
return order
@inject.params(streamer=Streamer)
def register_sale(session: Session, schema: CreateOrder, context: Context, streamer: Streamer) -> Order:
if not session.exec(select(Client).where(Client.id == schema.client_id)).first():
raise NotFoundError(f"Não foi possível localizar o cliente com ID: {schema.client_id}")
checked_ids = []
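    # Validate each distinct item once: it must exist, be available, and have
    # enough stock for the combined quantity requested across all order lines.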
for detail in schema.items:
if detail.item_id in checked_ids:
continue
checked_ids.append(detail.item_id)
item = session.exec(select(Item).where(Item.id == detail.item_id)).first()
if not item:
raise NotFoundError(f"Não foi possível salvar a venda, item com o ID {detail.item_id} não existe")
if not item.avaliable:
raise DataValidationError(f"O item {item.name} de ID {item.id} não está disponível!")
total_required = sum(x.item_amount for x in schema.items if x.item_id == item.id)
if item.amount < total_required:
raise DataValidationError(
"O item %(name)s não possui estoque suficiente, disponível: %(amount)s, solicitado: %(required)s"
% {"name": item.name, "amount": item.amount, "required": total_required}
)
item.amount -= total_required
session.add(item)
    order = Order(**schema.dict(exclude={"items": ...}), owner_id=context.user_id)
session.add(order)
for detail in schema.items:
detail_obj = OrderDetail(**detail.dict(), order_id=order.id)
session.add(detail_obj)
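    # Record the order's total value as a balance entry whose operation
    # mirrors the sale type.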
balance_schema = CreateBalance(
value=sum(detail.value for detail in schema.items),
operation=schema.sale_type.value,
description=schema.description,
)
balance_obj = balance.create(session, balance_schema, context=context)
session.add(balance_obj)
session.commit()
streamer.send_event(description=EventDescription.CREATE_ORDER, context=context, order=order.dict())
return order
@inject.params(streamer=Streamer)
def delete_by_id(session: Session, order_id: UUID, context: Context, streamer: Streamer) -> Order:
order = session.exec(select(Order).where(Order.id == order_id)).first()
if not order:
raise NotFoundError(f"Não foi possível localizar a venda com o ID: {order_id}")
if not context.user_is_super_user and order.owner_id != context.user_id:
raise NotAuthorizedError(f"Você não possui permissão para excluir a Venda {order_id}")
for detail in [detail for detail in order.items]:
item = detail.item
item.amount += detail.item_amount
session.add(item)
session.delete(order)
session.commit()
streamer.send_event(EventDescription.DELETE_ORDER, context=context, order=order.dict())
return order
@inject.params(streamer=Streamer)
def update(session: Session, data: UpdateOrder, context: Context, streamer: Streamer) -> Order:
order = session.exec(select(Order).where(Order.id == data.id)).first()
if not order:
raise NotFoundError(f"Não foi possível localizar a venda com ID: {data.id}")
if not context.user_is_super_user and order.owner_id != context.user_id:
raise NotAuthorizedError(f"Você não possui permissão para excluir a venda com ID: {data.id}")
columns = order.__table__.columns.keys()
for key, value in data.dict(exclude_defaults=True).items():
if key not in columns:
continue
setattr(order, key, value)
session.add(order)
session.commit()
streamer.send_event(
description=EventDescription.UPDATE_ORDER,
context=context,
data={"order_data": order.dict(), "update_schema": data.dict()},
)
return order
@inject.params(streamer=Streamer)
def update_status(session: Session, schema: UpdateOrderStatus, context: Context, streamer: Streamer) -> None:
order = session.exec(select(Order).where(Order.id == schema.order_id)).first()
if not order:
raise NotFoundError(f"Não foi possível localizar a venda com o ID: {schema.order_id}")
if not context.user_is_super_user and order.owner_id != context.user_id:
raise NotAuthorizedError(f"Você não possui permissão para alterar o status da venda {schema.order_id}")
order.status = schema.status
session.add(order)
session.commit()
streamer.send_event(EventDescription.UPDATE_ORDER, context=context, order=order.dict(), new_status=schema.status)
|
[
"sqlmodel.select",
"sqlmodel.between"
] |
[((1554, 1586), 'inject.params', 'inject.params', ([], {'streamer': 'Streamer'}), '(streamer=Streamer)\n', (1567, 1586), False, 'import inject\n'), ((3515, 3547), 'inject.params', 'inject.params', ([], {'streamer': 'Streamer'}), '(streamer=Streamer)\n', (3528, 3547), False, 'import inject\n'), ((4315, 4347), 'inject.params', 'inject.params', ([], {'streamer': 'Streamer'}), '(streamer=Streamer)\n', (4328, 4347), False, 'import inject\n'), ((5250, 5282), 'inject.params', 'inject.params', ([], {'streamer': 'Streamer'}), '(streamer=Streamer)\n', (5263, 5282), False, 'import inject\n'), ((1284, 1357), 'src.core.helpers.exceptions.NotFoundError', 'NotFoundError', (["('Não foi possível localizar a venda com ID: %s' % order_id)"], {}), "('Não foi possível localizar a venda com ID: %s' % order_id)\n", (1297, 1357), False, 'from src.core.helpers.exceptions import DataValidationError, NotAuthorizedError, NotFoundError\n'), ((1450, 1537), 'src.core.helpers.exceptions.NotAuthorizedError', 'NotAuthorizedError', (['f"""Você não possui permissão para consultar a Venda {order_id}"""'], {}), "(\n f'Você não possui permissão para consultar a Venda {order_id}')\n", (1468, 1537), False, 'from src.core.helpers.exceptions import DataValidationError, NotAuthorizedError, NotFoundError\n'), ((1792, 1878), 'src.core.helpers.exceptions.NotFoundError', 'NotFoundError', (['f"""Não foi possível localizar o cliente com ID: {schema.client_id}"""'], {}), "(\n f'Não foi possível localizar o cliente com ID: {schema.client_id}')\n", (1805, 1878), False, 'from src.core.helpers.exceptions import DataValidationError, NotAuthorizedError, NotFoundError\n'), ((3756, 3829), 'src.core.helpers.exceptions.NotFoundError', 'NotFoundError', (['f"""Não foi possível localizar a venda com o ID: {order_id}"""'], {}), "(f'Não foi possível localizar a venda com o ID: {order_id}')\n", (3769, 3829), False, 'from src.core.helpers.exceptions import DataValidationError, NotAuthorizedError, NotFoundError\n'), ((3922, 4007), 'src.core.helpers.exceptions.NotAuthorizedError', 'NotAuthorizedError', (['f"""Você não possui permissão para excluir a Venda {order_id}"""'], {}), "(f'Você não possui permissão para excluir a Venda {order_id}'\n )\n", (3940, 4007), False, 'from src.core.helpers.exceptions import DataValidationError, NotAuthorizedError, NotFoundError\n'), ((4553, 4623), 'src.core.helpers.exceptions.NotFoundError', 'NotFoundError', (['f"""Não foi possível localizar a venda com ID: {data.id}"""'], {}), "(f'Não foi possível localizar a venda com ID: {data.id}')\n", (4566, 4623), False, 'from src.core.helpers.exceptions import DataValidationError, NotAuthorizedError, NotFoundError\n'), ((4716, 4808), 'src.core.helpers.exceptions.NotAuthorizedError', 'NotAuthorizedError', (['f"""Você não possui permissão para excluir a venda com ID: {data.id}"""'], {}), "(\n f'Você não possui permissão para excluir a venda com ID: {data.id}')\n", (4734, 4808), False, 'from src.core.helpers.exceptions import DataValidationError, NotAuthorizedError, NotFoundError\n'), ((5509, 5594), 'src.core.helpers.exceptions.NotFoundError', 'NotFoundError', (['f"""Não foi possível localizar a venda com o ID: {schema.order_id}"""'], {}), "(f'Não foi possível localizar a venda com o ID: {schema.order_id}'\n )\n", (5522, 5594), False, 'from src.core.helpers.exceptions import DataValidationError, NotAuthorizedError, NotFoundError\n'), ((5682, 5789), 'src.core.helpers.exceptions.NotAuthorizedError', 'NotAuthorizedError', (['f"""Você não possui permissão para alterar o status da venda {schema.order_id}"""'], {}), "(\n f'Você não possui permissão para alterar o status da venda {schema.order_id}'\n )\n", (5700, 5789), False, 'from src.core.helpers.exceptions import DataValidationError, NotAuthorizedError, NotFoundError\n'), ((942, 1009), 'sqlmodel.between', 'between', (['Order.date', 'query_schema.start_date', 'query_schema.end_date'], {}), '(Order.date, query_schema.start_date, query_schema.end_date)\n', (949, 1009), False, 'from sqlmodel import Session, between, select\n'), ((2159, 2261), 'src.core.helpers.exceptions.NotFoundError', 'NotFoundError', (['f"""Não foi possível salvar a venda, item com o ID {detail.item_id} não existe"""'], {}), "(\n f'Não foi possível salvar a venda, item com o ID {detail.item_id} não existe'\n )\n", (2172, 2261), False, 'from src.core.helpers.exceptions import DataValidationError, NotAuthorizedError, NotFoundError\n'), ((2302, 2381), 'src.core.helpers.exceptions.DataValidationError', 'DataValidationError', (['f"""O item {item.name} de ID {item.id} não está disponível!"""'], {}), "(f'O item {item.name} de ID {item.id} não está disponível!')\n", (2321, 2381), False, 'from src.core.helpers.exceptions import DataValidationError, NotAuthorizedError, NotFoundError\n'), ((2533, 2734), 'src.core.helpers.exceptions.DataValidationError', 'DataValidationError', (["('O item %(name)s não possui estoque suficiente, disponível: %(amount)s, solicitado: %(required)s'\n % {'name': item.name, 'amount': item.amount, 'required': total_required})"], {}), "(\n 'O item %(name)s não possui estoque suficiente, disponível: %(amount)s, solicitado: %(required)s'\n % {'name': item.name, 'amount': item.amount, 'required': total_required})\n", (2552, 2734), False, 'from src.core.helpers.exceptions import DataValidationError, NotAuthorizedError, NotFoundError\n'), ((1072, 1088), 'sqlalchemy.sql.expression.desc', 'desc', (['Order.date'], {}), '(Order.date)\n', (1076, 1088), False, 'from sqlalchemy.sql.expression import desc\n'), ((1200, 1213), 'sqlmodel.select', 'select', (['Order'], {}), '(Order)\n', (1206, 1213), False, 'from sqlmodel import Session, between, select\n'), ((3672, 3685), 'sqlmodel.select', 'select', (['Order'], {}), '(Order)\n', (3678, 3685), False, 'from sqlmodel import Session, between, select\n'), ((4470, 4483), 'sqlmodel.select', 'select', (['Order'], {}), '(Order)\n', (4476, 4483), False, 'from sqlmodel import Session, between, select\n'), ((5418, 5431), 'sqlmodel.select', 'select', (['Order'], {}), '(Order)\n', (5424, 5431), False, 'from sqlmodel import Session, between, select\n'), ((1716, 1730), 'sqlmodel.select', 'select', (['Client'], {}), '(Client)\n', (1722, 1730), False, 'from sqlmodel import Session, between, select\n'), ((2064, 2076), 'sqlmodel.select', 'select', (['Item'], {}), '(Item)\n', (2070, 2076), False, 'from sqlmodel import Session, between, select\n'), ((1036, 1049), 'sqlmodel.select', 'select', (['Order'], {}), '(Order)\n', (1042, 1049), False, 'from sqlmodel import Session, between, select\n')]
|
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
from config import config
from backbone.resnet50 import ResNet50
from module.rpn import RPN
from layers.roi_pool import roi_pool
from det_opr.bbox_opr import bbox_transform_inv_opr, restore_bbox
from det_opr.fpn_roi_target import fpn_roi_target
from det_opr.loss_opr import softmax_loss_opr, smooth_l1_loss_rcnn_opr
from det_opr.utils import get_padded_tensor
import pdb
class Network(M.Module):
def __init__(self):
super().__init__()
# ----------------------- build the backbone ------------------------ #
self.resnet50 = ResNet50()
# ------------ freeze the weights of resnet stage1 and stage 2 ------ #
if config.backbone_freeze_at >= 1:
for p in self.resnet50.conv1.parameters():
# p.requires_grad = False
p = p.detach()
if config.backbone_freeze_at >= 2:
for p in self.resnet50.layer1.parameters():
# p.requires_grad = False
p = p.detach()
# -------------------------- build the FPN -------------------------- #
self.backbone = FPN(self.resnet50)
# -------------------------- build the RPN -------------------------- #
self.RPN = RPN(config.rpn_channel)
# ----------------------- build the RCNN head ----------------------- #
self.RCNN = RCNN()
# -------------------------- input Tensor --------------------------- #
self.inputs = {
"image": mge.tensor(
np.random.random([2, 3, 224, 224]).astype(np.float32), dtype="float32",
),
"im_info": mge.tensor(
np.random.random([2, 5]).astype(np.float32), dtype="float32",
),
"gt_boxes": mge.tensor(
np.random.random([2, 100, 5]).astype(np.float32), dtype="float32",
),
}
def pre_process(self, images):
mean = config.image_mean.reshape(1, 3, 1, 1).astype(np.float32)
std = config.image_std.reshape(1, 3, 1, 1).astype(np.float32)
mean = mge.tensor(mean).to(images.device)
std = mge.tensor(std).to(images.device)
normed_images = (images - mean) / std
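        # Pad H/W up to a multiple of 64 so every FPN stride divides evenly.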
normed_images = get_padded_tensor(normed_images, 64)
return normed_images
def forward(self, inputs):
images = inputs['image']
im_info = inputs['im_info']
gt_boxes = inputs['gt_boxes']
#del images
# process the images
normed_images = self.pre_process(images)
if self.training:
return self._forward_train(normed_images, im_info, gt_boxes)
else:
return self._forward_test(normed_images, im_info)
def _forward_train(self, image, im_info, gt_boxes):
loss_dict = {}
# stride: 64,32,16,8,4, p6->p2
fpn_fms = self.backbone(image)
rpn_rois, loss_dict_rpn = \
self.RPN(fpn_fms, im_info, gt_boxes)
rcnn_rois, rcnn_labels, rcnn_bbox_targets = fpn_roi_target(
rpn_rois, im_info, gt_boxes, top_k=2)
loss_dict_rcnn = self.RCNN(
fpn_fms, rcnn_rois, rcnn_labels, rcnn_bbox_targets)
loss_dict.update(loss_dict_rpn)
loss_dict.update(loss_dict_rcnn)
return loss_dict
def _forward_test(self, image, im_info):
fpn_fms = self.backbone(image)
rpn_rois = self.RPN(fpn_fms, im_info)
pred_bbox = self.RCNN(fpn_fms, rpn_rois)
return pred_bbox
class RCNN(M.Module):
def __init__(self):
super().__init__()
# roi head
self.refinement = True
self.fc1 = M.Linear(256*7*7, 1024)
self.fc2 = M.Linear(1024, 1024)
self.fc3 = M.Linear(1054, 1024) if self.refinement else None
self.relu = M.ReLU()
self.n = config.num_classes
self.a = M.Linear(1024, 5 * self.n)
self.b = M.Linear(1024, 5 * self.n)
self.q = M.Linear(1024, 5 * self.n) if self.refinement else None
self.r = M.Linear(1024, 5 * self.n) if self.refinement else None
self._init_weights()
def _init_weights(self,):
for l in [self.fc1, self.fc2, self.a, self.b]:
M.init.normal_(l.weight, std=0.01)
M.init.fill_(l.bias, 0)
if self.refinement:
for l in [self.q, self.r, self.fc3]:
M.init.normal_(l.weight, std=0.01)
M.init.fill_(l.bias, 0)
def refinement_module(self, prob, fc2):
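        # Second-stage refinement: decode both EMD branches, append their
        # (offset, score) predictions to the shared fc2 RoI features, then
        # re-predict through fc3 and the q/r heads.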
m = prob.reshape(-1, 5*self.n)
offsets, scores = m[:, :-self.n], m[:, -self.n:]
n = offsets.shape[0]
offsets = offsets.reshape(-1, self.n, 4)
cls_scores = F.expand_dims(F.softmax(scores, axis=1), axis=2)
pred_boxes = F.concat([offsets, cls_scores], axis=2)[:, 1]
n, c = pred_boxes.shape
pred_boxes = F.broadcast_to(F.expand_dims(pred_boxes, axis=1), (n, 6, c)).reshape(n,-1)
n, c = fc2.shape
fc3 = F.broadcast_to(F.expand_dims(fc2, axis=1), (n, 2, c)).reshape(-1, c)
fc3 = F.concat([fc3, pred_boxes], axis=1)
fc3 = self.relu(self.fc3(fc3))
fc3 = fc3.reshape(n, 2, -1).transpose(1, 0, 2)
a = self.q(fc3[0])
b = self.r(fc3[1])
prob = F.stack([a, b], axis=1).reshape(-1, a.shape[1])
return prob
def forward(self, fpn_fms, rcnn_rois, labels=None, bbox_targets=None):
# stride: 64,32,16,8,4 -> 4, 8, 16, 32
fpn_fms = fpn_fms[1:]
fpn_fms.reverse()
stride = [4, 8, 16, 32]
poo5, rcnn_rois, labels, bbox_targets = roi_pool(
fpn_fms, rcnn_rois, stride, (7, 7), 'roi_align',
labels, bbox_targets)
poo5 = F.flatten(poo5, start_axis=1)
fc1 = F.relu(self.fc1(poo5))
fc2 = F.relu(self.fc2(fc1))
a = self.a(fc2)
b = self.b(fc2)
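        # Two predictions per proposal (EMD heads a and b), interleaved row-wise.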
prob = F.stack([a, b], axis=1).reshape(-1, a.shape[1])
if self.refinement:
final_prob = self.refinement_module(prob, fc2)
if self.training:
emd_loss = self.compute_gemini_loss(prob, bbox_targets, labels)
loss_dict = {}
loss_dict['loss_rcnn_emd'] = emd_loss
            if self.refinement:
final_emd_loss = self.compute_gemini_loss(final_prob, bbox_targets, labels)
loss_dict['final_rcnn_emd'] = final_emd_loss
return loss_dict
else:
offsets, cls_scores = prob[:, :-self.n], prob[:, -self.n:]
pred_bbox = offsets.reshape(-1, self.n, 4)
cls_prob = F.softmax(cls_scores, axis=1)
n = rcnn_rois.shape[0]
rois = F.broadcast_to(F.expand_dims(rcnn_rois[:, 1:5], axis=1), (n, 2, 4)).reshape(-1, 4)
normalized = config.rcnn_bbox_normalize_targets
pred_boxes = restore_bbox(rois, pred_bbox, normalized, config)
pred_bbox = F.concat([pred_boxes, F.expand_dims(cls_prob, axis=2)], axis=2)
return pred_bbox
def compute_emd_loss(self, a, b, bbox_targets, labels):
c = a.shape[1]
prob = F.stack([a, b], axis = 1).reshape(-1, c)
pred_bbox, cls_scores = prob[:,:-self.n], prob[:,-self.n:]
n, c = bbox_targets.shape[0], bbox_targets.shape[1]
bbox_targets, labels = bbox_targets.reshape(-1, 4), labels.flatten()
cls_loss = softmax_loss_opr(cls_scores, labels)
pred_bbox = pred_bbox.reshape(-1, self.n, 4)
rcnn_bbox_loss = smooth_l1_loss_rcnn_opr(pred_bbox, bbox_targets, labels,
config.rcnn_smooth_l1_beta)
loss = cls_loss + rcnn_bbox_loss
loss = loss.reshape(-1, 2).sum(axis=1)
return loss
def compute_gemini_loss(self, prob, bbox_targets, labels):
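        # EMD-style set loss: evaluate both orderings of the paired predictions
        # against the targets and keep the cheaper assignment per proposal.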
c = prob.shape[1]
prob = prob.reshape(-1, 2, c).transpose(1, 0, 2)
a, b = prob[0], prob[1]
loss0 = self.compute_emd_loss(a, b, bbox_targets, labels)
loss1 = self.compute_emd_loss(b, a, bbox_targets, labels)
loss = F.stack([loss0, loss1], axis=1)
vlabel = (labels > -1).reshape(-1, 2).sum(axis=1) > 1
emd_loss = loss.min(axis=1).sum() / F.maximum(vlabel.sum(), 1)
return emd_loss
class FPN(M.Module):
"""
This module implements Feature Pyramid Network.
It creates pyramid features built on top of some input feature maps.
"""
def __init__(self, bottom_up):
super(FPN, self).__init__()
in_channels = [256, 512, 1024, 2048]
fpn_dim = 256
use_bias =True
# lateral_convs = list()
# output_convs = list()
lateral_convs, output_convs = [], []
for idx, in_channels in enumerate(in_channels):
lateral_conv = M.Conv2d(
in_channels, fpn_dim, kernel_size=1, bias=use_bias)
output_conv = M.Conv2d(
fpn_dim, fpn_dim, kernel_size=3, stride=1, padding=1, bias=use_bias)
M.init.msra_normal_(lateral_conv.weight, mode="fan_in")
M.init.msra_normal_(output_conv.weight, mode="fan_in")
if use_bias:
M.init.fill_(lateral_conv.bias, 0)
M.init.fill_(output_conv.bias, 0)
lateral_convs.append(lateral_conv)
output_convs.append(output_conv)
self.lateral_convs = lateral_convs[::-1]
self.output_convs = output_convs[::-1]
self.bottom_up = bottom_up
def forward(self, x):
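        # Top-down pathway: start from the coarsest level, upsample, and merge
        # with lateral connections from the bottom-up backbone features.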
bottom_up_features = self.bottom_up(x)
bottom_up_features = bottom_up_features[::-1]
results = []
prev_features = self.lateral_convs[0](bottom_up_features[0])
results.append(self.output_convs[0](prev_features))
for features, lateral_conv, output_conv in zip(
bottom_up_features[1:], self.lateral_convs[1:], self.output_convs[1:]
):
fh, fw = features.shape[2:]
top_down_features = F.nn.interpolate(
prev_features, size = (fh, fw), mode="BILINEAR")
lateral_features = lateral_conv(features)
prev_features = lateral_features + top_down_features
results.append(output_conv(prev_features))
# p6
last_p6 = F.max_pool2d(results[0], kernel_size=1, stride=2, padding=0)
results.insert(0, last_p6)
return results
|
[
"megengine.module.ReLU",
"megengine.functional.softmax",
"megengine.functional.flatten",
"megengine.functional.stack",
"megengine.tensor",
"megengine.module.init.msra_normal_",
"megengine.functional.max_pool2d",
"megengine.functional.concat",
"megengine.module.init.normal_",
"megengine.module.init.fill_",
"megengine.module.Conv2d",
"megengine.module.Linear",
"megengine.functional.expand_dims",
"megengine.functional.nn.interpolate"
] |
[((656, 666), 'backbone.resnet50.ResNet50', 'ResNet50', ([], {}), '()\n', (664, 666), False, 'from backbone.resnet50 import ResNet50\n'), ((1312, 1335), 'module.rpn.RPN', 'RPN', (['config.rpn_channel'], {}), '(config.rpn_channel)\n', (1315, 1335), False, 'from module.rpn import RPN\n'), ((2306, 2342), 'det_opr.utils.get_padded_tensor', 'get_padded_tensor', (['normed_images', '(64)'], {}), '(normed_images, 64)\n', (2323, 2342), False, 'from det_opr.utils import get_padded_tensor\n'), ((3081, 3133), 'det_opr.fpn_roi_target.fpn_roi_target', 'fpn_roi_target', (['rpn_rois', 'im_info', 'gt_boxes'], {'top_k': '(2)'}), '(rpn_rois, im_info, gt_boxes, top_k=2)\n', (3095, 3133), False, 'from det_opr.fpn_roi_target import fpn_roi_target\n'), ((3724, 3751), 'megengine.module.Linear', 'M.Linear', (['(256 * 7 * 7)', '(1024)'], {}), '(256 * 7 * 7, 1024)\n', (3732, 3751), True, 'import megengine.module as M\n'), ((3767, 3787), 'megengine.module.Linear', 'M.Linear', (['(1024)', '(1024)'], {}), '(1024, 1024)\n', (3775, 3787), True, 'import megengine.module as M\n'), ((3878, 3886), 'megengine.module.ReLU', 'M.ReLU', ([], {}), '()\n', (3884, 3886), True, 'import megengine.module as M\n'), ((3941, 3967), 'megengine.module.Linear', 'M.Linear', (['(1024)', '(5 * self.n)'], {}), '(1024, 5 * self.n)\n', (3949, 3967), True, 'import megengine.module as M\n'), ((3985, 4011), 'megengine.module.Linear', 'M.Linear', (['(1024)', '(5 * self.n)'], {}), '(1024, 5 * self.n)\n', (3993, 4011), True, 'import megengine.module as M\n'), ((5155, 5190), 'megengine.functional.concat', 'F.concat', (['[fc3, pred_boxes]'], {'axis': '(1)'}), '([fc3, pred_boxes], axis=1)\n', (5163, 5190), True, 'import megengine.functional as F\n'), ((5690, 5769), 'layers.roi_pool.roi_pool', 'roi_pool', (['fpn_fms', 'rcnn_rois', 'stride', '(7, 7)', '"""roi_align"""', 'labels', 'bbox_targets'], {}), "(fpn_fms, rcnn_rois, stride, (7, 7), 'roi_align', labels, bbox_targets)\n", (5698, 5769), False, 'from layers.roi_pool import roi_pool\n'), ((5818, 5847), 'megengine.functional.flatten', 'F.flatten', (['poo5'], {'start_axis': '(1)'}), '(poo5, start_axis=1)\n', (5827, 5847), True, 'import megengine.functional as F\n'), ((7502, 7538), 'det_opr.loss_opr.softmax_loss_opr', 'softmax_loss_opr', (['cls_scores', 'labels'], {}), '(cls_scores, labels)\n', (7518, 7538), False, 'from det_opr.loss_opr import softmax_loss_opr, smooth_l1_loss_rcnn_opr\n'), ((7617, 7706), 'det_opr.loss_opr.smooth_l1_loss_rcnn_opr', 'smooth_l1_loss_rcnn_opr', (['pred_bbox', 'bbox_targets', 'labels', 'config.rcnn_smooth_l1_beta'], {}), '(pred_bbox, bbox_targets, labels, config.\n rcnn_smooth_l1_beta)\n', (7640, 7706), False, 'from det_opr.loss_opr import softmax_loss_opr, smooth_l1_loss_rcnn_opr\n'), ((8149, 8180), 'megengine.functional.stack', 'F.stack', (['[loss0, loss1]'], {'axis': '(1)'}), '([loss0, loss1], axis=1)\n', (8156, 8180), True, 'import megengine.functional as F\n'), ((10329, 10389), 'megengine.functional.max_pool2d', 'F.max_pool2d', (['results[0]'], {'kernel_size': '(1)', 'stride': '(2)', 'padding': '(0)'}), '(results[0], kernel_size=1, stride=2, padding=0)\n', (10341, 10389), True, 'import megengine.functional as F\n'), ((3807, 3827), 'megengine.module.Linear', 'M.Linear', (['(1054)', '(1024)'], {}), '(1054, 1024)\n', (3815, 3827), True, 'import megengine.module as M\n'), ((4030, 4056), 'megengine.module.Linear', 'M.Linear', (['(1024)', '(5 * self.n)'], {}), '(1024, 5 * self.n)\n', (4038, 4056), True, 'import megengine.module as M\n'), ((4103, 4129), 'megengine.module.Linear', 'M.Linear', (['(1024)', '(5 * self.n)'], {}), '(1024, 5 * self.n)\n', (4111, 4129), True, 'import megengine.module as M\n'), ((4295, 4329), 'megengine.module.init.normal_', 'M.init.normal_', (['l.weight'], {'std': '(0.01)'}), '(l.weight, std=0.01)\n', (4309, 4329), True, 'import megengine.module as M\n'), ((4342, 4365), 'megengine.module.init.fill_', 'M.init.fill_', (['l.bias', '(0)'], {}), '(l.bias, 0)\n', (4354, 4365), True, 'import megengine.module as M\n'), ((4802, 4827), 'megengine.functional.softmax', 'F.softmax', (['scores'], {'axis': '(1)'}), '(scores, axis=1)\n', (4811, 4827), True, 'import megengine.functional as F\n'), ((4858, 4897), 'megengine.functional.concat', 'F.concat', (['[offsets, cls_scores]'], {'axis': '(2)'}), '([offsets, cls_scores], axis=2)\n', (4866, 4897), True, 'import megengine.functional as F\n'), ((6714, 6743), 'megengine.functional.softmax', 'F.softmax', (['cls_scores'], {'axis': '(1)'}), '(cls_scores, axis=1)\n', (6723, 6743), True, 'import megengine.functional as F\n'), ((6966, 7015), 'det_opr.bbox_opr.restore_bbox', 'restore_bbox', (['rois', 'pred_bbox', 'normalized', 'config'], {}), '(rois, pred_bbox, normalized, config)\n', (6978, 7015), False, 'from det_opr.bbox_opr import bbox_transform_inv_opr, restore_bbox\n'), ((8856, 8916), 'megengine.module.Conv2d', 'M.Conv2d', (['in_channels', 'fpn_dim'], {'kernel_size': '(1)', 'bias': 'use_bias'}), '(in_channels, fpn_dim, kernel_size=1, bias=use_bias)\n', (8864, 8916), True, 'import megengine.module as M\n'), ((8960, 9037), 'megengine.module.Conv2d', 'M.Conv2d', (['fpn_dim', 'fpn_dim'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': 'use_bias'}), '(fpn_dim, fpn_dim, kernel_size=3, stride=1, padding=1, bias=use_bias)\n', (8968, 9037), True, 'import megengine.module as M\n'), ((9067, 9122), 'megengine.module.init.msra_normal_', 'M.init.msra_normal_', (['lateral_conv.weight'], {'mode': '"""fan_in"""'}), "(lateral_conv.weight, mode='fan_in')\n", (9086, 9122), True, 'import megengine.module as M\n'), ((9135, 9189), 'megengine.module.init.msra_normal_', 'M.init.msra_normal_', (['output_conv.weight'], {'mode': '"""fan_in"""'}), "(output_conv.weight, mode='fan_in')\n", (9154, 9189), True, 'import megengine.module as M\n'), ((10041, 10104), 'megengine.functional.nn.interpolate', 'F.nn.interpolate', (['prev_features'], {'size': '(fh, fw)', 'mode': '"""BILINEAR"""'}), "(prev_features, size=(fh, fw), mode='BILINEAR')\n", (10057, 10104), True, 'import megengine.functional as F\n'), ((2011, 2048), 'config.config.image_mean.reshape', 'config.image_mean.reshape', (['(1)', '(3)', '(1)', '(1)'], {}), '(1, 3, 1, 1)\n', (2036, 2048), False, 'from config import config\n'), ((2082, 2118), 'config.config.image_std.reshape', 'config.image_std.reshape', (['(1)', '(3)', '(1)', '(1)'], {}), '(1, 3, 1, 1)\n', (2106, 2118), False, 'from config import config\n'), ((2153, 2169), 'megengine.tensor', 'mge.tensor', (['mean'], {}), '(mean)\n', (2163, 2169), True, 'import megengine as mge\n'), ((2202, 2217), 'megengine.tensor', 'mge.tensor', (['std'], {}), '(std)\n', (2212, 2217), True, 'import megengine as mge\n'), ((4460, 4494), 'megengine.module.init.normal_', 'M.init.normal_', (['l.weight'], {'std': '(0.01)'}), '(l.weight, std=0.01)\n', (4474, 4494), True, 'import megengine.module as M\n'), ((4511, 4534), 'megengine.module.init.fill_', 'M.init.fill_', (['l.bias', '(0)'], {}), '(l.bias, 0)\n', (4523, 4534), True, 'import megengine.module as M\n'), ((5363, 5386), 'megengine.functional.stack', 'F.stack', (['[a, b]'], {'axis': '(1)'}), '([a, b], axis=1)\n', (5370, 5386), True, 'import megengine.functional as F\n'), ((5985, 6008), 'megengine.functional.stack', 'F.stack', (['[a, b]'], {'axis': '(1)'}), '([a, b], axis=1)\n', (5992, 6008), True, 'import megengine.functional as F\n'), ((7237, 7260), 'megengine.functional.stack', 'F.stack', (['[a, b]'], {'axis': '(1)'}), '([a, b], axis=1)\n', (7244, 7260), True, 'import megengine.functional as F\n'), ((9231, 9265), 'megengine.module.init.fill_', 'M.init.fill_', (['lateral_conv.bias', '(0)'], {}), '(lateral_conv.bias, 0)\n', (9243, 9265), True, 'import megengine.module as M\n'), ((9282, 9315), 'megengine.module.init.fill_', 'M.init.fill_', (['output_conv.bias', '(0)'], {}), '(output_conv.bias, 0)\n', (9294, 9315), True, 'import megengine.module as M\n'), ((4972, 5005), 'megengine.functional.expand_dims', 'F.expand_dims', (['pred_boxes'], {'axis': '(1)'}), '(pred_boxes, axis=1)\n', (4985, 5005), True, 'import megengine.functional as F\n'), ((5087, 5113), 'megengine.functional.expand_dims', 'F.expand_dims', (['fc2'], {'axis': '(1)'}), '(fc2, axis=1)\n', (5100, 5113), True, 'import megengine.functional as F\n'), ((7062, 7093), 'megengine.functional.expand_dims', 'F.expand_dims', (['cls_prob'], {'axis': '(2)'}), '(cls_prob, axis=2)\n', (7075, 7093), True, 'import megengine.functional as F\n'), ((1596, 1630), 'numpy.random.random', 'np.random.random', (['[2, 3, 224, 224]'], {}), '([2, 3, 224, 224])\n', (1612, 1630), True, 'import numpy as np\n'), ((1734, 1758), 'numpy.random.random', 'np.random.random', (['[2, 5]'], {}), '([2, 5])\n', (1750, 1758), True, 'import numpy as np\n'), ((1863, 1892), 'numpy.random.random', 'np.random.random', (['[2, 100, 5]'], {}), '([2, 100, 5])\n', (1879, 1892), True, 'import numpy as np\n'), ((6813, 6853), 'megengine.functional.expand_dims', 'F.expand_dims', (['rcnn_rois[:, 1:5]'], {'axis': '(1)'}), '(rcnn_rois[:, 1:5], axis=1)\n', (6826, 6853), True, 'import megengine.functional as F\n')]
|
# Copyright 2021 Modelyst LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional
from uuid import UUID, uuid4
import requests
from pydantic import HttpUrl
from pydantic.tools import parse_obj_as
from sqlalchemy.sql.expression import text
from sqlmodel import Session, select
from dbgen import Const, Entity, Extract, Generator, Model, Query
from dbgen.configuration import config, get_engines
from dbgen.core.node.transforms import PyBlock
class CustomJsonExtract(Extract):
url: HttpUrl = parse_obj_as(HttpUrl, 'https://jsonplaceholder.typicode.com/posts')
outputs: List[str] = ['out', 'uuid']
def setup(self, **_):
self._response = requests.get(self.url).json()
self._response += [{}]
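        # An empty dict is appended, presumably so a row with no fields also
        # flows through the extraction pipeline.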
def extract(self):
for row in self._response:
row['uuid'] = uuid4()
yield {'out': row, 'uuid': row['uuid']}
def length(self, **_):
return len(self._response)
class JSONEntityBase(Entity):
__tablename__ = 'json_entity'
tags: Optional[List[dict]]
my_uuid: Optional[UUID]
class JSONEntity(JSONEntityBase, table=True):
__tablename__ = 'json_entity'
__identifying__ = {
'json_val',
}
json_val: Optional[dict]
model = Model(name='test_json')
load_json = Generator(name='load_json', loads=[JSONEntity.load(insert=True, json_val=Const({}))])
model.add_gen(load_json)
extract = CustomJsonExtract()
load = JSONEntity.load(insert=True, json_val=extract['out'], my_uuid=extract['uuid'])
load_http_json = Generator(name='load_http_json', extract=extract, loads=[load])
model.add_gen(load_http_json)
query = Query(select(JSONEntity.id, JSONEntity.json_val.op('->')(text("'title'")).label('title')))
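# json_val.op('->') applies the SQL JSON '->' operator to pull out the 'title' key.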
def get_title_words(text: str):
if text:
return [{'word': word} for word in text.split(' ')]
pb = PyBlock(function=get_title_words, inputs=[query['title']])
load = JSONEntity.load(json_entity=query['id'], tags=pb['out'])
add_tags = Generator(name='add_tags', extract=query, transforms=[pb], loads=[load])
model.add_gen(add_tags)
if __name__ == '__main__':
main_engine, _ = get_engines(config)
with Session(main_engine) as session:
json_entity = session.exec(select(JSONEntity)).first()
if json_entity:
print(json_entity.dict().keys())
print(json_entity.json_val)
|
[
"sqlmodel.Session",
"sqlmodel.select"
] |
[((1773, 1796), 'dbgen.Model', 'Model', ([], {'name': '"""test_json"""'}), "(name='test_json')\n", (1778, 1796), False, 'from dbgen import Const, Entity, Extract, Generator, Model, Query\n'), ((2054, 2117), 'dbgen.Generator', 'Generator', ([], {'name': '"""load_http_json"""', 'extract': 'extract', 'loads': '[load]'}), "(name='load_http_json', extract=extract, loads=[load])\n", (2063, 2117), False, 'from dbgen import Const, Entity, Extract, Generator, Model, Query\n'), ((2362, 2420), 'dbgen.core.node.transforms.PyBlock', 'PyBlock', ([], {'function': 'get_title_words', 'inputs': "[query['title']]"}), "(function=get_title_words, inputs=[query['title']])\n", (2369, 2420), False, 'from dbgen.core.node.transforms import PyBlock\n'), ((2496, 2568), 'dbgen.Generator', 'Generator', ([], {'name': '"""add_tags"""', 'extract': 'query', 'transforms': '[pb]', 'loads': '[load]'}), "(name='add_tags', extract=query, transforms=[pb], loads=[load])\n", (2505, 2568), False, 'from dbgen import Const, Entity, Extract, Generator, Model, Query\n'), ((1047, 1114), 'pydantic.tools.parse_obj_as', 'parse_obj_as', (['HttpUrl', '"""https://jsonplaceholder.typicode.com/posts"""'], {}), "(HttpUrl, 'https://jsonplaceholder.typicode.com/posts')\n", (1059, 1114), False, 'from pydantic.tools import parse_obj_as\n'), ((2642, 2661), 'dbgen.configuration.get_engines', 'get_engines', (['config'], {}), '(config)\n', (2653, 2661), False, 'from dbgen.configuration import config, get_engines\n'), ((2671, 2691), 'sqlmodel.Session', 'Session', (['main_engine'], {}), '(main_engine)\n', (2678, 2691), False, 'from sqlmodel import Session, select\n'), ((1354, 1361), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (1359, 1361), False, 'from uuid import UUID, uuid4\n'), ((1208, 1230), 'requests.get', 'requests.get', (['self.url'], {}), '(self.url)\n', (1220, 1230), False, 'import requests\n'), ((2338, 2353), 'sqlalchemy.sql.expression.text.split', 'text.split', (['""" """'], {}), "(' ')\n", (2348, 2353), False, 'from sqlalchemy.sql.expression import text\n'), ((1882, 1891), 'dbgen.Const', 'Const', (['{}'], {}), '({})\n', (1887, 1891), False, 'from dbgen import Const, Entity, Extract, Generator, Model, Query\n'), ((2214, 2229), 'sqlalchemy.sql.expression.text', 'text', (['"""\'title\'"""'], {}), '("\'title\'")\n', (2218, 2229), False, 'from sqlalchemy.sql.expression import text\n'), ((2739, 2757), 'sqlmodel.select', 'select', (['JSONEntity'], {}), '(JSONEntity)\n', (2745, 2757), False, 'from sqlmodel import Session, select\n')]
|
from typing import Optional
from pydantic import EmailStr
from sqlmodel import Field, SQLModel
from sb_backend.app.models.base.base_model import TimeStampMixin
# Shared properties
class UserBase(SQLModel):
email: Optional[EmailStr] = None
is_active: Optional[bool] = True
is_superuser: bool = False
full_name: Optional[str] = None
# Properties to receive via API on creation
class UserCreate(UserBase):
email: EmailStr
password: str
# Properties to receive via API on update
class UserUpdate(UserBase):
password: Optional[str] = None
class UserInDBBase(UserBase):
id: Optional[int] = Field(default=None, primary_key=True)
# Additional properties to return via API
class User(UserInDBBase, TimeStampMixin, table=True):
__tablename__ = "user"
pass
# Additional properties stored in DB
class UserInDB(UserInDBBase):
hashed_password: str
|
[
"sqlmodel.Field"
] |
[((624, 661), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (629, 661), False, 'from sqlmodel import Field, SQLModel\n')]
|
import numpy as nm
from sfepy.base.base import Struct, assert_
from sfepy.discrete.fem.mappings import VolumeMapping, SurfaceMapping
from poly_spaces import PolySpace
from fe_surface import FESurface
def set_mesh_coors(domain, fields, coors, update_fields=False, actual=False,
clear_all=True):
if actual:
domain.mesh.coors_act = coors.copy()
else:
domain.mesh.coors = coors.copy()
if update_fields:
for field in fields.itervalues():
field.setup_coors(coors)
field.clear_mappings(clear_all=clear_all)
def eval_nodal_coors(coors, mesh_coors, region, poly_space, geom_poly_space,
econn, ig, only_extra=True):
"""
Compute coordinates of nodes corresponding to `poly_space`, given
mesh coordinates and `geom_poly_space`.
"""
if only_extra:
iex = (poly_space.nts[:,0] > 0).nonzero()[0]
if iex.shape[0] == 0: return
qp_coors = poly_space.node_coors[iex, :]
econn = econn[:, iex].copy()
else:
qp_coors = poly_space.node_coors
##
# Evaluate geometry interpolation base functions in (extra) nodes.
bf = geom_poly_space.eval_base(qp_coors)
bf = bf[:,0,:].copy()
##
# Evaluate extra coordinates with 'bf'.
group = region.domain.groups[ig]
cells = region.get_cells(ig)
ecoors = nm.dot(bf, mesh_coors[group.conn[cells]])
coors[econn] = nm.swapaxes(ecoors, 0, 1)
##
# 04.08.2005, c
def _interp_to_faces( vertex_vals, bfs, faces ):
dim = vertex_vals.shape[1]
n_face = faces.shape[0]
n_qp = bfs.shape[0]
faces_vals = nm.zeros( (n_face, n_qp, dim), nm.float64 )
for ii, face in enumerate( faces ):
vals = vertex_vals[face,:dim]
faces_vals[ii,:,:] = nm.dot( bfs[:,0,:], vals )
return( faces_vals )
class Interpolant( Struct ):
"""A simple wrapper around PolySpace."""
def __init__(self, name, gel, space='H1', base='lagrange',
approx_order=1, force_bubble=False):
self.name = name
self.gel = gel
self.poly_spaces = poly_spaces = {}
poly_spaces['v'] = PolySpace.any_from_args(name, gel, approx_order,
base=base,
force_bubble=force_bubble)
gel = gel.surface_facet
if gel is not None:
ps = PolySpace.any_from_args(name, gel, approx_order,
base=base,
force_bubble=False)
skey = 's%d' % ps.n_nod
poly_spaces[skey] = ps
def describe_nodes( self ):
ps = self.poly_spaces['v']
node_desc = ps.describe_nodes()
return node_desc
##
# 16.11.2007, c
def get_n_nodes( self ):
nn = {}
for key, ps in self.poly_spaces.iteritems():
nn[key] = ps.nodes.shape[0]
return nn
def get_geom_poly_space(self, key):
if key == 'v':
ps = self.gel.interp.poly_spaces['v']
elif key[0] == 's':
n_v = self.gel.surface_facet.n_vertex
ps = self.gel.interp.poly_spaces['s%d' % n_v]
else:
raise ValueError('bad polynomial space key! (%s)' % key)
return ps
class SurfaceInterpolant(Interpolant):
"""
Like Interpolant, but for use with SurfaceField and
SurfaceApproximation.
"""
def __init__(self, name, gel, space='H1', base='lagrange',
approx_order=1, force_bubble=False):
Interpolant.__init__(self, name, gel, space=space, base=base,
approx_order=approx_order,
force_bubble=force_bubble)
# Make alias 'v' <-> 's#'.
ps = self.poly_spaces['v']
self.poly_spaces['s%d' % ps.n_nod] = ps
def get_geom_poly_space(self, key):
assert_(key[0] == 's')
ps = self.gel.interp.poly_spaces['v']
return ps
##
# 18.07.2006, c
class Approximation( Struct ):
##
# 18.07.2006, c
# 10.10.2006
# 11.07.2007
# 17.07.2007
def __init__(self, name, interp, region, ig, is_surface=False):
"""interp, region are borrowed."""
self.name = name
self.interp = interp
self.region = region
self.ig = ig
self.is_surface = is_surface
self.surface_data = {}
self.edge_data = {}
self.point_data = {}
self.n_ep = self.interp.get_n_nodes()
self.ori = None
self.clear_qp_base()
def eval_extra_coor(self, coors, mesh_coors):
"""
Compute coordinates of extra nodes.
"""
gps = self.interp.gel.interp.poly_spaces['v']
ps = self.interp.poly_spaces['v']
eval_nodal_coors(coors, mesh_coors, self.region, ps, gps, self.econn, self.ig)
##
# c: 05.09.2006, r: 09.05.2008
def setup_surface_data( self, region ):
"""nodes[leconn] == econn"""
"""nodes are sorted by node number -> same order as region.vertices"""
sd = FESurface('surface_data_%s' % region.name, region,
self.efaces, self.econn, self.ig)
self.surface_data[region.name] = sd
return sd
##
# 11.07.2007, c
def setup_point_data( self, field, region ):
conn = field.get_dofs_in_region(region, merge=True, igs=region.igs)
## conn = [nods]\
## + [nm.empty( (0,), dtype = nm.int32 )]\
## * (len( region.igs ) - 1)
conn.shape += (1,)
self.point_data[region.name] = conn
def get_connectivity(self, region, integration, is_trace=False):
"""
Return the DOF connectivity for the given geometry type.
Parameters
----------
region : Region instance
The region, used to index surface and volume connectivities.
integration : one of ('volume', 'plate', 'surface', 'surface_extra')
The term integration type.
"""
if integration == 'surface':
sd = self.surface_data[region.name]
conn = sd.get_connectivity(self.is_surface, is_trace=is_trace)
elif integration in ('volume', 'plate', 'surface_extra'):
if region.name == self.region.name:
conn = self.econn
else:
aux = integration in ('volume', 'plate')
cells = region.get_cells(self.ig, true_cells_only=aux)
conn = nm.take(self.econn, cells.astype(nm.int32), axis=0)
else:
raise ValueError('unsupported term integration! (%s)' % integration)
return conn
def get_poly_space(self, key, from_geometry=False):
"""
Get the polynomial space.
Parameters
----------
key : 'v' or 's?'
The key denoting volume or surface.
from_geometry : bool
If True, return the polynomial space for affine geometrical
interpolation.
Returns
-------
ps : PolySpace instance
The polynomial space.
"""
if from_geometry:
ps = self.interp.get_geom_poly_space(key)
else:
ps = self.interp.poly_spaces[key]
return ps
def clear_qp_base(self):
"""
Remove cached quadrature points and base functions.
"""
self.qp_coors = {}
self.bf = {}
def get_qp(self, key, integral):
"""
Get quadrature points and weights corresponding to the given key
and integral. The key is 'v' or 's#', where # is the number of
face vertices.
"""
qpkey = (integral.name, key)
if not self.qp_coors.has_key(qpkey):
interp = self.interp
if (key[0] == 's'):
dim = interp.gel.dim - 1
n_fp = interp.gel.surface_facet.n_vertex
geometry = '%d_%d' % (dim, n_fp)
else:
geometry = interp.gel.name
vals, weights = integral.get_qp(geometry)
self.qp_coors[qpkey] = Struct(vals=vals, weights=weights)
return self.qp_coors[qpkey]
def get_base(self, key, derivative, integral, iels=None,
from_geometry=False, base_only=True):
qp = self.get_qp(key, integral)
ps = self.get_poly_space(key, from_geometry=from_geometry)
_key = key if not from_geometry else 'g' + key
bf_key = (integral.name, _key, derivative)
if not self.bf.has_key(bf_key):
if (iels is not None) and (self.ori is not None):
ori = self.ori[iels]
else:
ori = self.ori
self.bf[bf_key] = ps.eval_base(qp.vals, diff=derivative, ori=ori)
if base_only:
return self.bf[bf_key]
else:
return self.bf[bf_key], qp.weights
def describe_geometry(self, field, gtype, region, integral,
return_mapping=False):
"""
Compute jacobians, element volumes and base function derivatives
for Volume-type geometries (volume mappings), and jacobians,
normals and base function derivatives for Surface-type
geometries (surface mappings).
Notes
-----
- volume mappings can be defined on a part of an element group,
although the field has to be defined always on the whole group.
- surface mappings are defined on the surface region
- surface mappings require field order to be > 0
"""
domain = field.domain
group = domain.groups[self.ig]
coors = domain.get_mesh_coors(actual=True)
if gtype == 'volume':
qp = self.get_qp('v', integral)
iels = region.get_cells(self.ig)
geo_ps = self.interp.get_geom_poly_space('v')
ps = self.interp.poly_spaces['v']
bf = self.get_base('v', 0, integral, iels=iels)
conn = nm.take(group.conn, iels.astype(nm.int32), axis=0)
mapping = VolumeMapping(coors, conn, poly_space=geo_ps)
vg = mapping.get_mapping(qp.vals, qp.weights, poly_space=ps,
ori=self.ori)
out = vg
elif gtype == 'plate':
import sfepy.mechanics.membranes as mm
from sfepy.linalg import dot_sequences
qp = self.get_qp('v', integral)
iels = region.get_cells(self.ig)
ps = self.interp.poly_spaces['v']
bf = self.get_base('v', 0, integral, iels=iels)
conn = nm.take(group.conn, nm.int32(iels), axis=0)
ccoors = coors[conn]
# Coordinate transformation matrix (transposed!).
mtx_t = mm.create_transformation_matrix(ccoors)
# Transform coordinates to the local coordinate system.
coors_loc = dot_sequences((ccoors - ccoors[:, 0:1, :]), mtx_t)
# Mapping from transformed elements to reference elements.
mapping = mm.create_mapping(coors_loc, field.gel, 1)
vg = mapping.get_mapping(qp.vals, qp.weights, poly_space=ps,
ori=self.ori)
vg.mtx_t = mtx_t
out = vg
elif (gtype == 'surface') or (gtype == 'surface_extra'):
assert_(field.approx_order > 0)
if self.ori is not None:
msg = 'surface integrals do not work yet with the' \
' hierarchical basis!'
raise ValueError(msg)
sd = domain.surface_groups[self.ig][region.name]
esd = self.surface_data[region.name]
qp = self.get_qp(sd.face_type, integral)
geo_ps = self.interp.get_geom_poly_space(sd.face_type)
ps = self.interp.poly_spaces[esd.face_type]
bf = self.get_base(esd.face_type, 0, integral)
conn = sd.get_connectivity()
mapping = SurfaceMapping(coors, conn, poly_space=geo_ps)
sg = mapping.get_mapping(qp.vals, qp.weights, poly_space=ps,
mode=gtype)
if gtype == 'surface_extra':
sg.alloc_extra_data(self.n_ep['v'])
self.create_bqp(region.name, integral)
qp = self.qp_coors[(integral.name, esd.bkey)]
v_geo_ps = self.interp.get_geom_poly_space('v')
bf_bg = v_geo_ps.eval_base(qp.vals, diff=True)
ebf_bg = self.get_base(esd.bkey, 1, integral)
sg.evaluate_bfbgm(bf_bg, ebf_bg, coors, sd.fis, group.conn)
out = sg
elif gtype == 'point':
out = mapping = None
else:
raise ValueError('unknown geometry type: %s' % gtype)
if out is not None:
# Store the integral used.
out.integral = integral
out.qp = qp
out.ps = ps
# Update base.
out.bf[:] = bf
if return_mapping:
out = (out, mapping)
return out
def _create_bqp(self, skey, bf_s, weights, integral_name):
interp = self.interp
gel = interp.gel
bkey = 'b%s' % skey[1:]
bqpkey = (integral_name, bkey)
coors, faces = gel.coors, gel.get_surface_entities()
vals = _interp_to_faces(coors, bf_s, faces)
self.qp_coors[bqpkey] = Struct(name = 'BQP_%s' % bkey,
vals = vals, weights = weights)
interp.poly_spaces[bkey] = interp.poly_spaces['v']
return bkey
def create_bqp(self, region_name, integral):
sd = self.surface_data[region_name]
bqpkey = (integral.name, sd.bkey)
        if bqpkey not in self.qp_coors:
bf_s = self.get_base(sd.face_type, 0, integral,
from_geometry=True)
qp = self.get_qp(sd.face_type, integral)
bkey = self._create_bqp(sd.face_type, bf_s, qp.weights,
integral.name)
assert_(bkey == sd.bkey)
class DiscontinuousApproximation(Approximation):
def eval_extra_coor(self, coors, mesh_coors):
"""
Compute coordinates of extra nodes. For discontinuous
approximations, all nodes are treated as extra.
"""
gps = self.interp.gel.interp.poly_spaces['v']
ps = self.interp.poly_spaces['v']
eval_nodal_coors(coors, mesh_coors, self.region, ps, gps,
self.econn, self.ig, only_extra=False)
class SurfaceApproximation(Approximation):
def __init__(self, name, interp, region, ig):
Approximation.__init__(self, name, interp, region, ig, is_surface=True)
def get_qp(self, key, integral):
"""
Get quadrature points and weights corresponding to the given key
and integral. The key is 's#', where # is the number of
face vertices.
"""
assert_(key[0] == 's')
qpkey = (integral.name, key)
        if qpkey not in self.qp_coors:
interp = self.interp
geometry = interp.gel.name
vals, weights = integral.get_qp(geometry)
self.qp_coors[qpkey] = Struct(vals=vals, weights=weights)
return self.qp_coors[qpkey]
|
[
"sfepy.mechanics.membranes.create_transformation_matrix",
"sfepy.base.base.Struct",
"sfepy.mechanics.membranes.create_mapping",
"sfepy.discrete.fem.mappings.SurfaceMapping",
"sfepy.linalg.dot_sequences",
"sfepy.base.base.assert_",
"sfepy.discrete.fem.mappings.VolumeMapping"
] |
[((1374, 1415), 'numpy.dot', 'nm.dot', (['bf', 'mesh_coors[group.conn[cells]]'], {}), '(bf, mesh_coors[group.conn[cells]])\n', (1380, 1415), True, 'import numpy as nm\n'), ((1435, 1460), 'numpy.swapaxes', 'nm.swapaxes', (['ecoors', '(0)', '(1)'], {}), '(ecoors, 0, 1)\n', (1446, 1460), True, 'import numpy as nm\n'), ((1636, 1677), 'numpy.zeros', 'nm.zeros', (['(n_face, n_qp, dim)', 'nm.float64'], {}), '((n_face, n_qp, dim), nm.float64)\n', (1644, 1677), True, 'import numpy as nm\n'), ((1787, 1813), 'numpy.dot', 'nm.dot', (['bfs[:, 0, :]', 'vals'], {}), '(bfs[:, 0, :], vals)\n', (1793, 1813), True, 'import numpy as nm\n'), ((2153, 2244), 'poly_spaces.PolySpace.any_from_args', 'PolySpace.any_from_args', (['name', 'gel', 'approx_order'], {'base': 'base', 'force_bubble': 'force_bubble'}), '(name, gel, approx_order, base=base, force_bubble=\n force_bubble)\n', (2176, 2244), False, 'from poly_spaces import PolySpace\n'), ((3930, 3952), 'sfepy.base.base.assert_', 'assert_', (["(key[0] == 's')"], {}), "(key[0] == 's')\n", (3937, 3952), False, 'from sfepy.base.base import Struct, assert_\n'), ((5108, 5196), 'fe_surface.FESurface', 'FESurface', (["('surface_data_%s' % region.name)", 'region', 'self.efaces', 'self.econn', 'self.ig'], {}), "('surface_data_%s' % region.name, region, self.efaces, self.econn,\n self.ig)\n", (5117, 5196), False, 'from fe_surface import FESurface\n'), ((13467, 13523), 'sfepy.base.base.Struct', 'Struct', ([], {'name': "('BQP_%s' % bkey)", 'vals': 'vals', 'weights': 'weights'}), "(name='BQP_%s' % bkey, vals=vals, weights=weights)\n", (13473, 13523), False, 'from sfepy.base.base import Struct, assert_\n'), ((15022, 15044), 'sfepy.base.base.assert_', 'assert_', (["(key[0] == 's')"], {}), "(key[0] == 's')\n", (15029, 15044), False, 'from sfepy.base.base import Struct, assert_\n'), ((2419, 2498), 'poly_spaces.PolySpace.any_from_args', 'PolySpace.any_from_args', (['name', 'gel', 'approx_order'], {'base': 'base', 'force_bubble': '(False)'}), '(name, gel, approx_order, base=base, force_bubble=False)\n', (2442, 2498), False, 'from poly_spaces import PolySpace\n'), ((8152, 8186), 'sfepy.base.base.Struct', 'Struct', ([], {'vals': 'vals', 'weights': 'weights'}), '(vals=vals, weights=weights)\n', (8158, 8186), False, 'from sfepy.base.base import Struct, assert_\n'), ((10118, 10163), 'sfepy.discrete.fem.mappings.VolumeMapping', 'VolumeMapping', (['coors', 'conn'], {'poly_space': 'geo_ps'}), '(coors, conn, poly_space=geo_ps)\n', (10131, 10163), False, 'from sfepy.discrete.fem.mappings import VolumeMapping, SurfaceMapping\n'), ((14122, 14146), 'sfepy.base.base.assert_', 'assert_', (['(bkey == sd.bkey)'], {}), '(bkey == sd.bkey)\n', (14129, 14146), False, 'from sfepy.base.base import Struct, assert_\n'), ((15290, 15324), 'sfepy.base.base.Struct', 'Struct', ([], {'vals': 'vals', 'weights': 'weights'}), '(vals=vals, weights=weights)\n', (15296, 15324), False, 'from sfepy.base.base import Struct, assert_\n'), ((10821, 10860), 'sfepy.mechanics.membranes.create_transformation_matrix', 'mm.create_transformation_matrix', (['ccoors'], {}), '(ccoors)\n', (10852, 10860), True, 'import sfepy.mechanics.membranes as mm\n'), ((10954, 11002), 'sfepy.linalg.dot_sequences', 'dot_sequences', (['(ccoors - ccoors[:, 0:1, :])', 'mtx_t'], {}), '(ccoors - ccoors[:, 0:1, :], mtx_t)\n', (10967, 11002), False, 'from sfepy.linalg import dot_sequences\n'), ((11099, 11141), 'sfepy.mechanics.membranes.create_mapping', 'mm.create_mapping', (['coors_loc', 'field.gel', '(1)'], {}), '(coors_loc, field.gel, 1)\n', (11116, 
11141), True, 'import sfepy.mechanics.membranes as mm\n'), ((10681, 10695), 'numpy.int32', 'nm.int32', (['iels'], {}), '(iels)\n', (10689, 10695), True, 'import numpy as nm\n'), ((11394, 11425), 'sfepy.base.base.assert_', 'assert_', (['(field.approx_order > 0)'], {}), '(field.approx_order > 0)\n', (11401, 11425), False, 'from sfepy.base.base import Struct, assert_\n'), ((12029, 12075), 'sfepy.discrete.fem.mappings.SurfaceMapping', 'SurfaceMapping', (['coors', 'conn'], {'poly_space': 'geo_ps'}), '(coors, conn, poly_space=geo_ps)\n', (12043, 12075), False, 'from sfepy.discrete.fem.mappings import VolumeMapping, SurfaceMapping\n')]
|
from fastapi import Path, Depends, HTTPException
from sqlmodel import Session, select
from db import get_db_session
from model.warehouse import Warehouse
from service.base_crud import BaseCRUD
async def validate_warehouse_id(
warehouse_id: int = Path(...),
db_session: Session = Depends(get_db_session)
) -> Warehouse:
"""Validates the if a warehouse is present with the given id.
Args:
warehouse_id: int. The id of the warehouse.
db_session: Session. The database session used to interact with the DB.
Returns:
Warehouse. The warehouse corresponding to the warehouse_id.
Raises:
HTTPException. Warehouse does not exist in the DB.
"""
warehouse: Warehouse = warehouse_crud.get(db_session, warehouse_id)
if not warehouse or not warehouse.active:
raise HTTPException(status_code=404, detail='Warehouse does not exist')
return warehouse
class WarehouseCRUD(BaseCRUD):
model = Warehouse
def get_by_name(self, db_session: Session, name: str) -> Warehouse:
"""Fetches warehouse by name.
Args:
db_session: Session. The database session used to interact
with the DB.
name: str. The name of the warehouse.
Returns:
Warehouse. The warehouse having the given name.
"""
statement = select(self.model).where(self.model.name == name)
return db_session.exec(statement).first()
warehouse_crud = WarehouseCRUD()
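# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of consuming `validate_warehouse_id` as a FastAPI
# dependency; the router object and the endpoint path are assumptions.
from fastapi import APIRouter

router = APIRouter()

@router.get('/warehouses/{warehouse_id}')
async def read_warehouse(
    warehouse: Warehouse = Depends(validate_warehouse_id),
) -> Warehouse:
    # The dependency has already raised a 404 for a missing or inactive
    # warehouse, so the handler only ever sees valid rows.
    return warehouse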
|
[
"sqlmodel.select"
] |
[((257, 266), 'fastapi.Path', 'Path', (['...'], {}), '(...)\n', (261, 266), False, 'from fastapi import Path, Depends, HTTPException\n'), ((298, 321), 'fastapi.Depends', 'Depends', (['get_db_session'], {}), '(get_db_session)\n', (305, 321), False, 'from fastapi import Path, Depends, HTTPException\n'), ((843, 908), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': '(404)', 'detail': '"""Warehouse does not exist"""'}), "(status_code=404, detail='Warehouse does not exist')\n", (856, 908), False, 'from fastapi import Path, Depends, HTTPException\n'), ((1371, 1389), 'sqlmodel.select', 'select', (['self.model'], {}), '(self.model)\n', (1377, 1389), False, 'from sqlmodel import Session, select\n')]
|
# -*- coding: utf-8 -*-
# MIT License
#
# Copyright (c) 2019 Megvii Technology
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# ------------------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2019 Megvii Inc. All rights reserved.
# ------------------------------------------------------------------------------
import argparse
import multiprocessing as mp
import os
import time
import megengine as mge
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.jit as jit
import megengine.optimizer as optim
import shufflenet_v2 as M
from tensorboardX import SummaryWriter
logger = mge.get_logger(__name__)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-a", "--arch", default="shufflenet_v2_x0_5", type=str)
parser.add_argument("-d", "--data", default=None, type=str)
parser.add_argument("-s", "--save", default="./models", type=str)
parser.add_argument("-m", "--model", default=None, type=str)
    parser.add_argument('-o', '--output', type=str, required=True, help='set path for checkpoints with tensorboard')
parser.add_argument("-b", "--batch-size", default=128, type=int)
parser.add_argument("--learning-rate", default=0.0625, type=float)
parser.add_argument("--momentum", default=0.9, type=float)
parser.add_argument("--weight-decay", default=4e-5, type=float)
parser.add_argument("--steps", default=300000, type=int)
parser.add_argument("-n", "--ngpus", default=None, type=int)
parser.add_argument("-w", "--workers", default=4, type=int)
parser.add_argument("--report-freq", default=50, type=int)
args = parser.parse_args()
world_size = mge.get_device_count("gpu") if args.ngpus is None else args.ngpus
save_dir = os.path.join(args.save, args.arch, "b{}".format(args.batch_size * world_size))
if not os.path.exists(save_dir):
os.makedirs(save_dir)
mge.set_log_file(os.path.join(save_dir, "log.txt"))
if not os.path.exists(args.output):
os.makedirs(args.output)
if world_size > 1:
# scale learning rate by number of gpus
args.learning_rate *= world_size
# start distributed training, dispatch sub-processes
mp.set_start_method("spawn")
processes = []
for rank in range(world_size):
p = mp.Process(target=worker, args=(rank, world_size, args))
p.start()
processes.append(p)
for p in processes:
p.join()
else:
worker(0, 1, args)
def get_parameters(model):
group_no_weight_decay = []
group_weight_decay = []
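    # Only rank > 1 "weight" tensors (conv/linear kernels) receive weight
    # decay; biases and rank-1 weights such as BatchNorm gammas are exempt.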
for pname, p in model.named_parameters(requires_grad=True):
if pname.find("weight") >= 0 and len(p.shape) > 1:
# print("include ", pname, p.shape)
group_weight_decay.append(p)
else:
# print("not include ", pname, p.shape)
group_no_weight_decay.append(p)
assert len(list(model.parameters())) == len(group_weight_decay) + len(
group_no_weight_decay
)
groups = [
dict(params=group_weight_decay),
dict(params=group_no_weight_decay, weight_decay=0.0),
]
return groups
def worker(rank, world_size, args):
# pylint: disable=too-many-statements
mge.set_log_file(os.path.join(args.save, args.arch, "log.txt"))
if world_size > 1:
# Initialize distributed process group
logger.info("init distributed process group {} / {}".format(rank, world_size))
dist.init_process_group(
master_ip="localhost",
master_port=23456,
world_size=world_size,
rank=rank,
dev=rank,
)
save_dir = os.path.join(args.save, args.arch)
if rank == 0:
        prefixes = ['train', 'valid']
        writers = {prefix: SummaryWriter(os.path.join(args.output, prefix)) for prefix in prefixes}
model = getattr(M, args.arch)()
step_start = 0
if args.model:
logger.info("load weights from %s", args.model)
model.load_state_dict(mge.load(args.model))
step_start = int(args.model.split("-")[1].split(".")[0])
optimizer = optim.SGD(
get_parameters(model),
lr=args.learning_rate,
momentum=args.momentum,
weight_decay=args.weight_decay,
)
# Define train and valid graph
@jit.trace(symbolic=True)
def train_func(image, label):
model.train()
logits = model(image)
loss = F.cross_entropy_with_softmax(logits, label, label_smooth=0.1)
acc1, acc5 = F.accuracy(logits, label, (1, 5))
optimizer.backward(loss) # compute gradients
if dist.is_distributed(): # all_reduce_mean
loss = dist.all_reduce_sum(loss) / dist.get_world_size()
acc1 = dist.all_reduce_sum(acc1) / dist.get_world_size()
acc5 = dist.all_reduce_sum(acc5) / dist.get_world_size()
return loss, acc1, acc5
@jit.trace(symbolic=True)
def valid_func(image, label):
model.eval()
logits = model(image)
loss = F.cross_entropy_with_softmax(logits, label, label_smooth=0.1)
acc1, acc5 = F.accuracy(logits, label, (1, 5))
if dist.is_distributed(): # all_reduce_mean
loss = dist.all_reduce_sum(loss) / dist.get_world_size()
acc1 = dist.all_reduce_sum(acc1) / dist.get_world_size()
acc5 = dist.all_reduce_sum(acc5) / dist.get_world_size()
return loss, acc1, acc5
# Build train and valid datasets
logger.info("preparing dataset..")
train_dataset = data.dataset.ImageNet(args.data, train=True)
train_sampler = data.Infinite(data.RandomSampler(
train_dataset, batch_size=args.batch_size, drop_last=True
))
train_queue = data.DataLoader(
train_dataset,
sampler=train_sampler,
transform=T.Compose(
[
T.RandomResizedCrop(224),
T.RandomHorizontalFlip(),
T.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4),
T.ToMode("CHW"),
]
),
num_workers=args.workers,
)
train_queue = iter(train_queue)
valid_dataset = data.dataset.ImageNet(args.data, train=False)
valid_sampler = data.SequentialSampler(
valid_dataset, batch_size=100, drop_last=False
)
valid_queue = data.DataLoader(
valid_dataset,
sampler=valid_sampler,
transform=T.Compose(
[
T.Resize(256),
T.CenterCrop(224),
T.ToMode("CHW"),
]
),
num_workers=args.workers,
)
# Start training
objs = AverageMeter("Loss")
top1 = AverageMeter("Acc@1")
top5 = AverageMeter("Acc@5")
total_time = AverageMeter("Time")
t = time.time()
for step in range(step_start, args.steps + 1):
# Linear learning rate decay
        decay = 1 - float(step) / args.steps if step < args.steps else 0
for param_group in optimizer.param_groups:
param_group["lr"] = args.learning_rate * decay
image, label = next(train_queue)
        time_data = time.time() - t
image = image.astype("float32")
label = label.astype("int32")
n = image.shape[0]
optimizer.zero_grad()
loss, acc1, acc5 = train_func(image, label)
optimizer.step()
top1.update(100 * acc1.numpy()[0], n)
top5.update(100 * acc5.numpy()[0], n)
objs.update(loss.numpy()[0], n)
total_time.update(time.time() - t)
        time_iter = time.time() - t
t = time.time()
if step % args.report_freq == 0 and rank == 0:
logger.info(
"TRAIN Iter %06d: lr = %f,\tloss = %f,\twc_loss = 1,\tTop-1 err = %f,\tTop-5 err = %f,\tdata_time = %f,\ttrain_time = %f,\tremain_hours=%f",
step,
args.learning_rate * decay,
float(objs.__str__().split()[1]),
1-float(top1.__str__().split()[1])/100,
1-float(top5.__str__().split()[1])/100,
time_data,
time_iter - time_data,
time_iter * (args.steps - step) / 3600,
)
writers['train'].add_scalar('loss', float(objs.__str__().split()[1]), global_step=step)
writers['train'].add_scalar('top1_err', 1-float(top1.__str__().split()[1])/100, global_step=step)
writers['train'].add_scalar('top5_err', 1-float(top5.__str__().split()[1])/100, global_step=step)
objs.reset()
top1.reset()
top5.reset()
total_time.reset()
if step % 10000 == 0 and rank == 0 and step != 0:
logger.info("SAVING %06d", step)
mge.save(
model.state_dict(),
os.path.join(save_dir, "checkpoint-{:06d}.pkl".format(step)),
)
if step % 10000 == 0 and step != 0:
loss, valid_acc, valid_acc5 = infer(valid_func, valid_queue, args)
logger.info("TEST Iter %06d: loss = %f,\tTop-1 err = %f,\tTop-5 err = %f", step, loss, 1-valid_acc/100, 1-valid_acc5/100)
if rank == 0:
writers['valid'].add_scalar('loss', loss, global_step=step)
writers['valid'].add_scalar('top1_err', 1-valid_acc/100, global_step=step)
writers['valid'].add_scalar('top5_err', 1-valid_acc5/100, global_step=step)
mge.save(
model.state_dict(), os.path.join(save_dir, "checkpoint-{:06d}.pkl".format(step))
)
loss, valid_acc, valid_acc5 = infer(valid_func, valid_queue, args)
logger.info("TEST Iter %06d: loss=%f,\tTop-1 err = %f,\tTop-5 err = %f", step, _, 1-valid_acc/100, 1-valid_acc5/100)
def infer(model, data_queue, args):
objs = AverageMeter("Loss")
top1 = AverageMeter("Acc@1")
top5 = AverageMeter("Acc@5")
total_time = AverageMeter("Time")
t = time.time()
for step, (image, label) in enumerate(data_queue):
n = image.shape[0]
image = image.astype("float32") # convert np.uint8 to float32
label = label.astype("int32")
loss, acc1, acc5 = model(image, label)
objs.update(loss.numpy()[0], n)
top1.update(100 * acc1.numpy()[0], n)
top5.update(100 * acc5.numpy()[0], n)
total_time.update(time.time() - t)
t = time.time()
if step % args.report_freq == 0 and dist.get_rank() == 0:
logger.info(
"Step %d, %s %s %s %s",
step,
objs,
top1,
top5,
total_time,
)
return objs.avg, top1.avg, top5.avg
class AverageMeter:
"""Computes and stores the average and current value"""
def __init__(self, name, fmt=":.3f"):
self.name = name
self.fmt = fmt
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __str__(self):
fmtstr = "{name} {val" + self.fmt + "} ({avg" + self.fmt + "})"
return fmtstr.format(**self.__dict__)
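# Usage sketch (illustrative): track a running mean of per-batch losses.
#   meter = AverageMeter("Loss")
#   meter.update(0.9, n=64)
#   meter.update(0.7, n=64)
#   print(meter)  # -> "Loss 0.700 (0.800)": current value, running average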
if __name__ == "__main__":
main()
|
[
"megengine.get_device_count",
"megengine.distributed.is_distributed",
"megengine.data.transform.ToMode",
"megengine.data.transform.CenterCrop",
"megengine.data.SequentialSampler",
"megengine.distributed.get_rank",
"megengine.distributed.get_world_size",
"megengine.get_logger",
"megengine.data.dataset.ImageNet",
"megengine.data.transform.RandomHorizontalFlip",
"megengine.jit.trace",
"megengine.data.transform.Resize",
"megengine.data.transform.ColorJitter",
"megengine.data.RandomSampler",
"megengine.functional.accuracy",
"megengine.data.transform.RandomResizedCrop",
"megengine.distributed.all_reduce_sum",
"megengine.functional.cross_entropy_with_softmax",
"megengine.distributed.init_process_group",
"megengine.load"
] |
[((2178, 2202), 'megengine.get_logger', 'mge.get_logger', (['__name__'], {}), '(__name__)\n', (2192, 2202), True, 'import megengine as mge\n'), ((2230, 2255), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2253, 2255), False, 'import argparse\n'), ((5245, 5279), 'os.path.join', 'os.path.join', (['args.save', 'args.arch'], {}), '(args.save, args.arch)\n', (5257, 5279), False, 'import os\n'), ((5890, 5914), 'megengine.jit.trace', 'jit.trace', ([], {'symbolic': '(True)'}), '(symbolic=True)\n', (5899, 5914), True, 'import megengine.jit as jit\n'), ((6485, 6509), 'megengine.jit.trace', 'jit.trace', ([], {'symbolic': '(True)'}), '(symbolic=True)\n', (6494, 6509), True, 'import megengine.jit as jit\n'), ((7116, 7160), 'megengine.data.dataset.ImageNet', 'data.dataset.ImageNet', (['args.data'], {'train': '(True)'}), '(args.data, train=True)\n', (7137, 7160), True, 'import megengine.data as data\n'), ((7737, 7782), 'megengine.data.dataset.ImageNet', 'data.dataset.ImageNet', (['args.data'], {'train': '(False)'}), '(args.data, train=False)\n', (7758, 7782), True, 'import megengine.data as data\n'), ((7803, 7873), 'megengine.data.SequentialSampler', 'data.SequentialSampler', (['valid_dataset'], {'batch_size': '(100)', 'drop_last': '(False)'}), '(valid_dataset, batch_size=100, drop_last=False)\n', (7825, 7873), True, 'import megengine.data as data\n'), ((8351, 8362), 'time.time', 'time.time', ([], {}), '()\n', (8360, 8362), False, 'import time\n'), ((11489, 11500), 'time.time', 'time.time', ([], {}), '()\n', (11498, 11500), False, 'import time\n'), ((3225, 3252), 'megengine.get_device_count', 'mge.get_device_count', (['"""gpu"""'], {}), "('gpu')\n", (3245, 3252), True, 'import megengine as mge\n'), ((3397, 3421), 'os.path.exists', 'os.path.exists', (['save_dir'], {}), '(save_dir)\n', (3411, 3421), False, 'import os\n'), ((3431, 3452), 'os.makedirs', 'os.makedirs', (['save_dir'], {}), '(save_dir)\n', (3442, 3452), False, 'import os\n'), ((3474, 3507), 'os.path.join', 'os.path.join', (['save_dir', '"""log.txt"""'], {}), "(save_dir, 'log.txt')\n", (3486, 3507), False, 'import os\n'), ((3521, 3548), 'os.path.exists', 'os.path.exists', (['args.output'], {}), '(args.output)\n', (3535, 3548), False, 'import os\n'), ((3558, 3582), 'os.makedirs', 'os.makedirs', (['args.output'], {}), '(args.output)\n', (3569, 3582), False, 'import os\n'), ((3766, 3794), 'multiprocessing.set_start_method', 'mp.set_start_method', (['"""spawn"""'], {}), "('spawn')\n", (3785, 3794), True, 'import multiprocessing as mp\n'), ((4835, 4880), 'os.path.join', 'os.path.join', (['args.save', 'args.arch', '"""log.txt"""'], {}), "(args.save, args.arch, 'log.txt')\n", (4847, 4880), False, 'import os\n'), ((5048, 5161), 'megengine.distributed.init_process_group', 'dist.init_process_group', ([], {'master_ip': '"""localhost"""', 'master_port': '(23456)', 'world_size': 'world_size', 'rank': 'rank', 'dev': 'rank'}), "(master_ip='localhost', master_port=23456,\n world_size=world_size, rank=rank, dev=rank)\n", (5071, 5161), True, 'import megengine.distributed as dist\n'), ((6016, 6077), 'megengine.functional.cross_entropy_with_softmax', 'F.cross_entropy_with_softmax', (['logits', 'label'], {'label_smooth': '(0.1)'}), '(logits, label, label_smooth=0.1)\n', (6044, 6077), True, 'import megengine.functional as F\n'), ((6099, 6132), 'megengine.functional.accuracy', 'F.accuracy', (['logits', 'label', '(1, 5)'], {}), '(logits, label, (1, 5))\n', (6109, 6132), True, 'import megengine.functional as F\n'), ((6198, 6219), 
'megengine.distributed.is_distributed', 'dist.is_distributed', ([], {}), '()\n', (6217, 6219), True, 'import megengine.distributed as dist\n'), ((6610, 6671), 'megengine.functional.cross_entropy_with_softmax', 'F.cross_entropy_with_softmax', (['logits', 'label'], {'label_smooth': '(0.1)'}), '(logits, label, label_smooth=0.1)\n', (6638, 6671), True, 'import megengine.functional as F\n'), ((6693, 6726), 'megengine.functional.accuracy', 'F.accuracy', (['logits', 'label', '(1, 5)'], {}), '(logits, label, (1, 5))\n', (6703, 6726), True, 'import megengine.functional as F\n'), ((6738, 6759), 'megengine.distributed.is_distributed', 'dist.is_distributed', ([], {}), '()\n', (6757, 6759), True, 'import megengine.distributed as dist\n'), ((7195, 7272), 'megengine.data.RandomSampler', 'data.RandomSampler', (['train_dataset'], {'batch_size': 'args.batch_size', 'drop_last': '(True)'}), '(train_dataset, batch_size=args.batch_size, drop_last=True)\n', (7213, 7272), True, 'import megengine.data as data\n'), ((9162, 9173), 'time.time', 'time.time', ([], {}), '()\n', (9171, 9173), False, 'import time\n'), ((11928, 11939), 'time.time', 'time.time', ([], {}), '()\n', (11937, 11939), False, 'import time\n'), ((3873, 3929), 'multiprocessing.Process', 'mp.Process', ([], {'target': 'worker', 'args': '(rank, world_size, args)'}), '(target=worker, args=(rank, world_size, args))\n', (3883, 3929), True, 'import multiprocessing as mp\n'), ((5594, 5614), 'megengine.load', 'mge.load', (['args.model'], {}), '(args.model)\n', (5602, 5614), True, 'import megengine as mge\n'), ((8714, 8725), 'time.time', 'time.time', ([], {}), '()\n', (8723, 8725), False, 'import time\n'), ((9136, 9147), 'time.time', 'time.time', ([], {}), '()\n', (9145, 9147), False, 'import time\n'), ((5375, 5408), 'os.path.join', 'os.path.join', (['args.output', 'prefix'], {}), '(args.output, prefix)\n', (5387, 5408), False, 'import os\n'), ((6259, 6284), 'megengine.distributed.all_reduce_sum', 'dist.all_reduce_sum', (['loss'], {}), '(loss)\n', (6278, 6284), True, 'import megengine.distributed as dist\n'), ((6287, 6308), 'megengine.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (6306, 6308), True, 'import megengine.distributed as dist\n'), ((6328, 6353), 'megengine.distributed.all_reduce_sum', 'dist.all_reduce_sum', (['acc1'], {}), '(acc1)\n', (6347, 6353), True, 'import megengine.distributed as dist\n'), ((6356, 6377), 'megengine.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (6375, 6377), True, 'import megengine.distributed as dist\n'), ((6397, 6422), 'megengine.distributed.all_reduce_sum', 'dist.all_reduce_sum', (['acc5'], {}), '(acc5)\n', (6416, 6422), True, 'import megengine.distributed as dist\n'), ((6425, 6446), 'megengine.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (6444, 6446), True, 'import megengine.distributed as dist\n'), ((6799, 6824), 'megengine.distributed.all_reduce_sum', 'dist.all_reduce_sum', (['loss'], {}), '(loss)\n', (6818, 6824), True, 'import megengine.distributed as dist\n'), ((6827, 6848), 'megengine.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (6846, 6848), True, 'import megengine.distributed as dist\n'), ((6868, 6893), 'megengine.distributed.all_reduce_sum', 'dist.all_reduce_sum', (['acc1'], {}), '(acc1)\n', (6887, 6893), True, 'import megengine.distributed as dist\n'), ((6896, 6917), 'megengine.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (6915, 6917), True, 'import megengine.distributed as dist\n'), ((6937, 
6962), 'megengine.distributed.all_reduce_sum', 'dist.all_reduce_sum', (['acc5'], {}), '(acc5)\n', (6956, 6962), True, 'import megengine.distributed as dist\n'), ((6965, 6986), 'megengine.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (6984, 6986), True, 'import megengine.distributed as dist\n'), ((9101, 9112), 'time.time', 'time.time', ([], {}), '()\n', (9110, 9112), False, 'import time\n'), ((11899, 11910), 'time.time', 'time.time', ([], {}), '()\n', (11908, 11910), False, 'import time\n'), ((11985, 12000), 'megengine.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (11998, 12000), True, 'import megengine.distributed as dist\n'), ((7436, 7460), 'megengine.data.transform.RandomResizedCrop', 'T.RandomResizedCrop', (['(224)'], {}), '(224)\n', (7455, 7460), True, 'import megengine.data.transform as T\n'), ((7478, 7502), 'megengine.data.transform.RandomHorizontalFlip', 'T.RandomHorizontalFlip', ([], {}), '()\n', (7500, 7502), True, 'import megengine.data.transform as T\n'), ((7520, 7579), 'megengine.data.transform.ColorJitter', 'T.ColorJitter', ([], {'brightness': '(0.4)', 'contrast': '(0.4)', 'saturation': '(0.4)'}), '(brightness=0.4, contrast=0.4, saturation=0.4)\n', (7533, 7579), True, 'import megengine.data.transform as T\n'), ((7597, 7612), 'megengine.data.transform.ToMode', 'T.ToMode', (['"""CHW"""'], {}), "('CHW')\n", (7605, 7612), True, 'import megengine.data.transform as T\n'), ((8036, 8049), 'megengine.data.transform.Resize', 'T.Resize', (['(256)'], {}), '(256)\n', (8044, 8049), True, 'import megengine.data.transform as T\n'), ((8067, 8084), 'megengine.data.transform.CenterCrop', 'T.CenterCrop', (['(224)'], {}), '(224)\n', (8079, 8084), True, 'import megengine.data.transform as T\n'), ((8102, 8117), 'megengine.data.transform.ToMode', 'T.ToMode', (['"""CHW"""'], {}), "('CHW')\n", (8110, 8117), True, 'import megengine.data.transform as T\n')]
|
# Copyright (c) 2014-2022 Megvii Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import copy
import megengine
import megengine.functional as F
import megengine.module as nn
import numpy as np
from basecls.layers import DropPath, init_weights
from basecls.utils import registers
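# The helper below folds a BatchNorm that *precedes* a 1x1 convolution into
# the convolution itself: the per-channel BN scale gamma/std is absorbed into
# the kernel's input channels, and the BN shift is pushed through the conv
# into a new bias term.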
def _fuse_prebn_conv1x1(bn, conv):
module_output = copy.deepcopy(conv)
module_output.bias = megengine.Parameter(np.zeros(module_output._infer_bias_shape(), dtype=np.float32))
assert conv.groups == 1
kernel = conv.weight
running_mean = bn.running_mean
running_var = bn.running_var
gamma = bn.weight
beta = bn.bias
eps = bn.eps
std = F.sqrt(running_var + eps)
t = (gamma / std).reshape(1, -1, 1, 1)
module_output.weight[:] = kernel * t
module_output.bias[:] = F.conv2d(beta - running_mean * gamma / std, kernel, conv.bias)
return module_output
def _fuse_conv_bn(conv, bn):
module_output = copy.deepcopy(conv)
module_output.bias = megengine.Parameter(np.zeros(module_output._infer_bias_shape(), dtype=np.float32))
# flatten then reshape in case of group conv
kernel = F.flatten(conv.weight, end_axis=conv.weight.ndim - 4)
running_mean = bn.running_mean
running_var = bn.running_var
gamma = bn.weight
beta = bn.bias
eps = bn.eps
std = F.sqrt(running_var + eps)
t = (gamma / std).reshape(-1, 1, 1, 1)
module_output.weight[:] = (kernel * t).reshape(module_output.weight.shape)
module_output.bias[:] = beta + ((conv.bias if conv.bias is not None else 0) - running_mean) * gamma / std
return module_output
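# A quick numerical sanity check of the conv+BN fusion above (an illustrative
# sketch only -- the layer sizes and tolerance are assumptions):
def _check_fuse_conv_bn():
    conv = nn.Conv2d(8, 8, 3, padding=1, bias=True)
    bn = nn.BatchNorm2d(8)
    bn.eval()  # use running statistics, so conv followed by bn is a fixed affine map
    fused = _fuse_conv_bn(conv, bn)
    x = megengine.tensor(np.random.randn(2, 8, 16, 16).astype("float32"))
    np.testing.assert_allclose(bn(conv(x)).numpy(), fused(x).numpy(), atol=1e-4)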
class ConvBn2d(nn.ConvBn2d):
def __init__(self, *args, **kwargs):
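        # Force bias=False regardless of what the caller passed: the BatchNorm
        # that follows provides the affine offset, so a conv bias is redundant.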
bias = kwargs.pop("bias", False) and False
super().__init__(*args, bias=bias, **kwargs)
@classmethod
def fuse_conv_bn(cls, module: nn.Module):
module_output = module
if isinstance(module, ConvBn2d):
return _fuse_conv_bn(module.conv, module.bn)
for name, child in module.named_children():
setattr(module_output, name, cls.fuse_conv_bn(child))
del module
return module_output
class LargeKernelReparam(nn.Module):
def __init__(self, channels, kernel, small_kernels=()):
super(LargeKernelReparam, self).__init__()
self.dw_large = ConvBn2d(channels, channels, kernel, padding=kernel // 2, groups=channels)
self.small_kernels = small_kernels
for k in self.small_kernels:
setattr(self, f"dw_small_{k}", ConvBn2d(channels, channels, k, padding=k // 2, groups=channels))
def forward(self, inp):
outp = self.dw_large(inp)
for k in self.small_kernels:
outp += getattr(self, f"dw_small_{k}")(inp)
return outp
@classmethod
def convert_to_deploy(cls, module: nn.Module):
module_output = module
if isinstance(module, LargeKernelReparam):
module = ConvBn2d.fuse_conv_bn(module)
module_output = copy.deepcopy(module.dw_large)
kernel = module_output.kernel_size[0]
for k in module.small_kernels:
dw_small = getattr(module, f"dw_small_{k}")
module_output.weight += F.pad(dw_small.weight, [[0, 0]] * 3 + [[(kernel - k) // 2] * 2] * 2)
module_output.bias += dw_small.bias
return module_output
for name, child in module.named_children():
setattr(module_output, name, cls.convert_to_deploy(child))
del module
return module_output
class Mlp(nn.Module):
def __init__(self, in_channels, hidden_channels=None, out_channels=None, act_layer=nn.GELU, drop=0.,):
super().__init__()
out_features = out_channels or in_channels
hidden_features = hidden_channels or in_channels
self.fc1 = ConvBn2d(in_channels, hidden_features, 1, stride=1, padding=0)
self.act = act_layer()
self.fc2 = ConvBn2d(hidden_features, out_features, 1, stride=1, padding=0)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class RepLKBlock(nn.Module):
def __init__(self, channels, kernel, small_kernels=(), dw_ratio=1.0, mlp_ratio=4.0, drop_path=0., activation=nn.ReLU):
super().__init__()
self.pre_bn = nn.BatchNorm2d(channels)
self.pw1 = ConvBn2d(channels, int(channels * dw_ratio), 1, 1, 0)
self.pw1_act = activation()
self.dw = LargeKernelReparam(int(channels * dw_ratio), kernel, small_kernels=small_kernels)
self.dw_act = activation()
self.pw2 = ConvBn2d(int(channels * dw_ratio), channels, 1, 1, 0)
self.premlp_bn = nn.BatchNorm2d(channels)
self.mlp = Mlp(in_channels=channels, hidden_channels=int(channels * mlp_ratio))
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def forward(self, x):
y = self.pre_bn(x)
y = self.pw1_act(self.pw1(y))
y = self.dw_act(self.dw(y))
y = self.pw2(y)
x = x + self.drop_path(y)
y = self.premlp_bn(x)
y = self.mlp(y)
x = x + self.drop_path(y)
return x
@classmethod
def convert_to_deploy(cls, module: nn.Module):
module_output = module
if isinstance(module, RepLKBlock):
LargeKernelReparam.convert_to_deploy(module)
ConvBn2d.fuse_conv_bn(module)
module.pre_bn, module.pw1 = nn.Identity(), _fuse_prebn_conv1x1(module.pre_bn, module.pw1)
module.premlp_bn, module.mlp.fc1 = nn.Identity(), _fuse_prebn_conv1x1(module.premlp_bn, module.mlp.fc1)
return module_output
for name, child in module.named_children():
setattr(module_output, name, cls.convert_to_deploy(child))
del module
return module_output
class DownSample(nn.Sequential):
def __init__(self, in_channels, out_channels, activation=nn.ReLU):
super().__init__(
ConvBn2d(in_channels, out_channels, 1),
activation(),
ConvBn2d(out_channels, out_channels, 3, stride=2, padding=1, groups=out_channels),
activation(),
)
class Stem(nn.Sequential):
def __init__(self, in_channels, out_channels, activation=nn.ReLU):
super().__init__(
ConvBn2d(in_channels, out_channels, 3, stride=2, padding=1),
activation(),
ConvBn2d(out_channels, out_channels, 3, padding=1, groups=out_channels),
activation(),
ConvBn2d(out_channels, out_channels, 1),
activation(),
ConvBn2d(out_channels, out_channels, 3, stride=2, padding=1, groups=out_channels),
activation(),
)
class RepLKNet(nn.Module):
def __init__(
self,
in_channels=3,
depths=(2, 2, 18, 2),
dims=(128, 256, 512, 1024),
kernel_sizes=(31, 29, 27, 13),
small_kernels=(5,),
dw_ratio=1.0,
mlp_ratio=4.0,
num_classes=1000,
drop_path_rate=0.5,
):
super().__init__()
self.stem = Stem(in_channels, dims[0])
# stochastic depth
dpr = (x for x in np.linspace(0, drop_path_rate, sum(depths))) # stochastic depth decay rule
self.blocks = []
for stage, (depth, dim, ksize) in enumerate(zip(depths, dims, kernel_sizes)):
for _ in range(depth):
self.blocks.append(
RepLKBlock(dim, ksize, small_kernels=small_kernels,
dw_ratio=dw_ratio, mlp_ratio=mlp_ratio, drop_path=next(dpr))
)
if stage < len(depths) - 1:
self.blocks.append(DownSample(dim, dims[stage + 1]))
self.norm = nn.BatchNorm2d(dims[-1])
self.avgpool = nn.AdaptiveAvgPool2d(1)
self.head = nn.Linear(dims[-1], num_classes) if num_classes > 0 else nn.Identity()
init_weights(self)
def forward_features(self, x):
x = self.stem(x)
for blk in self.blocks:
x = blk(x)
x = self.norm(x)
x = self.avgpool(x)
x = F.flatten(x, 1)
return x
def forward(self, x):
x = self.forward_features(x)
x = self.head(x)
return x
@classmethod
def convert_to_deploy(cls, module: nn.Module):
module_output = module
if isinstance(module, RepLKNet):
RepLKBlock.convert_to_deploy(module)
ConvBn2d.fuse_conv_bn(module)
return module_output
for name, child in module.named_children():
setattr(module_output, name, cls.convert_to_deploy(child))
del module
return module_output
@registers.models.register()
def replknet31_base(**kwargs):
kwargs.pop("head", None)
return RepLKNet(dims=(128, 256, 512, 1024), dw_ratio=1.0, **kwargs)
@registers.models.register()
def replknet31_large(**kwargs):
kwargs.pop("head", None)
return RepLKNet(dims=(192, 384, 768, 1536), dw_ratio=1.0, **kwargs)
@registers.models.register()
def replknet_xlarge(**kwargs):
kwargs.pop("head", None)
return RepLKNet(dims=(256, 512, 1024, 2048), kernel_sizes=(27, 27, 27, 13), small_kernels=(), dw_ratio=1.5, **kwargs)
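# Deploy-time conversion sketch (illustrative; the input shape and tolerance
# are assumptions). Conversion fuses the BN layers and merges the parallel
# small kernels into the large depthwise kernel without changing the outputs:
#   m = replknet31_base(num_classes=1000)
#   m.eval()
#   x = megengine.tensor(np.random.randn(1, 3, 224, 224).astype("float32"))
#   y_ref = m(x).numpy()
#   m_deploy = RepLKNet.convert_to_deploy(m)  # mutates and fuses the module tree
#   np.testing.assert_allclose(y_ref, m_deploy(x).numpy(), atol=1e-3)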
|
[
"megengine.module.Dropout",
"megengine.module.Linear",
"megengine.functional.pad",
"megengine.functional.sqrt",
"megengine.module.BatchNorm2d",
"megengine.functional.conv2d",
"megengine.module.AdaptiveAvgPool2d",
"megengine.module.Identity",
"megengine.functional.flatten"
] |
[((9027, 9054), 'basecls.utils.registers.models.register', 'registers.models.register', ([], {}), '()\n', (9052, 9054), False, 'from basecls.utils import registers\n'), ((9190, 9217), 'basecls.utils.registers.models.register', 'registers.models.register', ([], {}), '()\n', (9215, 9217), False, 'from basecls.utils import registers\n'), ((9354, 9381), 'basecls.utils.registers.models.register', 'registers.models.register', ([], {}), '()\n', (9379, 9381), False, 'from basecls.utils import registers\n'), ((597, 616), 'copy.deepcopy', 'copy.deepcopy', (['conv'], {}), '(conv)\n', (610, 616), False, 'import copy\n'), ((914, 939), 'megengine.functional.sqrt', 'F.sqrt', (['(running_var + eps)'], {}), '(running_var + eps)\n', (920, 939), True, 'import megengine.functional as F\n'), ((1052, 1114), 'megengine.functional.conv2d', 'F.conv2d', (['(beta - running_mean * gamma / std)', 'kernel', 'conv.bias'], {}), '(beta - running_mean * gamma / std, kernel, conv.bias)\n', (1060, 1114), True, 'import megengine.functional as F\n'), ((1191, 1210), 'copy.deepcopy', 'copy.deepcopy', (['conv'], {}), '(conv)\n', (1204, 1210), False, 'import copy\n'), ((1381, 1434), 'megengine.functional.flatten', 'F.flatten', (['conv.weight'], {'end_axis': '(conv.weight.ndim - 4)'}), '(conv.weight, end_axis=conv.weight.ndim - 4)\n', (1390, 1434), True, 'import megengine.functional as F\n'), ((1571, 1596), 'megengine.functional.sqrt', 'F.sqrt', (['(running_var + eps)'], {}), '(running_var + eps)\n', (1577, 1596), True, 'import megengine.functional as F\n'), ((4265, 4281), 'megengine.module.Dropout', 'nn.Dropout', (['drop'], {}), '(drop)\n', (4275, 4281), True, 'import megengine.module as nn\n'), ((4653, 4677), 'megengine.module.BatchNorm2d', 'nn.BatchNorm2d', (['channels'], {}), '(channels)\n', (4667, 4677), True, 'import megengine.module as nn\n'), ((5021, 5045), 'megengine.module.BatchNorm2d', 'nn.BatchNorm2d', (['channels'], {}), '(channels)\n', (5035, 5045), True, 'import megengine.module as nn\n'), ((8078, 8102), 'megengine.module.BatchNorm2d', 'nn.BatchNorm2d', (['dims[-1]'], {}), '(dims[-1])\n', (8092, 8102), True, 'import megengine.module as nn\n'), ((8126, 8149), 'megengine.module.AdaptiveAvgPool2d', 'nn.AdaptiveAvgPool2d', (['(1)'], {}), '(1)\n', (8146, 8149), True, 'import megengine.module as nn\n'), ((8249, 8267), 'basecls.layers.init_weights', 'init_weights', (['self'], {}), '(self)\n', (8261, 8267), False, 'from basecls.layers import DropPath, init_weights\n'), ((8449, 8464), 'megengine.functional.flatten', 'F.flatten', (['x', '(1)'], {}), '(x, 1)\n', (8458, 8464), True, 'import megengine.functional as F\n'), ((3234, 3264), 'copy.deepcopy', 'copy.deepcopy', (['module.dw_large'], {}), '(module.dw_large)\n', (3247, 3264), False, 'import copy\n'), ((5160, 5179), 'basecls.layers.DropPath', 'DropPath', (['drop_path'], {}), '(drop_path)\n', (5168, 5179), False, 'from basecls.layers import DropPath, init_weights\n'), ((5203, 5216), 'megengine.module.Identity', 'nn.Identity', ([], {}), '()\n', (5214, 5216), True, 'import megengine.module as nn\n'), ((8170, 8202), 'megengine.module.Linear', 'nn.Linear', (['dims[-1]', 'num_classes'], {}), '(dims[-1], num_classes)\n', (8179, 8202), True, 'import megengine.module as nn\n'), ((8227, 8240), 'megengine.module.Identity', 'nn.Identity', ([], {}), '()\n', (8238, 8240), True, 'import megengine.module as nn\n'), ((3458, 3526), 'megengine.functional.pad', 'F.pad', (['dw_small.weight', '([[0, 0]] * 3 + [[(kernel - k) // 2] * 2] * 2)'], {}), '(dw_small.weight, [[0, 0]] * 3 + [[(kernel - k) 
// 2] * 2] * 2)\n', (3463, 3526), True, 'import megengine.functional as F\n'), ((5793, 5806), 'megengine.module.Identity', 'nn.Identity', ([], {}), '()\n', (5804, 5806), True, 'import megengine.module as nn\n'), ((5902, 5915), 'megengine.module.Identity', 'nn.Identity', ([], {}), '()\n', (5913, 5915), True, 'import megengine.module as nn\n')]
|
import numpy as nm
from sfepy.base.conf import transform_functions
from sfepy.base.testing import TestCommon
def get_vertices(coors, domain=None):
x, z = coors[:,0], coors[:,2]
return nm.where((z < 0.1) & (x < 0.1))[0]
def get_cells(coors, domain=None):
return nm.where(coors[:, 0] < 0)[0]
class Test(TestCommon):
@staticmethod
    def from_conf(conf, options):
from sfepy import data_dir
from sfepy.discrete.fem import Mesh, FEDomain
from sfepy.discrete import Functions
mesh = Mesh('test mesh',
data_dir + '/meshes/various_formats/abaqus_tet.inp')
mesh.nodal_bcs['set0'] = [0, 7]
domain = FEDomain('test domain', mesh)
conf_functions = {
'get_vertices' : (get_vertices,),
'get_cells' : (get_cells,),
}
functions = Functions.from_conf(transform_functions(conf_functions))
test = Test(conf=conf, options=options,
domain=domain, functions=functions)
return test
def test_selectors(self):
"""
Test basic region selectors.
"""
selectors = [
['all', 'cell'],
['vertices of surface', 'facet'],
['vertices of group 0', 'facet'],
['vertices of set set0', 'vertex'],
['vertices in (z < 0.1) & (x < 0.1)', 'facet'],
['vertices by get_vertices', 'cell'],
['vertex 0, 1, 2', 'vertex'],
['vertex in r.r6', 'vertex'],
['cells of group 0', 'cell'],
# ['cells of set 0', 'cell'], not implemented...
['cells by get_cells', 'cell'],
['cell 1, 4, 5', 'cell'],
['cell (0, 1), (0, 4), (0, 5)', 'cell'],
['copy r.r5', 'cell'],
['r.r5', 'cell'],
]
vertices = [
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
[0, 1, 3, 7],
[0, 7],
[1, 2, 3, 4, 5, 9, 11],
[1, 2, 3, 4, 5, 9, 11],
[0, 1, 2],
[0],
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
[0, 1, 2, 3, 4, 5, 6, 9, 10, 11],
[0, 1, 2, 3, 4, 5, 6, 8],
[0, 1, 2, 3, 4, 5, 6, 8],
[1, 2, 3, 4, 5, 9, 11],
[1, 2, 3, 4, 5, 9, 11],
]
ok = True
for ii, sel in enumerate(selectors):
self.report('select:', sel)
reg = self.domain.create_region('r%d' % ii, sel[0], kind=sel[1],
functions=self.functions)
_ok = ((len(reg.vertices) == len(vertices[ii]))
and (reg.vertices == vertices[ii]).all())
self.report(' vertices:', _ok)
ok = ok and _ok
return ok
def test_operators(self):
"""
Test operators in region selectors.
"""
ok = True
r1 = self.domain.create_region('r1', 'all')
sel = 'r.r1 -v vertices of group 0'
self.report('select:', sel)
reg = self.domain.create_region('reg', sel, kind='vertex')
av = [2, 4, 5, 6, 8, 9, 10, 11, 12]
_ok = (reg.vertices == nm.array(av)).all()
self.report(' vertices:', _ok)
ok = ok and _ok
sel = 'vertex 0, 1, 2 +v vertices of group 0'
self.report('select:', sel)
reg = self.domain.create_region('reg', sel, kind='vertex')
av = [0, 1, 2, 3, 7]
_ok = (reg.vertices == nm.array(av)).all()
self.report(' vertices:', _ok)
ok = ok and _ok
sel = 'vertex 0, 1, 2 *v vertices of group 0'
self.report('select:', sel)
reg = self.domain.create_region('reg', sel, kind='vertex')
av = [0, 1]
_ok = (reg.vertices == nm.array(av)).all()
self.report(' vertices:', _ok)
ok = ok and _ok
sel = 'r.r1 -c cell 1, 4, 5'
self.report('select:', sel)
reg = self.domain.create_region('reg', sel)
_ok = (nm.setdiff1d(r1.cells[0], [1, 4, 5]) == reg.cells[0]).all()
self.report(' cells:', _ok)
ok = ok and _ok
sel = 'cell 8, 3 +c cell 1, 4, 5'
self.report('select:', sel)
reg = self.domain.create_region('reg', sel)
cells = [1, 3, 4, 5, 8]
_ok = (reg.cells == nm.array(cells)).all()
self.report(' cells:', _ok)
ok = ok and _ok
sel = 'cell 8, 3, 2 *c cell 8, 4, 2, 7'
self.report('select:', sel)
reg = self.domain.create_region('reg', sel)
cells = [2, 8]
_ok = (reg.cells == nm.array(cells)).all()
self.report(' cells:', _ok)
ok = ok and _ok
return ok
|
[
"sfepy.base.conf.transform_functions",
"sfepy.discrete.fem.Mesh",
"sfepy.discrete.fem.FEDomain"
] |
[((195, 226), 'numpy.where', 'nm.where', (['((z < 0.1) & (x < 0.1))'], {}), '((z < 0.1) & (x < 0.1))\n', (203, 226), True, 'import numpy as nm\n'), ((277, 302), 'numpy.where', 'nm.where', (['(coors[:, 0] < 0)'], {}), '(coors[:, 0] < 0)\n', (285, 302), True, 'import numpy as nm\n'), ((536, 606), 'sfepy.discrete.fem.Mesh', 'Mesh', (['"""test mesh"""', "(data_dir + '/meshes/various_formats/abaqus_tet.inp')"], {}), "('test mesh', data_dir + '/meshes/various_formats/abaqus_tet.inp')\n", (540, 606), False, 'from sfepy.discrete.fem import Mesh, FEDomain\n'), ((684, 713), 'sfepy.discrete.fem.FEDomain', 'FEDomain', (['"""test domain"""', 'mesh'], {}), "('test domain', mesh)\n", (692, 713), False, 'from sfepy.discrete.fem import Mesh, FEDomain\n'), ((878, 913), 'sfepy.base.conf.transform_functions', 'transform_functions', (['conf_functions'], {}), '(conf_functions)\n', (897, 913), False, 'from sfepy.base.conf import transform_functions\n'), ((3318, 3330), 'numpy.array', 'nm.array', (['av'], {}), '(av)\n', (3326, 3330), True, 'import numpy as nm\n'), ((3624, 3636), 'numpy.array', 'nm.array', (['av'], {}), '(av)\n', (3632, 3636), True, 'import numpy as nm\n'), ((3918, 3930), 'numpy.array', 'nm.array', (['av'], {}), '(av)\n', (3926, 3930), True, 'import numpy as nm\n'), ((4143, 4179), 'numpy.setdiff1d', 'nm.setdiff1d', (['r1.cells[0]', '[1, 4, 5]'], {}), '(r1.cells[0], [1, 4, 5])\n', (4155, 4179), True, 'import numpy as nm\n'), ((4459, 4474), 'numpy.array', 'nm.array', (['cells'], {}), '(cells)\n', (4467, 4474), True, 'import numpy as nm\n'), ((4732, 4747), 'numpy.array', 'nm.array', (['cells'], {}), '(cells)\n', (4740, 4747), True, 'import numpy as nm\n')]
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F  # needed below for F.round / F.minimum / F.maximum / F.ones_like
from megengine import tensor
from megengine.core.autodiff.grad import Grad
from megengine.core.tensor.function import Function
from megengine.core.tensor.utils import make_shape_tuple
from megengine.quantization.fake_quant import TQT_Function
from megengine.quantization.internal_fake_quant import *
from megengine.quantization.utils import QuantMode, fake_quant_tensor
class numpy_TQT_Function:
def __init__(self, lowerbound, upperbound):
super().__init__()
self.lowerbound = lowerbound
self.upperbound = upperbound
def forward(self, inp, scale):
t = 2 ** scale
# t = F.maximum(t, 1e-4)
inp_scaled = inp / t
inp_clipped = np.maximum(
np.minimum(inp_scaled, self.upperbound), self.lowerbound
)
inp_rounded = np.round(inp_clipped)
inp_flq = inp_rounded * t
self.saved_tensors = (inp_scaled, inp_rounded, t)
return inp_flq
def backward(self, grad_inp_flq):
(inp_scaled, inp_rounded, t) = self.saved_tensors
mask_clip = (inp_scaled < -0.5 + self.lowerbound) + (
inp_scaled > self.upperbound + 0.5
) # mask for accumulating the gradients of |data_scaled|>L
mask_quant = np.abs(
mask_clip - 1
) # mask for accumulating the gradients with |data_scaled|<=L
grad_quant = (
grad_inp_flq * mask_quant * (inp_rounded - inp_scaled)
) # gradient within |data_scaled|<=L
grad_clip = (
grad_inp_flq * mask_clip * inp_rounded
) # gradient with | data_scaled|>L
grad_s = grad_clip.sum() + grad_quant.sum()
        # t = 2**scale, so dt/ds = t * ln(2); the chain rule gives dL/ds = dL/dt * t * ln(2)
grad_s = grad_s * t * np.log(2)
grad_inp = grad_inp_flq * mask_quant
return grad_inp, grad_s
def test_TQT():
f = TQT_Function(-127, 127)
nf = numpy_TQT_Function(-127, 127)
def check_inp(a, b, c, a_np, b_np, c_np):
np.testing.assert_allclose(
f.forward(a, b).numpy(),
nf.forward(a_np, b_np).astype("float32"),
rtol=1e-6,
atol=1e-6,
)
c1, c2 = f.backward(c)
c1_np, c2_np = nf.backward(c_np)
np.testing.assert_allclose(c1.numpy(), c1_np.astype("float32"), rtol=1e-6)
np.testing.assert_allclose(c2.numpy(), c2_np.astype("float32"), rtol=5e-5)
a_np = np.random.random((4, 3)).astype("float32")
b_np = np.random.random((1)).astype("float32")
a = tensor(a_np)
b = tensor(b_np)
check_inp(a, b, b, a_np, b_np, b_np)
def _save_to(self, name="grad"):
def callback(tensor, grad):
setattr(self, name, grad)
return callback
class Round(Function):
def forward(self, x):
return F.round(x)
def backward(self, output_grads):
return output_grads
def fake_quant_tensor_gt(inp, scale, zero_point, qmin, qmax):
oup = Round()(inp / scale) + zero_point
oup = F.minimum(F.maximum(oup, qmin), qmax)
oup = (oup - zero_point) * scale
return oup
def test_fakequant():
qmin = -126
qmax = 129
def run(zero_point, scale):
q_dict = {}
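        # NOTE: "ASYMMERTIC" is the actual (misspelled) member name of
        # MegEngine's QuantMode enum, so it is kept verbatim here.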
q_dict["mode"] = QuantMode.ASYMMERTIC
q_dict["scale"] = scale
q_dict["zero_point"] = zero_point
inp_data = np.random.uniform(low=-512.0, high=512.0, size=(1, 32, 32, 32))
inp = tensor(inp_data, dtype=np.float32)
# test forward
oup = fake_quant_tensor(inp, qmin, qmax, q_dict).numpy()
oup_gt = fake_quant_tensor_gt(inp, scale, zero_point, qmin, qmax).numpy()
assert np.allclose(oup, oup_gt)
assert oup.shape == oup_gt.shape
# test backward
x = tensor(inp_data, dtype=np.float32)
grad = Grad().wrt(x, callback=_save_to(x))
y = fake_quant_tensor(x, qmin, qmax, q_dict)
grad(y, tensor(F.ones_like(x)))
x1 = tensor(inp_data, dtype=np.float32)
grad = Grad().wrt(x1, callback=_save_to(x1))
y1 = fake_quant_tensor_gt(x1, scale, zero_point, qmin, qmax)
grad(y1, tensor(F.ones_like(x1)))
assert np.allclose(x.grad.numpy(), x1.grad.numpy())
assert make_shape_tuple(x.grad.shape) == make_shape_tuple(x1.grad.shape)
zero_point = tensor([1.0], dtype=np.float32)
scale = tensor([4.0], dtype=np.float32)
run(zero_point, scale)
zero_point = tensor(1.0 * np.ones((1, 32, 1, 1)), dtype=np.float32)
scale = tensor(4.0 * np.ones((1, 32, 1, 1)), dtype=np.float32)
run(zero_point, scale)
|
[
"megengine.tensor",
"megengine.core.tensor.utils.make_shape_tuple",
"megengine.quantization.utils.fake_quant_tensor",
"megengine.quantization.fake_quant.TQT_Function",
"megengine.core.autodiff.grad.Grad"
] |
[((2261, 2284), 'megengine.quantization.fake_quant.TQT_Function', 'TQT_Function', (['(-127)', '(127)'], {}), '(-127, 127)\n', (2273, 2284), False, 'from megengine.quantization.fake_quant import TQT_Function\n'), ((2906, 2918), 'megengine.tensor', 'tensor', (['a_np'], {}), '(a_np)\n', (2912, 2918), False, 'from megengine import tensor\n'), ((2927, 2939), 'megengine.tensor', 'tensor', (['b_np'], {}), '(b_np)\n', (2933, 2939), False, 'from megengine import tensor\n'), ((4657, 4688), 'megengine.tensor', 'tensor', (['[1.0]'], {'dtype': 'np.float32'}), '([1.0], dtype=np.float32)\n', (4663, 4688), False, 'from megengine import tensor\n'), ((4701, 4732), 'megengine.tensor', 'tensor', (['[4.0]'], {'dtype': 'np.float32'}), '([4.0], dtype=np.float32)\n', (4707, 4732), False, 'from megengine import tensor\n'), ((1238, 1259), 'numpy.round', 'np.round', (['inp_clipped'], {}), '(inp_clipped)\n', (1246, 1259), True, 'import numpy as np\n'), ((1670, 1691), 'numpy.abs', 'np.abs', (['(mask_clip - 1)'], {}), '(mask_clip - 1)\n', (1676, 1691), True, 'import numpy as np\n'), ((3704, 3767), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-512.0)', 'high': '(512.0)', 'size': '(1, 32, 32, 32)'}), '(low=-512.0, high=512.0, size=(1, 32, 32, 32))\n', (3721, 3767), True, 'import numpy as np\n'), ((3782, 3816), 'megengine.tensor', 'tensor', (['inp_data'], {'dtype': 'np.float32'}), '(inp_data, dtype=np.float32)\n', (3788, 3816), False, 'from megengine import tensor\n'), ((4002, 4026), 'numpy.allclose', 'np.allclose', (['oup', 'oup_gt'], {}), '(oup, oup_gt)\n', (4013, 4026), True, 'import numpy as np\n'), ((4105, 4139), 'megengine.tensor', 'tensor', (['inp_data'], {'dtype': 'np.float32'}), '(inp_data, dtype=np.float32)\n', (4111, 4139), False, 'from megengine import tensor\n'), ((4203, 4243), 'megengine.quantization.utils.fake_quant_tensor', 'fake_quant_tensor', (['x', 'qmin', 'qmax', 'q_dict'], {}), '(x, qmin, qmax, q_dict)\n', (4220, 4243), False, 'from megengine.quantization.utils import QuantMode, fake_quant_tensor\n'), ((4298, 4332), 'megengine.tensor', 'tensor', (['inp_data'], {'dtype': 'np.float32'}), '(inp_data, dtype=np.float32)\n', (4304, 4332), False, 'from megengine import tensor\n'), ((1149, 1188), 'numpy.minimum', 'np.minimum', (['inp_scaled', 'self.upperbound'], {}), '(inp_scaled, self.upperbound)\n', (1159, 1188), True, 'import numpy as np\n'), ((2148, 2157), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (2154, 2157), True, 'import numpy as np\n'), ((2804, 2828), 'numpy.random.random', 'np.random.random', (['(4, 3)'], {}), '((4, 3))\n', (2820, 2828), True, 'import numpy as np\n'), ((2858, 2877), 'numpy.random.random', 'np.random.random', (['(1)'], {}), '(1)\n', (2874, 2877), True, 'import numpy as np\n'), ((4573, 4603), 'megengine.core.tensor.utils.make_shape_tuple', 'make_shape_tuple', (['x.grad.shape'], {}), '(x.grad.shape)\n', (4589, 4603), False, 'from megengine.core.tensor.utils import make_shape_tuple\n'), ((4607, 4638), 'megengine.core.tensor.utils.make_shape_tuple', 'make_shape_tuple', (['x1.grad.shape'], {}), '(x1.grad.shape)\n', (4623, 4638), False, 'from megengine.core.tensor.utils import make_shape_tuple\n'), ((4791, 4813), 'numpy.ones', 'np.ones', (['(1, 32, 1, 1)'], {}), '((1, 32, 1, 1))\n', (4798, 4813), True, 'import numpy as np\n'), ((4858, 4880), 'numpy.ones', 'np.ones', (['(1, 32, 1, 1)'], {}), '((1, 32, 1, 1))\n', (4865, 4880), True, 'import numpy as np\n'), ((3854, 3896), 'megengine.quantization.utils.fake_quant_tensor', 'fake_quant_tensor', (['inp', 'qmin', 'qmax', 
'q_dict'], {}), '(inp, qmin, qmax, q_dict)\n', (3871, 3896), False, 'from megengine.quantization.utils import QuantMode, fake_quant_tensor\n'), ((4155, 4161), 'megengine.core.autodiff.grad.Grad', 'Grad', ([], {}), '()\n', (4159, 4161), False, 'from megengine.core.autodiff.grad import Grad\n'), ((4348, 4354), 'megengine.core.autodiff.grad.Grad', 'Grad', ([], {}), '()\n', (4352, 4354), False, 'from megengine.core.autodiff.grad import Grad\n')]
|
import numpy as np
import argparse
from datetime import datetime
import time
import model as resnet_model
import megengine as mge
import megengine.autodiff as ad
import megengine.functional as F
import megengine.optimizer as optim
parser = argparse.ArgumentParser(description="MegEngine ResNet Training")
parser.add_argument(
"-a",
"--arch",
default="resnet50",
help="model architecture (default: resnet50)",
)
parser.add_argument(
"--steps",
default=10,
type=int,
help="number of total steps to run (default: 10)",
)
parser.add_argument(
"-b",
"--batch-size",
metavar="SIZE",
default=64,
type=int,
help="batch size for single GPU (default: 64)",
)
parser.add_argument(
"--enable-dtr",
dest="enable_dtr",
action="store_true",
help="Enable DTR")
parser.add_argument(
"--memory-budget",
dest="mem_budget",
default=5,
type=int,
help="memory budget for DTR, measured in GB (default: 5)",
)
args = parser.parse_args()
if args.enable_dtr:
from megengine.utils.dtr import DTR
ds = DTR(memory_budget=args.mem_budget*1024**3)
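    # Constructing DTR(...) enables dynamic tensor rematerialization: once the
    # budget is exceeded, activations are evicted and recomputed on demand.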
batch_size = args.batch_size
image = mge.tensor(np.random.random((batch_size, 3, 224, 224)))
label = mge.tensor(np.random.randint(100, size=(batch_size,)))
#model = resnet_model.__dict__["resnet50"]()
model = resnet_model.__dict__[args.arch]()
gm = ad.GradManager().attach(model.parameters())
opt = optim.SGD(model.parameters(), lr=0.0125, momentum=0.9, weight_decay=1e-4)
# milliseconds
print(datetime.now().timetz())
time_list = []
cur_time = int(round(time.time()*1000))
for i in range(args.steps):
with gm:
        logits = model(image)
        loss = F.nn.cross_entropy(logits, label)
gm.backward(loss)
total, free = mge.get_mem_status_bytes()
print('iter = {}, used bytes(/MB) = {}'.format(i+1, float(total - free)/1024.0/1024.0))
opt.step().clear_grad()
next_time = int(round(time.time()*1000))
time_list.append(next_time - cur_time)
cur_time = next_time
print("iter = {}, loss = {}".format(i+1, loss.numpy()))
print('average time per iteration: {} ms'.format(np.average(np.array(time_list))))
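# Hedged invocation sketch -- the script file name is an assumption; the flags
# are the ones defined by the argparse setup above:
#   python train_dtr.py -a resnet50 -b 64 --steps 10 --enable-dtr --memory-budget 5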
|
[
"megengine.functional.nn.cross_entropy",
"megengine.get_mem_status_bytes",
"megengine.utils.dtr.DTR",
"megengine.autodiff.GradManager"
] |
[((243, 307), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""MegEngine ResNet Training"""'}), "(description='MegEngine ResNet Training')\n", (266, 307), False, 'import argparse\n'), ((1079, 1125), 'megengine.utils.dtr.DTR', 'DTR', ([], {'memory_budget': '(args.mem_budget * 1024 ** 3)'}), '(memory_budget=args.mem_budget * 1024 ** 3)\n', (1082, 1125), False, 'from megengine.utils.dtr import DTR\n'), ((1171, 1214), 'numpy.random.random', 'np.random.random', (['(batch_size, 3, 224, 224)'], {}), '((batch_size, 3, 224, 224))\n', (1187, 1214), True, 'import numpy as np\n'), ((1235, 1277), 'numpy.random.randint', 'np.random.randint', (['(100)'], {'size': '(batch_size,)'}), '(100, size=(batch_size,))\n', (1252, 1277), True, 'import numpy as np\n'), ((1371, 1387), 'megengine.autodiff.GradManager', 'ad.GradManager', ([], {}), '()\n', (1385, 1387), True, 'import megengine.autodiff as ad\n'), ((1676, 1709), 'megengine.functional.nn.cross_entropy', 'F.nn.cross_entropy', (['logits', 'label'], {}), '(logits, label)\n', (1694, 1709), True, 'import megengine.functional as F\n'), ((1758, 1784), 'megengine.get_mem_status_bytes', 'mge.get_mem_status_bytes', ([], {}), '()\n', (1782, 1784), True, 'import megengine as mge\n'), ((1514, 1528), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1526, 1528), False, 'from datetime import datetime\n'), ((1575, 1586), 'time.time', 'time.time', ([], {}), '()\n', (1584, 1586), False, 'import time\n'), ((2152, 2171), 'numpy.array', 'np.array', (['time_list'], {}), '(time_list)\n', (2160, 2171), True, 'import numpy as np\n'), ((1944, 1955), 'time.time', 'time.time', ([], {}), '()\n', (1953, 1955), False, 'import time\n')]
|
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# pylint: disable=import-error,no-name-in-module,no-member
from typing import List, Union
import megengine as mge
from megengine.traced_module import TracedModule
from ..backend.ir_to_onnx.onnx_converter import OnnxConverter
from ..converter_ir.ir_quantizer import IRQuantizer
from ..converter_ir.ir_transform import IRTransform, TransformerRule
from ..frontend.tm_to_ir import TM_FrontEnd
from ..frontend.tm_to_ir.tm_utils import _update_inputs_qparams
def tracedmodule_to_onnx(
traced_module,
output="out.onnx",
*,
graph_name="graph",
opset=8,
outspec=None,
input_data_type: str = None,
input_scales: Union[float, List[float]] = None,
input_zero_points: Union[int, List[int]] = None,
require_quantize=False,
param_fake_quant=False,
quantize_file_path="quant_params.json",
):
"""
Convert megengine model to ONNX,
and save the ONNX model to file `output`.
    :param traced_module: a TracedModule instance, or the file path of a saved
        traced module.
    :type traced_module: Union[TracedModule, str]
:param output: the filename used for the saved model.
:type output: str
:param graph_name: the name of the ONNX graph.
:type graph_name: str
:param opset: opset version of ONNX model.
:type opset: int
"""
if isinstance(traced_module, str):
traced_module = mge.load(traced_module)
assert isinstance(
traced_module, TracedModule
), "Input should be a traced module or a path of traced module."
_update_inputs_qparams(
traced_module, input_data_type, input_scales, input_zero_points
)
    assert not require_quantize, "ONNX conversion does not support quantized models."
tm_resolver = TM_FrontEnd(traced_module, outspec=outspec)
irgraph = tm_resolver.resolve()
transformer_options = [
TransformerRule.REMOVE_RESHAPE_REALTED_OP,
TransformerRule.REMOVE_UNRELATED_IROP,
TransformerRule.EXPAND_CONVRELU,
]
transformer = IRTransform(transformer_options)
transformed_irgraph = transformer.transform(irgraph)
quantizer = IRQuantizer(
require_quantize=require_quantize, param_fake_quant=param_fake_quant
)
if tm_resolver.has_qat:
quantizer.save_quantize_params(transformed_irgraph)
converter = OnnxConverter(transformed_irgraph, opset, graph_name, quantizer)
model = converter.convert()
if tm_resolver.has_qat:
quantizer.dump_quant_param(path=quantize_file_path)
assert isinstance(output, str), "onnx_fpath must be string"
with open(output, "wb") as fout:
fout.write(model.SerializeToString())
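# Hedged usage sketch -- the traced-module path and opset below are illustrative
# assumptions, not values prescribed by the converter:
#   tracedmodule_to_onnx("model.tm", output="model.onnx", graph_name="net", opset=11)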
|
[
"megengine.load"
] |
[((1616, 1639), 'megengine.load', 'mge.load', (['traced_module'], {}), '(traced_module)\n', (1624, 1639), True, 'import megengine as mge\n')]
|
# Vibroacoustics
#
# E.Rohan, V.Lukeš
# Homogenization of the vibro–acoustic transmission on periodically
# perforated elastic plates with arrays of resonators.
# https://arxiv.org/abs/2104.01367 (arXiv:2104.01367v1)
import os
import numpy as nm
from sfepy.base.base import Struct
from sfepy.homogenization.coefficients import Coefficients
from sfepy.discrete.fem import Mesh, FEDomain
def coefs2qp(out, coefs, nqp):
others = {}
for k, v in coefs.items():
if type(v) is nm.float64:
v = nm.array(v)
if type(v) is not nm.ndarray:
others[k] = v
continue
if k[0] == 's':
out[k] = nm.tile(v, (nqp, 1, 1))
else:
if not(k in out):
out[k] = nm.tile(v, (nqp, 1, 1))
out.update(others)
return out
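# Illustration of the tiling above (made-up coefficient): a (2, 2) matrix is
# replicated to one copy per quadrature point:
#   coefs2qp({}, {'A': nm.eye(2)}, nqp=4)['A'].shape  # -> (4, 2, 2)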
def get_homogmat(coors, mode, pb, coefs_filename, omega=None):
if mode == 'qp':
nqp = coors.shape[0]
outdir = pb.conf.options['output_dir']
cfname = os.path.join(outdir, coefs_filename + '.h5')
out = {}
print('>>> coefs from: ', cfname)
coefs_ = Coefficients.from_file_hdf5(cfname).to_dict()
coefs = {}
if 'omega' in coefs_ and omega is not None:
idx = (nm.abs(coefs_['omega'] - omega)).argmin()
rerr = nm.abs(coefs_['omega'][idx] - omega) / omega
if rerr > 1e-3:
raise ValueError('omega: given=%e, found=%e'
% (omega, coefs_['omega'][idx]))
            print('found coefficients for w=%e' % coefs_['omega'][idx])
del(coefs_['omega'])
else:
idx = 4 # magic index?
for k, v in coefs_.items():
if isinstance(v, nm.ndarray) and len(v.shape) == 3:
coefs[k] = v[idx, ...]
else:
coefs[k] = v
coefs2qp(out, coefs, nqp)
transpose = [k for k, v in out.items()
if type(v) == nm.ndarray and (v.shape[-1] > v.shape[-2])]
for k in transpose:
out[k] = out[k].transpose((0, 2, 1))
return out
def read_dict_hdf5(filename, level=0, group=None, fd=None):
import tables as pt
out = {}
if level == 0:
# fd = pt.openFile(filename, mode='r')
fd = pt.open_file(filename, mode='r')
group = fd.root
for name, gr in group._v_groups.items():
name = name.replace('_', '', 1)
out[name] = read_dict_hdf5(filename, level + 1, gr, fd)
for name, data in group._v_leaves.items():
name = name.replace('_', '', 1)
out[name] = data.read()
if level == 0:
fd.close()
return out
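# Hedged usage sketch (the file name is assumed): recursively load an HDF5 file
# into nested dicts, with one leading underscore stripped from each name:
#   data = read_dict_hdf5('plate_results.h5')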
def eval_phi(pb, state_p1, state_p2, p_inc):
pvars = pb.create_variables(['P1', 'P2'])
    # transmission loss: 10 * log10(|p_in|^2 / |p_out|^2)
pvars['P2'].set_data(nm.ones_like(state_p2) * p_inc**2)
phi_In = pb.evaluate('ev_surface_integrate.5.GammaIn(P2)',
P2=pvars['P2'])
pvars['P1'].set_data(state_p1**2)
phi_Out = pb.evaluate('ev_surface_integrate.5.GammaOut(P1)',
P1=pvars['P1'])
return 10.0 * nm.log10(nm.absolute(phi_In) / nm.absolute(phi_Out))
def post_process(out, pb, state, save_var0='p0'):
rmap = {'g01': 0, 'g02': 0, 'g0': 0, 'dp0': 0, 'sp0': 0, 'p0': 0,
'px': 1, 'p1': 1, 'p2': 2}
for k in out.keys():
if 'real_' in k or 'imag_' in k:
newk = k[:4] + '.' + k[5:]
out[newk] = out[k]
del(out[k])
midfn = pb.conf.filename_mesh_plate
fname, _ = os.path.splitext(os.path.basename(midfn))
fname = os.path.join(pb.output_dir, fname + '.h5')
aux = []
for k, v in read_dict_hdf5(fname)['step0'].items():
if ('real' in k) or ('imag' in k):
aux.append(k)
vn = k.strip('_').split('_')
key = '%s.%s' % tuple(vn)
if key not in out:
out[key] = Struct(name=v['name'].decode('ascii'),
mode=v['mode'].decode('ascii'),
dofs=[j.decode('ascii') for j in v['dofs']],
var_name=v['varname'].decode('ascii'),
shape=v['shape'],
data=v['data'],
dname=v['dname'])
if 'imag' in k:
rmap[vn[1]] = 0
absvars = [ii[4:] for ii in out.keys() if ii[0:4] == 'imag']
for ii in absvars:
if type(out['real' + ii]) is dict:
rpart = out.pop('real' + ii)
rdata = rpart['data']
ipart = out.pop('imag' + ii)
idata = ipart['data']
dim = rdata.shape[1]
varname = save_var0
if dim > 1:
aux = nm.zeros((rdata.shape[0], 1), dtype=nm.float64)
data = rdata if dim < 2 else nm.hstack((rdata, aux))
out['real' + ii] = Struct(name=rpart['name'],
mode=rpart['mode'],
dofs=rpart['dofs'],
var_name=varname,
data=data.copy())
data = idata if dim < 2 else nm.hstack((idata, aux))
out['imag' + ii] = Struct(name=ipart['name'],
mode=ipart['mode'],
dofs=ipart['dofs'],
var_name=varname,
data=data.copy())
else:
rpart = out['real' + ii].__dict__
rdata = rpart['data']
ipart = out['imag' + ii].__dict__
idata = ipart['data']
varname = rpart['var_name']
absval = nm.absolute(rdata + 1j*idata)
if rdata.shape[1] > 1:
aux = nm.zeros((rpart['data'].shape[0], 1), dtype=nm.float64)
absval = nm.hstack((absval, aux))
out[ii[1:]] = Struct(name=rpart['name'],
mode=rpart['mode'],
dofs=rpart['dofs'],
var_name=varname,
data=absval.copy())
# all plate variables as save_var0
for k in out.keys():
k0 = k.replace('imag.', '').replace('real.', '')
if rmap[k0] == 0:
out[k].var_name = save_var0
return out
def get_region_entities(rvar, noff=0):
reg = rvar.field.region
mesh = reg.domain.mesh
rnodes = reg.entities[0]
coors = mesh.coors
ngrp = mesh.cmesh.vertex_groups.squeeze()
descs = mesh.descs[0]
rcells = reg.entities[-1]
rconn = mesh.get_conn(descs)[rcells]
mat_ids = mesh.cmesh.cell_groups[rcells]
remap = -nm.ones((nm.max(rnodes) + 1,), dtype=nm.int64)
remap[rnodes] = nm.arange(rnodes.shape[0]) + noff
rconn = remap[rconn]
nmap = nm.where(remap >= 0)[0]
return coors[rnodes, :], ngrp[rnodes], rconn, mat_ids, descs, nmap
def generate_plate_mesh(fname):
dim_tab = {'3_4': '2_3', '3_8': '2_4'}
mesh3d = Mesh.from_file(fname)
domain = FEDomain('domain', mesh3d)
domain.create_region('Omega1', 'cells of group 1')
domain.create_region('Omega2', 'cells of group 2')
gamma0 = domain.create_region('Gamma0', 'r.Omega1 *v r.Omega2', 'facet')
cmesh = mesh3d.cmesh
cmesh.setup_connectivity(2, 0)
fcnd = cmesh.get_conn(2, 0)
fcidxs = gamma0.entities[2]
fcconn = []
for ii in fcidxs:
fcconn.append(fcnd.indices[fcnd.offsets[ii]:fcnd.offsets[ii + 1]])
fcconn = nm.array(fcconn)
remap = nm.zeros((nm.max(fcconn) + 1,), dtype=nm.int32)
remap[fcconn] = 1
ndidxs = nm.where(remap > 0)[0]
remap[ndidxs] = nm.arange(len(ndidxs))
coors2 = domain.mesh.coors[ndidxs, :]
conn2 = remap[fcconn]
ngrps2 = nm.ones((coors2.shape[0],))
mids2 = nm.ones((conn2.shape[0],))
midfn = fname[:-4] + '_plate.vtk'
mesh2d = Mesh.from_data('2d plate', coors2, ngrps2, [conn2], [mids2],
[dim_tab[mesh3d.descs[0]]])
mesh2d.write(midfn)
return midfn
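# Hedged usage sketch -- 'cube.vtk' is an assumed 3D mesh with cell groups 1 and
# 2; the interface Gamma0 between them is written out as 'cube_plate.vtk':
#   midfn = generate_plate_mesh('cube.vtk')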
|
[
"sfepy.homogenization.coefficients.Coefficients.from_file_hdf5",
"sfepy.discrete.fem.Mesh.from_data",
"sfepy.discrete.fem.Mesh.from_file",
"sfepy.discrete.fem.FEDomain"
] |
[((3633, 3675), 'os.path.join', 'os.path.join', (['pb.output_dir', "(fname + '.h5')"], {}), "(pb.output_dir, fname + '.h5')\n", (3645, 3675), False, 'import os\n'), ((7126, 7147), 'sfepy.discrete.fem.Mesh.from_file', 'Mesh.from_file', (['fname'], {}), '(fname)\n', (7140, 7147), False, 'from sfepy.discrete.fem import Mesh, FEDomain\n'), ((7161, 7187), 'sfepy.discrete.fem.FEDomain', 'FEDomain', (['"""domain"""', 'mesh3d'], {}), "('domain', mesh3d)\n", (7169, 7187), False, 'from sfepy.discrete.fem import Mesh, FEDomain\n'), ((7625, 7641), 'numpy.array', 'nm.array', (['fcconn'], {}), '(fcconn)\n', (7633, 7641), True, 'import numpy as nm\n'), ((7885, 7912), 'numpy.ones', 'nm.ones', (['(coors2.shape[0],)'], {}), '((coors2.shape[0],))\n', (7892, 7912), True, 'import numpy as nm\n'), ((7925, 7951), 'numpy.ones', 'nm.ones', (['(conn2.shape[0],)'], {}), '((conn2.shape[0],))\n', (7932, 7951), True, 'import numpy as nm\n'), ((8004, 8097), 'sfepy.discrete.fem.Mesh.from_data', 'Mesh.from_data', (['"""2d plate"""', 'coors2', 'ngrps2', '[conn2]', '[mids2]', '[dim_tab[mesh3d.descs[0]]]'], {}), "('2d plate', coors2, ngrps2, [conn2], [mids2], [dim_tab[\n mesh3d.descs[0]]])\n", (8018, 8097), False, 'from sfepy.discrete.fem import Mesh, FEDomain\n'), ((999, 1043), 'os.path.join', 'os.path.join', (['outdir', "(coefs_filename + '.h5')"], {}), "(outdir, coefs_filename + '.h5')\n", (1011, 1043), False, 'import os\n'), ((2287, 2319), 'tables.open_file', 'pt.open_file', (['filename'], {'mode': '"""r"""'}), "(filename, mode='r')\n", (2299, 2319), True, 'import tables as pt\n'), ((3596, 3619), 'os.path.basename', 'os.path.basename', (['midfn'], {}), '(midfn)\n', (3612, 3619), False, 'import os\n'), ((5821, 5854), 'numpy.absolute', 'nm.absolute', (['(rdata + 1.0j * idata)'], {}), '(rdata + 1.0j * idata)\n', (5832, 5854), True, 'import numpy as nm\n'), ((6868, 6894), 'numpy.arange', 'nm.arange', (['rnodes.shape[0]'], {}), '(rnodes.shape[0])\n', (6877, 6894), True, 'import numpy as nm\n'), ((6939, 6959), 'numpy.where', 'nm.where', (['(remap >= 0)'], {}), '(remap >= 0)\n', (6947, 6959), True, 'import numpy as nm\n'), ((7738, 7757), 'numpy.where', 'nm.where', (['(remap > 0)'], {}), '(remap > 0)\n', (7746, 7757), True, 'import numpy as nm\n'), ((517, 528), 'numpy.array', 'nm.array', (['v'], {}), '(v)\n', (525, 528), True, 'import numpy as nm\n'), ((661, 684), 'numpy.tile', 'nm.tile', (['v', '(nqp, 1, 1)'], {}), '(v, (nqp, 1, 1))\n', (668, 684), True, 'import numpy as nm\n'), ((2848, 2870), 'numpy.ones_like', 'nm.ones_like', (['state_p2'], {}), '(state_p2)\n', (2860, 2870), True, 'import numpy as nm\n'), ((5900, 5955), 'numpy.zeros', 'nm.zeros', (["(rpart['data'].shape[0], 1)"], {'dtype': 'nm.float64'}), "((rpart['data'].shape[0], 1), dtype=nm.float64)\n", (5908, 5955), True, 'import numpy as nm\n'), ((5977, 6001), 'numpy.hstack', 'nm.hstack', (['(absval, aux)'], {}), '((absval, aux))\n', (5986, 6001), True, 'import numpy as nm\n'), ((755, 778), 'numpy.tile', 'nm.tile', (['v', '(nqp, 1, 1)'], {}), '(v, (nqp, 1, 1))\n', (762, 778), True, 'import numpy as nm\n'), ((1121, 1156), 'sfepy.homogenization.coefficients.Coefficients.from_file_hdf5', 'Coefficients.from_file_hdf5', (['cfname'], {}), '(cfname)\n', (1148, 1156), False, 'from sfepy.homogenization.coefficients import Coefficients\n'), ((1319, 1355), 'numpy.abs', 'nm.abs', (["(coefs_['omega'][idx] - omega)"], {}), "(coefs_['omega'][idx] - omega)\n", (1325, 1355), True, 'import numpy as nm\n'), ((3157, 3176), 'numpy.absolute', 'nm.absolute', (['phi_In'], {}), '(phi_In)\n', (3168, 3176), True, 'import numpy as nm\n'), ((3179, 3199), 'numpy.absolute', 'nm.absolute', (['phi_Out'], {}), '(phi_Out)\n', (3190, 3199), True, 'import numpy as nm\n'), ((4837, 4884), 'numpy.zeros', 'nm.zeros', (['(rdata.shape[0], 1)'], {'dtype': 'nm.float64'}), '((rdata.shape[0], 1), dtype=nm.float64)\n', (4845, 4884), True, 'import numpy as nm\n'), ((4927, 4950), 'numpy.hstack', 'nm.hstack', (['(rdata, aux)'], {}), '((rdata, aux))\n', (4936, 4950), True, 'import numpy as nm\n'), ((5278, 5301), 'numpy.hstack', 'nm.hstack', (['(idata, aux)'], {}), '((idata, aux))\n', (5287, 5301), True, 'import numpy as nm\n'), ((7665, 7679), 'numpy.max', 'nm.max', (['fcconn'], {}), '(fcconn)\n', (7671, 7679), True, 'import numpy as nm\n'), ((1258, 1289), 'numpy.abs', 'nm.abs', (["(coefs_['omega'] - omega)"], {}), "(coefs_['omega'] - omega)\n", (1264, 1289), True, 'import numpy as nm\n'), ((6810, 6824), 'numpy.max', 'nm.max', (['rnodes'], {}), '(rnodes)\n', (6816, 6824), True, 'import numpy as nm\n')]
|
from typing import Type
from sqlmodel import select, Session
from ..db import engine
from ..models.base import TSQLModelDB
class BaseRepository:
model: Type[TSQLModelDB]
@classmethod
def create(cls, **kwargs) -> TSQLModelDB:
db_model = cls.model(**kwargs)
db_model.save()
return db_model
@classmethod
def get_all(cls, offset: int = 0, limit: int = 100) -> list[TSQLModelDB] | None:
with Session(engine) as session:
return session.exec(select(cls.model)
.offset(offset)
.limit(limit)
).unique().all()
@classmethod
def get_model_by_id(cls, _id: int) -> TSQLModelDB | None:
with Session(engine) as session:
return session.get(cls.model, _id)
@classmethod
def get_model_by_attr(cls, **kwargs) -> TSQLModelDB | None:
with Session(engine) as session:
return session.exec(select(cls.model)
.filter_by(**kwargs)
).first()
@classmethod
def update_model(cls, db_model: TSQLModelDB, new_data: dict) -> TSQLModelDB:
for key, value in new_data.items():
setattr(db_model, key, value)
db_model.save()
return db_model
@classmethod
def delete_model(cls, db_model: TSQLModelDB) -> None:
db_model.delete()
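# Hedged usage sketch -- `Hero` is a hypothetical SQLModel table class whose
# base model provides the save()/delete() methods used above:
#   class HeroRepository(BaseRepository):
#       model = Hero
#   hero = HeroRepository.create(name="alice")
#   found = HeroRepository.get_model_by_attr(name="alice")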
|
[
"sqlmodel.Session",
"sqlmodel.select"
] |
[((445, 460), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (452, 460), False, 'from sqlmodel import select, Session\n'), ((759, 774), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (766, 774), False, 'from sqlmodel import select, Session\n'), ((929, 944), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (936, 944), False, 'from sqlmodel import select, Session\n'), ((989, 1006), 'sqlmodel.select', 'select', (['cls.model'], {}), '(cls.model)\n', (995, 1006), False, 'from sqlmodel import select, Session\n'), ((505, 522), 'sqlmodel.select', 'select', (['cls.model'], {}), '(cls.model)\n', (511, 522), False, 'from sqlmodel import select, Session\n')]
|
import os
import sys
import pytest
from megengine.core._imperative_rt.imperative import sync
sys.path.append(os.path.join(os.path.dirname(__file__), "helpers"))
def pytest_runtest_teardown():
sync()
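# Note (interpretation, not from the original source): syncing the imperative
# runtime after each test flushes pending async work, so an error is reported
# in the test that caused it rather than in a later one.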
|
[
"megengine.core._imperative_rt.imperative.sync"
] |
[((201, 207), 'megengine.core._imperative_rt.imperative.sync', 'sync', ([], {}), '()\n', (205, 207), False, 'from megengine.core._imperative_rt.imperative import sync\n'), ((125, 150), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (140, 150), False, 'import os\n')]
|
"""Issuer Database Tables/Models.
Models of the Traction tables for Issuer and related data.
"""
import uuid
from datetime import datetime
from typing import List, Optional
from sqlalchemy.orm import selectinload
from sqlmodel import Field, Relationship
from sqlalchemy import (
Column,
func,
String,
select,
desc,
JSON,
text,
)
from sqlalchemy.dialects.postgresql import UUID, TIMESTAMP, ARRAY
from sqlmodel.ext.asyncio.session import AsyncSession
from api.db.models.base import BaseModel
from api.db.models.v1.contact import Contact
from api.db.models.v1.governance import CredentialTemplate
from api.endpoints.models.v1.errors import (
NotFoundError,
)
class IssuerCredential(BaseModel, table=True):
"""Issuer Credential.
Model for the Issuer Credential table (postgresql specific dialects in use).
This will track Issuer Credentials for the Tenants.
Attributes:
issuer_credential_id: Traction ID for issuer credential
credential_template_id: Traction Credential Template ID
contact_id: Traction Contact ID
cred_def_id: Credential Definition ID (ledger)
tenant_id: Traction Tenant ID
status: Business and Tenant indicator for Credential state; independent of AcaPy
Credential Exchange state
external_reference_id: Set by tenant to correlate this Credential with entity in
external system
revoked: when True, this credential has been revoked
deleted: Issuer Credential "soft" delete indicator.
preview_persisted: when True, store the credential attributes and preview
tags: Set by tenant for arbitrary grouping of Credentials
comment: Comment supplied when issuing
credential_preview: attributes (list of name / values ) for offered/issued cred.
This will be empty once offer is made and preview_persisted = False.
revocation_comment: comment entered when revoking Credential
state: The underlying AcaPy credential exchange state
thread_id: AcaPy thread id
credential_exchange_id: AcaPy id for the credential exchange
revoc_reg_id: revocation registry id (needed for revocation)
revocation_id: credential revocation id (needed for revocation)
created_at: Timestamp when record was created in Traction
updated_at: Timestamp when record was last modified in Traction
"""
__tablename__ = "issuer_credential"
issuer_credential_id: uuid.UUID = Field(
sa_column=Column(
UUID(as_uuid=True),
primary_key=True,
server_default=text("gen_random_uuid()"),
)
)
credential_template_id: uuid.UUID = Field(
foreign_key="credential_template.credential_template_id", index=True
)
tenant_id: uuid.UUID = Field(foreign_key="tenant.id", index=True)
contact_id: uuid.UUID = Field(foreign_key="contact.contact_id", index=True)
status: str = Field(nullable=False)
external_reference_id: str = Field(nullable=True)
revoked: bool = Field(nullable=False, default=False)
deleted: bool = Field(nullable=False, default=False)
tags: List[str] = Field(sa_column=Column(ARRAY(String)))
preview_persisted: bool = Field(nullable=False, default=False)
comment: str = Field(nullable=True)
revocation_comment: str = Field(nullable=True)
# acapy data ---
state: str = Field(nullable=False)
cred_def_id: str = Field(nullable=False, index=True)
thread_id: str = Field(nullable=True)
credential_exchange_id: str = Field(nullable=True)
revoc_reg_id: str = Field(nullable=True)
revocation_id: str = Field(nullable=True)
credential_preview: dict = Field(default={}, sa_column=Column(JSON))
# --- acapy data
# relationships ---
contact: Optional[Contact] = Relationship(back_populates="issuer_credentials")
credential_template: Optional[CredentialTemplate] = Relationship(
back_populates="issuer_credentials"
)
# --- relationships
created_at: datetime = Field(
sa_column=Column(TIMESTAMP, nullable=False, server_default=func.now())
)
updated_at: datetime = Field(
sa_column=Column(
TIMESTAMP, nullable=False, server_default=func.now(), onupdate=func.now()
)
)
@classmethod
async def get_by_id(
cls: "IssuerCredential",
db: AsyncSession,
tenant_id: uuid.UUID,
issuer_credential_id: uuid.UUID,
deleted: bool | None = False,
) -> "IssuerCredential":
"""Get IssuerCredential by id.
Find and return the database CredentialDefinition record
Args:
db: database session
tenant_id: Traction ID of tenant making the call
issuer_credential_id: Traction ID of IssuerCredential
Returns: The Traction IssuerCredential (db) record
Raises:
NotFoundError: if the IssuerCredential cannot be found by ID and deleted
flag
"""
q = (
select(cls)
.where(cls.tenant_id == tenant_id)
.where(cls.issuer_credential_id == issuer_credential_id)
.where(cls.deleted == deleted)
.options(selectinload(cls.contact), selectinload(cls.credential_template))
)
q_result = await db.execute(q)
db_rec = q_result.scalar_one_or_none()
if not db_rec:
raise NotFoundError(
code="issuer_credential.id_not_found",
title="Issuer Credential does not exist",
detail=f"Issuer Credential does not exist for id<{issuer_credential_id}>", # noqa: E501
)
return db_rec
@classmethod
async def get_by_credential_exchange_id(
cls: "IssuerCredential",
db: AsyncSession,
tenant_id: uuid.UUID,
credential_exchange_id: str,
) -> "IssuerCredential":
"""Get IssuerCredential by Credential Exchange ID.
Find and return the database IssuerCredential record
Args:
db: database session
tenant_id: Traction ID of tenant making the call
credential_exchange_id: acapy message Credential Exchange ID
Returns: The Traction IssuerCredential (db) record
Raises:
NotFoundError: if the IssuerCredential cannot be found by ID and deleted
flag
"""
q = (
select(cls)
.where(cls.tenant_id == tenant_id)
.where(cls.credential_exchange_id == credential_exchange_id)
.options(selectinload(cls.contact), selectinload(cls.credential_template))
)
q_result = await db.execute(q)
db_rec = q_result.scalar_one_or_none()
if not db_rec:
raise NotFoundError(
code="issuer_credential.credential_exchange_id_not_found",
title="Issuer Credential does not exist",
detail=f"Issuer Credential does not exist for credential exchange id<{credential_exchange_id}>", # noqa: E501
)
return db_rec
@classmethod
async def list_by_credential_template_id(
cls: "IssuerCredential",
db: AsyncSession,
tenant_id: uuid.UUID,
credential_template_id: uuid.UUID,
) -> List["IssuerCredential"]:
"""List by Credential Template ID.
Find and return list of Issuer Credential records for Credential Template.
tenant_id: Traction ID of tenant making the call
credential_template_id: Traction ID of Credential Template
Returns: List of Traction IssuerCredential (db) records in descending order
"""
q = (
select(cls)
.where(cls.credential_template_id == credential_template_id)
.where(cls.tenant_id == tenant_id)
.options(selectinload(cls.contact), selectinload(cls.credential_template))
.order_by(desc(cls.updated_at))
)
q_result = await db.execute(q)
db_recs = q_result.scalars()
return db_recs
@classmethod
async def list_by_cred_def_id(
cls: "IssuerCredential",
db: AsyncSession,
tenant_id: uuid.UUID,
cred_def_id: str,
) -> List["IssuerCredential"]:
"""List by Cred Def ID.
Find and return list of Issuer Credential records for Cred. Def.
tenant_id: Traction ID of tenant making the call
cred_def_id: Traction ID of Credential Definition
Returns: List of Traction IssuerCredential (db) records in descending order
"""
q = (
select(cls)
.where(cls.cred_def_id == cred_def_id)
.where(cls.tenant_id == tenant_id)
.options(selectinload(cls.contact), selectinload(cls.credential_template))
.order_by(desc(cls.updated_at))
)
q_result = await db.execute(q)
db_recs = q_result.scalars()
return db_recs
@classmethod
async def list_by_contact_id(
cls: "IssuerCredential",
db: AsyncSession,
tenant_id: uuid.UUID,
contact_id: uuid.UUID,
) -> List["IssuerCredential"]:
"""List by Contact ID.
Find and return list of Issuer Credential records for Contact.
tenant_id: Traction ID of tenant making the call
contact_id: Traction ID of Contact
Returns: List of Traction IssuerCredential (db) records in descending order
"""
q = (
select(cls)
.where(cls.contact_id == contact_id)
.where(cls.tenant_id == tenant_id)
.options(selectinload(cls.contact), selectinload(cls.credential_template))
.order_by(desc(cls.updated_at))
)
q_result = await db.execute(q)
db_recs = q_result.scalars()
return db_recs
@classmethod
async def list_by_tenant_id(
cls: "IssuerCredential",
db: AsyncSession,
tenant_id: uuid.UUID,
) -> List["IssuerCredential"]:
"""List by Tenant ID.
Find and return list of Issuer Credential records for Tenant.
tenant_id: Traction ID of tenant making the call
Returns: List of Traction Issuer Credential (db) records in descending order
"""
q = (
select(cls)
.where(cls.tenant_id == tenant_id)
.options(selectinload(cls.contact), selectinload(cls.credential_template))
.order_by(desc(cls.updated_at))
)
q_result = await db.execute(q)
db_recs = q_result.scalars()
return db_recs
@classmethod
async def list_by_thread_id(
cls: "IssuerCredential",
db: AsyncSession,
tenant_id: uuid.UUID,
thread_id: str,
) -> List["IssuerCredential"]:
"""List by Thread ID.
Find and return list of Issuer Credential records for Thread ID.
tenant_id: Traction ID of tenant making the call
thread_id: AcaPy Thread ID of Issuer Credential
Returns: List of Traction IssuerCredential (db) records in descending order
"""
q = (
select(cls)
.where(cls.thread_id == thread_id)
.where(cls.tenant_id == tenant_id)
.options(selectinload(cls.contact), selectinload(cls.credential_template))
.order_by(desc(cls.updated_at))
)
q_result = await db.execute(q)
db_recs = q_result.scalars()
return db_recs
class IssuerCredentialTimeline(BaseModel, table=True):
"""Issuer Credential Timeline.
Model for Issuer Credential Timeline table (postgresql specific dialects in use).
Timeline represents history of changes to status and/or state.
Attributes:
issuer_credential_timeline_id: Unique ID in table
issuer_credential_id: Traction Issuer Credential ID
status: Business and Tenant indicator for Issuer Credential state; independent of
AcaPy Credential State
state: The underlying AcaPy Credential state
created_at: Timestamp when record was created in Traction
"""
__tablename__ = "issuer_credential_timeline"
issuer_credential_timeline_id: uuid.UUID = Field(
sa_column=Column(
UUID(as_uuid=True),
primary_key=True,
server_default=text("gen_random_uuid()"),
)
)
issuer_credential_id: uuid.UUID = Field(
foreign_key="issuer_credential.issuer_credential_id", index=True
)
status: str = Field(nullable=False)
state: str = Field(nullable=False)
created_at: datetime = Field(
sa_column=Column(TIMESTAMP, nullable=False, server_default=func.now())
)
@classmethod
async def list_by_issuer_credential_id(
cls: "IssuerCredentialTimeline",
db: AsyncSession,
issuer_credential_id: UUID,
) -> List:
"""List by Issuer Credential ID.
Find and return list of Timeline records for Issuer Credential.
Args:
db: database session
issuer_credential_id: Traction ID of Issuer Credential
Returns: List of Traction Issuer Credential Timeline (db) records in descending
order
"""
q = (
select(cls)
.where(cls.issuer_credential_id == issuer_credential_id)
.order_by(desc(cls.created_at))
)
q_result = await db.execute(q)
db_items = q_result.scalars()
return db_items
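# Hedged usage sketch inside an async endpoint -- `db` is an AsyncSession and
# the UUID value is assumed:
#   timeline = await IssuerCredentialTimeline.list_by_issuer_credential_id(
#       db, issuer_credential_id
#   )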
|
[
"sqlmodel.Relationship",
"sqlmodel.Field"
] |
[((2667, 2742), 'sqlmodel.Field', 'Field', ([], {'foreign_key': '"""credential_template.credential_template_id"""', 'index': '(True)'}), "(foreign_key='credential_template.credential_template_id', index=True)\n", (2672, 2742), False, 'from sqlmodel import Field, Relationship\n'), ((2784, 2826), 'sqlmodel.Field', 'Field', ([], {'foreign_key': '"""tenant.id"""', 'index': '(True)'}), "(foreign_key='tenant.id', index=True)\n", (2789, 2826), False, 'from sqlmodel import Field, Relationship\n'), ((2855, 2906), 'sqlmodel.Field', 'Field', ([], {'foreign_key': '"""contact.contact_id"""', 'index': '(True)'}), "(foreign_key='contact.contact_id', index=True)\n", (2860, 2906), False, 'from sqlmodel import Field, Relationship\n'), ((2925, 2946), 'sqlmodel.Field', 'Field', ([], {'nullable': '(False)'}), '(nullable=False)\n', (2930, 2946), False, 'from sqlmodel import Field, Relationship\n'), ((2980, 3000), 'sqlmodel.Field', 'Field', ([], {'nullable': '(True)'}), '(nullable=True)\n', (2985, 3000), False, 'from sqlmodel import Field, Relationship\n'), ((3021, 3057), 'sqlmodel.Field', 'Field', ([], {'nullable': '(False)', 'default': '(False)'}), '(nullable=False, default=False)\n', (3026, 3057), False, 'from sqlmodel import Field, Relationship\n'), ((3078, 3114), 'sqlmodel.Field', 'Field', ([], {'nullable': '(False)', 'default': '(False)'}), '(nullable=False, default=False)\n', (3083, 3114), False, 'from sqlmodel import Field, Relationship\n'), ((3206, 3242), 'sqlmodel.Field', 'Field', ([], {'nullable': '(False)', 'default': '(False)'}), '(nullable=False, default=False)\n', (3211, 3242), False, 'from sqlmodel import Field, Relationship\n'), ((3263, 3283), 'sqlmodel.Field', 'Field', ([], {'nullable': '(True)'}), '(nullable=True)\n', (3268, 3283), False, 'from sqlmodel import Field, Relationship\n'), ((3314, 3334), 'sqlmodel.Field', 'Field', ([], {'nullable': '(True)'}), '(nullable=True)\n', (3319, 3334), False, 'from sqlmodel import Field, Relationship\n'), ((3374, 3395), 'sqlmodel.Field', 'Field', ([], {'nullable': '(False)'}), '(nullable=False)\n', (3379, 3395), False, 'from sqlmodel import Field, Relationship\n'), ((3419, 3452), 'sqlmodel.Field', 'Field', ([], {'nullable': '(False)', 'index': '(True)'}), '(nullable=False, index=True)\n', (3424, 3452), False, 'from sqlmodel import Field, Relationship\n'), ((3474, 3494), 'sqlmodel.Field', 'Field', ([], {'nullable': '(True)'}), '(nullable=True)\n', (3479, 3494), False, 'from sqlmodel import Field, Relationship\n'), ((3529, 3549), 'sqlmodel.Field', 'Field', ([], {'nullable': '(True)'}), '(nullable=True)\n', (3534, 3549), False, 'from sqlmodel import Field, Relationship\n'), ((3574, 3594), 'sqlmodel.Field', 'Field', ([], {'nullable': '(True)'}), '(nullable=True)\n', (3579, 3594), False, 'from sqlmodel import Field, Relationship\n'), ((3620, 3640), 'sqlmodel.Field', 'Field', ([], {'nullable': '(True)'}), '(nullable=True)\n', (3625, 3640), False, 'from sqlmodel import Field, Relationship\n'), ((3793, 3842), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""issuer_credentials"""'}), "(back_populates='issuer_credentials')\n", (3805, 3842), False, 'from sqlmodel import Field, Relationship\n'), ((3899, 3948), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""issuer_credentials"""'}), "(back_populates='issuer_credentials')\n", (3911, 3948), False, 'from sqlmodel import Field, Relationship\n'), ((12401, 12472), 'sqlmodel.Field', 'Field', ([], {'foreign_key': '"""issuer_credential.issuer_credential_id"""', 'index': '(True)'}), "(foreign_key='issuer_credential.issuer_credential_id', index=True)\n", (12406, 12472), False, 'from sqlmodel import Field, Relationship\n'), ((12506, 12527), 'sqlmodel.Field', 'Field', ([], {'nullable': '(False)'}), '(nullable=False)\n', (12511, 12527), False, 'from sqlmodel import Field, Relationship\n'), ((12545, 12566), 'sqlmodel.Field', 'Field', ([], {'nullable': '(False)'}), '(nullable=False)\n', (12550, 12566), False, 'from sqlmodel import Field, Relationship\n'), ((3700, 3712), 'sqlalchemy.Column', 'Column', (['JSON'], {}), '(JSON)\n', (3706, 3712), False, 'from sqlalchemy import Column, func, String, select, desc, JSON, text\n'), ((5189, 5214), 'sqlalchemy.orm.selectinload', 'selectinload', (['cls.contact'], {}), '(cls.contact)\n', (5201, 5214), False, 'from sqlalchemy.orm import selectinload\n'), ((5216, 5253), 'sqlalchemy.orm.selectinload', 'selectinload', (['cls.credential_template'], {}), '(cls.credential_template)\n', (5228, 5253), False, 'from sqlalchemy.orm import selectinload\n'), ((5392, 5571), 'api.endpoints.models.v1.errors.NotFoundError', 'NotFoundError', ([], {'code': '"""issuer_credential.id_not_found"""', 'title': '"""Issuer Credential does not exist"""', 'detail': 'f"""Issuer Credential does not exist for id<{issuer_credential_id}>"""'}), "(code='issuer_credential.id_not_found', title=\n 'Issuer Credential does not exist', detail=\n f'Issuer Credential does not exist for id<{issuer_credential_id}>')\n", (5405, 5571), False, 'from api.endpoints.models.v1.errors import NotFoundError\n'), ((6543, 6568), 'sqlalchemy.orm.selectinload', 'selectinload', (['cls.contact'], {}), '(cls.contact)\n', (6555, 6568), False, 'from sqlalchemy.orm import selectinload\n'), ((6570, 6607), 'sqlalchemy.orm.selectinload', 'selectinload', (['cls.credential_template'], {}), '(cls.credential_template)\n', (6582, 6607), False, 'from sqlalchemy.orm import selectinload\n'), ((6746, 6971), 'api.endpoints.models.v1.errors.NotFoundError', 'NotFoundError', ([], {'code': '"""issuer_credential.credential_exchange_id_not_found"""', 'title': '"""Issuer Credential does not exist"""', 'detail': 'f"""Issuer Credential does not exist for credential exchange id<{credential_exchange_id}>"""'}), "(code='issuer_credential.credential_exchange_id_not_found',\n title='Issuer Credential does not exist', detail=\n f'Issuer Credential does not exist for credential exchange id<{credential_exchange_id}>'\n )\n", (6759, 6971), False, 'from api.endpoints.models.v1.errors import NotFoundError\n'), ((7909, 7929), 'sqlalchemy.desc', 'desc', (['cls.updated_at'], {}), '(cls.updated_at)\n', (7913, 7929), False, 'from sqlalchemy import Column, func, String, select, desc, JSON, text\n'), ((8812, 8832), 'sqlalchemy.desc', 'desc', (['cls.updated_at'], {}), '(cls.updated_at)\n', (8816, 8832), False, 'from sqlalchemy import Column, func, String, select, desc, JSON, text\n'), ((9699, 9719), 'sqlalchemy.desc', 'desc', (['cls.updated_at'], {}), '(cls.updated_at)\n', (9703, 9719), False, 'from sqlalchemy import Column, func, String, select, desc, JSON, text\n'), ((10459, 10479), 'sqlalchemy.desc', 'desc', (['cls.updated_at'], {}), '(cls.updated_at)\n', (10463, 10479), False, 'from sqlalchemy import Column, func, String, select, desc, JSON, text\n'), ((11350, 11370), 'sqlalchemy.desc', 'desc', (['cls.updated_at'], {}), '(cls.updated_at)\n', (11354, 11370), False, 'from sqlalchemy import Column, func, String, select, desc, JSON, text\n'), ((13338, 13358), 'sqlalchemy.desc', 'desc', (['cls.created_at'], {}), '(cls.created_at)\n', (13342, 13358), False, 'from sqlalchemy import Column, func, String, select, desc, JSON, text\n'), ((2507, 2525), 'sqlalchemy.dialects.postgresql.UUID', 'UUID', ([], {'as_uuid': '(True)'}), '(as_uuid=True)\n', (2511, 2525), False, 'from sqlalchemy.dialects.postgresql import UUID, TIMESTAMP, ARRAY\n'), ((3160, 3173), 'sqlalchemy.dialects.postgresql.ARRAY', 'ARRAY', (['String'], {}), '(String)\n', (3165, 3173), False, 'from sqlalchemy.dialects.postgresql import UUID, TIMESTAMP, ARRAY\n'), ((12243, 12261), 'sqlalchemy.dialects.postgresql.UUID', 'UUID', ([], {'as_uuid': '(True)'}), '(as_uuid=True)\n', (12247, 12261), False, 'from sqlalchemy.dialects.postgresql import UUID, TIMESTAMP, ARRAY\n'), ((2584, 2609), 'sqlalchemy.text', 'text', (['"""gen_random_uuid()"""'], {}), "('gen_random_uuid()')\n", (2588, 2609), False, 'from sqlalchemy import Column, func, String, select, desc, JSON, text\n'), ((4089, 4099), 'sqlalchemy.func.now', 'func.now', ([], {}), '()\n', (4097, 4099), False, 'from sqlalchemy import Column, func, String, select, desc, JSON, text\n'), ((4221, 4231), 'sqlalchemy.func.now', 'func.now', ([], {}), '()\n', (4229, 4231), False, 'from sqlalchemy import Column, func, String, select, desc, JSON, text\n'), ((4242, 4252), 'sqlalchemy.func.now', 'func.now', ([], {}), '()\n', (4250, 4252), False, 'from sqlalchemy import Column, func, String, select, desc, JSON, text\n'), ((7821, 7846), 'sqlalchemy.orm.selectinload', 'selectinload', (['cls.contact'], {}), '(cls.contact)\n', (7833, 7846), False, 'from sqlalchemy.orm import selectinload\n'), ((7848, 7885), 'sqlalchemy.orm.selectinload', 'selectinload', (['cls.credential_template'], {}), '(cls.credential_template)\n', (7860, 7885), False, 'from sqlalchemy.orm import selectinload\n'), ((8724, 8749), 'sqlalchemy.orm.selectinload', 'selectinload', (['cls.contact'], {}), '(cls.contact)\n', (8736, 8749), False, 'from sqlalchemy.orm import selectinload\n'), ((8751, 8788), 'sqlalchemy.orm.selectinload', 'selectinload', (['cls.credential_template'], {}), '(cls.credential_template)\n', (8763, 8788), False, 'from sqlalchemy.orm import selectinload\n'), ((9611, 9636), 'sqlalchemy.orm.selectinload', 'selectinload', (['cls.contact'], {}), '(cls.contact)\n', (9623, 9636), False, 'from sqlalchemy.orm import selectinload\n'), ((9638, 9675), 'sqlalchemy.orm.selectinload', 'selectinload', (['cls.credential_template'], {}), '(cls.credential_template)\n', (9650, 9675), False, 'from sqlalchemy.orm import selectinload\n'), ((10371, 10396), 'sqlalchemy.orm.selectinload', 'selectinload', (['cls.contact'], {}), '(cls.contact)\n', (10383, 10396), False, 'from sqlalchemy.orm import selectinload\n'), ((10398, 10435), 'sqlalchemy.orm.selectinload', 'selectinload', (['cls.credential_template'], {}), '(cls.credential_template)\n', (10410, 10435), False, 'from sqlalchemy.orm import selectinload\n'), ((11262, 11287), 'sqlalchemy.orm.selectinload', 'selectinload', (['cls.contact'], {}), '(cls.contact)\n', (11274, 11287), False, 'from sqlalchemy.orm import selectinload\n'), ((11289, 11326), 'sqlalchemy.orm.selectinload', 'selectinload', (['cls.credential_template'], {}), '(cls.credential_template)\n', (11301, 11326), False, 'from sqlalchemy.orm import selectinload\n'), ((12320, 12345), 'sqlalchemy.text', 'text', (['"""gen_random_uuid()"""'], {}), "('gen_random_uuid()')\n", (12324, 12345), False, 'from sqlalchemy import Column, func, String, select, desc, JSON, text\n'), ((12668, 12678), 'sqlalchemy.func.now', 'func.now', ([], {}), '()\n', (12676, 12678), False, 'from sqlalchemy import Column, func, String, select, desc, JSON, text\n'), ((13235, 13246), 'sqlalchemy.select', 'select', (['cls'], {}), '(cls)\n', (13241, 13246), False, 'from sqlalchemy import Column, func, String, select, desc, JSON, text\n'), ((6390, 6401), 'sqlalchemy.select', 'select', (['cls'], {}), '(cls)\n', (6396, 6401), False, 'from sqlalchemy import Column, func, String, select, desc, JSON, text\n'), ((10291, 10302), 'sqlalchemy.select', 'select', (['cls'], {}), '(cls)\n', (10297, 10302), False, 'from sqlalchemy import Column, func, String, select, desc, JSON, text\n'), ((4997, 5008), 'sqlalchemy.select', 'select', (['cls'], {}), '(cls)\n', (5003, 5008), False, 'from sqlalchemy import Column, func, String, select, desc, JSON, text\n'), ((7668, 7679), 'sqlalchemy.select', 'select', (['cls'], {}), '(cls)\n', (7674, 7679), False, 'from sqlalchemy import Column, func, String, select, desc, JSON, text\n'), ((8593, 8604), 'sqlalchemy.select', 'select', (['cls'], {}), '(cls)\n', (8599, 8604), False, 'from sqlalchemy import Column, func, String, select, desc, JSON, text\n'), ((9482, 9493), 'sqlalchemy.select', 'select', (['cls'], {}), '(cls)\n', (9488, 9493), False, 'from sqlalchemy import Column, func, String, select, desc, JSON, text\n'), ((11135, 11146), 'sqlalchemy.select', 'select', (['cls'], {}), '(cls)\n', (11141, 11146), False, 'from sqlalchemy import Column, func, String, select, desc, JSON, text\n')]
|
from __future__ import absolute_import
import os
import numpy as nm
from sfepy.base.testing import TestCommon
from sfepy import data_dir
from six.moves import range
# n_vertex, n_edge, n_face, n_cell
# d1 -> d2 : num, n_incident
expected = {
'1_2_2.mesh' : ([3, 2, 0, 0], {
(0, 0) : (3, 4),
(0, 1) : (3, 4),
(1, 0) : (2, 4),
(1, 1) : (2, 2),
}),
'2_3_2.mesh' : ([4, 5, 2, 0], {
(0, 0) : (4, 10),
(0, 1) : (4, 10),
(0, 2) : (4, 6),
(1, 0) : (5, 10),
(1, 1) : (5, 16),
(1, 2) : (5, 6),
(2, 0) : (2, 6),
(2, 1) : (2, 6),
(2, 2) : (2, 2),
}),
'2_4_2.mesh' : ([6, 7, 2, 0], {
(0, 0) : (6, 22),
(0, 1) : (6, 14),
(0, 2) : (6, 8),
(1, 0) : (7, 14),
(1, 1) : (7, 20),
(1, 2) : (7, 8),
(2, 0) : (2, 8),
(2, 1) : (2, 8),
(2, 2) : (2, 2),
}),
'3_4_2.mesh' : ([5, 9, 7, 2], {
(0, 0) : (5, 18),
(0, 1) : (5, 18),
(0, 2) : (5, 21),
(0, 3) : (5, 8),
(1, 0) : (9, 18),
(1, 1) : (9, 48),
(1, 2) : (9, 21),
(1, 3) : (9, 12),
(2, 0) : (7, 21),
(2, 1) : (7, 21),
(2, 2) : (7, 42),
(2, 3) : (7, 8),
(3, 0) : (2, 8),
(3, 1) : (2, 12),
(3, 2) : (2, 8),
(3, 3) : (2, 2),
}),
'3_8_2.mesh' : ([12, 20, 11, 2], {
(0, 0) : (12, 100),
(0, 1) : (12, 40),
(0, 2) : (12, 44),
(0, 3) : (12, 16),
(1, 0) : (20, 40),
(1, 1) : (20, 96),
(1, 2) : (20, 44),
(1, 3) : (20, 24),
(2, 0) : (11, 44),
(2, 1) : (11, 44),
(2, 2) : (11, 72),
(2, 3) : (11, 12),
(3, 0) : (2, 16),
(3, 1) : (2, 24),
(3, 2) : (2, 12),
(3, 3) : (2, 2),
}),
'square_triquad.mesh' : ([470, 1127, 658, 0], {
(0, 0) : (470, 3054),
(0, 1) : (470, 2254),
(0, 2) : (470, 2174),
(1, 0) : (1127, 2254),
(1, 1) : (1127, 9174),
(1, 2) : (1127, 2174),
(2, 0) : (658, 2174),
(2, 1) : (658, 2174),
(2, 2) : (658, 6686),
}),
}
class Test(TestCommon):
@staticmethod
def from_conf(conf, options):
filename_meshes = [data_dir + '/meshes/elements/%s_2.mesh' % geom
for geom in ['1_2', '2_3', '2_4', '3_4', '3_8']]
filename_meshes.append(data_dir
+ '/meshes/2d/special/square_triquad.mesh')
test = Test(filename_meshes=filename_meshes,
conf=conf, options=options)
return test
def test_cmesh_counts(self):
from sfepy.discrete.fem import Mesh
from sfepy.discrete.fem.geometry_element import create_geometry_elements
from sfepy.discrete.common.extmods.cmesh import CMesh, get_cmem_usage
gels = create_geometry_elements()
ok = True
for filename in self.filename_meshes:
basename = os.path.basename(filename)
enum, esizes = expected[basename]
self.report('mesh: %s' % basename)
mesh = Mesh.from_file(filename)
cmesh = mesh.cmesh
cmesh.set_local_entities(gels)
cmesh.setup_entities()
self.report('dim:', cmesh.dim)
self.report('n_vertex: %d, n_edge: %d, n_face: %d, n_cell: %d' %
tuple(cmesh.num))
_ok = (enum == cmesh.num).all()
if not _ok:
self.report('%s == %s failed!' % (enum, cmesh.num))
ok = ok and _ok
dim = cmesh.dim
for ir in range(dim + 1):
for ic in range(dim + 1):
cmesh.setup_connectivity(ir, ic)
mem_usage1 = get_cmem_usage()[0]
if (ir == dim) and (ic == 0):
continue
cmesh.free_connectivity(ir, ic)
mem_usage2 = get_cmem_usage()[0]
cmesh.setup_connectivity(ir, ic)
mem_usage3 = get_cmem_usage()[0]
conn = cmesh.get_conn(ir, ic)
self.report('(%d, %d) : (%d, %d)'
% (ir, ic, conn.num, conn.n_incident))
sizes = nm.array([conn.num, conn.n_incident])
_ok = (esizes[ir, ic] == sizes).all()
if not _ok:
self.report('%s == %s failed!' % (esizes, sizes))
ok = ok and _ok
_ok1 = mem_usage3 == mem_usage1
_ok2 = mem_usage3 > mem_usage2
if not (_ok1 and _ok2):
                        self.report('unexpected memory usage! (%s, %s, %s)'
                                    % (mem_usage1, mem_usage2, mem_usage3))
ok = ok and (_ok1 and _ok2)
return ok
def test_entity_volumes(self):
import sfepy
from sfepy.discrete.fem import Mesh, FEDomain
from sfepy.discrete.common import Field
from sfepy.discrete import Integral
mesh = Mesh.from_file('meshes/3d/special/cross3d.mesh',
prefix_dir=sfepy.data_dir)
domain = FEDomain('domain', mesh)
omega = domain.create_region('Omega', 'all')
gamma = domain.create_region('Gamma', 'vertices of surface', 'facet')
top = domain.create_region('Top', 'cell 2')
vfield = Field.from_args('v', nm.float64, 'scalar', omega,
approx_order=1)
sfield = Field.from_args('s', nm.float64, 'scalar', gamma,
approx_order=1)
integral = Integral('i', order=3)
vgeo, _ = vfield.get_mapping(omega, integral, 'volume')
domain.create_surface_group(gamma)
sgeo, _ = sfield.get_mapping(gamma, integral, 'surface')
evols = mesh.cmesh.get_volumes(1)
fvols = mesh.cmesh.get_volumes(2) # Approximate for non-planar faces.
cvols = mesh.cmesh.get_volumes(3)
ok = True
_ok = abs(cvols.sum() - vgeo.volume.sum()) < 1e-15
self.report('total cell volume: %s (ok: %s)' % (cvols.sum(), _ok))
ok = _ok and ok
top_evols = nm.array([ 1. , 1. ,
1. , 1. ,
0.7211102550927979, 0.7211102550927979,
0.7211102550927979, 0.7211102550927979,
1.16619037896906 , 1.16619037896906 ,
1.16619037896906 , 1.16619037896906 ])
_ok = nm.allclose(top_evols, evols[top.edges], rtol=0.0, atol=1e-15)
self.report('total top cell edge length: %s (ok: %s)'
% (evols[top.edges].sum(), _ok))
ok = _ok and ok
i1 = [5, 6, 8, 9]
i2 = nm.setdiff1d(nm.arange(len(gamma.faces)), i1)
aux = fvols[gamma.faces] - sgeo.volume.ravel()
_ok = nm.allclose(aux[i1], 0.10560208437556773, rtol=0.0, atol=1e-15)
ok = _ok and ok
self.report('non-planar faces diff: %s (ok: %s)' % (aux[i1], _ok))
_ok = (nm.abs(aux[i2]) < 1e-15).all()
self.report('max. planar faces diff: %s (ok: %s)'
% (nm.abs(aux[i2]).max(), _ok))
ok = _ok and ok
return ok
|
[
"sfepy.discrete.fem.geometry_element.create_geometry_elements",
"sfepy.discrete.fem.Mesh.from_file",
"sfepy.discrete.common.Field.from_args",
"sfepy.discrete.Integral",
"sfepy.discrete.common.extmods.cmesh.get_cmem_usage",
"sfepy.discrete.fem.FEDomain"
] |
[((2942, 2968), 'sfepy.discrete.fem.geometry_element.create_geometry_elements', 'create_geometry_elements', ([], {}), '()\n', (2966, 2968), False, 'from sfepy.discrete.fem.geometry_element import create_geometry_elements\n'), ((5196, 5271), 'sfepy.discrete.fem.Mesh.from_file', 'Mesh.from_file', (['"""meshes/3d/special/cross3d.mesh"""'], {'prefix_dir': 'sfepy.data_dir'}), "('meshes/3d/special/cross3d.mesh', prefix_dir=sfepy.data_dir)\n", (5210, 5271), False, 'from sfepy.discrete.fem import Mesh, FEDomain\n'), ((5319, 5343), 'sfepy.discrete.fem.FEDomain', 'FEDomain', (['"""domain"""', 'mesh'], {}), "('domain', mesh)\n", (5327, 5343), False, 'from sfepy.discrete.fem import Mesh, FEDomain\n'), ((5546, 5611), 'sfepy.discrete.common.Field.from_args', 'Field.from_args', (['"""v"""', 'nm.float64', '"""scalar"""', 'omega'], {'approx_order': '(1)'}), "('v', nm.float64, 'scalar', omega, approx_order=1)\n", (5561, 5611), False, 'from sfepy.discrete.common import Field\n'), ((5662, 5727), 'sfepy.discrete.common.Field.from_args', 'Field.from_args', (['"""s"""', 'nm.float64', '"""scalar"""', 'gamma'], {'approx_order': '(1)'}), "('s', nm.float64, 'scalar', gamma, approx_order=1)\n", (5677, 5727), False, 'from sfepy.discrete.common import Field\n'), ((5781, 5803), 'sfepy.discrete.Integral', 'Integral', (['"""i"""'], {'order': '(3)'}), "('i', order=3)\n", (5789, 5803), False, 'from sfepy.discrete import Integral\n'), ((6337, 6529), 'numpy.array', 'nm.array', (['[1.0, 1.0, 1.0, 1.0, 0.7211102550927979, 0.7211102550927979, \n 0.7211102550927979, 0.7211102550927979, 1.16619037896906, \n 1.16619037896906, 1.16619037896906, 1.16619037896906]'], {}), '([1.0, 1.0, 1.0, 1.0, 0.7211102550927979, 0.7211102550927979, \n 0.7211102550927979, 0.7211102550927979, 1.16619037896906, \n 1.16619037896906, 1.16619037896906, 1.16619037896906])\n', (6345, 6529), True, 'import numpy as nm\n'), ((6765, 6827), 'numpy.allclose', 'nm.allclose', (['top_evols', 'evols[top.edges]'], {'rtol': '(0.0)', 'atol': '(1e-15)'}), '(top_evols, evols[top.edges], rtol=0.0, atol=1e-15)\n', (6776, 6827), True, 'import numpy as nm\n'), ((7123, 7186), 'numpy.allclose', 'nm.allclose', (['aux[i1]', '(0.10560208437556773)'], {'rtol': '(0.0)', 'atol': '(1e-15)'}), '(aux[i1], 0.10560208437556773, rtol=0.0, atol=1e-15)\n', (7134, 7186), True, 'import numpy as nm\n'), ((3058, 3084), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (3074, 3084), False, 'import os\n'), ((3199, 3223), 'sfepy.discrete.fem.Mesh.from_file', 'Mesh.from_file', (['filename'], {}), '(filename)\n', (3213, 3223), False, 'from sfepy.discrete.fem import Mesh, FEDomain\n'), ((3713, 3727), 'six.moves.range', 'range', (['(dim + 1)'], {}), '(dim + 1)\n', (3718, 3727), False, 'from six.moves import range\n'), ((3755, 3769), 'six.moves.range', 'range', (['(dim + 1)'], {}), '(dim + 1)\n', (3760, 3769), False, 'from six.moves import range\n'), ((4379, 4416), 'numpy.array', 'nm.array', (['[conn.num, conn.n_incident]'], {}), '([conn.num, conn.n_incident])\n', (4387, 4416), True, 'import numpy as nm\n'), ((7302, 7317), 'numpy.abs', 'nm.abs', (['aux[i2]'], {}), '(aux[i2])\n', (7308, 7317), True, 'import numpy as nm\n'), ((3857, 3873), 'sfepy.discrete.common.extmods.cmesh.get_cmem_usage', 'get_cmem_usage', ([], {}), '()\n', (3871, 3873), False, 'from sfepy.discrete.common.extmods.cmesh import CMesh, get_cmem_usage\n'), ((4047, 4063), 'sfepy.discrete.common.extmods.cmesh.get_cmem_usage', 'get_cmem_usage', ([], {}), '()\n', (4061, 4063), False, 'from sfepy.discrete.common.extmods.cmesh import CMesh, get_cmem_usage\n'), ((4154, 4170), 'sfepy.discrete.common.extmods.cmesh.get_cmem_usage', 'get_cmem_usage', ([], {}), '()\n', (4168, 4170), False, 'from sfepy.discrete.common.extmods.cmesh import CMesh, get_cmem_usage\n'), ((7414, 7429), 'numpy.abs', 'nm.abs', (['aux[i2]'], {}), '(aux[i2])\n', (7420, 7429), True, 'import numpy as nm\n')]
|
# -*- coding: utf-8 -*-
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import json
import logging
import megengine as mge
import coloredlogs
import numpy as np
import megengine.functional as F
class Params():
"""Class that loads hyperparameters from a json file.
Example:
```
params = Params(json_path)
print(params.learning_rate)
params.learning_rate = 0.5 # change the value of learning_rate in params
```
"""
def __init__(self, json_path):
with open(json_path) as f:
params = json.load(f)
self.update(params)
def save(self, json_path):
with open(json_path, 'w') as f:
json.dump(self.__dict__, f, indent=4)
def update(self, dict):
"""Loads parameters from json file"""
self.__dict__.update(dict)
@property
def dict(self):
"""Gives dict-like access to Params instance by `params.dict['learning_rate']"""
return self.__dict__
class RunningAverage():
"""A simple class that maintains the running average of a quantity
Example:
```
loss_avg = RunningAverage()
loss_avg.update(2)
loss_avg.update(4)
loss_avg() = 3
```
"""
def __init__(self):
self.steps = 0
self.total = 0
def update(self, val):
self.total += val
self.steps += 1
def __call__(self):
return self.total / float(self.steps)
class AverageMeter():
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.val_previous = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, num):
self.val_previous = self.val
self.val = val
self.sum += val * num
self.count += num
self.avg = self.sum / self.count
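# Illustration with made-up values: AverageMeter keeps a count-weighted mean.
#   m = AverageMeter()
#   m.update(val=2.0, num=10)  # batch of 10 samples with mean 2.0
#   m.update(val=4.0, num=30)  # batch of 30 samples with mean 4.0
#   m.avg                      # (2.0*10 + 4.0*30) / 40 == 3.5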
def loss_meter_manager_intial(loss_meter_names):
    # NOTE: the original exec()-based assignment does not reliably create local
    # names inside a function under Python 3; build the meters directly instead.
    loss_meters = [AverageMeter() for _ in loss_meter_names]
    return loss_meters
def tensor_mge(batch, check_on=True):
if check_on:
for k, v in batch.items():
if isinstance(v, np.ndarray):
batch[k] = mge.Tensor(v)
else:
for k, v in batch.items():
batch[k] = v.numpy()
return batch
def set_logger(log_path):
"""Set the logger to log info in terminal and file `log_path`.
In general, it is useful to have a logger so that every output to the terminal is saved
in a permanent file. Here we save it to `model_dir/train.log`.
Example:
```
logging.info("Starting training...")
```
Args:
log_path: (string) where to log
"""
logger = logging.getLogger()
logger.setLevel(logging.INFO)
coloredlogs.install(level='INFO', logger=logger, fmt='%(asctime)s %(name)s %(message)s')
file_handler = logging.FileHandler(log_path)
log_formatter = logging.Formatter('%(asctime)s - %(message)s')
file_handler.setFormatter(log_formatter)
logger.addHandler(file_handler)
logger.info('Output and logs will be saved to {}'.format(log_path))
return logger
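# Hedged usage sketch (the log path is an assumption):
#   logger = set_logger('experiments/base_model/train.log')
#   logging.info('Starting training...')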
def save_dict_to_json(d, json_path):
"""Saves dict of floats in json file
Args:
d: (dict) of float-castable values (np.float, int, float, etc.)
json_path: (string) path to json file
"""
save_dict = {}
with open(json_path, "w") as f:
        # We need to convert the values to float for json (it doesn't accept np.array, np.float, etc.)
for k, v in d.items():
if isinstance(v, AverageMeter):
save_dict[k] = float(v.avg)
else:
save_dict[k] = float(v)
json.dump(save_dict, f, indent=4)
def upsample2d_flow_as(inputs, target_as, mode="bilinear", if_rate=False):
_, _, h, w = target_as.shape
res = F.vision.interpolate(inputs, [h, w], mode=mode, align_corners=True)
_, _, h_, w_ = inputs.shape
if if_rate:
u_scale = (w / w_)
v_scale = (h / h_)
res[:, 0] *= u_scale
res[:, 1] *= v_scale
return res
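# Note (reading of the code above, values assumed): with if_rate=True the (u, v)
# channels are rescaled by w/w_ and h/h_, so a flow measured in pixels stays
# consistent after resizing -- e.g. going from 64x64 to 128x128 doubles both.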
def mesh_grid(B, H, W):
# mesh grid
x_base = F.arange(0, W)
x_base = F.tile(x_base, (B, H, 1))
y_base = F.arange(0, H) # BHW
y_base = F.tile(y_base, (B, W, 1)).transpose(0, 2, 1)
base_grid = F.stack([x_base, y_base], 1) # B2HW
return base_grid
def flow_warp(x, flow12):
B, _, H, W = x.shape
base_grid = mesh_grid(B, H, W).astype(x) # B2HW
grid_warp = base_grid + flow12
grid_warp = F.transpose(grid_warp, (0, 2, 3, 1))
warp_imgs = F.vision.remap(x, grid_warp)
return warp_imgs
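# Convention note (inferred from the code, not stated in the source): with
# grid_warp = base_grid + flow12, F.vision.remap reads output pixel (i, j) from
# x at (x=j+u, y=i+v), where flow12 stores (u, v) -- i.e. backward warping.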
def euclidean(t):
return F.sqrt(F.sum(t**2, axis=(1, ), keepdims=True))
def flow_error_avg(pred_flow, gt_flow):
_, _, H, W = gt_flow.shape
_, _, h, w = pred_flow.shape
assert (H == h) and (W == w), "inps shape is not the same: {} - {}".format((H, W), (h, w))
diff = euclidean(pred_flow - gt_flow)
diff_s = F.mean(diff)
error = diff_s
return error
def weight_parameters(module):
return [param for name, param in module.named_parameters() if "weight" in name]
def bias_parameters(module):
return [param for name, param in module.named_parameters() if "bias" in name]
|
[
"megengine.functional.arange",
"megengine.functional.tile",
"megengine.Tensor",
"megengine.functional.stack",
"megengine.functional.vision.remap",
"megengine.functional.mean",
"megengine.functional.transpose",
"megengine.functional.vision.interpolate",
"megengine.functional.sum"
] |
[((3019, 3038), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (3036, 3038), False, 'import logging\n'), ((3078, 3171), 'coloredlogs.install', 'coloredlogs.install', ([], {'level': '"""INFO"""', 'logger': 'logger', 'fmt': '"""%(asctime)s %(name)s %(message)s"""'}), "(level='INFO', logger=logger, fmt=\n '%(asctime)s %(name)s %(message)s')\n", (3097, 3171), False, 'import coloredlogs\n'), ((3186, 3215), 'logging.FileHandler', 'logging.FileHandler', (['log_path'], {}), '(log_path)\n', (3205, 3215), False, 'import logging\n'), ((3236, 3282), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s - %(message)s"""'], {}), "('%(asctime)s - %(message)s')\n", (3253, 3282), False, 'import logging\n'), ((4164, 4231), 'megengine.functional.vision.interpolate', 'F.vision.interpolate', (['inputs', '[h, w]'], {'mode': 'mode', 'align_corners': '(True)'}), '(inputs, [h, w], mode=mode, align_corners=True)\n', (4184, 4231), True, 'import megengine.functional as F\n'), ((4462, 4476), 'megengine.functional.arange', 'F.arange', (['(0)', 'W'], {}), '(0, W)\n', (4470, 4476), True, 'import megengine.functional as F\n'), ((4490, 4515), 'megengine.functional.tile', 'F.tile', (['x_base', '(B, H, 1)'], {}), '(x_base, (B, H, 1))\n', (4496, 4515), True, 'import megengine.functional as F\n'), ((4530, 4544), 'megengine.functional.arange', 'F.arange', (['(0)', 'H'], {}), '(0, H)\n', (4538, 4544), True, 'import megengine.functional as F\n'), ((4627, 4655), 'megengine.functional.stack', 'F.stack', (['[x_base, y_base]', '(1)'], {}), '([x_base, y_base], 1)\n', (4634, 4655), True, 'import megengine.functional as F\n'), ((4844, 4880), 'megengine.functional.transpose', 'F.transpose', (['grid_warp', '(0, 2, 3, 1)'], {}), '(grid_warp, (0, 2, 3, 1))\n', (4855, 4880), True, 'import megengine.functional as F\n'), ((4898, 4926), 'megengine.functional.vision.remap', 'F.vision.remap', (['x', 'grid_warp'], {}), '(x, grid_warp)\n', (4912, 4926), True, 'import megengine.functional as F\n'), ((5283, 5295), 'megengine.functional.mean', 'F.mean', (['diff'], {}), '(diff)\n', (5289, 5295), True, 'import megengine.functional as F\n'), ((4010, 4043), 'json.dump', 'json.dump', (['save_dict', 'f'], {'indent': '(4)'}), '(save_dict, f, indent=4)\n', (4019, 4043), False, 'import json\n'), ((4986, 5025), 'megengine.functional.sum', 'F.sum', (['(t ** 2)'], {'axis': '(1,)', 'keepdims': '(True)'}), '(t ** 2, axis=(1,), keepdims=True)\n', (4991, 5025), True, 'import megengine.functional as F\n'), ((846, 858), 'json.load', 'json.load', (['f'], {}), '(f)\n', (855, 858), False, 'import json\n'), ((975, 1012), 'json.dump', 'json.dump', (['self.__dict__', 'f'], {'indent': '(4)'}), '(self.__dict__, f, indent=4)\n', (984, 1012), False, 'import json\n'), ((4565, 4590), 'megengine.functional.tile', 'F.tile', (['y_base', '(B, W, 1)'], {}), '(y_base, (B, W, 1))\n', (4571, 4590), True, 'import megengine.functional as F\n'), ((2512, 2525), 'megengine.Tensor', 'mge.Tensor', (['v'], {}), '(v)\n', (2522, 2525), True, 'import megengine as mge\n')]
|
"""Initial
Revision ID: d63ccd5484d7
Revises:
Create Date: 2021-11-14 00:28:55.123695
"""
from alembic import op
import sqlalchemy as sa
import sqlmodel
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('facilities',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('category', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('notes', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_facilities_category'), 'facilities', ['category'], unique=False)
op.create_index(op.f('ix_facilities_created_at'), 'facilities', ['created_at'], unique=False)
op.create_index(op.f('ix_facilities_id'), 'facilities', ['id'], unique=False)
op.create_index(op.f('ix_facilities_name'), 'facilities', ['name'], unique=False)
op.create_index(op.f('ix_facilities_notes'), 'facilities', ['notes'], unique=False)
op.create_index(op.f('ix_facilities_updated_at'), 'facilities', ['updated_at'], unique=False)
op.create_table('increment',
sa.Column('id', sa.Integer(), nullable=False),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_increment_id'), 'increment', ['id'], unique=False)
op.create_table('listings',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('is_active', sa.Boolean(), nullable=False),
sa.Column('title', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('description', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('url', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('source', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('source_id', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('source_code', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('address', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('short_postal_code', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('property_type', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('postal_code', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('ber_code', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('bedrooms', sa.Integer(), nullable=True),
sa.Column('bathrooms', sa.Integer(), nullable=True),
sa.Column('price', sa.Integer(), nullable=True),
sa.Column('rating_auto', sa.Integer(), nullable=True),
sa.Column('rating_user', sa.Integer(), nullable=True),
sa.Column('telegram_sent_at', sa.DateTime(), nullable=True),
sa.Column('images_count', sa.Integer(), nullable=True),
sa.Column('latitude', sa.Float(), nullable=True),
sa.Column('longitude', sa.Float(), nullable=True),
sa.Column('notes', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('publish_date', sa.DateTime(), nullable=True),
sa.Column('last_updated', sa.DateTime(), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_listings_address'), 'listings', ['address'], unique=False)
op.create_index(op.f('ix_listings_bathrooms'), 'listings', ['bathrooms'], unique=False)
op.create_index(op.f('ix_listings_bedrooms'), 'listings', ['bedrooms'], unique=False)
op.create_index(op.f('ix_listings_ber_code'), 'listings', ['ber_code'], unique=False)
op.create_index(op.f('ix_listings_created_at'), 'listings', ['created_at'], unique=False)
op.create_index(op.f('ix_listings_description'), 'listings', ['description'], unique=False)
op.create_index(op.f('ix_listings_id'), 'listings', ['id'], unique=False)
op.create_index(op.f('ix_listings_images_count'), 'listings', ['images_count'], unique=False)
op.create_index(op.f('ix_listings_is_active'), 'listings', ['is_active'], unique=False)
op.create_index(op.f('ix_listings_last_updated'), 'listings', ['last_updated'], unique=False)
op.create_index(op.f('ix_listings_latitude'), 'listings', ['latitude'], unique=False)
op.create_index(op.f('ix_listings_longitude'), 'listings', ['longitude'], unique=False)
op.create_index(op.f('ix_listings_notes'), 'listings', ['notes'], unique=False)
op.create_index(op.f('ix_listings_postal_code'), 'listings', ['postal_code'], unique=False)
op.create_index(op.f('ix_listings_price'), 'listings', ['price'], unique=False)
op.create_index(op.f('ix_listings_property_type'), 'listings', ['property_type'], unique=False)
op.create_index(op.f('ix_listings_publish_date'), 'listings', ['publish_date'], unique=False)
op.create_index(op.f('ix_listings_rating_auto'), 'listings', ['rating_auto'], unique=False)
op.create_index(op.f('ix_listings_rating_user'), 'listings', ['rating_user'], unique=False)
op.create_index(op.f('ix_listings_short_postal_code'), 'listings', ['short_postal_code'], unique=False)
op.create_index(op.f('ix_listings_source'), 'listings', ['source'], unique=False)
op.create_index(op.f('ix_listings_source_code'), 'listings', ['source_code'], unique=False)
op.create_index(op.f('ix_listings_source_id'), 'listings', ['source_id'], unique=False)
op.create_index(op.f('ix_listings_telegram_sent_at'), 'listings', ['telegram_sent_at'], unique=False)
op.create_index(op.f('ix_listings_title'), 'listings', ['title'], unique=False)
op.create_index(op.f('ix_listings_updated_at'), 'listings', ['updated_at'], unique=False)
op.create_index(op.f('ix_listings_url'), 'listings', ['url'], unique=False)
op.create_table('song',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('artist', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('year', sa.Integer(), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_song_artist'), 'song', ['artist'], unique=False)
op.create_index(op.f('ix_song_created_at'), 'song', ['created_at'], unique=False)
op.create_index(op.f('ix_song_id'), 'song', ['id'], unique=False)
op.create_index(op.f('ix_song_name'), 'song', ['name'], unique=False)
op.create_index(op.f('ix_song_updated_at'), 'song', ['updated_at'], unique=False)
op.create_index(op.f('ix_song_year'), 'song', ['year'], unique=False)
op.create_table('images',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('url', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('size_x', sa.Float(), nullable=True),
sa.Column('size_y', sa.Float(), nullable=True),
sa.Column('listing_id', sa.Integer(), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['listing_id'], ['listings.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_images_created_at'), 'images', ['created_at'], unique=False)
op.create_index(op.f('ix_images_id'), 'images', ['id'], unique=False)
op.create_index(op.f('ix_images_listing_id'), 'images', ['listing_id'], unique=False)
op.create_index(op.f('ix_images_size_x'), 'images', ['size_x'], unique=False)
op.create_index(op.f('ix_images_size_y'), 'images', ['size_y'], unique=False)
op.create_index(op.f('ix_images_updated_at'), 'images', ['updated_at'], unique=False)
op.create_index(op.f('ix_images_url'), 'images', ['url'], unique=False)
op.create_table('listingfacilitylink',
sa.Column('listing_id', sa.Integer(), nullable=False),
sa.Column('facility_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['facility_id'], ['facilities.id'], ),
sa.ForeignKeyConstraint(['listing_id'], ['listings.id'], ),
sa.PrimaryKeyConstraint('listing_id', 'facility_id')
)
op.create_index(op.f('ix_listingfacilitylink_facility_id'), 'listingfacilitylink', ['facility_id'], unique=False)
op.create_index(op.f('ix_listingfacilitylink_listing_id'), 'listingfacilitylink', ['listing_id'], unique=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_listingfacilitylink_listing_id'), table_name='listingfacilitylink')
op.drop_index(op.f('ix_listingfacilitylink_facility_id'), table_name='listingfacilitylink')
op.drop_table('listingfacilitylink')
op.drop_index(op.f('ix_images_url'), table_name='images')
op.drop_index(op.f('ix_images_updated_at'), table_name='images')
op.drop_index(op.f('ix_images_size_y'), table_name='images')
op.drop_index(op.f('ix_images_size_x'), table_name='images')
op.drop_index(op.f('ix_images_listing_id'), table_name='images')
op.drop_index(op.f('ix_images_id'), table_name='images')
op.drop_index(op.f('ix_images_created_at'), table_name='images')
op.drop_table('images')
op.drop_index(op.f('ix_song_year'), table_name='song')
op.drop_index(op.f('ix_song_updated_at'), table_name='song')
op.drop_index(op.f('ix_song_name'), table_name='song')
op.drop_index(op.f('ix_song_id'), table_name='song')
op.drop_index(op.f('ix_song_created_at'), table_name='song')
op.drop_index(op.f('ix_song_artist'), table_name='song')
op.drop_table('song')
op.drop_index(op.f('ix_listings_url'), table_name='listings')
op.drop_index(op.f('ix_listings_updated_at'), table_name='listings')
op.drop_index(op.f('ix_listings_title'), table_name='listings')
op.drop_index(op.f('ix_listings_telegram_sent_at'), table_name='listings')
op.drop_index(op.f('ix_listings_source_id'), table_name='listings')
op.drop_index(op.f('ix_listings_source_code'), table_name='listings')
op.drop_index(op.f('ix_listings_source'), table_name='listings')
op.drop_index(op.f('ix_listings_short_postal_code'), table_name='listings')
op.drop_index(op.f('ix_listings_rating_user'), table_name='listings')
op.drop_index(op.f('ix_listings_rating_auto'), table_name='listings')
op.drop_index(op.f('ix_listings_publish_date'), table_name='listings')
op.drop_index(op.f('ix_listings_property_type'), table_name='listings')
op.drop_index(op.f('ix_listings_price'), table_name='listings')
op.drop_index(op.f('ix_listings_postal_code'), table_name='listings')
op.drop_index(op.f('ix_listings_notes'), table_name='listings')
op.drop_index(op.f('ix_listings_longitude'), table_name='listings')
op.drop_index(op.f('ix_listings_latitude'), table_name='listings')
op.drop_index(op.f('ix_listings_last_updated'), table_name='listings')
op.drop_index(op.f('ix_listings_is_active'), table_name='listings')
op.drop_index(op.f('ix_listings_images_count'), table_name='listings')
op.drop_index(op.f('ix_listings_id'), table_name='listings')
op.drop_index(op.f('ix_listings_description'), table_name='listings')
op.drop_index(op.f('ix_listings_created_at'), table_name='listings')
op.drop_index(op.f('ix_listings_ber_code'), table_name='listings')
op.drop_index(op.f('ix_listings_bedrooms'), table_name='listings')
op.drop_index(op.f('ix_listings_bathrooms'), table_name='listings')
op.drop_index(op.f('ix_listings_address'), table_name='listings')
op.drop_table('listings')
op.drop_index(op.f('ix_increment_id'), table_name='increment')
op.drop_table('increment')
op.drop_index(op.f('ix_facilities_updated_at'), table_name='facilities')
op.drop_index(op.f('ix_facilities_notes'), table_name='facilities')
op.drop_index(op.f('ix_facilities_name'), table_name='facilities')
op.drop_index(op.f('ix_facilities_id'), table_name='facilities')
op.drop_index(op.f('ix_facilities_created_at'), table_name='facilities')
op.drop_index(op.f('ix_facilities_category'), table_name='facilities')
op.drop_table('facilities')
# ### end Alembic commands ###
|
[
"sqlmodel.sql.sqltypes.AutoString"
] |
[((8907, 8943), 'alembic.op.drop_table', 'op.drop_table', (['"""listingfacilitylink"""'], {}), "('listingfacilitylink')\n", (8920, 8943), False, 'from alembic import op\n'), ((9408, 9431), 'alembic.op.drop_table', 'op.drop_table', (['"""images"""'], {}), "('images')\n", (9421, 9431), False, 'from alembic import op\n'), ((9802, 9823), 'alembic.op.drop_table', 'op.drop_table', (['"""song"""'], {}), "('song')\n", (9815, 9823), False, 'from alembic import op\n'), ((11779, 11804), 'alembic.op.drop_table', 'op.drop_table', (['"""listings"""'], {}), "('listings')\n", (11792, 11804), False, 'from alembic import op\n'), ((11876, 11902), 'alembic.op.drop_table', 'op.drop_table', (['"""increment"""'], {}), "('increment')\n", (11889, 11902), False, 'from alembic import op\n'), ((12348, 12375), 'alembic.op.drop_table', 'op.drop_table', (['"""facilities"""'], {}), "('facilities')\n", (12361, 12375), False, 'from alembic import op\n'), ((796, 825), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""'], {}), "('id')\n", (819, 825), True, 'import sqlalchemy as sa\n'), ((852, 882), 'alembic.op.f', 'op.f', (['"""ix_facilities_category"""'], {}), "('ix_facilities_category')\n", (856, 882), False, 'from alembic import op\n'), ((946, 978), 'alembic.op.f', 'op.f', (['"""ix_facilities_created_at"""'], {}), "('ix_facilities_created_at')\n", (950, 978), False, 'from alembic import op\n'), ((1044, 1068), 'alembic.op.f', 'op.f', (['"""ix_facilities_id"""'], {}), "('ix_facilities_id')\n", (1048, 1068), False, 'from alembic import op\n'), ((1126, 1152), 'alembic.op.f', 'op.f', (['"""ix_facilities_name"""'], {}), "('ix_facilities_name')\n", (1130, 1152), False, 'from alembic import op\n'), ((1212, 1239), 'alembic.op.f', 'op.f', (['"""ix_facilities_notes"""'], {}), "('ix_facilities_notes')\n", (1216, 1239), False, 'from alembic import op\n'), ((1300, 1332), 'alembic.op.f', 'op.f', (['"""ix_facilities_updated_at"""'], {}), "('ix_facilities_updated_at')\n", (1304, 1332), False, 'from alembic import op\n'), ((1466, 1495), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""'], {}), "('id')\n", (1489, 1495), True, 'import sqlalchemy as sa\n'), ((1522, 1545), 'alembic.op.f', 'op.f', (['"""ix_increment_id"""'], {}), "('ix_increment_id')\n", (1526, 1545), False, 'from alembic import op\n'), ((3436, 3465), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""'], {}), "('id')\n", (3459, 3465), True, 'import sqlalchemy as sa\n'), ((3492, 3519), 'alembic.op.f', 'op.f', (['"""ix_listings_address"""'], {}), "('ix_listings_address')\n", (3496, 3519), False, 'from alembic import op\n'), ((3580, 3609), 'alembic.op.f', 'op.f', (['"""ix_listings_bathrooms"""'], {}), "('ix_listings_bathrooms')\n", (3584, 3609), False, 'from alembic import op\n'), ((3672, 3700), 'alembic.op.f', 'op.f', (['"""ix_listings_bedrooms"""'], {}), "('ix_listings_bedrooms')\n", (3676, 3700), False, 'from alembic import op\n'), ((3762, 3790), 'alembic.op.f', 'op.f', (['"""ix_listings_ber_code"""'], {}), "('ix_listings_ber_code')\n", (3766, 3790), False, 'from alembic import op\n'), ((3852, 3882), 'alembic.op.f', 'op.f', (['"""ix_listings_created_at"""'], {}), "('ix_listings_created_at')\n", (3856, 3882), False, 'from alembic import op\n'), ((3946, 3977), 'alembic.op.f', 'op.f', (['"""ix_listings_description"""'], {}), "('ix_listings_description')\n", (3950, 3977), False, 'from alembic import op\n'), ((4042, 4064), 'alembic.op.f', 'op.f', (['"""ix_listings_id"""'], {}), "('ix_listings_id')\n", (4046, 4064), False, 'from alembic import op\n'), ((4120, 4152), 'alembic.op.f', 'op.f', (['"""ix_listings_images_count"""'], {}), "('ix_listings_images_count')\n", (4124, 4152), False, 'from alembic import op\n'), ((4218, 4247), 'alembic.op.f', 'op.f', (['"""ix_listings_is_active"""'], {}), "('ix_listings_is_active')\n", (4222, 4247), False, 'from alembic import op\n'), ((4310, 4342), 'alembic.op.f', 'op.f', (['"""ix_listings_last_updated"""'], {}), "('ix_listings_last_updated')\n", (4314, 4342), False, 'from alembic import op\n'), ((4408, 4436), 'alembic.op.f', 'op.f', (['"""ix_listings_latitude"""'], {}), "('ix_listings_latitude')\n", (4412, 4436), False, 'from alembic import op\n'), ((4498, 4527), 'alembic.op.f', 'op.f', (['"""ix_listings_longitude"""'], {}), "('ix_listings_longitude')\n", (4502, 4527), False, 'from alembic import op\n'), ((4590, 4615), 'alembic.op.f', 'op.f', (['"""ix_listings_notes"""'], {}), "('ix_listings_notes')\n", (4594, 4615), False, 'from alembic import op\n'), ((4674, 4705), 'alembic.op.f', 'op.f', (['"""ix_listings_postal_code"""'], {}), "('ix_listings_postal_code')\n", (4678, 4705), False, 'from alembic import op\n'), ((4770, 4795), 'alembic.op.f', 'op.f', (['"""ix_listings_price"""'], {}), "('ix_listings_price')\n", (4774, 4795), False, 'from alembic import op\n'), ((4854, 4887), 'alembic.op.f', 'op.f', (['"""ix_listings_property_type"""'], {}), "('ix_listings_property_type')\n", (4858, 4887), False, 'from alembic import op\n'), ((4954, 4986), 'alembic.op.f', 'op.f', (['"""ix_listings_publish_date"""'], {}), "('ix_listings_publish_date')\n", (4958, 4986), False, 'from alembic import op\n'), ((5052, 5083), 'alembic.op.f', 'op.f', (['"""ix_listings_rating_auto"""'], {}), "('ix_listings_rating_auto')\n", (5056, 5083), False, 'from alembic import op\n'), ((5148, 5179), 'alembic.op.f', 'op.f', (['"""ix_listings_rating_user"""'], {}), "('ix_listings_rating_user')\n", (5152, 5179), False, 'from alembic import op\n'), ((5244, 5281), 'alembic.op.f', 'op.f', (['"""ix_listings_short_postal_code"""'], {}), "('ix_listings_short_postal_code')\n", (5248, 5281), False, 'from alembic import op\n'), ((5352, 5378), 'alembic.op.f', 'op.f', (['"""ix_listings_source"""'], {}), "('ix_listings_source')\n", (5356, 5378), False, 'from alembic import op\n'), ((5438, 5469), 'alembic.op.f', 'op.f', (['"""ix_listings_source_code"""'], {}), "('ix_listings_source_code')\n", (5442, 5469), False, 'from alembic import op\n'), ((5534, 5563), 'alembic.op.f', 'op.f', (['"""ix_listings_source_id"""'], {}), "('ix_listings_source_id')\n", (5538, 5563), False, 'from alembic import op\n'), ((5626, 5662), 'alembic.op.f', 'op.f', (['"""ix_listings_telegram_sent_at"""'], {}), "('ix_listings_telegram_sent_at')\n", (5630, 5662), False, 'from alembic import op\n'), ((5732, 5757), 'alembic.op.f', 'op.f', (['"""ix_listings_title"""'], {}), "('ix_listings_title')\n", (5736, 5757), False, 'from alembic import op\n'), ((5816, 5846), 'alembic.op.f', 'op.f', (['"""ix_listings_updated_at"""'], {}), "('ix_listings_updated_at')\n", (5820, 5846), False, 'from alembic import op\n'), ((5910, 5933), 'alembic.op.f', 'op.f', (['"""ix_listings_url"""'], {}), "('ix_listings_url')\n", (5914, 5933), False, 'from alembic import op\n'), ((6375, 6404), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""'], {}), "('id')\n", (6398, 6404), True, 'import sqlalchemy as sa\n'), ((6431, 6453), 'alembic.op.f', 'op.f', (['"""ix_song_artist"""'], {}), "('ix_song_artist')\n", (6435, 6453), False, 'from alembic import op\n'), ((6509, 6535), 'alembic.op.f', 'op.f', (['"""ix_song_created_at"""'], {}), "('ix_song_created_at')\n", (6513, 6535), False, 'from alembic import op\n'), ((6595, 6613), 'alembic.op.f', 'op.f', (['"""ix_song_id"""'], {}), "('ix_song_id')\n", (6599, 6613), False, 'from alembic import op\n'), ((6665, 6685), 'alembic.op.f', 'op.f', (['"""ix_song_name"""'], {}), "('ix_song_name')\n", (6669, 6685), False, 'from alembic import op\n'), ((6739, 6765), 'alembic.op.f', 'op.f', (['"""ix_song_updated_at"""'], {}), "('ix_song_updated_at')\n", (6743, 6765), False, 'from alembic import op\n'), ((6825, 6845), 'alembic.op.f', 'op.f', (['"""ix_song_year"""'], {}), "('ix_song_year')\n", (6829, 6845), False, 'from alembic import op\n'), ((7318, 7374), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['listing_id']", "['listings.id']"], {}), "(['listing_id'], ['listings.id'])\n", (7341, 7374), True, 'import sqlalchemy as sa\n'), ((7382, 7411), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""'], {}), "('id')\n", (7405, 7411), True, 'import sqlalchemy as sa\n'), ((7438, 7466), 'alembic.op.f', 'op.f', (['"""ix_images_created_at"""'], {}), "('ix_images_created_at')\n", (7442, 7466), False, 'from alembic import op\n'), ((7528, 7548), 'alembic.op.f', 'op.f', (['"""ix_images_id"""'], {}), "('ix_images_id')\n", (7532, 7548), False, 'from alembic import op\n'), ((7602, 7630), 'alembic.op.f', 'op.f', (['"""ix_images_listing_id"""'], {}), "('ix_images_listing_id')\n", (7606, 7630), False, 'from alembic import op\n'), ((7692, 7716), 'alembic.op.f', 'op.f', (['"""ix_images_size_x"""'], {}), "('ix_images_size_x')\n", (7696, 7716), False, 'from alembic import op\n'), ((7774, 7798), 'alembic.op.f', 'op.f', (['"""ix_images_size_y"""'], {}), "('ix_images_size_y')\n", (7778, 7798), False, 'from alembic import op\n'), ((7856, 7884), 'alembic.op.f', 'op.f', (['"""ix_images_updated_at"""'], {}), "('ix_images_updated_at')\n", (7860, 7884), False, 'from alembic import op\n'), ((7946, 7967), 'alembic.op.f', 'op.f', (['"""ix_images_url"""'], {}), "('ix_images_url')\n", (7950, 7967), False, 'from alembic import op\n'), ((8168, 8227), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['facility_id']", "['facilities.id']"], {}), "(['facility_id'], ['facilities.id'])\n", (8191, 8227), True, 'import sqlalchemy as sa\n'), ((8235, 8291), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['listing_id']", "['listings.id']"], {}), "(['listing_id'], ['listings.id'])\n", (8258, 8291), True, 'import sqlalchemy as sa\n'), ((8299, 8351), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""listing_id"""', '"""facility_id"""'], {}), "('listing_id', 'facility_id')\n", (8322, 8351), True, 'import sqlalchemy as sa\n'), ((8378, 8420), 'alembic.op.f', 'op.f', (['"""ix_listingfacilitylink_facility_id"""'], {}), "('ix_listingfacilitylink_facility_id')\n", (8382, 8420), False, 'from alembic import op\n'), ((8496, 8537), 'alembic.op.f', 'op.f', (['"""ix_listingfacilitylink_listing_id"""'], {}), "('ix_listingfacilitylink_listing_id')\n", (8500, 8537), False, 'from alembic import op\n'), ((8730, 8771), 'alembic.op.f', 'op.f', (['"""ix_listingfacilitylink_listing_id"""'], {}), "('ix_listingfacilitylink_listing_id')\n", (8734, 8771), False, 'from alembic import op\n'), ((8825, 8867), 'alembic.op.f', 'op.f', (['"""ix_listingfacilitylink_facility_id"""'], {}), "('ix_listingfacilitylink_facility_id')\n", (8829, 8867), False, 'from alembic import op\n'), ((8962, 8983), 'alembic.op.f', 'op.f', (['"""ix_images_url"""'], {}), "('ix_images_url')\n", (8966, 8981), False, 'from alembic import op\n'), ((9024, 9052), 'alembic.op.f', 'op.f', (['"""ix_images_updated_at"""'], {}), "('ix_images_updated_at')\n", (9028, 9052), False, 'from alembic import op\n'), ((9093, 9117), 'alembic.op.f', 'op.f', (['"""ix_images_size_y"""'], {}), "('ix_images_size_y')\n", (9097, 9117), False, 'from alembic import op\n'), ((9158, 9182), 'alembic.op.f', 'op.f', (['"""ix_images_size_x"""'], {}), "('ix_images_size_x')\n", (9162, 9182), False, 'from alembic import op\n'), ((9223, 9251), 'alembic.op.f', 'op.f', (['"""ix_images_listing_id"""'], {}), "('ix_images_listing_id')\n", (9227, 9251), False, 'from alembic import op\n'), ((9292, 9312), 'alembic.op.f', 'op.f', (['"""ix_images_id"""'], {}), "('ix_images_id')\n", (9296, 9312), False, 'from alembic import op\n'), ((9353, 9381), 'alembic.op.f', 'op.f', (['"""ix_images_created_at"""'], {}), "('ix_images_created_at')\n", (9357, 9381), False, 'from alembic import op\n'), ((9450, 9470), 'alembic.op.f', 'op.f', (['"""ix_song_year"""'], {}), "('ix_song_year')\n", (9454, 9470), False, 'from alembic import op\n'), ((9509, 9535), 'alembic.op.f', 'op.f', (['"""ix_song_updated_at"""'], {}), "('ix_song_updated_at')\n", (9513, 9535), False, 'from alembic import op\n'), ((9574, 9594), 'alembic.op.f', 'op.f', (['"""ix_song_name"""'], {}), "('ix_song_name')\n", (9578, 9594), False, 'from alembic import op\n'), ((9633, 9651), 'alembic.op.f', 'op.f', (['"""ix_song_id"""'], {}), "('ix_song_id')\n", (9637, 9651), False, 'from alembic import op\n'), ((9690, 9716), 'alembic.op.f', 'op.f', (['"""ix_song_created_at"""'], {}), "('ix_song_created_at')\n", (9694, 9716), False, 'from alembic import op\n'), ((9755, 9777), 'alembic.op.f', 'op.f', (['"""ix_song_artist"""'], {}), "('ix_song_artist')\n", (9759, 9777), False, 'from alembic import op\n'), ((9842, 9865), 'alembic.op.f', 'op.f', (['"""ix_listings_url"""'], {}), "('ix_listings_url')\n", (9846, 9865), False, 'from alembic import op\n'), ((9908, 9938), 'alembic.op.f', 'op.f', (['"""ix_listings_updated_at"""'], {}), "('ix_listings_updated_at')\n", (9912, 9938), False, 'from alembic import op\n'), ((9981, 10006), 'alembic.op.f', 'op.f', (['"""ix_listings_title"""'], {}), "('ix_listings_title')\n", (9985, 10006), False, 'from alembic import op\n'), ((10049, 10085), 'alembic.op.f', 'op.f', (['"""ix_listings_telegram_sent_at"""'], {}), "('ix_listings_telegram_sent_at')\n", (10053, 10085), False, 'from alembic import op\n'), ((10128, 10157), 'alembic.op.f', 'op.f', (['"""ix_listings_source_id"""'], {}), "('ix_listings_source_id')\n", (10132, 10157), False, 'from alembic import op\n'), ((10200, 10231), 'alembic.op.f', 'op.f', (['"""ix_listings_source_code"""'], {}), "('ix_listings_source_code')\n", (10204, 10231), False, 'from alembic import op\n'), ((10274, 10300), 'alembic.op.f', 'op.f', (['"""ix_listings_source"""'], {}), "('ix_listings_source')\n", (10278, 10300), False, 'from alembic import op\n'), ((10343, 10380), 'alembic.op.f', 'op.f', (['"""ix_listings_short_postal_code"""'], {}), "('ix_listings_short_postal_code')\n", (10347, 10380), False, 'from alembic import op\n'), ((10423, 10454), 'alembic.op.f', 'op.f', (['"""ix_listings_rating_user"""'], {}), "('ix_listings_rating_user')\n", (10427, 10454), False, 'from alembic import op\n'), ((10497, 10528), 'alembic.op.f', 'op.f', (['"""ix_listings_rating_auto"""'], {}), "('ix_listings_rating_auto')\n", (10501, 10528), False, 'from alembic import op\n'), ((10571, 10603), 'alembic.op.f', 'op.f', (['"""ix_listings_publish_date"""'], {}), "('ix_listings_publish_date')\n", (10575, 10603), False, 'from alembic import op\n'), ((10646, 10679), 'alembic.op.f', 'op.f', (['"""ix_listings_property_type"""'], {}), "('ix_listings_property_type')\n", (10650, 10679), False, 'from alembic import op\n'), ((10722, 10747), 'alembic.op.f', 'op.f', (['"""ix_listings_price"""'], {}), "('ix_listings_price')\n", (10726, 10747), False, 'from alembic import op\n'), ((10790, 10821), 'alembic.op.f', 'op.f', (['"""ix_listings_postal_code"""'], {}), "('ix_listings_postal_code')\n", (10794, 10821), False, 'from alembic import op\n'), ((10864, 10889), 'alembic.op.f', 'op.f', (['"""ix_listings_notes"""'], {}), "('ix_listings_notes')\n", (10868, 10889), False, 'from alembic import op\n'), ((10932, 10961), 'alembic.op.f', 'op.f', (['"""ix_listings_longitude"""'], {}), "('ix_listings_longitude')\n", (10936, 10961), False, 'from alembic import op\n'), ((11004, 11032), 'alembic.op.f', 'op.f', (['"""ix_listings_latitude"""'], {}), "('ix_listings_latitude')\n", (11008, 11032), False, 'from alembic import op\n'), ((11075, 11107), 'alembic.op.f', 'op.f', (['"""ix_listings_last_updated"""'], {}), "('ix_listings_last_updated')\n", (11079, 11107), False, 'from alembic import op\n'), ((11150, 11179), 'alembic.op.f', 'op.f', (['"""ix_listings_is_active"""'], {}), "('ix_listings_is_active')\n", (11154, 11179), False, 'from alembic import op\n'), ((11222, 11254), 'alembic.op.f', 'op.f', (['"""ix_listings_images_count"""'], {}), "('ix_listings_images_count')\n", (11226, 11254), False, 'from alembic import op\n'), ((11297, 11319), 'alembic.op.f', 'op.f', (['"""ix_listings_id"""'], {}), "('ix_listings_id')\n", (11301, 11319), False, 'from alembic import op\n'), ((11362, 11393), 'alembic.op.f', 'op.f', (['"""ix_listings_description"""'], {}), "('ix_listings_description')\n", (11366, 11393), False, 'from alembic import op\n'), ((11436, 11466), 'alembic.op.f', 'op.f', (['"""ix_listings_created_at"""'], {}), "('ix_listings_created_at')\n", (11440, 11466), False, 'from alembic import op\n'), ((11509, 11537), 'alembic.op.f', 'op.f', (['"""ix_listings_ber_code"""'], {}), "('ix_listings_ber_code')\n", (11513, 11537), False, 'from alembic import op\n'), ((11580, 11608), 'alembic.op.f', 'op.f', (['"""ix_listings_bedrooms"""'], {}), "('ix_listings_bedrooms')\n", (11584, 11608), False, 'from alembic import op\n'), ((11651, 11680), 'alembic.op.f', 'op.f', (['"""ix_listings_bathrooms"""'], {}), "('ix_listings_bathrooms')\n", (11655, 11680), False, 'from alembic import op\n'), ((11723, 11750), 'alembic.op.f', 'op.f', (['"""ix_listings_address"""'], {}), "('ix_listings_address')\n", (11727, 11750), False, 'from alembic import op\n'), ((11823, 11846), 'alembic.op.f', 'op.f', (['"""ix_increment_id"""'], {}), "('ix_increment_id')\n", (11827, 11846), False, 'from alembic import op\n'), ((11921, 11953), 'alembic.op.f', 'op.f', (['"""ix_facilities_updated_at"""'], {}), "('ix_facilities_updated_at')\n", (11925, 11953), False, 'from alembic import op\n'), ((11998, 12025), 'alembic.op.f', 'op.f', (['"""ix_facilities_notes"""'], {}), "('ix_facilities_notes')\n", (12002, 12025), False, 'from alembic import op\n'), ((12070, 12096), 'alembic.op.f', 'op.f', (['"""ix_facilities_name"""'], {}), "('ix_facilities_name')\n", (12074, 12096), False, 'from alembic import op\n'), ((12141, 12165), 'alembic.op.f', 'op.f', (['"""ix_facilities_id"""'], {}), "('ix_facilities_id')\n", (12145, 12165), False, 'from alembic import op\n'), ((12210, 12242), 'alembic.op.f', 'op.f', (['"""ix_facilities_created_at"""'], {}), "('ix_facilities_created_at')\n", (12214, 12242), False, 'from alembic import op\n'), ((12287, 12317), 'alembic.op.f', 'op.f', (['"""ix_facilities_category"""'], {}), "('ix_facilities_category')\n", (12291, 12317), False, 'from alembic import op\n'), ((415, 427), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (425, 427), True, 'import sqlalchemy as sa\n'), ((468, 502), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (500, 502), False, 'import sqlmodel\n'), ((547, 581), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (579, 581), False, 'import sqlmodel\n'), ((622, 656), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (654, 656), False, 'import sqlmodel\n'), ((702, 715), 'sqlalchemy.DateTime', 'sa.DateTime', ([], {}), '()\n', (713, 715), True, 'import sqlalchemy as sa\n'), ((761, 774), 'sqlalchemy.DateTime', 'sa.DateTime', ([], {}), '()\n', (772, 774), True, 'import sqlalchemy as sa\n'), ((1431, 1443), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (1441, 1443), True, 'import sqlalchemy as sa\n'), ((1634, 1646), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (1644, 1646), True, 'import sqlalchemy as sa\n'), ((1692, 1704), 'sqlalchemy.Boolean', 'sa.Boolean', ([], {}), '()\n', (1702, 1704), True, 'import sqlalchemy as sa\n'), ((1746, 1780), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (1778, 1780), False, 'import sqlmodel\n'), ((1827, 1861), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (1859, 1861), False, 'import sqlmodel\n'), ((1900, 1934), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (1932, 1934), False, 'import sqlmodel\n'), ((1977, 2011), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (2009, 2011), False, 'import sqlmodel\n'), ((2057, 2091), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (2089, 2091), False, 'import sqlmodel\n'), ((2139, 2173), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (2171, 2173), False, 'import sqlmodel\n'), ((2217, 2251), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (2249, 2251), False, 'import sqlmodel\n'), ((2305, 2339), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (2337, 2339), False, 'import sqlmodel\n'), ((2388, 2422), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (2420, 2422), False, 'import sqlmodel\n'), ((2469, 2503), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (2501, 2503), False, 'import sqlmodel\n'), ((2547, 2581), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (2579, 2581), False, 'import sqlmodel\n'), ((2625, 2637), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (2635, 2637), True, 'import sqlalchemy as sa\n'), ((2682, 2694), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (2692, 2694), True, 'import sqlalchemy as sa\n'), ((2735, 2747), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (2745, 2747), True, 'import sqlalchemy as sa\n'), ((2794, 2806), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (2804, 2806), True, 'import sqlalchemy as sa\n'), ((2853, 2865), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (2863, 2865), True, 'import sqlalchemy as sa\n'), ((2917, 2930), 'sqlalchemy.DateTime', 'sa.DateTime', ([], {}), '()\n', (2928, 2930), True, 'import sqlalchemy as sa\n'), ((2978, 2990), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (2988, 2990), True, 'import sqlalchemy as sa\n'), ((3034, 3044), 'sqlalchemy.Float', 'sa.Float', ([], {}), '()\n', (3042, 3044), True, 'import sqlalchemy as sa\n'), ((3089, 3099), 'sqlalchemy.Float', 'sa.Float', ([], {}), '()\n', (3097, 3099), True, 'import sqlalchemy as sa\n'), ((3140, 3174), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (3172, 3174), False, 'import sqlmodel\n'), ((3222, 3235), 'sqlalchemy.DateTime', 'sa.DateTime', ([], {}), '()\n', (3233, 3235), True, 'import sqlalchemy as sa\n'), ((3283, 3296), 'sqlalchemy.DateTime', 'sa.DateTime', ([], {}), '()\n', (3294, 3296), True, 'import sqlalchemy as sa\n'), ((3342, 3355), 'sqlalchemy.DateTime', 'sa.DateTime', ([], {}), '()\n', (3353, 3355), True, 'import sqlalchemy as sa\n'), ((3401, 3414), 'sqlalchemy.DateTime', 'sa.DateTime', ([], {}), '()\n', (3412, 3414), True, 'import sqlalchemy as sa\n'), ((6018, 6030), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (6028, 6030), True, 'import sqlalchemy as sa\n'), ((6071, 6105), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (6103, 6105), False, 'import sqlmodel\n'), ((6148, 6182), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (6180, 6182), False, 'import sqlmodel\n'), ((6223, 6235), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (6233, 6235), True, 'import sqlalchemy as sa\n'), ((6281, 6294), 'sqlalchemy.DateTime', 'sa.DateTime', ([], {}), '()\n', (6292, 6294), True, 'import sqlalchemy as sa\n'), ((6340, 6353), 'sqlalchemy.DateTime', 'sa.DateTime', ([], {}), '()\n', (6351, 6353), True, 'import sqlalchemy as sa\n'), ((6929, 6941), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (6939, 6941), True, 'import sqlalchemy as sa\n'), ((6981, 7015), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (7013, 7015), False, 'import sqlmodel\n'), ((7058, 7068), 'sqlalchemy.Float', 'sa.Float', ([], {}), '()\n', (7066, 7068), True, 'import sqlalchemy as sa\n'), ((7110, 7120), 'sqlalchemy.Float', 'sa.Float', ([], {}), '()\n', (7118, 7120), True, 'import sqlalchemy as sa\n'), ((7166, 7178), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (7176, 7178), True, 'import sqlalchemy as sa\n'), ((7224, 7237), 'sqlalchemy.DateTime', 'sa.DateTime', ([], {}), '()\n', (7235, 7237), True, 'import sqlalchemy as sa\n'), ((7283, 7296), 'sqlalchemy.DateTime', 'sa.DateTime', ([], {}), '()\n', (7294, 7296), True, 'import sqlalchemy as sa\n'), ((8073, 8085), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (8083, 8085), True, 'import sqlalchemy as sa\n'), ((8133, 8145), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (8143, 8145), True, 'import sqlalchemy as sa\n')]
|
"""participant id as string
Revision ID: <KEY>
Revises: 11<PASSWORD>3<PASSWORD>
Create Date: 2022-04-04 04:34:56.202331+00:00
"""
import sqlalchemy as sa
import sqlmodel
from alembic import op
# revision identifiers, used by Alembic.
revision = "<KEY>"
down_revision = "11505f38b<PASSWORD>"
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(
"applications_participant_id_fkey", "applications", type_="foreignkey"
)
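    # with the foreign key out of the way, both id columns can be converted
    # to strings before the constraint is recreated below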
op.alter_column(
"applications",
"participant_id",
type_=sqlmodel.sql.sqltypes.AutoString(),
nullable=False,
)
op.alter_column(
"participants", "id", type_=sqlmodel.sql.sqltypes.AutoString(), nullable=False
)
op.create_foreign_key(
None,
"applications",
"participants",
["participant_id"],
["id"],
ondelete="CASCADE",
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(
"applications_participant_id_fkey", "applications", type_="foreignkey"
)
op.alter_column(
"applications", "participant_id", type_=sa.Integer(), nullable=False
)
op.alter_column("participants", "id", type_=sa.Integer(), nullable=False)
op.create_foreign_key(
None,
"applications",
"participants",
["participant_id"],
["id"],
ondelete="CASCADE",
)
# ### end Alembic commands ###
|
[
"sqlmodel.sql.sqltypes.AutoString"
] |
[((420, 514), 'alembic.op.drop_constraint', 'op.drop_constraint', (['"""applications_participant_id_fkey"""', '"""applications"""'], {'type_': '"""foreignkey"""'}), "('applications_participant_id_fkey', 'applications',\n type_='foreignkey')\n", (438, 514), False, 'from alembic import op\n'), ((794, 906), 'alembic.op.create_foreign_key', 'op.create_foreign_key', (['None', '"""applications"""', '"""participants"""', "['participant_id']", "['id']"], {'ondelete': '"""CASCADE"""'}), "(None, 'applications', 'participants', [\n 'participant_id'], ['id'], ondelete='CASCADE')\n", (815, 906), False, 'from alembic import op\n'), ((1081, 1175), 'alembic.op.drop_constraint', 'op.drop_constraint', (['"""applications_participant_id_fkey"""', '"""applications"""'], {'type_': '"""foreignkey"""'}), "('applications_participant_id_fkey', 'applications',\n type_='foreignkey')\n", (1099, 1175), False, 'from alembic import op\n'), ((1372, 1484), 'alembic.op.create_foreign_key', 'op.create_foreign_key', (['None', '"""applications"""', '"""participants"""', "['participant_id']", "['id']"], {'ondelete': '"""CASCADE"""'}), "(None, 'applications', 'participants', [\n 'participant_id'], ['id'], ondelete='CASCADE')\n", (1393, 1484), False, 'from alembic import op\n'), ((610, 644), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (642, 644), False, 'import sqlmodel\n'), ((733, 767), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (765, 767), False, 'import sqlmodel\n'), ((1255, 1267), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (1265, 1267), True, 'import sqlalchemy as sa\n'), ((1338, 1350), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (1348, 1350), True, 'import sqlalchemy as sa\n')]
|
import os
import numpy as nm
from sfepy.base.testing import TestCommon
from sfepy import data_dir
# n_cell, n_face, n_edge, n_vertex
# d1 -> d2 : num, n_incident
expected = {
'2_3_2.mesh' : ([4, 5, 2, 0], {
(0, 0) : (4, 10),
(0, 1) : (4, 10),
(0, 2) : (4, 6),
(1, 0) : (5, 10),
(1, 1) : (5, 16),
(1, 2) : (5, 6),
(2, 0) : (2, 6),
(2, 1) : (2, 6),
(2, 2) : (2, 2),
}),
'2_4_2.mesh' : ([6, 7, 2, 0], {
(0, 0) : (6, 22),
(0, 1) : (6, 14),
(0, 2) : (6, 8),
(1, 0) : (7, 14),
(1, 1) : (7, 20),
(1, 2) : (7, 8),
(2, 0) : (2, 8),
(2, 1) : (2, 8),
(2, 2) : (2, 2),
}),
'3_4_2.mesh' : ([5, 9, 7, 2], {
(0, 0) : (5, 18),
(0, 1) : (5, 18),
(0, 2) : (5, 21),
(0, 3) : (5, 8),
(1, 0) : (9, 18),
(1, 1) : (9, 48),
(1, 2) : (9, 21),
(1, 3) : (9, 12),
(2, 0) : (7, 21),
(2, 1) : (7, 21),
(2, 2) : (7, 42),
(2, 3) : (7, 8),
(3, 0) : (2, 8),
(3, 1) : (2, 12),
(3, 2) : (2, 8),
(3, 3) : (2, 2),
}),
'3_8_2.mesh' : ([12, 20, 11, 2], {
(0, 0) : (12, 100),
(0, 1) : (12, 40),
(0, 2) : (12, 44),
(0, 3) : (12, 16),
(1, 0) : (20, 40),
(1, 1) : (20, 96),
(1, 2) : (20, 44),
(1, 3) : (20, 24),
(2, 0) : (11, 44),
(2, 1) : (11, 44),
(2, 2) : (11, 72),
(2, 3) : (11, 12),
(3, 0) : (2, 16),
(3, 1) : (2, 24),
(3, 2) : (2, 12),
(3, 3) : (2, 2),
}),
'square_triquad.mesh' : ([470, 1127, 658, 0], {
(0, 0) : (470, 3054),
(0, 1) : (470, 2254),
(0, 2) : (470, 2174),
(1, 0) : (1127, 2254),
(1, 1) : (1127, 9174),
(1, 2) : (1127, 2174),
(2, 0) : (658, 2174),
(2, 1) : (658, 2174),
(2, 2) : (658, 6686),
}),
}
class Test(TestCommon):
@staticmethod
def from_conf(conf, options):
filename_meshes = [data_dir + '/meshes/elements/%s_2.mesh' % geom
for geom in ['2_3', '2_4', '3_4', '3_8']]
filename_meshes.append(data_dir
+ '/meshes/2d/special/square_triquad.mesh')
test = Test(filename_meshes=filename_meshes,
conf=conf, options=options)
return test
def test_cmesh_counts(self):
from sfepy.discrete.fem import Mesh
from sfepy.discrete.fem.geometry_element import create_geometry_elements
from sfepy.discrete.fem.extmods.cmesh import CMesh, get_cmem_usage
gels = create_geometry_elements()
ok = True
for filename in self.filename_meshes:
basename = os.path.basename(filename)
enum, esizes = expected[basename]
self.report('mesh: %s' % basename)
mesh = Mesh.from_file(filename)
cmesh = CMesh.from_mesh(mesh)
cmesh.set_local_entities(gels)
cmesh.setup_entities()
self.report('dim:', cmesh.dim)
self.report('n_cell: %d, n_face: %d, n_edge: %d, n_vertex: %d' %
tuple(cmesh.num))
_ok = (enum == cmesh.num).all()
if not _ok:
self.report('%s == %s failed!' % (enum, cmesh.num))
ok = ok and _ok
dim = cmesh.dim
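            # check each connectivity's size, and that freeing and re-creating
            # a connectivity returns the memory usage to its previous level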
for ir in range(dim + 1):
for ic in range(dim + 1):
cmesh.setup_connectivity(ir, ic)
mem_usage1 = get_cmem_usage()[0]
if (ir == dim) and (ic == 0):
continue
cmesh.free_connectivity(ir, ic)
mem_usage2 = get_cmem_usage()[0]
cmesh.setup_connectivity(ir, ic)
mem_usage3 = get_cmem_usage()[0]
conn = cmesh.get_conn(ir, ic)
self.report('(%d, %d) : (%d, %d)'
% (ir, ic, conn.num, conn.n_incident))
sizes = nm.array([conn.num, conn.n_incident])
_ok = (esizes[ir, ic] == sizes).all()
if not _ok:
self.report('%s == %s failed!' % (esizes, sizes))
ok = ok and _ok
_ok1 = mem_usage3 == mem_usage1
_ok2 = mem_usage3 > mem_usage2
if not (_ok1 and _ok2):
                        self.report('unexpected memory usage! (%s, %s, %s)'
                                    % (mem_usage1, mem_usage2, mem_usage3))
ok = ok and (_ok1 and _ok2)
return ok
|
[
"sfepy.discrete.fem.geometry_element.create_geometry_elements",
"sfepy.discrete.fem.extmods.cmesh.CMesh.from_mesh",
"sfepy.discrete.fem.Mesh.from_file",
"sfepy.discrete.fem.extmods.cmesh.get_cmem_usage"
] |
[((2717, 2743), 'sfepy.discrete.fem.geometry_element.create_geometry_elements', 'create_geometry_elements', ([], {}), '()\n', (2741, 2743), False, 'from sfepy.discrete.fem.geometry_element import create_geometry_elements\n'), ((2833, 2859), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (2849, 2859), False, 'import os\n'), ((2974, 2998), 'sfepy.discrete.fem.Mesh.from_file', 'Mesh.from_file', (['filename'], {}), '(filename)\n', (2988, 2998), False, 'from sfepy.discrete.fem import Mesh\n'), ((3019, 3040), 'sfepy.discrete.fem.extmods.cmesh.CMesh.from_mesh', 'CMesh.from_mesh', (['mesh'], {}), '(mesh)\n', (3034, 3040), False, 'from sfepy.discrete.fem.extmods.cmesh import CMesh, get_cmem_usage\n'), ((4165, 4202), 'numpy.array', 'nm.array', (['[conn.num, conn.n_incident]'], {}), '([conn.num, conn.n_incident])\n', (4173, 4202), True, 'import numpy as nm\n'), ((3643, 3659), 'sfepy.discrete.fem.extmods.cmesh.get_cmem_usage', 'get_cmem_usage', ([], {}), '()\n', (3657, 3659), False, 'from sfepy.discrete.fem.extmods.cmesh import CMesh, get_cmem_usage\n'), ((3833, 3849), 'sfepy.discrete.fem.extmods.cmesh.get_cmem_usage', 'get_cmem_usage', ([], {}), '()\n', (3847, 3849), False, 'from sfepy.discrete.fem.extmods.cmesh import CMesh, get_cmem_usage\n'), ((3940, 3956), 'sfepy.discrete.fem.extmods.cmesh.get_cmem_usage', 'get_cmem_usage', ([], {}), '()\n', (3954, 3956), False, 'from sfepy.discrete.fem.extmods.cmesh import CMesh, get_cmem_usage\n')]
|
from datetime import datetime
from typing import Optional
from sqlmodel import Field, SQLModel
class YouTube(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
video_id: str
title: str
description: str
thumb: str
published: datetime
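# read-side schema: no table=True, so it is never mapped, and id is required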
class YouTubeRead(SQLModel):
id: int
video_id: str
title: str
description: str
thumb: str
published: datetime
|
[
"sqlmodel.Field"
] |
[((159, 196), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (164, 196), False, 'from sqlmodel import Field, SQLModel\n')]
|
from __future__ import absolute_import
input_name = '../examples/multi_physics/piezo_elasticity.py'
output_name = 'test_piezo_elasticity.vtk'
from tests_basic import TestInput
class Test( TestInput ):
def from_conf( conf, options ):
return TestInput.from_conf( conf, options, cls = Test )
from_conf = staticmethod( from_conf )
def test_ebc( self ):
import numpy as nm
from sfepy.discrete import Problem
pb = Problem.from_conf(self.test_conf)
pb.time_update()
vvs = pb.get_variables()
setv = vvs.set_state_part
make_full = vvs.make_full_vec
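        # fill the stripped (EBC-reduced) state parts with known constant values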
svec_u = nm.ones( (vvs.adi.n_dof['u'],), dtype = nm.float64 )
svec_phi = nm.empty( (vvs.adi.n_dof['phi'],), dtype = nm.float64 )
svec_phi.fill( 2.0 )
svec = vvs.create_stripped_state_vector()
setv( svec, svec_u, 'u', stripped = True )
setv( svec, svec_phi, 'phi', stripped = True )
vec = make_full( svec )
ii_u = vvs.di.indx['u'].start + vvs['u'].eq_map.eqi
ii_phi = vvs.di.indx['phi'].start + vvs['phi'].eq_map.eqi
ok_ebc = vvs.has_ebc( vec )
ok_u = nm.all( vec[ii_u] == svec_u )
ok_phi = nm.all( vec[ii_phi] == svec_phi )
msg = '%s: %s'
self.report( msg % ('ebc', ok_ebc) )
self.report( msg % ('u', ok_u) )
self.report( msg % ('phi', ok_phi) )
ok = ok_ebc and ok_u and ok_phi
return ok
|
[
"sfepy.discrete.Problem.from_conf"
] |
[((256, 300), 'tests_basic.TestInput.from_conf', 'TestInput.from_conf', (['conf', 'options'], {'cls': 'Test'}), '(conf, options, cls=Test)\n', (275, 300), False, 'from tests_basic import TestInput\n'), ((458, 491), 'sfepy.discrete.Problem.from_conf', 'Problem.from_conf', (['self.test_conf'], {}), '(self.test_conf)\n', (475, 491), False, 'from sfepy.discrete import Problem\n'), ((641, 689), 'numpy.ones', 'nm.ones', (["(vvs.adi.n_dof['u'],)"], {'dtype': 'nm.float64'}), "((vvs.adi.n_dof['u'],), dtype=nm.float64)\n", (648, 689), True, 'import numpy as nm\n'), ((713, 764), 'numpy.empty', 'nm.empty', (["(vvs.adi.n_dof['phi'],)"], {'dtype': 'nm.float64'}), "((vvs.adi.n_dof['phi'],), dtype=nm.float64)\n", (721, 764), True, 'import numpy as nm\n'), ((1167, 1194), 'numpy.all', 'nm.all', (['(vec[ii_u] == svec_u)'], {}), '(vec[ii_u] == svec_u)\n', (1173, 1194), True, 'import numpy as nm\n'), ((1214, 1245), 'numpy.all', 'nm.all', (['(vec[ii_phi] == svec_phi)'], {}), '(vec[ii_phi] == svec_phi)\n', (1220, 1245), True, 'import numpy as nm\n')]
|
from typing import Any
import sqlalchemy.exc
from ariadne import convert_kwargs_to_snake_case
from graphql.type.definition import GraphQLResolveInfo
from graphql_relay.node.node import from_global_id
from sqlmodel import select
from ariadne_example.app.db.session import Session, engine
from ariadne_example.app.core.struсtures import TaskStatusEnum, TASK_QUEUES
from ariadne_example.app.models import Task
from ariadne_example.app.core.exceptions import NotFoundError
@convert_kwargs_to_snake_case
def resolve_create_task(
obj: Any,
info: GraphQLResolveInfo,
user_id: str,
task_input: dict,
) -> int:
with Session(engine) as session:
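        # from_global_id() returns (type name, local id)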
        _, local_user_id = from_global_id(user_id)
try:
task = Task(
title=task_input.get("title"),
created_at=task_input.get("created_at"),
status=task_input.get("status"),
user_id=local_user_id
)
session.add(task)
session.commit()
session.refresh(task)
except sqlalchemy.exc.IntegrityError:
            raise NotFoundError(msg='No user found with this user_id')
return task.id
@convert_kwargs_to_snake_case
async def resolve_change_task_status(
obj: Any,
info: GraphQLResolveInfo,
new_status: TaskStatusEnum,
task_id: str,
) -> None:
with Session(engine) as session:
        _, local_task_id = from_global_id(task_id)
        try:
            statement = select(Task).where(Task.id == local_task_id)
            # fetch the Task instance itself rather than the raw result object
            task = session.execute(statement).scalars().one()
            task.status = new_status
            session.add(task)
            session.commit()
            session.refresh(task)
        except sqlalchemy.exc.NoResultFound:
            raise NotFoundError(msg='No task found with this task_id')
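    # fan the updated task out to every registered consumer queue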
for queue in TASK_QUEUES:
queue.put(task)
|
[
"sqlmodel.select"
] |
[((646, 661), 'ariadne_example.app.db.session.Session', 'Session', (['engine'], {}), '(engine)\n', (653, 661), False, 'from ariadne_example.app.db.session import Session, engine\n'), ((701, 724), 'graphql_relay.node.node.from_global_id', 'from_global_id', (['user_id'], {}), '(user_id)\n', (715, 724), False, 'from graphql_relay.node.node import from_global_id\n'), ((1408, 1423), 'ariadne_example.app.db.session.Session', 'Session', (['engine'], {}), '(engine)\n', (1415, 1423), False, 'from ariadne_example.app.db.session import Session, engine\n'), ((1463, 1486), 'graphql_relay.node.node.from_global_id', 'from_global_id', (['task_id'], {}), '(task_id)\n', (1477, 1486), False, 'from graphql_relay.node.node import from_global_id\n'), ((1125, 1184), 'ariadne_example.app.core.exceptions.NotFoundError', 'NotFoundError', ([], {'msg': '"""No user found with this user_id"""'}), "(msg='No user found with this user_id')\n", (1138, 1184), False, 'from ariadne_example.app.core.exceptions import NotFoundError\n'), ((1809, 1863), 'ariadne_example.app.core.exceptions.NotFoundError', 'NotFoundError', ([], {'msg': '"""No task found with this task_id"""'}), "(msg='No task found with this task_id')\n", (1822, 1863), False, 'from ariadne_example.app.core.exceptions import NotFoundError\n'), ((1524, 1536), 'sqlmodel.select', 'select', (['Task'], {}), '(Task)\n', (1530, 1536), False, 'from sqlmodel import select\n')]
|
"""
This module is not a test file. It contains classes grouping some common
functionality, that is used in several test files.
"""
from __future__ import absolute_import
from sfepy.base.base import IndexedStruct
from sfepy.base.testing import TestCommon
import os.path as op
class NLSStatus(IndexedStruct):
"""
Custom nonlinear solver status storing stopping condition of all
time steps.
"""
def __setitem__(self, key, val):
IndexedStruct.__setitem__(self, key, val)
if key == 'condition':
self.conditions.append(val)
class TestDummy(TestCommon):
"""Simulate test OK result for missing optional modules."""
@staticmethod
def from_conf(conf, options):
return TestDummy()
def test_dummy(self):
return True
class TestInput(TestCommon):
"""Test that an input file works. See test_input_*.py files."""
@staticmethod
def from_conf(conf, options, cls=None):
from sfepy.base.base import Struct
from sfepy.base.conf import ProblemConf, get_standard_keywords
from sfepy.applications import assign_standard_hooks
required, other = get_standard_keywords()
input_name = op.join(op.dirname(__file__), conf.input_name)
test_conf = ProblemConf.from_file(input_name, required, other)
if cls is None:
cls = TestInput
test = cls(test_conf=test_conf, conf=conf, options=options)
assign_standard_hooks(test, test_conf.options.get, test_conf)
name = test.get_output_name_trunk()
ext = test.get_output_name_ext()
test.solver_options = Struct(output_filename_trunk=name,
output_format=ext if ext != '' else "vtk",
save_ebc=False, save_ebc_nodes=False,
save_regions=False,
save_regions_as_groups=False,
save_field_meshes=False,
solve_not=False)
return test
def get_output_name_trunk(self):
return op.splitext(op.split(self.conf.output_name)[1])[0]
def get_output_name_ext(self):
return op.splitext(op.split(self.conf.output_name)[1])[1].replace(".", "")
def check_conditions(self, conditions):
ok = (conditions == 0).all()
if not ok:
self.report('nls stopping conditions:')
self.report(conditions)
return ok
def test_input(self):
import numpy as nm
from sfepy.applications import solve_pde
self.report('solving %s...' % self.conf.input_name)
status = IndexedStruct(nls_status=NLSStatus(conditions=[]))
solve_pde(self.test_conf,
self.solver_options,
status=status,
output_dir=self.options.out_dir,
step_hook=self.step_hook,
post_process_hook=self.post_process_hook,
post_process_hook_final=self.post_process_hook_final)
self.report('%s solved' % self.conf.input_name)
ok = self.check_conditions(nm.array(status.nls_status.conditions))
return ok
class TestInputEvolutionary(TestInput):
@staticmethod
def from_conf(conf, options, cls=None):
if cls is None:
cls = TestInputEvolutionary
return TestInput.from_conf(conf, options, cls=cls)
def get_output_name_trunk(self):
return self.conf.output_name_trunk
def get_output_name_ext(self):
return ""
class TestLCBC(TestCommon):
"""Test linear combination BC. See test_lcbc_*.py files."""
@staticmethod
def from_conf(conf, options):
return TestLCBC(conf=conf, options=options)
def test_linear_rigid_body_bc(self):
import scipy
if scipy.version.version == "0.6.0":
# This test uses a functionality implemented in scipy svn, which is
# missing in scipy 0.6.0
return True
from sfepy.base.base import Struct
from sfepy.applications import solve_pde
from sfepy.base.base import IndexedStruct
status = IndexedStruct()
problem, state = solve_pde(self.conf, status=status,
save_results=False)
ok = status.nls_status.condition == 0
self.report('converged: %s' % ok)
out = state.create_output_dict()
strain = problem.evaluate('ev_cauchy_strain.i.Y( u )', mode='el_avg')
out['strain'] = Struct(name='output_data',
mode='cell', data=strain, dofs=None)
name = op.join(self.options.out_dir,
op.split(self.conf.output_name)[1])
problem.domain.mesh.write(name, io='auto', out=out)
##
        # A check that the rigid body displacements are really rigid should go here.
return ok
|
[
"sfepy.base.conf.get_standard_keywords",
"sfepy.applications.assign_standard_hooks",
"sfepy.base.base.IndexedStruct.__setitem__",
"sfepy.base.conf.ProblemConf.from_file",
"sfepy.base.base.Struct",
"sfepy.applications.solve_pde",
"sfepy.base.base.IndexedStruct"
] |
[((455, 496), 'sfepy.base.base.IndexedStruct.__setitem__', 'IndexedStruct.__setitem__', (['self', 'key', 'val'], {}), '(self, key, val)\n', (480, 496), False, 'from sfepy.base.base import IndexedStruct\n'), ((1153, 1176), 'sfepy.base.conf.get_standard_keywords', 'get_standard_keywords', ([], {}), '()\n', (1174, 1176), False, 'from sfepy.base.conf import ProblemConf, get_standard_keywords\n'), ((1265, 1315), 'sfepy.base.conf.ProblemConf.from_file', 'ProblemConf.from_file', (['input_name', 'required', 'other'], {}), '(input_name, required, other)\n', (1286, 1315), False, 'from sfepy.base.conf import ProblemConf, get_standard_keywords\n'), ((1446, 1507), 'sfepy.applications.assign_standard_hooks', 'assign_standard_hooks', (['test', 'test_conf.options.get', 'test_conf'], {}), '(test, test_conf.options.get, test_conf)\n', (1467, 1507), False, 'from sfepy.applications import assign_standard_hooks\n'), ((1624, 1839), 'sfepy.base.base.Struct', 'Struct', ([], {'output_filename_trunk': 'name', 'output_format': "(ext if ext != '' else 'vtk')", 'save_ebc': '(False)', 'save_ebc_nodes': '(False)', 'save_regions': '(False)', 'save_regions_as_groups': '(False)', 'save_field_meshes': '(False)', 'solve_not': '(False)'}), "(output_filename_trunk=name, output_format=ext if ext != '' else\n 'vtk', save_ebc=False, save_ebc_nodes=False, save_regions=False,\n save_regions_as_groups=False, save_field_meshes=False, solve_not=False)\n", (1630, 1839), False, 'from sfepy.base.base import Struct\n'), ((2748, 2974), 'sfepy.applications.solve_pde', 'solve_pde', (['self.test_conf', 'self.solver_options'], {'status': 'status', 'output_dir': 'self.options.out_dir', 'step_hook': 'self.step_hook', 'post_process_hook': 'self.post_process_hook', 'post_process_hook_final': 'self.post_process_hook_final'}), '(self.test_conf, self.solver_options, status=status, output_dir=\n self.options.out_dir, step_hook=self.step_hook, post_process_hook=self.\n post_process_hook, post_process_hook_final=self.post_process_hook_final)\n', (2757, 2974), False, 'from sfepy.applications import solve_pde\n'), ((4194, 4209), 'sfepy.base.base.IndexedStruct', 'IndexedStruct', ([], {}), '()\n', (4207, 4209), False, 'from sfepy.base.base import IndexedStruct\n'), ((4235, 4290), 'sfepy.applications.solve_pde', 'solve_pde', (['self.conf'], {'status': 'status', 'save_results': '(False)'}), '(self.conf, status=status, save_results=False)\n', (4244, 4290), False, 'from sfepy.applications import solve_pde\n'), ((4558, 4621), 'sfepy.base.base.Struct', 'Struct', ([], {'name': '"""output_data"""', 'mode': '"""cell"""', 'data': 'strain', 'dofs': 'None'}), "(name='output_data', mode='cell', data=strain, dofs=None)\n", (4564, 4621), False, 'from sfepy.base.base import Struct\n'), ((1206, 1226), 'os.path.dirname', 'op.dirname', (['__file__'], {}), '(__file__)\n', (1216, 1226), True, 'import os.path as op\n'), ((3165, 3203), 'numpy.array', 'nm.array', (['status.nls_status.conditions'], {}), '(status.nls_status.conditions)\n', (3173, 3203), True, 'import numpy as nm\n'), ((4722, 4753), 'os.path.split', 'op.split', (['self.conf.output_name'], {}), '(self.conf.output_name)\n', (4730, 4753), True, 'import os.path as op\n'), ((2140, 2171), 'os.path.split', 'op.split', (['self.conf.output_name'], {}), '(self.conf.output_name)\n', (2148, 2171), True, 'import os.path as op\n'), ((2242, 2273), 'os.path.split', 'op.split', (['self.conf.output_name'], {}), '(self.conf.output_name)\n', (2250, 2273), True, 'import os.path as op\n')]
|
from typer.testing import CliRunner
import os
from timerdo.main import app, sqlite_file_name
from timerdo.tables import ToDo, Timer
from sqlmodel import create_engine, Session, select
from datetime import datetime, timedelta
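# Move any pre-existing user database out of the way so the tests run against
# a fresh file; test_return_db() at the end of this module restores it.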
try:
os.rename('/home/cmts/.config/timerdo/timerdo_db.db',
'/home/cmts/.config/timerdo/timerdo_db_moved.db')
except FileNotFoundError:
pass
sqlite_url = f'sqlite:///{sqlite_file_name}'
engine = create_engine(sqlite_url, echo=True)
runner = CliRunner()
def test_add_none():
"""Test add function with no argument"""
result = runner.invoke(app, ['add'])
assert result.exit_code == 2
def test_add_task():
"""Test add function with task argument"""
task = 'test add'
result = runner.invoke(app, ['add', task])
with Session(engine) as session:
query = session.exec(select(ToDo).where(ToDo.task == task)).one()
        db_task = query.task
        status = query.status
    assert result.exit_code == 0
    assert db_task == task
assert status == 'to do'
def test_add_status():
"""Test status"""
task = 'Test status'
status = 'dif'
result = runner.invoke(app, ['add', task, '--status', status])
assert result.exit_code == 1
assert 'status must be "to do" or "doing"\n' in result.stdout
def test_add_due_date():
"""Test due date"""
task = 'Test due date'
date = datetime.strftime(datetime.now(), '%Y-%m-%d')
result = runner.invoke(app, ['add', task, '--due-date', date])
assert result.exit_code == 1
assert f'due date must be grater than {datetime.today().date()}\n' in \
result.stdout
def test_add_reminder():
"""Test reminder"""
task = 'Test reminder'
date = datetime.strftime(datetime.now(), '%Y-%m-%d')
result = runner.invoke(app, ['add', task, '--reminder', date])
assert result.exit_code == 1
assert f'reminder must be grater than {datetime.today().date()}\n' in \
result.stdout
def test_add_due_date_reminder():
"""Test due-date and reminder"""
task = 'Test due-date and reminder'
due_date = datetime.strftime(
datetime.now() + timedelta(days=2), '%Y-%m-%d')
reminder = datetime.strftime(
datetime.now() + timedelta(days=2), '%Y-%m-%d')
result = runner.invoke(app, ['add', task, '--reminder', reminder,
'--due-date', due_date])
assert result.exit_code == 1
assert f'reminder must be smaller than {due_date}\n' in \
result.stdout
def test_add_full_entry():
"""Test add full task"""
task = 'something'
project = 'test project'
due_date = datetime.strftime(
datetime.now() + timedelta(days=2), '%Y-%m-%d')
reminder = datetime.strftime(
datetime.now() + timedelta(days=1), '%Y-%m-%d')
status = 'doing'
tag = 'tag'
result = runner.invoke(app, ['add', task,
'--project', project,
'--due-date', due_date,
'--reminder', reminder,
'--status', status,
'--tag', tag])
assert result.exit_code == 0
with Session(engine) as session:
query = session.exec(select(ToDo).where(ToDo.task == task,
ToDo.project == project,
ToDo.status == status,
ToDo.tag == tag)).one()
assert query is not None
def test_start():
"""Test start"""
todo_id = '1'
result = runner.invoke(app, ['start', todo_id])
assert result.exit_code == 0
with Session(engine) as session:
query = session.exec(select(ToDo.status).where(ToDo.id ==
todo_id)).one()
assert query == 'doing'
def test_start_running():
"""Test start when running"""
todo_id = '1'
result = runner.invoke(app, ['start', todo_id])
assert result.exit_code == 1
assert 'The Timer must be stopped first' in result.stdout
def test_stop():
"""Test stop"""
result = runner.invoke(app, ['stop'])
assert result.exit_code == 0
def test_stop_no_run():
"""Test stop with no run"""
result = runner.invoke(app, ['stop'])
assert result.exit_code == 1
def test_duration():
"""test duration"""
todo_id = 1
with Session(engine) as session:
todo = session.exec(select(ToDo.duration).where(ToDo.id ==
todo_id)).one()
timer = session.exec(select(Timer.duration).where(Timer.id_todo
== todo_id)).one()
assert todo is not None and todo == timer
def test_return_db():
try:
os.remove('/home/cmts/.config/timerdo/timerdo_db.db')
os.rename('/home/cmts/.config/timerdo/timerdo_db_moved.db',
'/home/cmts/.config/timerdo/timerdo_db.db')
except FileNotFoundError:
pass
|
[
"sqlmodel.Session",
"sqlmodel.select",
"sqlmodel.create_engine"
] |
[((447, 483), 'sqlmodel.create_engine', 'create_engine', (['sqlite_url'], {'echo': '(True)'}), '(sqlite_url, echo=True)\n', (460, 483), False, 'from sqlmodel import create_engine, Session, select\n'), ((494, 505), 'typer.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (503, 505), False, 'from typer.testing import CliRunner\n'), ((237, 344), 'os.rename', 'os.rename', (['"""/home/cmts/.config/timerdo/timerdo_db.db"""', '"""/home/cmts/.config/timerdo/timerdo_db_moved.db"""'], {}), "('/home/cmts/.config/timerdo/timerdo_db.db',\n '/home/cmts/.config/timerdo/timerdo_db_moved.db')\n", (246, 344), False, 'import os\n'), ((797, 812), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (804, 812), False, 'from sqlmodel import create_engine, Session, select\n'), ((1407, 1421), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1419, 1421), False, 'from datetime import datetime, timedelta\n'), ((1744, 1758), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1756, 1758), False, 'from datetime import datetime, timedelta\n'), ((3204, 3219), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (3211, 3219), False, 'from sqlmodel import create_engine, Session, select\n'), ((3703, 3718), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (3710, 3718), False, 'from sqlmodel import create_engine, Session, select\n'), ((4450, 4465), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (4457, 4465), False, 'from sqlmodel import create_engine, Session, select\n'), ((4858, 4911), 'os.remove', 'os.remove', (['"""/home/cmts/.config/timerdo/timerdo_db.db"""'], {}), "('/home/cmts/.config/timerdo/timerdo_db.db')\n", (4867, 4911), False, 'import os\n'), ((4920, 5027), 'os.rename', 'os.rename', (['"""/home/cmts/.config/timerdo/timerdo_db_moved.db"""', '"""/home/cmts/.config/timerdo/timerdo_db.db"""'], {}), "('/home/cmts/.config/timerdo/timerdo_db_moved.db',\n '/home/cmts/.config/timerdo/timerdo_db.db')\n", (4929, 5027), False, 'import os\n'), ((2129, 2143), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2141, 2143), False, 'from datetime import datetime, timedelta\n'), ((2146, 2163), 'datetime.timedelta', 'timedelta', ([], {'days': '(2)'}), '(days=2)\n', (2155, 2163), False, 'from datetime import datetime, timedelta\n'), ((2219, 2233), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2231, 2233), False, 'from datetime import datetime, timedelta\n'), ((2236, 2253), 'datetime.timedelta', 'timedelta', ([], {'days': '(2)'}), '(days=2)\n', (2245, 2253), False, 'from datetime import datetime, timedelta\n'), ((2668, 2682), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2680, 2682), False, 'from datetime import datetime, timedelta\n'), ((2685, 2702), 'datetime.timedelta', 'timedelta', ([], {'days': '(2)'}), '(days=2)\n', (2694, 2702), False, 'from datetime import datetime, timedelta\n'), ((2758, 2772), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2770, 2772), False, 'from datetime import datetime, timedelta\n'), ((2775, 2792), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (2784, 2792), False, 'from datetime import datetime, timedelta\n'), ((1579, 1595), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (1593, 1595), False, 'from datetime import datetime, timedelta\n'), ((1916, 1932), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (1930, 1932), False, 'from datetime import datetime, timedelta\n'), ((854, 866), 'sqlmodel.select', 'select', (['ToDo'], {}), '(ToDo)\n', (860, 866), False, 'from sqlmodel import create_engine, Session, select\n'), ((3261, 3273), 'sqlmodel.select', 'select', (['ToDo'], {}), '(ToDo)\n', (3267, 3273), False, 'from sqlmodel import create_engine, Session, select\n'), ((3760, 3779), 'sqlmodel.select', 'select', (['ToDo.status'], {}), '(ToDo.status)\n', (3766, 3779), False, 'from sqlmodel import create_engine, Session, select\n'), ((4506, 4527), 'sqlmodel.select', 'select', (['ToDo.duration'], {}), '(ToDo.duration)\n', (4512, 4527), False, 'from sqlmodel import create_engine, Session, select\n'), ((4646, 4668), 'sqlmodel.select', 'select', (['Timer.duration'], {}), '(Timer.duration)\n', (4652, 4668), False, 'from sqlmodel import create_engine, Session, select\n')]
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
from megengine import tensor
from megengine.module import Conv1d, Conv2d, Conv3d, Linear
from megengine.module.init import calculate_fan_in_and_fan_out, fill_
def test_fill_():
x = tensor(np.zeros((2, 3, 4)), dtype=np.float32)
fill_(x, 5.0)
np.testing.assert_array_equal(
x.numpy(), np.full(shape=(2, 3, 4), fill_value=5.0, dtype=np.float32)
)
def test_calculate_fan_in_and_fan_out():
l = Linear(in_features=3, out_features=8)
fanin, fanout = calculate_fan_in_and_fan_out(l.weight)
assert fanin == 3
assert fanout == 8
with pytest.raises(ValueError):
calculate_fan_in_and_fan_out(l.bias)
l = Conv1d(in_channels=2, out_channels=3, kernel_size=5)
fanin, fanout = calculate_fan_in_and_fan_out(l.weight)
assert fanin == 2 * 5
assert fanout == 3 * 5
# FIXME: will be wrong for group conv1d
# l = Conv1d(in_channels=2, out_channels=4, kernel_size=5, groups=2)
# fanin, fanout = calculate_fan_in_and_fan_out(l.weight)
# assert fanin == 2 // 2 * 5
# assert fanout == 4 // 2 * 5
l = Conv2d(in_channels=2, out_channels=3, kernel_size=(5, 7))
fanin, fanout = calculate_fan_in_and_fan_out(l.weight)
assert fanin == 2 * 5 * 7
assert fanout == 3 * 5 * 7
l = Conv2d(in_channels=2, out_channels=4, kernel_size=(5, 7), groups=2)
fanin, fanout = calculate_fan_in_and_fan_out(l.weight)
assert fanin == 2 // 2 * 5 * 7
assert fanout == 4 // 2 * 5 * 7
# FIXME: will be wrong for conv3d
# l = Conv3d(in_channels=2, out_channels=3, kernel_size=(5, 7, 9))
# fanin, fanout = calculate_fan_in_and_fan_out(l.weight)
# assert fanin == 2 * 5 * 7 * 9
# assert fanout == 3 * 5 * 7 * 9
l = Conv3d(in_channels=2, out_channels=4, kernel_size=(5, 7, 9), groups=2)
fanin, fanout = calculate_fan_in_and_fan_out(l.weight)
assert fanin == 2 // 2 * 5 * 7 * 9
assert fanout == 4 // 2 * 5 * 7 * 9
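    # Editorial sketch of the convention the assertions above encode (not a
    # quote of megengine's implementation): for an N-d convolution,
    #   fan_in  = (in_channels  // groups) * prod(kernel_size)
    #   fan_out = (out_channels // groups) * prod(kernel_size)
    # which is exactly what the FIXME-d cases above do not compute correctly yet.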
|
[
"megengine.module.init.fill_",
"megengine.module.Conv3d",
"megengine.module.Conv1d",
"megengine.module.init.calculate_fan_in_and_fan_out",
"megengine.module.Conv2d",
"megengine.module.Linear"
] |
[((648, 661), 'megengine.module.init.fill_', 'fill_', (['x', '(5.0)'], {}), '(x, 5.0)\n', (653, 661), False, 'from megengine.module.init import calculate_fan_in_and_fan_out, fill_\n'), ((833, 870), 'megengine.module.Linear', 'Linear', ([], {'in_features': '(3)', 'out_features': '(8)'}), '(in_features=3, out_features=8)\n', (839, 870), False, 'from megengine.module import Conv1d, Conv2d, Conv3d, Linear\n'), ((891, 929), 'megengine.module.init.calculate_fan_in_and_fan_out', 'calculate_fan_in_and_fan_out', (['l.weight'], {}), '(l.weight)\n', (919, 929), False, 'from megengine.module.init import calculate_fan_in_and_fan_out, fill_\n'), ((1066, 1118), 'megengine.module.Conv1d', 'Conv1d', ([], {'in_channels': '(2)', 'out_channels': '(3)', 'kernel_size': '(5)'}), '(in_channels=2, out_channels=3, kernel_size=5)\n', (1072, 1118), False, 'from megengine.module import Conv1d, Conv2d, Conv3d, Linear\n'), ((1139, 1177), 'megengine.module.init.calculate_fan_in_and_fan_out', 'calculate_fan_in_and_fan_out', (['l.weight'], {}), '(l.weight)\n', (1167, 1177), False, 'from megengine.module.init import calculate_fan_in_and_fan_out, fill_\n'), ((1486, 1543), 'megengine.module.Conv2d', 'Conv2d', ([], {'in_channels': '(2)', 'out_channels': '(3)', 'kernel_size': '(5, 7)'}), '(in_channels=2, out_channels=3, kernel_size=(5, 7))\n', (1492, 1543), False, 'from megengine.module import Conv1d, Conv2d, Conv3d, Linear\n'), ((1564, 1602), 'megengine.module.init.calculate_fan_in_and_fan_out', 'calculate_fan_in_and_fan_out', (['l.weight'], {}), '(l.weight)\n', (1592, 1602), False, 'from megengine.module.init import calculate_fan_in_and_fan_out, fill_\n'), ((1673, 1740), 'megengine.module.Conv2d', 'Conv2d', ([], {'in_channels': '(2)', 'out_channels': '(4)', 'kernel_size': '(5, 7)', 'groups': '(2)'}), '(in_channels=2, out_channels=4, kernel_size=(5, 7), groups=2)\n', (1679, 1740), False, 'from megengine.module import Conv1d, Conv2d, Conv3d, Linear\n'), ((1761, 1799), 'megengine.module.init.calculate_fan_in_and_fan_out', 'calculate_fan_in_and_fan_out', (['l.weight'], {}), '(l.weight)\n', (1789, 1799), False, 'from megengine.module.init import calculate_fan_in_and_fan_out, fill_\n'), ((2124, 2194), 'megengine.module.Conv3d', 'Conv3d', ([], {'in_channels': '(2)', 'out_channels': '(4)', 'kernel_size': '(5, 7, 9)', 'groups': '(2)'}), '(in_channels=2, out_channels=4, kernel_size=(5, 7, 9), groups=2)\n', (2130, 2194), False, 'from megengine.module import Conv1d, Conv2d, Conv3d, Linear\n'), ((2215, 2253), 'megengine.module.init.calculate_fan_in_and_fan_out', 'calculate_fan_in_and_fan_out', (['l.weight'], {}), '(l.weight)\n', (2243, 2253), False, 'from megengine.module.init import calculate_fan_in_and_fan_out, fill_\n'), ((605, 624), 'numpy.zeros', 'np.zeros', (['(2, 3, 4)'], {}), '((2, 3, 4))\n', (613, 624), True, 'import numpy as np\n'), ((717, 775), 'numpy.full', 'np.full', ([], {'shape': '(2, 3, 4)', 'fill_value': '(5.0)', 'dtype': 'np.float32'}), '(shape=(2, 3, 4), fill_value=5.0, dtype=np.float32)\n', (724, 775), True, 'import numpy as np\n'), ((985, 1010), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (998, 1010), False, 'import pytest\n'), ((1020, 1056), 'megengine.module.init.calculate_fan_in_and_fan_out', 'calculate_fan_in_and_fan_out', (['l.bias'], {}), '(l.bias)\n', (1048, 1056), False, 'from megengine.module.init import calculate_fan_in_and_fan_out, fill_\n')]
|
from sfepy.base.testing import TestCommon, assert_, debug
class Test(TestCommon):
@staticmethod
def from_conf(conf, options):
return Test(conf=conf, options=options)
def test_tensors(self):
import numpy as nm
import sfepy.mechanics.tensors as tn
ok = True
a_full = 2.0 * nm.ones((5,3,3), dtype=nm.float64)
a_sym = 2.0 * nm.ones((5,6), dtype=nm.float64)
_tr = nm.array([6.0] * 5, dtype=nm.float64)
_vt_full = 2.0 * nm.tile(nm.eye(3, dtype=nm.float64), (5,1,1))
_vt_sym = nm.tile(nm.array([2, 2, 2, 0, 0, 0], dtype=nm.float64),
(5,1,1))
_dev_full = a_full - _vt_full
_dev_sym = a_sym - _vt_sym
_vms = 6.0 * nm.ones((5,1), dtype=nm.float64)
tr = tn.get_trace(a_full, sym_storage=False)
_ok = nm.allclose(tr, _tr, rtol=0.0, atol=1e-14)
self.report('trace full: %s' % _ok)
ok = ok and _ok
tr = tn.get_trace(a_sym, sym_storage=True)
        _ok = nm.allclose(tr, _tr, rtol=0.0, atol=1e-14)
self.report('trace sym: %s' % _ok)
ok = ok and _ok
vt = tn.get_volumetric_tensor(a_full, sym_storage=False)
_ok = nm.allclose(vt, _vt_full, rtol=0.0, atol=1e-14)
self.report('volumetric tensor full: %s' % _ok)
ok = ok and _ok
vt = tn.get_volumetric_tensor(a_sym, sym_storage=True)
_ok = nm.allclose(vt, _vt_sym, rtol=0.0, atol=1e-14)
self.report('volumetric tensor sym: %s' % _ok)
ok = ok and _ok
dev = tn.get_deviator(a_full, sym_storage=False)
_ok = nm.allclose(dev, _dev_full, rtol=0.0, atol=1e-14)
self.report('deviator full: %s' % _ok)
ok = ok and _ok
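        # Cross-check below: von Mises stress computed directly from the
        # deviator as vms2 = sqrt(3/2 * dev_ij dev_ij), compared at the end
        # with tn.get_von_mises_stress().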
aux = (dev * nm.transpose(dev, (0, 2, 1))).sum(axis=1).sum(axis=1)
vms2 = nm.sqrt((3.0/2.0) * aux)[:,None]
dev = tn.get_deviator(a_sym, sym_storage=True)
_ok = nm.allclose(dev, _dev_sym, rtol=0.0, atol=1e-14)
self.report('deviator sym: %s' % _ok)
ok = ok and _ok
vms = tn.get_von_mises_stress(a_full, sym_storage=False)
_ok = nm.allclose(vms, _vms, rtol=0.0, atol=1e-14)
self.report('von Mises stress full: %s' % _ok)
ok = ok and _ok
vms = tn.get_von_mises_stress(a_sym, sym_storage=True)
_ok = nm.allclose(vms, _vms, rtol=0.0, atol=1e-14)
self.report('von Mises stress sym: %s' % _ok)
ok = ok and _ok
_ok = nm.allclose(vms2, _vms, rtol=0.0, atol=1e-14)
self.report('von Mises stress via deviator: %s' % _ok)
ok = ok and _ok
return ok
|
[
"sfepy.mechanics.tensors.get_volumetric_tensor",
"sfepy.mechanics.tensors.get_deviator",
"sfepy.mechanics.tensors.get_trace",
"sfepy.mechanics.tensors.get_von_mises_stress"
] |
[((433, 470), 'numpy.array', 'nm.array', (['([6.0] * 5)'], {'dtype': 'nm.float64'}), '([6.0] * 5, dtype=nm.float64)\n', (441, 470), True, 'import numpy as nm\n'), ((792, 831), 'sfepy.mechanics.tensors.get_trace', 'tn.get_trace', (['a_full'], {'sym_storage': '(False)'}), '(a_full, sym_storage=False)\n', (804, 831), True, 'import sfepy.mechanics.tensors as tn\n'), ((846, 888), 'numpy.allclose', 'nm.allclose', (['tr', '_tr'], {'rtol': '(0.0)', 'atol': '(1e-14)'}), '(tr, _tr, rtol=0.0, atol=1e-14)\n', (857, 888), True, 'import numpy as nm\n'), ((979, 1016), 'sfepy.mechanics.tensors.get_trace', 'tn.get_trace', (['a_sym'], {'sym_storage': '(True)'}), '(a_sym, sym_storage=True)\n', (991, 1016), True, 'import sfepy.mechanics.tensors as tn\n'), ((1161, 1212), 'sfepy.mechanics.tensors.get_volumetric_tensor', 'tn.get_volumetric_tensor', (['a_full'], {'sym_storage': '(False)'}), '(a_full, sym_storage=False)\n', (1185, 1212), True, 'import sfepy.mechanics.tensors as tn\n'), ((1227, 1274), 'numpy.allclose', 'nm.allclose', (['vt', '_vt_full'], {'rtol': '(0.0)', 'atol': '(1e-14)'}), '(vt, _vt_full, rtol=0.0, atol=1e-14)\n', (1238, 1274), True, 'import numpy as nm\n'), ((1377, 1426), 'sfepy.mechanics.tensors.get_volumetric_tensor', 'tn.get_volumetric_tensor', (['a_sym'], {'sym_storage': '(True)'}), '(a_sym, sym_storage=True)\n', (1401, 1426), True, 'import sfepy.mechanics.tensors as tn\n'), ((1441, 1487), 'numpy.allclose', 'nm.allclose', (['vt', '_vt_sym'], {'rtol': '(0.0)', 'atol': '(1e-14)'}), '(vt, _vt_sym, rtol=0.0, atol=1e-14)\n', (1452, 1487), True, 'import numpy as nm\n'), ((1590, 1632), 'sfepy.mechanics.tensors.get_deviator', 'tn.get_deviator', (['a_full'], {'sym_storage': '(False)'}), '(a_full, sym_storage=False)\n', (1605, 1632), True, 'import sfepy.mechanics.tensors as tn\n'), ((1647, 1696), 'numpy.allclose', 'nm.allclose', (['dev', '_dev_full'], {'rtol': '(0.0)', 'atol': '(1e-14)'}), '(dev, _dev_full, rtol=0.0, atol=1e-14)\n', (1658, 1696), True, 'import numpy as nm\n'), ((1915, 1955), 'sfepy.mechanics.tensors.get_deviator', 'tn.get_deviator', (['a_sym'], {'sym_storage': '(True)'}), '(a_sym, sym_storage=True)\n', (1930, 1955), True, 'import sfepy.mechanics.tensors as tn\n'), ((1970, 2018), 'numpy.allclose', 'nm.allclose', (['dev', '_dev_sym'], {'rtol': '(0.0)', 'atol': '(1e-14)'}), '(dev, _dev_sym, rtol=0.0, atol=1e-14)\n', (1981, 2018), True, 'import numpy as nm\n'), ((2112, 2162), 'sfepy.mechanics.tensors.get_von_mises_stress', 'tn.get_von_mises_stress', (['a_full'], {'sym_storage': '(False)'}), '(a_full, sym_storage=False)\n', (2135, 2162), True, 'import sfepy.mechanics.tensors as tn\n'), ((2177, 2221), 'numpy.allclose', 'nm.allclose', (['vms', '_vms'], {'rtol': '(0.0)', 'atol': '(1e-14)'}), '(vms, _vms, rtol=0.0, atol=1e-14)\n', (2188, 2221), True, 'import numpy as nm\n'), ((2316, 2364), 'sfepy.mechanics.tensors.get_von_mises_stress', 'tn.get_von_mises_stress', (['a_sym'], {'sym_storage': '(True)'}), '(a_sym, sym_storage=True)\n', (2339, 2364), True, 'import sfepy.mechanics.tensors as tn\n'), ((2379, 2423), 'numpy.allclose', 'nm.allclose', (['vms', '_vms'], {'rtol': '(0.0)', 'atol': '(1e-14)'}), '(vms, _vms, rtol=0.0, atol=1e-14)\n', (2390, 2423), True, 'import numpy as nm\n'), ((2517, 2562), 'numpy.allclose', 'nm.allclose', (['vms2', '_vms'], {'rtol': '(0.0)', 'atol': '(1e-14)'}), '(vms2, _vms, rtol=0.0, atol=1e-14)\n', (2528, 2562), True, 'import numpy as nm\n'), ((328, 364), 'numpy.ones', 'nm.ones', (['(5, 3, 3)'], {'dtype': 'nm.float64'}), '((5, 3, 3), dtype=nm.float64)\n', (335, 364), True, 'import numpy as nm\n'), ((385, 418), 'numpy.ones', 'nm.ones', (['(5, 6)'], {'dtype': 'nm.float64'}), '((5, 6), dtype=nm.float64)\n', (392, 418), True, 'import numpy as nm\n'), ((568, 614), 'numpy.array', 'nm.array', (['[2, 2, 2, 0, 0, 0]'], {'dtype': 'nm.float64'}), '([2, 2, 2, 0, 0, 0], dtype=nm.float64)\n', (576, 614), True, 'import numpy as nm\n'), ((745, 778), 'numpy.ones', 'nm.ones', (['(5, 1)'], {'dtype': 'nm.float64'}), '((5, 1), dtype=nm.float64)\n', (752, 778), True, 'import numpy as nm\n'), ((1037, 1079), 'numpy.allclose', 'nm.allclose', (['tr', '_tr'], {'rtol': '(0.0)', 'atol': '(1e-14)'}), '(tr, _tr, rtol=0.0, atol=1e-14)\n', (1048, 1079), True, 'import numpy as nm\n'), ((1859, 1883), 'numpy.sqrt', 'nm.sqrt', (['(3.0 / 2.0 * aux)'], {}), '(3.0 / 2.0 * aux)\n', (1866, 1883), True, 'import numpy as nm\n'), ((504, 531), 'numpy.eye', 'nm.eye', (['(3)'], {'dtype': 'nm.float64'}), '(3, dtype=nm.float64)\n', (510, 531), True, 'import numpy as nm\n'), ((1790, 1818), 'numpy.transpose', 'nm.transpose', (['dev', '(0, 2, 1)'], {}), '(dev, (0, 2, 1))\n', (1802, 1818), True, 'import numpy as nm\n')]
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import itertools
import numpy as np
from megengine import Parameter, tensor
from megengine.module import AvgPool2d, MaxPool2d
def test_avg_pool2d():
def test_func(
batch_size,
in_channels,
out_channels,
in_height,
in_width,
kernel_size,
stride,
padding,
):
pool = AvgPool2d(kernel_size, stride=stride, padding=padding, mode="average")
inp = np.random.normal(
size=(batch_size, in_channels, in_height, in_width)
).astype(np.float32)
out_height = (in_height + padding * 2 - kernel_size) // stride + 1
out_width = (in_width + padding * 2 - kernel_size) // stride + 1
out = pool(tensor(inp))
inp = np.pad(inp, ((0, 0), (0, 0), (padding, padding), (padding, padding)))
expected = np.zeros(
(batch_size, out_channels, out_height, out_width), dtype=np.float32,
)
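        # Naive reference implementation: average each kernel-sized window of
        # the zero-padded input, then compare with the AvgPool2d output.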
for n, c, oh, ow in itertools.product(
*map(range, [batch_size, out_channels, out_height, out_width])
):
ih, iw = oh * stride, ow * stride
expected[n, c, oh, ow] = np.sum(
inp[n, c, ih : ih + kernel_size, iw : iw + kernel_size,]
) / (kernel_size * kernel_size)
np.testing.assert_almost_equal(out.numpy(), expected, 1e-5)
test_func(10, 4, 4, 5, 5, 2, 2, 1)
test_func(10, 4, 4, 6, 6, 2, 2, 0)
test_func(10, 16, 16, 14, 14, 2, 2, 0)
|
[
"megengine.module.AvgPool2d",
"megengine.tensor"
] |
[((725, 795), 'megengine.module.AvgPool2d', 'AvgPool2d', (['kernel_size'], {'stride': 'stride', 'padding': 'padding', 'mode': '"""average"""'}), "(kernel_size, stride=stride, padding=padding, mode='average')\n", (734, 795), False, 'from megengine.module import AvgPool2d, MaxPool2d\n'), ((1115, 1184), 'numpy.pad', 'np.pad', (['inp', '((0, 0), (0, 0), (padding, padding), (padding, padding))'], {}), '(inp, ((0, 0), (0, 0), (padding, padding), (padding, padding)))\n', (1121, 1184), True, 'import numpy as np\n'), ((1204, 1281), 'numpy.zeros', 'np.zeros', (['(batch_size, out_channels, out_height, out_width)'], {'dtype': 'np.float32'}), '((batch_size, out_channels, out_height, out_width), dtype=np.float32)\n', (1212, 1281), True, 'import numpy as np\n'), ((1088, 1099), 'megengine.tensor', 'tensor', (['inp'], {}), '(inp)\n', (1094, 1099), False, 'from megengine import Parameter, tensor\n'), ((810, 879), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(batch_size, in_channels, in_height, in_width)'}), '(size=(batch_size, in_channels, in_height, in_width))\n', (826, 879), True, 'import numpy as np\n'), ((1521, 1580), 'numpy.sum', 'np.sum', (['inp[n, c, ih:ih + kernel_size, iw:iw + kernel_size]'], {}), '(inp[n, c, ih:ih + kernel_size, iw:iw + kernel_size])\n', (1527, 1580), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import multiprocessing as mp
import os
import megengine as mge
import megengine.data as data
import megengine.data.dataset as dataset
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.jit as jit
import megengine.optimizer as optim
import numpy as np
from official.vision.segmentation.deeplabv3plus import (
DeepLabV3Plus,
softmax_cross_entropy,
)
from official.vision.segmentation.utils import import_config_from_file
logger = mge.get_logger(__name__)
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"-c", "--config", type=str, required=True, help="configuration file"
)
parser.add_argument(
"-d", "--dataset_dir", type=str, default="/data/datasets/VOC2012",
)
parser.add_argument(
"-w", "--weight_file", type=str, default=None, help="pre-train weights file",
)
parser.add_argument(
"-n", "--ngpus", type=int, default=8, help="batchsize for training"
)
parser.add_argument(
"-r", "--resume", type=str, default=None, help="resume model file"
)
args = parser.parse_args()
world_size = args.ngpus
logger.info("Device Count = %d", world_size)
if world_size > 1:
mp.set_start_method("spawn")
processes = []
for rank in range(world_size):
p = mp.Process(target=worker, args=(rank, world_size, args))
p.start()
processes.append(p)
for p in processes:
p.join()
else:
worker(0, 1, args)
def worker(rank, world_size, args):
cfg = import_config_from_file(args.config)
if world_size > 1:
dist.init_process_group(
master_ip="localhost",
master_port=23456,
world_size=world_size,
rank=rank,
dev=rank,
)
logger.info("Init process group done")
logger.info("Prepare dataset")
train_loader, epoch_size = build_dataloader(cfg.BATCH_SIZE, args.dataset_dir, cfg)
batch_iter = epoch_size // (cfg.BATCH_SIZE * world_size)
net = DeepLabV3Plus(class_num=cfg.NUM_CLASSES, pretrained=args.weight_file)
base_lr = cfg.LEARNING_RATE * world_size
optimizer = optim.SGD(
net.parameters(requires_grad=True),
lr=base_lr,
momentum=0.9,
weight_decay=0.00004,
)
@jit.trace(symbolic=True, opt_level=2)
def train_func(data, label, net=None, optimizer=None):
net.train()
pred = net(data)
loss = softmax_cross_entropy(pred, label, ignore_index=cfg.IGNORE_INDEX)
optimizer.backward(loss)
return pred, loss
begin_epoch = 0
end_epoch = cfg.EPOCHS
if args.resume is not None:
pretrained = mge.load(args.resume)
begin_epoch = pretrained["epoch"] + 1
net.load_state_dict(pretrained["state_dict"])
logger.info("load success: epoch %d", begin_epoch)
itr = begin_epoch * batch_iter
max_itr = end_epoch * batch_iter
image = mge.tensor(
np.zeros([cfg.BATCH_SIZE, 3, cfg.IMG_HEIGHT, cfg.IMG_WIDTH]).astype(np.float32),
dtype="float32",
)
label = mge.tensor(
np.zeros([cfg.BATCH_SIZE, cfg.IMG_HEIGHT, cfg.IMG_WIDTH]).astype(np.int32),
dtype="int32",
)
exp_name = os.path.abspath(os.path.dirname(__file__)).split("/")[-1]
for epoch in range(begin_epoch, end_epoch):
for i_batch, sample_batched in enumerate(train_loader):
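            # "poly" learning-rate schedule:
            # lr = base_lr * (1 - itr / (max_itr + 1)) ** 0.9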
def adjust_lr(optimizer, itr, max_itr):
now_lr = base_lr * (1 - itr / (max_itr + 1)) ** 0.9
for param_group in optimizer.param_groups:
param_group["lr"] = now_lr
return now_lr
now_lr = adjust_lr(optimizer, itr, max_itr)
inputs_batched, labels_batched = sample_batched
labels_batched = np.squeeze(labels_batched, axis=1).astype(np.int32)
image.set_value(inputs_batched)
label.set_value(labels_batched)
optimizer.zero_grad()
_, loss = train_func(image, label, net=net, optimizer=optimizer)
optimizer.step()
running_loss = loss.numpy()[0]
if rank == 0:
logger.info(
"%s epoch:%d/%d\tbatch:%d/%d\titr:%d\tlr:%g\tloss:%g",
exp_name,
epoch,
end_epoch,
i_batch,
batch_iter,
itr + 1,
now_lr,
running_loss,
)
itr += 1
if rank == 0:
save_path = os.path.join(cfg.MODEL_SAVE_DIR, "epoch%d.pkl" % (epoch))
mge.save({"epoch": epoch, "state_dict": net.state_dict()}, save_path)
logger.info("save epoch%d", epoch)
def build_dataloader(batch_size, dataset_dir, cfg):
if cfg.DATASET == "VOC2012":
train_dataset = dataset.PascalVOC(
dataset_dir,
cfg.DATA_TYPE,
order=["image", "mask"]
)
elif cfg.DATASET == "Cityscapes":
train_dataset = dataset.Cityscapes(
dataset_dir,
"train",
mode='gtFine',
order=["image", "mask"]
)
else:
raise ValueError("Unsupported dataset {}".format(cfg.DATASET))
train_sampler = data.RandomSampler(train_dataset, batch_size, drop_last=True)
train_dataloader = data.DataLoader(
train_dataset,
sampler=train_sampler,
transform=T.Compose(
transforms=[
T.RandomHorizontalFlip(0.5),
T.RandomResize(scale_range=(0.5, 2)),
T.RandomCrop(
output_size=(cfg.IMG_HEIGHT, cfg.IMG_WIDTH),
padding_value=[0, 0, 0],
padding_maskvalue=255,
),
T.Normalize(mean=cfg.IMG_MEAN, std=cfg.IMG_STD),
T.ToMode(),
],
order=["image", "mask"],
),
num_workers=0,
)
return train_dataloader, train_dataset.__len__()
if __name__ == "__main__":
main()
|
[
"megengine.data.transform.RandomResize",
"megengine.jit.trace",
"megengine.data.transform.RandomHorizontalFlip",
"megengine.distributed.init_process_group",
"megengine.load",
"megengine.data.dataset.Cityscapes",
"megengine.get_logger",
"megengine.data.transform.Normalize",
"megengine.data.transform.ToMode",
"megengine.data.dataset.PascalVOC",
"megengine.data.transform.RandomCrop",
"megengine.data.RandomSampler"
] |
[((872, 896), 'megengine.get_logger', 'mge.get_logger', (['__name__'], {}), '(__name__)\n', (886, 896), True, 'import megengine as mge\n'), ((924, 949), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (947, 949), False, 'import argparse\n'), ((1986, 2022), 'official.vision.segmentation.utils.import_config_from_file', 'import_config_from_file', (['args.config'], {}), '(args.config)\n', (2009, 2022), False, 'from official.vision.segmentation.utils import import_config_from_file\n'), ((2478, 2547), 'official.vision.segmentation.deeplabv3plus.DeepLabV3Plus', 'DeepLabV3Plus', ([], {'class_num': 'cfg.NUM_CLASSES', 'pretrained': 'args.weight_file'}), '(class_num=cfg.NUM_CLASSES, pretrained=args.weight_file)\n', (2491, 2547), False, 'from official.vision.segmentation.deeplabv3plus import DeepLabV3Plus, softmax_cross_entropy\n'), ((2748, 2785), 'megengine.jit.trace', 'jit.trace', ([], {'symbolic': '(True)', 'opt_level': '(2)'}), '(symbolic=True, opt_level=2)\n', (2757, 2785), True, 'import megengine.jit as jit\n'), ((5754, 5815), 'megengine.data.RandomSampler', 'data.RandomSampler', (['train_dataset', 'batch_size'], {'drop_last': '(True)'}), '(train_dataset, batch_size, drop_last=True)\n', (5772, 5815), True, 'import megengine.data as data\n'), ((1634, 1662), 'multiprocessing.set_start_method', 'mp.set_start_method', (['"""spawn"""'], {}), "('spawn')\n", (1653, 1662), True, 'import multiprocessing as mp\n'), ((2055, 2168), 'megengine.distributed.init_process_group', 'dist.init_process_group', ([], {'master_ip': '"""localhost"""', 'master_port': '(23456)', 'world_size': 'world_size', 'rank': 'rank', 'dev': 'rank'}), "(master_ip='localhost', master_port=23456,\n world_size=world_size, rank=rank, dev=rank)\n", (2078, 2168), True, 'import megengine.distributed as dist\n'), ((2905, 2970), 'official.vision.segmentation.deeplabv3plus.softmax_cross_entropy', 'softmax_cross_entropy', (['pred', 'label'], {'ignore_index': 'cfg.IGNORE_INDEX'}), '(pred, label, ignore_index=cfg.IGNORE_INDEX)\n', (2926, 2970), False, 'from official.vision.segmentation.deeplabv3plus import DeepLabV3Plus, softmax_cross_entropy\n'), ((3131, 3152), 'megengine.load', 'mge.load', (['args.resume'], {}), '(args.resume)\n', (3139, 3152), True, 'import megengine as mge\n'), ((5335, 5405), 'megengine.data.dataset.PascalVOC', 'dataset.PascalVOC', (['dataset_dir', 'cfg.DATA_TYPE'], {'order': "['image', 'mask']"}), "(dataset_dir, cfg.DATA_TYPE, order=['image', 'mask'])\n", (5352, 5405), True, 'import megengine.data.dataset as dataset\n'), ((1741, 1797), 'multiprocessing.Process', 'mp.Process', ([], {'target': 'worker', 'args': '(rank, world_size, args)'}), '(target=worker, args=(rank, world_size, args))\n', (1751, 1797), True, 'import multiprocessing as mp\n'), ((5037, 5092), 'os.path.join', 'os.path.join', (['cfg.MODEL_SAVE_DIR', "('epoch%d.pkl' % epoch)"], {}), "(cfg.MODEL_SAVE_DIR, 'epoch%d.pkl' % epoch)\n", (5049, 5092), False, 'import os\n'), ((5514, 5599), 'megengine.data.dataset.Cityscapes', 'dataset.Cityscapes', (['dataset_dir', '"""train"""'], {'mode': '"""gtFine"""', 'order': "['image', 'mask']"}), "(dataset_dir, 'train', mode='gtFine', order=['image', 'mask']\n )\n", (5532, 5599), True, 'import megengine.data.dataset as dataset\n'), ((3418, 3478), 'numpy.zeros', 'np.zeros', (['[cfg.BATCH_SIZE, 3, cfg.IMG_HEIGHT, cfg.IMG_WIDTH]'], {}), '([cfg.BATCH_SIZE, 3, cfg.IMG_HEIGHT, cfg.IMG_WIDTH])\n', (3426, 3478), True, 'import numpy as np\n'), ((3562, 3619), 'numpy.zeros', 'np.zeros', (['[cfg.BATCH_SIZE, cfg.IMG_HEIGHT, cfg.IMG_WIDTH]'], {}), '([cfg.BATCH_SIZE, cfg.IMG_HEIGHT, cfg.IMG_WIDTH])\n', (3570, 3619), True, 'import numpy as np\n'), ((3698, 3723), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (3713, 3723), False, 'import os\n'), ((4256, 4290), 'numpy.squeeze', 'np.squeeze', (['labels_batched'], {'axis': '(1)'}), '(labels_batched, axis=1)\n', (4266, 4290), True, 'import numpy as np\n'), ((5980, 6007), 'megengine.data.transform.RandomHorizontalFlip', 'T.RandomHorizontalFlip', (['(0.5)'], {}), '(0.5)\n', (6002, 6007), True, 'import megengine.data.transform as T\n'), ((6025, 6061), 'megengine.data.transform.RandomResize', 'T.RandomResize', ([], {'scale_range': '(0.5, 2)'}), '(scale_range=(0.5, 2))\n', (6039, 6061), True, 'import megengine.data.transform as T\n'), ((6079, 6188), 'megengine.data.transform.RandomCrop', 'T.RandomCrop', ([], {'output_size': '(cfg.IMG_HEIGHT, cfg.IMG_WIDTH)', 'padding_value': '[0, 0, 0]', 'padding_maskvalue': '(255)'}), '(output_size=(cfg.IMG_HEIGHT, cfg.IMG_WIDTH), padding_value=[0,\n 0, 0], padding_maskvalue=255)\n', (6091, 6188), True, 'import megengine.data.transform as T\n'), ((6281, 6328), 'megengine.data.transform.Normalize', 'T.Normalize', ([], {'mean': 'cfg.IMG_MEAN', 'std': 'cfg.IMG_STD'}), '(mean=cfg.IMG_MEAN, std=cfg.IMG_STD)\n', (6292, 6328), True, 'import megengine.data.transform as T\n'), ((6346, 6356), 'megengine.data.transform.ToMode', 'T.ToMode', ([], {}), '()\n', (6354, 6356), True, 'import megengine.data.transform as T\n')]
|
import os
from typing import List
from sqlmodel.sql.expression import select
from utilities.filepath import get_home_dir
from sqlmodel import create_engine, SQLModel, Session
# these are imported so that the initialization of the database can be done
from schemas.common.event import Event
from schemas.common.extension import Extension
class Singleton(type):
_instances = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
return cls._instances[cls]
class DB(metaclass=Singleton):
def __init__(self) -> None:
self.engine = create_engine("sqlite:///{}".format(os.path.join(get_home_dir(), "extensions.db")))
def initialize(self) -> None:
SQLModel.metadata.create_all(self.engine)
def save(self, obj: SQLModel) -> None:
session = Session(self.engine)
session.add(obj)
session.commit()
session.refresh(obj)
return obj
    def fetch_extensions(self) -> List[Extension]:
with Session(self.engine) as session:
results = session.exec(select(Extension)).all()
return results
if __name__=="__main__":
DB().initialize()
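    # Illustrative check (editorial addition, not part of the original
    # module): the Singleton metaclass makes every DB() call return the
    # same instance.
    assert DB() is DB()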
|
[
"sqlmodel.SQLModel.metadata.create_all",
"sqlmodel.Session",
"sqlmodel.sql.expression.select"
] |
[((793, 834), 'sqlmodel.SQLModel.metadata.create_all', 'SQLModel.metadata.create_all', (['self.engine'], {}), '(self.engine)\n', (821, 834), False, 'from sqlmodel import create_engine, SQLModel, Session\n'), ((897, 917), 'sqlmodel.Session', 'Session', (['self.engine'], {}), '(self.engine)\n', (904, 917), False, 'from sqlmodel import create_engine, SQLModel, Session\n'), ((1080, 1100), 'sqlmodel.Session', 'Session', (['self.engine'], {}), '(self.engine)\n', (1087, 1100), False, 'from sqlmodel import create_engine, SQLModel, Session\n'), ((715, 729), 'utilities.filepath.get_home_dir', 'get_home_dir', ([], {}), '()\n', (727, 729), False, 'from utilities.filepath import get_home_dir\n'), ((1148, 1165), 'sqlmodel.sql.expression.select', 'select', (['Extension'], {}), '(Extension)\n', (1154, 1165), False, 'from sqlmodel.sql.expression import select\n')]
|
"""v1-tenant_token
Revision ID: <KEY>
Revises: <KEY>
Create Date: 2022-05-18 14:55:32.794587
"""
from alembic import op
import sqlalchemy as sa
import sqlmodel
# revision identifiers, used by Alembic.
revision = "<KEY>"
down_revision = "<KEY>"
branch_labels = None
depends_on = None
def upgrade():
op.add_column(
"tenant",
sa.Column("wallet_token", sqlmodel.sql.sqltypes.AutoString(), nullable=True),
)
op.create_index(op.f("ix_tenant_name"), "tenant", ["name"], unique=False)
def downgrade():
op.drop_index(op.f("ix_tenant_name"), table_name="tenant")
op.drop_column("tenant", "wallet_token")
|
[
"sqlmodel.sql.sqltypes.AutoString"
] |
[((596, 636), 'alembic.op.drop_column', 'op.drop_column', (['"""tenant"""', '"""wallet_token"""'], {}), "('tenant', 'wallet_token')\n", (610, 636), False, 'from alembic import op\n'), ((452, 474), 'alembic.op.f', 'op.f', (['"""ix_tenant_name"""'], {}), "('ix_tenant_name')\n", (456, 474), False, 'from alembic import op\n'), ((547, 569), 'alembic.op.f', 'op.f', (['"""ix_tenant_name"""'], {}), "('ix_tenant_name')\n", (551, 569), False, 'from alembic import op\n'), ((374, 408), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (406, 408), False, 'import sqlmodel\n')]
|
from sqlmodel import create_engine
from config import settings
engine = create_engine(settings.database_url)
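# Typical downstream usage (an editorial sketch; Session and the models are
# assumed to be defined elsewhere in the project):
#     from sqlmodel import Session
#     with Session(engine) as session:
#         ...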
|
[
"sqlmodel.create_engine"
] |
[((73, 109), 'sqlmodel.create_engine', 'create_engine', (['settings.database_url'], {}), '(settings.database_url)\n', (86, 109), False, 'from sqlmodel import create_engine\n')]
|
"""
Basic uniform mesh refinement functions.
"""
import numpy as nm
from sfepy.discrete.fem import Mesh
def refine_2_3(mesh_in):
"""
    Refines mesh out of triangles by cutting each edge in half
and making 4 new finer triangles out of one coarser one.
"""
cmesh = mesh_in.cmesh
# Unique edge centres.
e_centres = cmesh.get_centroids(cmesh.dim - 1)
# New coordinates after the original ones.
coors = nm.r_[mesh_in.coors, e_centres]
o1 = mesh_in.n_nod
cc = cmesh.get_conn(cmesh.dim, cmesh.dim - 1)
conn = mesh_in.get_conn('2_3')
n_el = conn.shape[0]
e_nodes = cc.indices.reshape((n_el, 3)) + o1
c = nm.c_[conn, e_nodes].T
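    # In c, rows 0-2 are the coarse triangle vertices and rows 3-5 the edge
    # midpoints, so each coarse triangle is split into the four triangles
    # listed below.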
new_conn = nm.vstack([c[0], c[3], c[5],
c[3], c[4], c[5],
c[1], c[4], c[3],
c[2], c[5], c[4]]).T
new_conn = new_conn.reshape((4 * n_el, 3))
new_mat_id = cmesh.cell_groups.repeat(4)
mesh = Mesh.from_data(mesh_in.name + '_r', coors, None, [new_conn],
[new_mat_id], mesh_in.descs )
return mesh
def refine_2_4(mesh_in):
"""
    Refines mesh out of quadrilaterals by cutting each edge in
half and making 4 new finer quadrilaterals out of one coarser one.
"""
cmesh = mesh_in.cmesh
# Unique edge centres.
e_centres = cmesh.get_centroids(cmesh.dim - 1)
# Unique element centres.
centres = cmesh.get_centroids(cmesh.dim)
# New coordinates after the original ones.
coors = nm.r_[mesh_in.coors, e_centres, centres]
o1 = mesh_in.n_nod
o2 = o1 + e_centres.shape[0]
cc = cmesh.get_conn(cmesh.dim, cmesh.dim - 1)
conn = mesh_in.get_conn('2_4')
n_el = conn.shape[0]
e_nodes = cc.indices.reshape((n_el, 4)) + o1
nodes = nm.arange(n_el) + o2
c = nm.c_[conn, e_nodes, nodes].T
new_conn = nm.vstack([c[0], c[4], c[8], c[7],
c[1], c[5], c[8], c[4],
c[2], c[6], c[8], c[5],
c[3], c[7], c[8], c[6]]).T
new_conn = new_conn.reshape((4 * n_el, 4))
new_mat_id = cmesh.cell_groups.repeat(4)
mesh = Mesh.from_data(mesh_in.name + '_r', coors, None, [new_conn],
[new_mat_id], mesh_in.descs )
return mesh
def refine_3_4(mesh_in):
"""
Refines tetrahedra by cutting each edge in half and making 8 new
finer tetrahedra out of one coarser one. Old nodal coordinates come
first in `coors`, then the new ones. The new tetrahedra are similar
to the old one, no degeneration is supposed to occur as at most 3
congruence classes of tetrahedra appear, even when re-applied
iteratively (provided that `conns` are not modified between two
applications - ordering of vertices in tetrahedra matters not only
for positivity of volumes).
References:
    - <NAME>: Simplicial grid refinement: on Freudenthal's algorithm and
the optimal number of congruence classes, Numer.Math. 85 (2000),
no. 1, 1--29, or
- <NAME>: Tetrahedral grid refinement, Computing 55 (1995),
no. 4, 355--378, or
http://citeseer.ist.psu.edu/bey95tetrahedral.html
"""
cmesh = mesh_in.cmesh
# Unique edge centres.
e_centres = cmesh.get_centroids(cmesh.dim - 2)
# New coordinates after the original ones.
coors = nm.r_[mesh_in.coors, e_centres]
o1 = mesh_in.n_nod
cc = cmesh.get_conn(cmesh.dim, cmesh.dim - 2)
conn = mesh_in.get_conn('3_4')
n_el = conn.shape[0]
e_nodes = cc.indices.reshape((n_el, 6)) + o1
c = nm.c_[conn, e_nodes].T
new_conn = nm.vstack([c[0], c[4], c[6], c[7],
c[4], c[1], c[5], c[8],
c[6], c[5], c[2], c[9],
c[7], c[8], c[9], c[3],
c[4], c[6], c[7], c[8],
c[4], c[6], c[8], c[5],
c[6], c[7], c[8], c[9],
c[6], c[5], c[9], c[8]]).T
new_conn = new_conn.reshape((8 * n_el, 4))
new_mat_id = cmesh.cell_groups.repeat(8)
mesh = Mesh.from_data(mesh_in.name + '_r', coors, None, [new_conn],
[new_mat_id], mesh_in.descs )
return mesh
def refine_3_8(mesh_in):
"""
    Refines hexahedral mesh by cutting each edge in half and
making 8 new finer hexahedrons out of one coarser one.
"""
cmesh = mesh_in.cmesh
# Unique edge centres.
e_centres = cmesh.get_centroids(cmesh.dim - 2)
# Unique face centres.
f_centres = cmesh.get_centroids(cmesh.dim - 1)
# Unique element centres.
centres = cmesh.get_centroids(cmesh.dim)
# New coordinates after the original ones.
coors = nm.r_[mesh_in.coors, e_centres, f_centres, centres]
o1 = mesh_in.n_nod
o2 = o1 + e_centres.shape[0]
o3 = o2 + f_centres.shape[0]
ecc = cmesh.get_conn(cmesh.dim, cmesh.dim - 2)
fcc = cmesh.get_conn(cmesh.dim, cmesh.dim - 1)
conn = mesh_in.get_conn('3_8')
n_el = conn.shape[0]
st = nm.vstack
e_nodes = ecc.indices.reshape((n_el, 12)) + o1
f_nodes = fcc.indices.reshape((n_el, 6)) + o2
nodes = nm.arange(n_el) + o3
c = nm.c_[conn, e_nodes, f_nodes, nodes].T
new_conn = st([c[0], c[8], c[20], c[11], c[16], c[22], c[26], c[21],
c[1], c[9], c[20], c[8], c[17], c[24], c[26], c[22],
c[2], c[10], c[20], c[9], c[18], c[25], c[26], c[24],
c[3], c[11], c[20], c[10], c[19], c[21], c[26], c[25],
c[4], c[15], c[23], c[12], c[16], c[21], c[26], c[22],
c[5], c[12], c[23], c[13], c[17], c[22], c[26], c[24],
c[6], c[13], c[23], c[14], c[18], c[24], c[26], c[25],
c[7], c[14], c[23], c[15], c[19], c[25], c[26], c[21]]).T
new_conn = new_conn.reshape((8 * n_el, 8))
new_mat_id = cmesh.cell_groups.repeat(8)
mesh = Mesh.from_data(mesh_in.name + '_r', coors, None, [new_conn],
[new_mat_id], mesh_in.descs )
return mesh
def refine_reference(geometry, level):
"""
Refine reference element given by `geometry`.
Notes
-----
The error edges must be generated in the order of the connectivity
of the previous (lower) level.
"""
from sfepy.discrete.fem import FEDomain
from sfepy.discrete.fem.geometry_element import geometry_data
gcoors, gconn = geometry.coors, geometry.conn
if level == 0:
return gcoors, gconn, None
gd = geometry_data[geometry.name]
conn = nm.array([gd.conn], dtype=nm.int32)
mat_id = conn[:, 0].copy()
mat_id[:] = 0
mesh = Mesh.from_data('aux', gd.coors, None, [conn],
[mat_id], [geometry.name])
domain = FEDomain('aux', mesh)
for ii in range(level):
domain = domain.refine()
coors = domain.mesh.coors
conn = domain.get_conn()
n_el = conn.shape[0]
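    # ir/ic below are (sub-element, vertex) index pairs into aux_conn that
    # pick the nodes forming the refinement "error edges" returned to the
    # caller.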
if geometry.name == '2_3':
        aux_conn = conn.reshape((n_el // 4, 4, 3))
ir = [[0, 1, 2], [2, 2, 3], [3, 3, 0]]
ic = [[0, 0, 0], [0, 1, 0], [0, 1, 0]]
elif geometry.name == '2_4':
        aux_conn = conn.reshape((n_el // 4, 4, 4))
ir = [[0, 0, 1], [1, 1, 2], [2, 2, 3], [3, 3, 0], [0, 0, 2], [3, 3, 1]]
ic = [[0, 1, 0], [0, 1, 0], [0, 1, 0], [0, 1, 0], [1, 2, 1], [1, 2, 1]]
elif geometry.name == '3_4':
        aux_conn = conn.reshape((n_el // 8, 8, 4))
ir = [[0, 0, 1], [1, 1, 2], [2, 0, 0], [3, 1, 1], [3, 2, 2], [3, 0, 0]]
ic = [[0, 1, 1], [1, 2, 2], [2, 2, 0], [3, 3, 1], [3, 3, 2], [3, 3, 0]]
elif geometry.name == '3_8':
        aux_conn = conn.reshape((n_el // 8, 8, 8))
ir = [[0, 0, 1], [1, 1, 2], [2, 2, 3], [3, 0, 0], [0, 0, 2], [0, 0, 1],
[0, 0, 1], [1, 1, 2], [2, 2, 3], [3, 0, 0], [0, 0, 2], [0, 0, 1],
[4, 4, 5], [5, 5, 6], [6, 6, 7], [7, 4, 4], [4, 4, 6], [4, 4, 5],
[0, 0, 4], [1, 1, 5], [2, 2, 6], [3, 3, 7],
[0, 0, 4], [1, 1, 5], [2, 2, 6], [0, 0, 4],
[0, 0, 4]]
ic = [[0, 1, 0], [0, 1, 0], [0, 1, 0], [0, 3, 0], [1, 2, 1], [3, 2, 1],
[4, 5, 4], [4, 5, 4], [4, 5, 4], [4, 7, 4], [5, 6, 5], [7, 6, 5],
[0, 3, 0], [0, 3, 0], [0, 3, 0], [0, 1, 0], [3, 2, 3], [1, 2, 3],
[0, 4, 0], [0, 4, 0], [0, 4, 0], [0, 4, 0],
[1, 5, 3], [1, 5, 3], [1, 5, 3], [3, 7, 1],
[2, 6, 2]]
else:
raise ValueError('unsupported geometry! (%s)' % geometry.name)
conn = nm.array(conn, dtype=nm.int32)
error_edges = aux_conn[:, ir, ic]
return coors, conn, error_edges
|
[
"sfepy.discrete.fem.Mesh.from_data",
"sfepy.discrete.fem.FEDomain"
] |
[((979, 1072), 'sfepy.discrete.fem.Mesh.from_data', 'Mesh.from_data', (["(mesh_in.name + '_r')", 'coors', 'None', '[new_conn]', '[new_mat_id]', 'mesh_in.descs'], {}), "(mesh_in.name + '_r', coors, None, [new_conn], [new_mat_id],\n mesh_in.descs)\n", (993, 1072), False, 'from sfepy.discrete.fem import Mesh\n'), ((2179, 2272), 'sfepy.discrete.fem.Mesh.from_data', 'Mesh.from_data', (["(mesh_in.name + '_r')", 'coors', 'None', '[new_conn]', '[new_mat_id]', 'mesh_in.descs'], {}), "(mesh_in.name + '_r', coors, None, [new_conn], [new_mat_id],\n mesh_in.descs)\n", (2193, 2272), False, 'from sfepy.discrete.fem import Mesh\n'), ((4133, 4226), 'sfepy.discrete.fem.Mesh.from_data', 'Mesh.from_data', (["(mesh_in.name + '_r')", 'coors', 'None', '[new_conn]', '[new_mat_id]', 'mesh_in.descs'], {}), "(mesh_in.name + '_r', coors, None, [new_conn], [new_mat_id],\n mesh_in.descs)\n", (4147, 4226), False, 'from sfepy.discrete.fem import Mesh\n'), ((5963, 6056), 'sfepy.discrete.fem.Mesh.from_data', 'Mesh.from_data', (["(mesh_in.name + '_r')", 'coors', 'None', '[new_conn]', '[new_mat_id]', 'mesh_in.descs'], {}), "(mesh_in.name + '_r', coors, None, [new_conn], [new_mat_id],\n mesh_in.descs)\n", (5977, 6056), False, 'from sfepy.discrete.fem import Mesh\n'), ((6595, 6630), 'numpy.array', 'nm.array', (['[gd.conn]'], {'dtype': 'nm.int32'}), '([gd.conn], dtype=nm.int32)\n', (6603, 6630), True, 'import numpy as nm\n'), ((6692, 6764), 'sfepy.discrete.fem.Mesh.from_data', 'Mesh.from_data', (['"""aux"""', 'gd.coors', 'None', '[conn]', '[mat_id]', '[geometry.name]'], {}), "('aux', gd.coors, None, [conn], [mat_id], [geometry.name])\n", (6706, 6764), False, 'from sfepy.discrete.fem import Mesh\n'), ((6804, 6825), 'sfepy.discrete.fem.FEDomain', 'FEDomain', (['"""aux"""', 'mesh'], {}), "('aux', mesh)\n", (6812, 6825), False, 'from sfepy.discrete.fem import FEDomain\n'), ((8582, 8612), 'numpy.array', 'nm.array', (['conn'], {'dtype': 'nm.int32'}), '(conn, dtype=nm.int32)\n', (8590, 8612), True, 'import numpy as nm\n'), ((710, 797), 'numpy.vstack', 'nm.vstack', (['[c[0], c[3], c[5], c[3], c[4], c[5], c[1], c[4], c[3], c[2], c[5], c[4]]'], {}), '([c[0], c[3], c[5], c[3], c[4], c[5], c[1], c[4], c[3], c[2], c[5],\n c[4]])\n', (719, 797), True, 'import numpy as nm\n'), ((1810, 1825), 'numpy.arange', 'nm.arange', (['n_el'], {}), '(n_el)\n', (1819, 1825), True, 'import numpy as nm\n'), ((1886, 1997), 'numpy.vstack', 'nm.vstack', (['[c[0], c[4], c[8], c[7], c[1], c[5], c[8], c[4], c[2], c[6], c[8], c[5], c[\n 3], c[7], c[8], c[6]]'], {}), '([c[0], c[4], c[8], c[7], c[1], c[5], c[8], c[4], c[2], c[6], c[8],\n c[5], c[3], c[7], c[8], c[6]])\n', (1895, 1997), True, 'import numpy as nm\n'), ((3640, 3851), 'numpy.vstack', 'nm.vstack', (['[c[0], c[4], c[6], c[7], c[4], c[1], c[5], c[8], c[6], c[5], c[2], c[9], c[\n 7], c[8], c[9], c[3], c[4], c[6], c[7], c[8], c[4], c[6], c[8], c[5], c\n [6], c[7], c[8], c[9], c[6], c[5], c[9], c[8]]'], {}), '([c[0], c[4], c[6], c[7], c[4], c[1], c[5], c[8], c[6], c[5], c[2],\n c[9], c[7], c[8], c[9], c[3], c[4], c[6], c[7], c[8], c[4], c[6], c[8],\n c[5], c[6], c[7], c[8], c[9], c[6], c[5], c[9], c[8]])\n', (3649, 3851), True, 'import numpy as nm\n'), ((5197, 5212), 'numpy.arange', 'nm.arange', (['n_el'], {}), '(n_el)\n', (5206, 5212), True, 'import numpy as nm\n')]
|
# This example implements homogenization of piezoeletric porous media.
# The mathematical model and numerical results are described in:
#
# <NAME>., <NAME>.
# Homogenization of the fluid-saturated piezoelectric porous media.
# International Journal of Solids and Structures
# Volume 147, 15 August 2018, Pages 110-125
# https://doi.org/10.1016/j.ijsolstr.2018.05.017
#
# Run calculation of homogenized coefficients:
#
# ./homogen.py example_poropiezo-1/poropiezo_micro_dfc.py
#
# The results are stored in `example_poropiezo-1/results` directory.
#
import sys
import numpy as nm
import os.path as osp
from sfepy.mechanics.matcoefs import stiffness_from_youngpoisson
from sfepy.homogenization.utils import coor_to_sym, define_box_regions
import sfepy.discrete.fem.periodic as per
from sfepy.discrete.fem.mesh import Mesh
import sfepy.homogenization.coefs_base as cb
from sfepy.base.base import Struct
data_dir = 'example_poropiezo-1'
def data_to_struct(data):
out = {}
for k, v in data.items():
out[k] = Struct(name='output_data',
mode='cell' if v[2] == 'c' else 'vertex',
data=v[0],
var_name=v[1],
dofs=None)
return out
def get_periodic_bc(var_tab, dim=3, dim_tab=None):
if dim_tab is None:
dim_tab = {'x': ['left', 'right'],
'z': ['bottom', 'top'],
'y': ['near', 'far']}
periodic = {}
epbcs = {}
for ivar, reg in var_tab:
periodic['per_%s' % ivar] = pers = []
for idim in 'xyz'[0:dim]:
key = 'per_%s_%s' % (ivar, idim)
regs = ['%s_%s' % (reg, ii) for ii in dim_tab[idim]]
epbcs[key] = (regs, {'%s.all' % ivar: '%s.all' % ivar},
'match_%s_plane' % idim)
pers.append(key)
return epbcs, periodic
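# Example of the structures built above (editorial illustration): for
# get_periodic_bc([('u', 'Yms')]), epbcs['per_u_x'] is
# (['Yms_left', 'Yms_right'], {'u.all': 'u.all'}, 'match_x_plane') and
# periodic == {'per_u': ['per_u_x', 'per_u_y', 'per_u_z']}.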
# reconstruct displacement and electric fields at the microscopic level,
# see Section 6.2
def recovery_micro_dfc(pb, corrs, macro):
eps0 = macro['eps0']
mesh = pb.domain.mesh
regions = pb.domain.regions
dim = mesh.dim
Yms_map = regions['Yms'].get_entities(0)
Ym_map = regions['Ym'].get_entities(0)
gl = '_' + list(corrs.keys())[0].split('_')[-1]
u1 = -corrs['corrs_p' + gl]['u'] * macro['press'][Yms_map, :]
phi = -corrs['corrs_p' + gl]['r'] * macro['press'][Ym_map, :]
for ii in range(2):
u1 += corrs['corrs_k%d' % ii + gl]['u'] * macro['phi'][ii]
phi += corrs['corrs_k%d' % ii + gl]['r'] * macro['phi'][ii]
for ii in range(dim):
for jj in range(dim):
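            # coor_to_sym maps the (ii, jj) tensor indices to the
            # corresponding position in the symmetric (Voigt) storage.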
kk = coor_to_sym(ii, jj, dim)
phi += corrs['corrs_rs' + gl]['r_%d%d' % (ii, jj)]\
* nm.expand_dims(macro['strain'][Ym_map, kk], axis=1)
u1 += corrs['corrs_rs' + gl]['u_%d%d' % (ii, jj)]\
* nm.expand_dims(macro['strain'][Yms_map, kk], axis=1)
u = macro['u'][Yms_map, :] + eps0 * u1
mvar = pb.create_variables(['u', 'r', 'svar'])
e_mac_Yms = [None] * macro['strain'].shape[1]
for ii in range(dim):
for jj in range(dim):
kk = coor_to_sym(ii, jj, dim)
mvar['svar'].set_data(macro['strain'][:, kk])
mac_e_Yms = pb.evaluate('ev_volume_integrate.i2.Yms(svar)',
mode='el_avg',
var_dict={'svar': mvar['svar']})
e_mac_Yms[kk] = mac_e_Yms.squeeze()
e_mac_Yms = nm.vstack(e_mac_Yms).T[:, nm.newaxis, :, nm.newaxis]
mvar['r'].set_data(phi)
E_mic = pb.evaluate('ev_grad.i2.Ym(r)',
mode='el_avg',
var_dict={'r': mvar['r']}) / eps0
mvar['u'].set_data(u1)
e_mic = pb.evaluate('ev_cauchy_strain.i2.Yms(u)',
mode='el_avg',
var_dict={'u': mvar['u']})
e_mic += e_mac_Yms
out = {
'u0': (macro['u'][Yms_map, :], 'u', 'p'), # macro displacement
'u1': (u1, 'u', 'p'), # local displacement corrections, see eq. (58)
'u': (u, 'u', 'p'), # total displacement
'e_mic': (e_mic, 'u', 'c'), # micro strain field, see eq. (58)
'phi': (phi, 'r', 'p'), # electric potential, see eq. (57)
'E_mic': (E_mic, 'r', 'c'), # electric field, see eq. (58)
}
return data_to_struct(out)
# define homogenized coefficients and subproblems for correctors
def define(grid0=100, filename_mesh=None):
eps0 = 0.01 / grid0
if filename_mesh is None:
filename_mesh = osp.join(data_dir, 'piezo_mesh_micro_dfc.vtk')
mesh = Mesh.from_file(filename_mesh)
n_conduct = len(nm.unique(mesh.cmesh.cell_groups)) - 2
sym_eye = 'nm.array([1,1,0])' if mesh.dim == 2 else\
'nm.array([1,1,1,0,0,0])'
bbox = mesh.get_bounding_box()
regions = define_box_regions(mesh.dim, bbox[0], bbox[1], eps=1e-3)
regions.update({
'Y': 'all',
# matrix
'Ym': 'cells of group 1',
'Ym_left': ('r.Ym *v r.Left', 'vertex'),
'Ym_right': ('r.Ym *v r.Right', 'vertex'),
'Ym_bottom': ('r.Ym *v r.Bottom', 'vertex'),
'Ym_top': ('r.Ym *v r.Top', 'vertex'),
'Ym_far': ('r.Ym *v r.Far', 'vertex'),
'Ym_near': ('r.Ym *v r.Near', 'vertex'),
'Gamma_mc': ('r.Ym *v r.Yc', 'facet', 'Ym'),
# channel / inclusion
'Yc': 'cells of group 2',
'Yc0': ('r.Yc -v r.Gamma_cm', 'vertex'),
'Gamma_cm': ('r.Ym *v r.Yc', 'facet', 'Yc'),
})
    print('number of conductors: %d' % n_conduct)
regions.update({
'Yms': ('r.Ym +v r.Ys', 'cell'),
'Yms_left': ('r.Yms *v r.Left', 'vertex'),
'Yms_right': ('r.Yms *v r.Right', 'vertex'),
'Yms_bottom': ('r.Yms *v r.Bottom', 'vertex'),
'Yms_top': ('r.Yms *v r.Top', 'vertex'),
'Yms_far': ('r.Yms *v r.Far', 'vertex'),
'Yms_near': ('r.Yms *v r.Near', 'vertex'),
'Gamma_ms': ('r.Ym *v r.Ys', 'facet', 'Ym'),
'Gamma_msc': ('r.Yms *v r.Yc', 'facet', 'Yms'),
'Ys': (' +v '.join(['r.Ys%d' % k for k in range(n_conduct)]),
'cell'),
})
options = {
'coefs_filename': 'coefs_poropiezo_%d' % (grid0),
'volume': {
'variables': ['svar'],
'expression': 'd_volume.i2.Y(svar)',
},
'coefs': 'coefs',
'requirements': 'requirements',
'output_dir': osp.join(data_dir, 'results'),
'ls': 'ls',
'file_per_var': True,
'absolute_mesh_path': True,
'multiprocessing': False,
'recovery_hook': recovery_micro_dfc,
}
fields = {
'displacement': ('real', 'vector', 'Yms', 1),
'potential': ('real', 'scalar', 'Ym', 1),
'sfield': ('real', 'scalar', 'Y', 1),
}
variables = {
# displacement
'u': ('unknown field', 'displacement'),
'v': ('test field', 'displacement', 'u'),
'Pi_u': ('parameter field', 'displacement', 'u'),
'U1': ('parameter field', 'displacement', '(set-to-None)'),
'U2': ('parameter field', 'displacement', '(set-to-None)'),
# potential
'r': ('unknown field', 'potential'),
's': ('test field', 'potential', 'r'),
'Pi_r': ('parameter field', 'potential', 'r'),
'R1': ('parameter field', 'potential', '(set-to-None)'),
'R2': ('parameter field', 'potential', '(set-to-None)'),
# aux variable
'svar': ('parameter field', 'sfield', '(set-to-None)'),
}
epbcs, periodic = get_periodic_bc([('u', 'Yms'), ('r', 'Ym')])
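    # scale factors applied below to the piezoelectric coupling tensor g
    # (~ eps0) and the dielectric tensor d (~ eps0**2)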
mat_g_sc, mat_d_sc = eps0, eps0**2
# BaTiO3 - Miara, Rohan, ... doi: 10.1016/j.jmps.2005.05.006
materials = {
'matrix': ({
'D': {'Ym': nm.array([[1.504, 0.656, 0.659, 0, 0, 0],
[0.656, 1.504, 0.659, 0, 0, 0],
[0.659, 0.659, 1.455, 0, 0, 0],
[0, 0, 0, 0.424, 0, 0],
[0, 0, 0, 0, 0.439, 0],
[0, 0, 0, 0, 0, 0.439]]) * 1e11, }
},),
'piezo': ({
'g': nm.array([[0, 0, 0, 0, 11.404, 0],
[0, 0, 0, 0, 0, 11.404],
[-4.322, -4.322, 17.360, 0, 0, 0]]) / mat_g_sc,
'd': nm.array([[1.284, 0, 0],
[0, 1.284, 0],
[0, 0, 1.505]]) * 1e-8 / mat_d_sc,
},),
'fluid': ({'gamma': 1.0 / 2.15e9},),
}
functions = {
'match_x_plane': (per.match_x_plane,),
'match_y_plane': (per.match_y_plane,),
'match_z_plane': (per.match_z_plane,),
}
ebcs = {
'fixed_u': ('Corners', {'u.all': 0.0}),
'fixed_r': ('Gamma_ms', {'r.all': 0.0}),
}
integrals = {
'i2': 2,
'i5': 5,
}
solvers = {
'ls': ('ls.scipy_direct', {}),
'ns_em6': ('nls.newton', {
'i_max': 1,
'eps_a': 1e-6,
'eps_r': 1e-6,
'problem': 'nonlinear'}),
'ns_em3': ('nls.newton', {
'i_max': 1,
'eps_a': 1e-3,
'eps_r': 1e-6,
'problem': 'nonlinear'}),
}
coefs = {
# homogenized elasticity, see eq. (46)_1
'A': {
'requires': ['c.A1', 'c.A2'],
'expression': 'c.A1 + c.A2',
'class': cb.CoefEval,
},
'A1': {
'status': 'auxiliary',
'requires': ['pis_u', 'corrs_rs'],
'expression': 'dw_lin_elastic.i2.Yms(matrix.D, U1, U2)',
'set_variables': [('U1', ('corrs_rs', 'pis_u'), 'u'),
('U2', ('corrs_rs', 'pis_u'), 'u')],
'class': cb.CoefSymSym,
},
'A2': {
'status': 'auxiliary',
'requires': ['corrs_rs'],
'expression': 'dw_diffusion.i2.Ym(piezo.d, R1, R2)',
'set_variables': [('R1', 'corrs_rs', 'r'),
('R2', 'corrs_rs', 'r')],
'class': cb.CoefSymSym,
},
# homogenized Biot coefficient, see eq. (46)_2
'B': {
'requires': ['c.Phi', 'c.B1', 'c.B2'],
'expression': 'c.B1 - c.B2 + c.Phi * %s' % sym_eye,
'class': cb.CoefEval,
},
'B1': {
'status': 'auxiliary',
'requires': ['pis_u', 'corrs_p'],
'expression': 'dw_lin_elastic.i2.Yms(matrix.D, U1, U2)',
'set_variables': [('U1', 'corrs_p', 'u'),
('U2', 'pis_u', 'u')],
'class': cb.CoefSym,
},
'B2': {
'status': 'auxiliary',
'requires': ['pis_u', 'corrs_p'],
'expression': 'dw_piezo_coupling.i2.Ym(piezo.g, U1, R1)',
'set_variables': [('R1', 'corrs_p', 'r'),
('U1', 'pis_u', 'u')],
'class': cb.CoefSym,
},
# homogenized compressibility coefficient, see eq. (46)_6
'M': {
'requires': ['c.Phi', 'c.N'],
'expression': 'c.N + c.Phi * %e' % materials['fluid'][0]['gamma'],
'class': cb.CoefEval,
},
'N': {
'status': 'auxiliary',
'requires': ['corrs_p'],
'expression': 'dw_surface_ltr.i2.Gamma_msc(U1)',
'set_variables': [('U1', 'corrs_p', 'u')],
'class': cb.CoefOne,
},
'Phi': {
'requires': ['c.vol'],
'expression': 'c.vol["fraction_Yc"]',
'class': cb.CoefEval,
},
# volume fractions of Ym, Yc, Ys1, Ys2, ...
'vol': {
'regions': ['Ym', 'Yc'] + ['Ys%d' % k for k in range(n_conduct)],
'expression': 'd_volume.i2.%s(svar)',
'class': cb.VolumeFractions,
},
'eps0': {
'requires': [],
'expression': '%e' % eps0,
'class': cb.CoefEval,
},
'filenames': {},
}
requirements = {
'pis_u': {
'variables': ['u'],
'class': cb.ShapeDimDim,
},
'pis_r': {
'variables': ['r'],
'class': cb.ShapeDim,
},
# local subproblem defined by eq. (41)
'corrs_rs': {
'requires': ['pis_u'],
'ebcs': ['fixed_u', 'fixed_r'],
'epbcs': periodic['per_u'] + periodic['per_r'],
'is_linear': True,
'equations': {
'eq1':
"""dw_lin_elastic.i2.Yms(matrix.D, v, u)
- dw_piezo_coupling.i2.Ym(piezo.g, v, r)
= - dw_lin_elastic.i2.Yms(matrix.D, v, Pi_u)""",
'eq2':
"""
- dw_piezo_coupling.i2.Ym(piezo.g, u, s)
- dw_diffusion.i2.Ym(piezo.d, s, r)
= dw_piezo_coupling.i2.Ym(piezo.g, Pi_u, s)""",
},
'set_variables': [('Pi_u', 'pis_u', 'u')],
'class': cb.CorrDimDim,
'save_name': 'corrs_rs_%d' % grid0,
'dump_variables': ['u', 'r'],
'solvers': {'ls': 'ls', 'nls': 'ns_em3'},
},
# local subproblem defined by eq. (42)
'corrs_p': {
'requires': [],
'ebcs': ['fixed_u', 'fixed_r'],
'epbcs': periodic['per_u'] + periodic['per_r'],
'is_linear': True,
'equations': {
'eq1':
"""dw_lin_elastic.i2.Yms(matrix.D, v, u)
- dw_piezo_coupling.i2.Ym(piezo.g, v, r)
= dw_surface_ltr.i2.Gamma_msc(v)""",
'eq2':
"""
- dw_piezo_coupling.i2.Ym(piezo.g, u, s)
- dw_diffusion.i2.Ym(piezo.d, s, r)
= 0"""
},
'class': cb.CorrOne,
'save_name': 'corrs_p_%d' % grid0,
'dump_variables': ['u', 'r'],
'solvers': {'ls': 'ls', 'nls': 'ns_em6'},
},
# local subproblem defined by eq. (43)
'corrs_rho': {
'requires': [],
'ebcs': ['fixed_u', 'fixed_r'],
'epbcs': periodic['per_u'] + periodic['per_r'],
'is_linear': True,
'equations': {
'eq1':
"""dw_lin_elastic.i2.Yms(matrix.D, v, u)
- dw_piezo_coupling.i2.Ym(piezo.g, v, r)
= 0""",
'eq2':
"""
- dw_piezo_coupling.i2.Ym(piezo.g, u, s)
- dw_diffusion.i2.Ym(piezo.d, s, r)
=
- dw_surface_integrate.i2.Gamma_mc(s)"""
},
'class': cb.CorrOne,
        'save_name': 'corrs_rho_%d' % grid0,
'dump_variables': ['u', 'r'],
'solvers': {'ls': 'ls', 'nls': 'ns_em6'},
},
}
for k in range(n_conduct):
sk = '%d' % k
regions.update({
'Ys' + sk: 'cells of group %d' % (3 + k),
'Gamma_s' + sk: ('r.Ym *v r.Ys' + sk, 'facet', 'Ym'),
})
materials['matrix'][0]['D'].update({
'Ys' + sk: stiffness_from_youngpoisson(3, 200e9, 0.25),
})
ebcs.update({
'fixed_r1_k_' + sk: ('Gamma_s' + sk, {'r.0': 1.0}),
'fixed_r0_k_' + sk: ('Gamma_s' + sk, {'r.0': 0.0}),
})
fixed_r0_k = ['fixed_r0_k_%d' % ii for ii in range(n_conduct)
if not ii == k]
# local subproblems defined for conductors, see eq. (44)
requirements.update({
'corrs_k' + sk: {
'requires': ['pis_r'],
'ebcs': ['fixed_u', 'fixed_r1_k_' + sk] + fixed_r0_k,
'epbcs': periodic['per_u'] + periodic['per_r'],
'is_linear': True,
'equations': {
'eq1':
"""dw_lin_elastic.i2.Yms(matrix.D, v, u)
- dw_piezo_coupling.i2.Ym(piezo.g, v, r)
= 0""",
'eq2':
"""
- dw_piezo_coupling.i2.Ym(piezo.g, u, s)
- dw_diffusion.i2.Ym(piezo.d, s, r)
= 0"""
},
'class': cb.CorrOne,
'save_name': 'corrs_k' + sk + '_%d' % grid0,
'dump_variables': ['u', 'r'],
'solvers': {'ls': 'ls', 'nls': 'ns_em6'},
},
})
coefs.update({
# homogenized coefficient (46)_3
'H' + sk: {
'requires': ['c.H1_' + sk, 'c.H2_' + sk],
'expression': 'c.H1_%s - c.H2_%s' % (sk, sk),
'class': cb.CoefEval,
},
'H1_' + sk: {
'status': 'auxiliary',
'requires': ['pis_u', 'corrs_k' + sk],
'expression': 'dw_lin_elastic.i2.Yms(matrix.D, U1, U2)',
'set_variables': [('U1', 'corrs_k' + sk, 'u'),
('U2', 'pis_u', 'u')],
'class': cb.CoefSym,
},
'H2_' + sk: {
'status': 'auxiliary',
'requires': ['pis_u', 'corrs_k' + sk],
'expression': 'dw_piezo_coupling.i2.Ym(piezo.g, U1, R1)',
'set_variables': [('R1', 'corrs_k' + sk, 'r'),
('U1', 'pis_u', 'u')],
'class': cb.CoefSym,
},
# homogenized coefficient (46)_7
'Z' + sk: {
'requires': ['corrs_k' + sk],
'expression': 'dw_surface_ltr.i2.Gamma_msc(U1)',
'set_variables': [('U1', 'corrs_k' + sk, 'u')],
'class': cb.CoefOne,
},
})
return locals()
|
[
"sfepy.mechanics.matcoefs.stiffness_from_youngpoisson",
"sfepy.homogenization.utils.define_box_regions",
"sfepy.base.base.Struct",
"sfepy.homogenization.utils.coor_to_sym",
"sfepy.discrete.fem.mesh.Mesh.from_file"
] |
[((4603, 4632), 'sfepy.discrete.fem.mesh.Mesh.from_file', 'Mesh.from_file', (['filename_mesh'], {}), '(filename_mesh)\n', (4617, 4632), False, 'from sfepy.discrete.fem.mesh import Mesh\n'), ((4834, 4891), 'sfepy.homogenization.utils.define_box_regions', 'define_box_regions', (['mesh.dim', 'bbox[0]', 'bbox[1]'], {'eps': '(0.001)'}), '(mesh.dim, bbox[0], bbox[1], eps=0.001)\n', (4852, 4891), False, 'from sfepy.homogenization.utils import coor_to_sym, define_box_regions\n'), ((1025, 1135), 'sfepy.base.base.Struct', 'Struct', ([], {'name': '"""output_data"""', 'mode': "('cell' if v[2] == 'c' else 'vertex')", 'data': 'v[0]', 'var_name': 'v[1]', 'dofs': 'None'}), "(name='output_data', mode='cell' if v[2] == 'c' else 'vertex', data=v\n [0], var_name=v[1], dofs=None)\n", (1031, 1135), False, 'from sfepy.base.base import Struct\n'), ((4544, 4590), 'os.path.join', 'osp.join', (['data_dir', '"""piezo_mesh_micro_dfc.vtk"""'], {}), "(data_dir, 'piezo_mesh_micro_dfc.vtk')\n", (4552, 4590), True, 'import os.path as osp\n'), ((6417, 6446), 'os.path.join', 'osp.join', (['data_dir', '"""results"""'], {}), "(data_dir, 'results')\n", (6425, 6446), True, 'import os.path as osp\n'), ((2623, 2647), 'sfepy.homogenization.utils.coor_to_sym', 'coor_to_sym', (['ii', 'jj', 'dim'], {}), '(ii, jj, dim)\n', (2634, 2647), False, 'from sfepy.homogenization.utils import coor_to_sym, define_box_regions\n'), ((3137, 3161), 'sfepy.homogenization.utils.coor_to_sym', 'coor_to_sym', (['ii', 'jj', 'dim'], {}), '(ii, jj, dim)\n', (3148, 3161), False, 'from sfepy.homogenization.utils import coor_to_sym, define_box_regions\n'), ((3478, 3498), 'numpy.vstack', 'nm.vstack', (['e_mac_Yms'], {}), '(e_mac_Yms)\n', (3487, 3498), True, 'import numpy as nm\n'), ((4653, 4686), 'numpy.unique', 'nm.unique', (['mesh.cmesh.cell_groups'], {}), '(mesh.cmesh.cell_groups)\n', (4662, 4686), True, 'import numpy as nm\n'), ((2730, 2781), 'numpy.expand_dims', 'nm.expand_dims', (["macro['strain'][Ym_map, kk]"], {'axis': '(1)'}), "(macro['strain'][Ym_map, kk], axis=1)\n", (2744, 2781), True, 'import numpy as nm\n'), ((2863, 2915), 'numpy.expand_dims', 'nm.expand_dims', (["macro['strain'][Yms_map, kk]"], {'axis': '(1)'}), "(macro['strain'][Yms_map, kk], axis=1)\n", (2877, 2915), True, 'import numpy as nm\n'), ((15322, 15374), 'sfepy.mechanics.matcoefs.stiffness_from_youngpoisson', 'stiffness_from_youngpoisson', (['(3)', '(200000000000.0)', '(0.25)'], {}), '(3, 200000000000.0, 0.25)\n', (15349, 15374), False, 'from sfepy.mechanics.matcoefs import stiffness_from_youngpoisson\n'), ((8160, 8258), 'numpy.array', 'nm.array', (['[[0, 0, 0, 0, 11.404, 0], [0, 0, 0, 0, 0, 11.404], [-4.322, -4.322, 17.36, \n 0, 0, 0]]'], {}), '([[0, 0, 0, 0, 11.404, 0], [0, 0, 0, 0, 0, 11.404], [-4.322, -4.322,\n 17.36, 0, 0, 0]])\n', (8168, 8258), True, 'import numpy as nm\n'), ((7751, 7939), 'numpy.array', 'nm.array', (['[[1.504, 0.656, 0.659, 0, 0, 0], [0.656, 1.504, 0.659, 0, 0, 0], [0.659, \n 0.659, 1.455, 0, 0, 0], [0, 0, 0, 0.424, 0, 0], [0, 0, 0, 0, 0.439, 0],\n [0, 0, 0, 0, 0, 0.439]]'], {}), '([[1.504, 0.656, 0.659, 0, 0, 0], [0.656, 1.504, 0.659, 0, 0, 0], [\n 0.659, 0.659, 1.455, 0, 0, 0], [0, 0, 0, 0.424, 0, 0], [0, 0, 0, 0, \n 0.439, 0], [0, 0, 0, 0, 0, 0.439]])\n', (7759, 7939), True, 'import numpy as nm\n'), ((8339, 8394), 'numpy.array', 'nm.array', (['[[1.284, 0, 0], [0, 1.284, 0], [0, 0, 1.505]]'], {}), '([[1.284, 0, 0], [0, 1.284, 0], [0, 0, 1.505]])\n', (8347, 8394), True, 'import numpy as nm\n')]
|
from unittest.mock import patch
from sqlmodel import create_engine
from ....conftest import get_testing_print_function
expected_calls = [
[
"Created hero:",
{
"age": None,
"id": 1,
"secret_name": "<NAME>",
"team_id": 1,
"name": "Deadpond",
},
],
[
"Created hero:",
{
"age": 48,
"id": 2,
"secret_name": "<NAME>",
"team_id": 2,
"name": "Rusty-Man",
},
],
[
"Created hero:",
{
"age": None,
"id": 3,
"secret_name": "<NAME>",
"team_id": None,
"name": "Spider-Boy",
},
],
[
"Updated hero:",
{
"age": None,
"id": 3,
"secret_name": "<NAME>",
"team_id": 2,
"name": "Spider-Boy",
},
],
[
"Team Wakaland:",
{"id": 3, "headquarters": "Wakaland Capital City", "name": "Wakaland"},
],
[
"Preventers new hero:",
{
"age": 32,
"id": 6,
"secret_name": "<NAME>",
"team_id": 2,
"name": "Tarantula",
},
],
[
"Preventers new hero:",
{
"age": 36,
"id": 7,
"secret_name": "<NAME>",
"team_id": 2,
"name": "Dr. Weird",
},
],
[
"Preventers new hero:",
{
"age": 93,
"id": 8,
"secret_name": "<NAME>",
"team_id": 2,
"name": "Captain North America",
},
],
]
def test_tutorial(clear_sqlmodel):
from docs_src.tutorial.relationship_attributes.create_and_update_relationships import (
tutorial001 as mod,
)
mod.sqlite_url = "sqlite://"
mod.engine = create_engine(mod.sqlite_url)
calls = []
new_print = get_testing_print_function(calls)
with patch("builtins.print", new=new_print):
mod.main()
assert calls == expected_calls
|
[
"sqlmodel.create_engine"
] |
[((1912, 1941), 'sqlmodel.create_engine', 'create_engine', (['mod.sqlite_url'], {}), '(mod.sqlite_url)\n', (1925, 1941), False, 'from sqlmodel import create_engine\n'), ((2018, 2056), 'unittest.mock.patch', 'patch', (['"""builtins.print"""'], {'new': 'new_print'}), "('builtins.print', new=new_print)\n", (2023, 2056), False, 'from unittest.mock import patch\n'), ((2066, 2076), 'docs_src.tutorial.relationship_attributes.create_and_update_relationships.tutorial001.main', 'mod.main', ([], {}), '()\n', (2074, 2076), True, 'from docs_src.tutorial.relationship_attributes.create_and_update_relationships import tutorial001 as mod\n')]
|
from typing import Optional
from sqlmodel import Field, SQLModel
__all__ = ['User']
class User(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
username: str = Field(index=False, nullable=False)
password: str = Field(index=False, nullable=False)
|
[
"sqlmodel.Field"
] |
[((146, 183), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (151, 183), False, 'from sqlmodel import Field, SQLModel\n'), ((204, 238), 'sqlmodel.Field', 'Field', ([], {'index': '(False)', 'nullable': '(False)'}), '(index=False, nullable=False)\n', (209, 238), False, 'from sqlmodel import Field, SQLModel\n'), ((259, 293), 'sqlmodel.Field', 'Field', ([], {'index': '(False)', 'nullable': '(False)'}), '(index=False, nullable=False)\n', (264, 293), False, 'from sqlmodel import Field, SQLModel\n')]
|
from __future__ import absolute_import
import os.path as op
import numpy as nm
import sfepy
from sfepy.discrete import FieldVariable
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.base.base import assert_
from sfepy.base.testing import TestCommon
class Test(TestCommon):
@staticmethod
def from_conf(conf, options):
mesh = Mesh.from_file('meshes/2d/square_unit_tri.mesh',
prefix_dir=sfepy.data_dir)
domain = FEDomain('domain', mesh)
omega = domain.create_region('Omega', 'all')
field = Field.from_args('linear', nm.float64, 'scalar', omega,
approx_order=1)
test = Test(conf=conf, options=options, omega=omega, field=field)
return test
def test_mass_matrix(self):
from sfepy.discrete.projections import create_mass_matrix
field = self.field
mtx = create_mass_matrix(field)
assert_(mtx.shape == (field.n_nod, field.n_nod))
assert_(abs(mtx.sum() - 1.0) < 1e-14)
return True
def test_projection_tri_quad(self):
from sfepy.discrete.projections import make_l2_projection
source = FieldVariable('us', 'unknown', self.field)
coors = self.field.get_coor()
vals = nm.sin(2.0 * nm.pi * coors[:,0] * coors[:,1])
source.set_data(vals)
name = op.join(self.options.out_dir,
'test_projection_tri_quad_source.vtk')
source.save_as_mesh(name)
mesh = Mesh.from_file('meshes/2d/square_quad.mesh',
prefix_dir=sfepy.data_dir)
domain = FEDomain('domain', mesh)
omega = domain.create_region('Omega', 'all')
field = Field.from_args('bilinear', nm.float64, 'scalar', omega,
approx_order=1)
target = FieldVariable('ut', 'unknown', field)
make_l2_projection(target, source)
name = op.join(self.options.out_dir,
'test_projection_tri_quad_target.vtk')
target.save_as_mesh(name)
bbox = self.field.domain.get_mesh_bounding_box()
x = nm.linspace(bbox[0, 0] + 0.001, bbox[1, 0] - 0.001, 20)
y = nm.linspace(bbox[0, 1] + 0.001, bbox[1, 1] - 0.001, 20)
xx, yy = nm.meshgrid(x, y)
test_coors = nm.c_[xx.ravel(), yy.ravel()].copy()
vec1 = source.evaluate_at(test_coors)
vec2 = target.evaluate_at(test_coors)
ok = (nm.abs(vec1 - vec2) < 0.01).all()
return ok
def test_projection_iga_fem(self):
from sfepy.discrete import FieldVariable
from sfepy.discrete.fem import FEDomain, Field
from sfepy.discrete.iga.domain import IGDomain
from sfepy.mesh.mesh_generators import gen_block_mesh
from sfepy.discrete.iga.domain_generators import gen_patch_block_domain
from sfepy.discrete.projections import (make_l2_projection,
make_l2_projection_data)
shape = [10, 12, 12]
dims = [5, 6, 6]
centre = [0, 0, 0]
degrees = [2, 2, 2]
nurbs, bmesh, regions = gen_patch_block_domain(dims, shape, centre,
degrees,
cp_mode='greville',
name='iga')
ig_domain = IGDomain('iga', nurbs, bmesh, regions=regions)
ig_omega = ig_domain.create_region('Omega', 'all')
ig_field = Field.from_args('iga', nm.float64, 1, ig_omega,
approx_order='iga', poly_space_base='iga')
ig_u = FieldVariable('ig_u', 'parameter', ig_field,
primary_var_name='(set-to-None)')
mesh = gen_block_mesh(dims, shape, centre, name='fem')
fe_domain = FEDomain('fem', mesh)
fe_omega = fe_domain.create_region('Omega', 'all')
fe_field = Field.from_args('fem', nm.float64, 1, fe_omega,
approx_order=2)
fe_u = FieldVariable('fe_u', 'parameter', fe_field,
primary_var_name='(set-to-None)')
def _eval_data(ts, coors, mode, **kwargs):
return nm.prod(coors**2, axis=1)[:, None, None]
make_l2_projection_data(ig_u, _eval_data)
make_l2_projection(fe_u, ig_u) # This calls ig_u.evaluate_at().
coors = 0.5 * nm.random.rand(20, 3) * dims
ig_vals = ig_u.evaluate_at(coors)
fe_vals = fe_u.evaluate_at(coors)
ok = nm.allclose(ig_vals, fe_vals, rtol=0.0, atol=1e-12)
if not ok:
self.report('iga-fem projection failed!')
self.report('coors:')
self.report(coors)
self.report('iga fem diff:')
self.report(nm.c_[ig_vals, fe_vals, nm.abs(ig_vals - fe_vals)])
return ok
def test_project_tensors(self):
from sfepy.discrete import FieldVariable
from sfepy.discrete.projections import project_by_component
ok = True
u = FieldVariable('u', 'parameter', self.field,
primary_var_name='(set-to-None)')
u.set_constant(1.0)
component = FieldVariable('component', 'parameter', self.field,
primary_var_name='(set-to-None)')
nls_options = {'eps_a' : 1e-16, 'i_max' : 1}
u_qp = u.evaluate()
u2 = FieldVariable('u2', 'parameter', self.field,
primary_var_name='(set-to-None)')
project_by_component(u2, u_qp, component, self.field.approx_order,
nls_options=nls_options)
_ok = self.compare_vectors(u(), u2())
ok = ok and _ok
gu_qp = u.evaluate(mode='grad')
gfield = Field.from_args('gu', nm.float64, 2, self.field.region,
approx_order=self.field.approx_order)
gu = FieldVariable('gu', 'parameter', gfield,
primary_var_name='(set-to-None)')
project_by_component(gu, gu_qp, component, gfield.approx_order,
nls_options=nls_options)
_ok = self.compare_vectors(gu(), nm.zeros_like(gu()))
ok = ok and _ok
return ok
|
[
"sfepy.mesh.mesh_generators.gen_block_mesh",
"sfepy.discrete.projections.make_l2_projection_data",
"sfepy.discrete.fem.Mesh.from_file",
"sfepy.discrete.iga.domain_generators.gen_patch_block_domain",
"sfepy.discrete.fem.Field.from_args",
"sfepy.discrete.fem.FEDomain",
"sfepy.base.base.assert_",
"sfepy.discrete.projections.make_l2_projection",
"sfepy.discrete.projections.project_by_component",
"sfepy.discrete.iga.domain.IGDomain",
"sfepy.discrete.projections.create_mass_matrix",
"sfepy.discrete.FieldVariable"
] |
[((359, 434), 'sfepy.discrete.fem.Mesh.from_file', 'Mesh.from_file', (['"""meshes/2d/square_unit_tri.mesh"""'], {'prefix_dir': 'sfepy.data_dir'}), "('meshes/2d/square_unit_tri.mesh', prefix_dir=sfepy.data_dir)\n", (373, 434), False, 'from sfepy.discrete.fem import Mesh, FEDomain, Field\n'), ((482, 506), 'sfepy.discrete.fem.FEDomain', 'FEDomain', (['"""domain"""', 'mesh'], {}), "('domain', mesh)\n", (490, 506), False, 'from sfepy.discrete.fem import FEDomain, Field\n'), ((578, 648), 'sfepy.discrete.fem.Field.from_args', 'Field.from_args', (['"""linear"""', 'nm.float64', '"""scalar"""', 'omega'], {'approx_order': '(1)'}), "('linear', nm.float64, 'scalar', omega, approx_order=1)\n", (593, 648), False, 'from sfepy.discrete.fem import FEDomain, Field\n'), ((918, 943), 'sfepy.discrete.projections.create_mass_matrix', 'create_mass_matrix', (['field'], {}), '(field)\n', (936, 943), False, 'from sfepy.discrete.projections import create_mass_matrix\n'), ((953, 1001), 'sfepy.base.base.assert_', 'assert_', (['(mtx.shape == (field.n_nod, field.n_nod))'], {}), '(mtx.shape == (field.n_nod, field.n_nod))\n', (960, 1001), False, 'from sfepy.base.base import assert_\n'), ((1194, 1236), 'sfepy.discrete.FieldVariable', 'FieldVariable', (['"""us"""', '"""unknown"""', 'self.field'], {}), "('us', 'unknown', self.field)\n", (1207, 1236), False, 'from sfepy.discrete import FieldVariable\n'), ((1291, 1338), 'numpy.sin', 'nm.sin', (['(2.0 * nm.pi * coors[:, 0] * coors[:, 1])'], {}), '(2.0 * nm.pi * coors[:, 0] * coors[:, 1])\n', (1297, 1338), True, 'import numpy as nm\n'), ((1383, 1451), 'os.path.join', 'op.join', (['self.options.out_dir', '"""test_projection_tri_quad_source.vtk"""'], {}), "(self.options.out_dir, 'test_projection_tri_quad_source.vtk')\n", (1390, 1451), True, 'import os.path as op\n'), ((1525, 1596), 'sfepy.discrete.fem.Mesh.from_file', 'Mesh.from_file', (['"""meshes/2d/square_quad.mesh"""'], {'prefix_dir': 'sfepy.data_dir'}), "('meshes/2d/square_quad.mesh', prefix_dir=sfepy.data_dir)\n", (1539, 1596), False, 'from sfepy.discrete.fem import Mesh, FEDomain, Field\n'), ((1644, 1668), 'sfepy.discrete.fem.FEDomain', 'FEDomain', (['"""domain"""', 'mesh'], {}), "('domain', mesh)\n", (1652, 1668), False, 'from sfepy.discrete.fem import FEDomain, Field\n'), ((1741, 1813), 'sfepy.discrete.fem.Field.from_args', 'Field.from_args', (['"""bilinear"""', 'nm.float64', '"""scalar"""', 'omega'], {'approx_order': '(1)'}), "('bilinear', nm.float64, 'scalar', omega, approx_order=1)\n", (1756, 1813), False, 'from sfepy.discrete.fem import FEDomain, Field\n'), ((1864, 1901), 'sfepy.discrete.FieldVariable', 'FieldVariable', (['"""ut"""', '"""unknown"""', 'field'], {}), "('ut', 'unknown', field)\n", (1877, 1901), False, 'from sfepy.discrete import FieldVariable\n'), ((1911, 1945), 'sfepy.discrete.projections.make_l2_projection', 'make_l2_projection', (['target', 'source'], {}), '(target, source)\n', (1929, 1945), False, 'from sfepy.discrete.projections import make_l2_projection, make_l2_projection_data\n'), ((1962, 2030), 'os.path.join', 'op.join', (['self.options.out_dir', '"""test_projection_tri_quad_target.vtk"""'], {}), "(self.options.out_dir, 'test_projection_tri_quad_target.vtk')\n", (1969, 2030), True, 'import os.path as op\n'), ((2158, 2213), 'numpy.linspace', 'nm.linspace', (['(bbox[0, 0] + 0.001)', '(bbox[1, 0] - 0.001)', '(20)'], {}), '(bbox[0, 0] + 0.001, bbox[1, 0] - 0.001, 20)\n', (2169, 2213), True, 'import numpy as nm\n'), ((2226, 2281), 'numpy.linspace', 'nm.linspace', (['(bbox[0, 1] + 0.001)', '(bbox[1, 1] - 0.001)', '(20)'], {}), '(bbox[0, 1] + 0.001, bbox[1, 1] - 0.001, 20)\n', (2237, 2281), True, 'import numpy as nm\n'), ((2300, 2317), 'numpy.meshgrid', 'nm.meshgrid', (['x', 'y'], {}), '(x, y)\n', (2311, 2317), True, 'import numpy as nm\n'), ((3162, 3250), 'sfepy.discrete.iga.domain_generators.gen_patch_block_domain', 'gen_patch_block_domain', (['dims', 'shape', 'centre', 'degrees'], {'cp_mode': '"""greville"""', 'name': '"""iga"""'}), "(dims, shape, centre, degrees, cp_mode='greville',\n name='iga')\n", (3184, 3250), False, 'from sfepy.discrete.iga.domain_generators import gen_patch_block_domain\n'), ((3432, 3478), 'sfepy.discrete.iga.domain.IGDomain', 'IGDomain', (['"""iga"""', 'nurbs', 'bmesh'], {'regions': 'regions'}), "('iga', nurbs, bmesh, regions=regions)\n", (3440, 3478), False, 'from sfepy.discrete.iga.domain import IGDomain\n'), ((3558, 3652), 'sfepy.discrete.fem.Field.from_args', 'Field.from_args', (['"""iga"""', 'nm.float64', '(1)', 'ig_omega'], {'approx_order': '"""iga"""', 'poly_space_base': '"""iga"""'}), "('iga', nm.float64, 1, ig_omega, approx_order='iga',\n poly_space_base='iga')\n", (3573, 3652), False, 'from sfepy.discrete.fem import FEDomain, Field\n'), ((3699, 3777), 'sfepy.discrete.FieldVariable', 'FieldVariable', (['"""ig_u"""', '"""parameter"""', 'ig_field'], {'primary_var_name': '"""(set-to-None)"""'}), "('ig_u', 'parameter', ig_field, primary_var_name='(set-to-None)')\n", (3712, 3777), False, 'from sfepy.discrete import FieldVariable\n'), ((3823, 3870), 'sfepy.mesh.mesh_generators.gen_block_mesh', 'gen_block_mesh', (['dims', 'shape', 'centre'], {'name': '"""fem"""'}), "(dims, shape, centre, name='fem')\n", (3837, 3870), False, 'from sfepy.mesh.mesh_generators import gen_block_mesh\n'), ((3891, 3912), 'sfepy.discrete.fem.FEDomain', 'FEDomain', (['"""fem"""', 'mesh'], {}), "('fem', mesh)\n", (3899, 3912), False, 'from sfepy.discrete.fem import FEDomain, Field\n'), ((3992, 4055), 'sfepy.discrete.fem.Field.from_args', 'Field.from_args', (['"""fem"""', 'nm.float64', '(1)', 'fe_omega'], {'approx_order': '(2)'}), "('fem', nm.float64, 1, fe_omega, approx_order=2)\n", (4007, 4055), False, 'from sfepy.discrete.fem import FEDomain, Field\n'), ((4106, 4184), 'sfepy.discrete.FieldVariable', 'FieldVariable', (['"""fe_u"""', '"""parameter"""', 'fe_field'], {'primary_var_name': '"""(set-to-None)"""'}), "('fe_u', 'parameter', fe_field, primary_var_name='(set-to-None)')\n", (4119, 4184), False, 'from sfepy.discrete import FieldVariable\n'), ((4335, 4376), 'sfepy.discrete.projections.make_l2_projection_data', 'make_l2_projection_data', (['ig_u', '_eval_data'], {}), '(ig_u, _eval_data)\n', (4358, 4376), False, 'from sfepy.discrete.projections import make_l2_projection, make_l2_projection_data\n'), ((4386, 4416), 'sfepy.discrete.projections.make_l2_projection', 'make_l2_projection', (['fe_u', 'ig_u'], {}), '(fe_u, ig_u)\n', (4404, 4416), False, 'from sfepy.discrete.projections import make_l2_projection, make_l2_projection_data\n'), ((4601, 4652), 'numpy.allclose', 'nm.allclose', (['ig_vals', 'fe_vals'], {'rtol': '(0.0)', 'atol': '(1e-12)'}), '(ig_vals, fe_vals, rtol=0.0, atol=1e-12)\n', (4612, 4652), True, 'import numpy as nm\n'), ((5113, 5190), 'sfepy.discrete.FieldVariable', 'FieldVariable', (['"""u"""', '"""parameter"""', 'self.field'], {'primary_var_name': '"""(set-to-None)"""'}), "('u', 'parameter', self.field, primary_var_name='(set-to-None)')\n", (5126, 5190), False, 'from sfepy.discrete import FieldVariable\n'), ((5266, 5356), 'sfepy.discrete.FieldVariable', 'FieldVariable', (['"""component"""', '"""parameter"""', 'self.field'], {'primary_var_name': '"""(set-to-None)"""'}), "('component', 'parameter', self.field, primary_var_name=\n '(set-to-None)')\n", (5279, 5356), False, 'from sfepy.discrete import FieldVariable\n'), ((5482, 5560), 'sfepy.discrete.FieldVariable', 'FieldVariable', (['"""u2"""', '"""parameter"""', 'self.field'], {'primary_var_name': '"""(set-to-None)"""'}), "('u2', 'parameter', self.field, primary_var_name='(set-to-None)')\n", (5495, 5560), False, 'from sfepy.discrete import FieldVariable\n'), ((5596, 5691), 'sfepy.discrete.projections.project_by_component', 'project_by_component', (['u2', 'u_qp', 'component', 'self.field.approx_order'], {'nls_options': 'nls_options'}), '(u2, u_qp, component, self.field.approx_order,\n nls_options=nls_options)\n', (5616, 5691), False, 'from sfepy.discrete.projections import project_by_component\n'), ((5847, 5945), 'sfepy.discrete.fem.Field.from_args', 'Field.from_args', (['"""gu"""', 'nm.float64', '(2)', 'self.field.region'], {'approx_order': 'self.field.approx_order'}), "('gu', nm.float64, 2, self.field.region, approx_order=self.\n field.approx_order)\n", (5862, 5945), False, 'from sfepy.discrete.fem import FEDomain, Field\n'), ((5987, 6061), 'sfepy.discrete.FieldVariable', 'FieldVariable', (['"""gu"""', '"""parameter"""', 'gfield'], {'primary_var_name': '"""(set-to-None)"""'}), "('gu', 'parameter', gfield, primary_var_name='(set-to-None)')\n", (6000, 6061), False, 'from sfepy.discrete import FieldVariable\n'), ((6098, 6191), 'sfepy.discrete.projections.project_by_component', 'project_by_component', (['gu', 'gu_qp', 'component', 'gfield.approx_order'], {'nls_options': 'nls_options'}), '(gu, gu_qp, component, gfield.approx_order, nls_options\n =nls_options)\n', (6118, 6191), False, 'from sfepy.discrete.projections import project_by_component\n'), ((4285, 4312), 'numpy.prod', 'nm.prod', (['(coors ** 2)'], {'axis': '(1)'}), '(coors ** 2, axis=1)\n', (4292, 4312), True, 'import numpy as nm\n'), ((4473, 4494), 'numpy.random.rand', 'nm.random.rand', (['(20)', '(3)'], {}), '(20, 3)\n', (4487, 4494), True, 'import numpy as nm\n'), ((2484, 2503), 'numpy.abs', 'nm.abs', (['(vec1 - vec2)'], {}), '(vec1 - vec2)\n', (2490, 2503), True, 'import numpy as nm\n'), ((4880, 4905), 'numpy.abs', 'nm.abs', (['(ig_vals - fe_vals)'], {}), '(ig_vals - fe_vals)\n', (4886, 4905), True, 'import numpy as nm\n')]
|
r"""
Laplace equation with Dirichlet boundary conditions given by a sine function
and constants.
Find :math:`t` such that:
.. math::
\int_{\Omega} c \nabla s \cdot \nabla t
= 0
\;, \quad \forall s \;.
The :class:`sfepy.discrete.fem.meshio.UserMeshIO` class is used to refine the
original two-element mesh before the actual solution.
The FE polynomial basis and the approximation order can be chosen on the
command-line. By default, the fifth order Lagrange polynomial space is used,
see ``define()`` arguments.
This example demonstrates how to visualize higher order approximations of the
continuous solution. The adaptive linearization is applied in order to save
viewable results, see both the options keyword and the ``post_process()``
function that computes the solution gradient. The linearization parameters can
also be specified on the command line.
The Lagrange or Bernstein polynomial bases support higher order
DOFs in the Dirichlet boundary conditions, unlike the hierarchical Lobatto
basis implementation, compare the results of::
python simple.py examples/diffusion/sinbc.py -d basis=lagrange
python simple.py examples/diffusion/sinbc.py -d basis=bernstein
python simple.py examples/diffusion/sinbc.py -d basis=lobatto
Use the following commands to view each of the results of the above commands
(assuming default output directory and names)::
python postproc.py -b -d't,plot_warp_scalar,rel_scaling=1' 2_4_2_refined_t.vtk --wireframe
python postproc.py -b 2_4_2_refined_grad.vtk
"""
from __future__ import absolute_import
import numpy as nm
from sfepy import data_dir
from sfepy.base.base import output
from sfepy.discrete.fem import Mesh, FEDomain
from sfepy.discrete.fem.meshio import UserMeshIO, MeshIO
from sfepy.homogenization.utils import define_box_regions
from six.moves import range
base_mesh = data_dir + '/meshes/elements/2_4_2.mesh'
def mesh_hook(mesh, mode):
"""
Load and refine a mesh here.
"""
if mode == 'read':
mesh = Mesh.from_file(base_mesh)
domain = FEDomain(mesh.name, mesh)
for ii in range(3):
output('refine %d...' % ii)
domain = domain.refine()
output('... %d nodes %d elements'
% (domain.shape.n_nod, domain.shape.n_el))
domain.mesh.name = '2_4_2_refined'
return domain.mesh
elif mode == 'write':
pass
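# Note: each uniform 2D refinement splits every quadrilateral into four, so the
# three passes above turn the two-element base mesh into 2 * 4**3 = 128 elements.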
def post_process(out, pb, state, extend=False):
"""
Calculate gradient of the solution.
"""
from sfepy.discrete.fem.fields_base import create_expression_output
aux = create_expression_output('ev_grad.ie.Elements( t )',
'grad', 'temperature',
pb.fields, pb.get_materials(),
pb.get_variables(), functions=pb.functions,
mode='qp', verbose=False,
min_level=0, max_level=5, eps=1e-3)
out.update(aux)
return out
def define(order=5, basis='lagrange', min_level=0, max_level=5, eps=1e-3):
filename_mesh = UserMeshIO(mesh_hook)
# Get the mesh bounding box.
io = MeshIO.any_from_filename(base_mesh)
bbox, dim = io.read_bounding_box(ret_dim=True)
options = {
'nls' : 'newton',
'ls' : 'ls',
'post_process_hook' : 'post_process',
'linearization' : {
'kind' : 'adaptive',
'min_level' : min_level, # Min. refinement level applied everywhere.
'max_level' : max_level, # Max. refinement level.
'eps' : eps, # Relative error tolerance.
},
}
materials = {
'coef' : ({'val' : 1.0},),
}
regions = {
'Omega' : 'all',
}
regions.update(define_box_regions(dim, bbox[0], bbox[1], 1e-5))
fields = {
'temperature' : ('real', 1, 'Omega', order, 'H1', basis),
}
variables = {
't' : ('unknown field', 'temperature', 0),
's' : ('test field', 'temperature', 't'),
}
amplitude = 1.0
def ebc_sin(ts, coor, **kwargs):
x0 = 0.5 * (coor[:, 1].min() + coor[:, 1].max())
val = amplitude * nm.sin( (coor[:, 1] - x0) * 2. * nm.pi )
return val
ebcs = {
't1' : ('Left', {'t.0' : 'ebc_sin'}),
't2' : ('Right', {'t.0' : -0.5}),
't3' : ('Top', {'t.0' : 1.0}),
}
functions = {
'ebc_sin' : (ebc_sin,),
}
equations = {
'Temperature' : """dw_laplace.10.Omega(coef.val, s, t) = 0"""
}
solvers = {
'ls' : ('ls.scipy_direct', {}),
'newton' : ('nls.newton', {
'i_max' : 1,
'eps_a' : 1e-10,
}),
}
return locals()
|
[
"sfepy.base.base.output",
"sfepy.discrete.fem.meshio.UserMeshIO",
"sfepy.homogenization.utils.define_box_regions",
"sfepy.discrete.fem.meshio.MeshIO.any_from_filename",
"sfepy.discrete.fem.Mesh.from_file",
"sfepy.discrete.fem.FEDomain"
] |
[((3111, 3132), 'sfepy.discrete.fem.meshio.UserMeshIO', 'UserMeshIO', (['mesh_hook'], {}), '(mesh_hook)\n', (3121, 3132), False, 'from sfepy.discrete.fem.meshio import UserMeshIO, MeshIO\n'), ((3176, 3211), 'sfepy.discrete.fem.meshio.MeshIO.any_from_filename', 'MeshIO.any_from_filename', (['base_mesh'], {}), '(base_mesh)\n', (3200, 3211), False, 'from sfepy.discrete.fem.meshio import UserMeshIO, MeshIO\n'), ((2008, 2033), 'sfepy.discrete.fem.Mesh.from_file', 'Mesh.from_file', (['base_mesh'], {}), '(base_mesh)\n', (2022, 2033), False, 'from sfepy.discrete.fem import Mesh, FEDomain\n'), ((2051, 2076), 'sfepy.discrete.fem.FEDomain', 'FEDomain', (['mesh.name', 'mesh'], {}), '(mesh.name, mesh)\n', (2059, 2076), False, 'from sfepy.discrete.fem import Mesh, FEDomain\n'), ((2095, 2103), 'six.moves.range', 'range', (['(3)'], {}), '(3)\n', (2100, 2103), False, 'from six.moves import range\n'), ((3774, 3822), 'sfepy.homogenization.utils.define_box_regions', 'define_box_regions', (['dim', 'bbox[0]', 'bbox[1]', '(1e-05)'], {}), '(dim, bbox[0], bbox[1], 1e-05)\n', (3792, 3822), False, 'from sfepy.homogenization.utils import define_box_regions\n'), ((2117, 2144), 'sfepy.base.base.output', 'output', (["('refine %d...' % ii)"], {}), "('refine %d...' % ii)\n", (2123, 2144), False, 'from sfepy.base.base import output\n'), ((2194, 2270), 'sfepy.base.base.output', 'output', (["('... %d nodes %d elements' % (domain.shape.n_nod, domain.shape.n_el))"], {}), "('... %d nodes %d elements' % (domain.shape.n_nod, domain.shape.n_el))\n", (2200, 2270), False, 'from sfepy.base.base import output\n'), ((4181, 4220), 'numpy.sin', 'nm.sin', (['((coor[:, 1] - x0) * 2.0 * nm.pi)'], {}), '((coor[:, 1] - x0) * 2.0 * nm.pi)\n', (4187, 4220), True, 'import numpy as nm\n')]
|
#!/usr/bin/env python3
import argparse
import math
import megengine.functional as F
import megengine.module as M
import numpy as np
from megengine import jit, tensor
class ConvNet(M.Module):
def __init__(self):
super().__init__()
self.conv1 = M.Conv2d(in_channels=3, out_channels=1, kernel_size=3, bias=False)
def forward(self, input):
x = self.conv1(input)
return x
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="dump mge model for add_demo",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"--dir",
help="set the dir where the model to dump",
default=".",
type=str,
)
args = parser.parse_args()
net = ConvNet()
net.eval()
@jit.trace(symbolic=True, capture_as_const=True)
def fun(data):
return net(data)
inp = tensor(np.arange(0, 96).astype("float32").reshape(2, 3, 4, 4))
out = fun(inp)
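    # With a 3x3 kernel, stride 1 and no padding, the (2, 3, 4, 4) input yields
    # a (2, 1, 2, 2) output: one channel, spatial size 4 - 3 + 1 = 2.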
fun.dump(args.dir + "/conv_demo_f32_without_data.mge", arg_names=["data"],
no_assert=True)
|
[
"megengine.module.Conv2d",
"megengine.jit.trace"
] |
[((457, 583), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""dump mge model for add_demo"""', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), "(description='dump mge model for add_demo',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n", (480, 583), False, 'import argparse\n'), ((816, 863), 'megengine.jit.trace', 'jit.trace', ([], {'symbolic': '(True)', 'capture_as_const': '(True)'}), '(symbolic=True, capture_as_const=True)\n', (825, 863), False, 'from megengine import jit, tensor\n'), ((268, 334), 'megengine.module.Conv2d', 'M.Conv2d', ([], {'in_channels': '(3)', 'out_channels': '(1)', 'kernel_size': '(3)', 'bias': '(False)'}), '(in_channels=3, out_channels=1, kernel_size=3, bias=False)\n', (276, 334), True, 'import megengine.module as M\n'), ((925, 941), 'numpy.arange', 'np.arange', (['(0)', '(96)'], {}), '(0, 96)\n', (934, 941), True, 'import numpy as np\n')]
|
# Copyright (c) 2020 <NAME>
# This code is licensed under MIT license
# (https://github.com/kwotsin/mimicry/blob/master/LICENSE)
# ------------------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2019 Megvii Inc. All rights reserved.
# ------------------------------------------------------------------------------
import megengine.functional as F
from megengine.core.tensor_factory import zeros
def ns_loss_gen(output_fake):
r"""
Non-saturating loss for generator.
Args:
output_fake (Tensor): Discriminator output logits for fake images.
Returns:
Tensor: A scalar tensor loss output.
"""
output_fake = F.sigmoid(output_fake)
return -F.log(output_fake + 1e-8).mean()
# def ns_loss_gen(output_fake):
# """numerical stable version"""
# return F.log(1 + F.exp(-output_fake)).mean()
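# Note: -log(sigmoid(x)) == log(1 + exp(-x)), so the commented variant above is
# an algebraically equivalent, numerically stable form of ns_loss_gen.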
def _bce_loss_with_logits(output, labels, **kwargs):
r"""
Sigmoid cross entropy with logits, see tensorflow
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
"""
loss = F.maximum(output, 0) - output * labels + F.log(1 + F.exp(-F.abs(output)))
return loss.mean()
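# For labels z in {0, 1}, the BCE loss -[z*log(sigmoid(x)) + (1 - z)*log(1 - sigmoid(x))]
# rearranges to max(x, 0) - x*z + log(1 + exp(-|x|)), which never overflows in exp().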
def minimax_loss_dis(output_fake,
output_real,
real_label_val=1.0,
fake_label_val=0.0,
**kwargs):
r"""
Standard minimax loss for GANs through the BCE Loss with logits fn.
Args:
output_fake (Tensor): Discriminator output logits for fake images.
output_real (Tensor): Discriminator output logits for real images.
real_label_val (int): Label for real images.
fake_label_val (int): Label for fake images.
device (torch.device): Torch device object for sending created data.
Returns:
Tensor: A scalar tensor loss output.
"""
# Produce real and fake labels.
fake_labels = zeros((output_fake.shape[0], 1)) + fake_label_val
real_labels = zeros((output_real.shape[0], 1)) + real_label_val
# FF, compute loss and backprop D
errD_fake = _bce_loss_with_logits(output=output_fake,
labels=fake_labels,
**kwargs)
errD_real = _bce_loss_with_logits(output=output_real,
labels=real_labels,
**kwargs)
# Compute cumulative error
loss = errD_real + errD_fake
return loss
def wasserstein_loss_gen(output_fake):
r"""
Computes the wasserstein loss for generator.
Args:
output_fake (Tensor): Discriminator output logits for fake images.
Returns:
Tensor: A scalar tensor loss output.
"""
loss = -output_fake.mean()
return loss
def wasserstein_loss_dis(output_real, output_fake):
r"""
Computes the wasserstein loss for the discriminator.
Args:
output_real (Tensor): Discriminator output logits for real images.
output_fake (Tensor): Discriminator output logits for fake images.
Returns:
Tensor: A scalar tensor loss output.
"""
loss = -1.0 * output_real.mean() + output_fake.mean()
return loss
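# Minimizing this loss pushes E[D(real)] - E[D(fake)] up, i.e. the critic
# maximizes its estimate of the Wasserstein distance between the two distributions.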
|
[
"megengine.functional.maximum",
"megengine.functional.sigmoid",
"megengine.core.tensor_factory.zeros",
"megengine.functional.log",
"megengine.functional.abs"
] |
[((1132, 1154), 'megengine.functional.sigmoid', 'F.sigmoid', (['output_fake'], {}), '(output_fake)\n', (1141, 1154), True, 'import megengine.functional as F\n'), ((2374, 2406), 'megengine.core.tensor_factory.zeros', 'zeros', (['(output_fake.shape[0], 1)'], {}), '((output_fake.shape[0], 1))\n', (2379, 2406), False, 'from megengine.core.tensor_factory import zeros\n'), ((2442, 2474), 'megengine.core.tensor_factory.zeros', 'zeros', (['(output_real.shape[0], 1)'], {}), '((output_real.shape[0], 1))\n', (2447, 2474), False, 'from megengine.core.tensor_factory import zeros\n'), ((1547, 1567), 'megengine.functional.maximum', 'F.maximum', (['output', '(0)'], {}), '(output, 0)\n', (1556, 1567), True, 'import megengine.functional as F\n'), ((1168, 1194), 'megengine.functional.log', 'F.log', (['(output_fake + 1e-08)'], {}), '(output_fake + 1e-08)\n', (1173, 1194), True, 'import megengine.functional as F\n'), ((1605, 1618), 'megengine.functional.abs', 'F.abs', (['output'], {}), '(output)\n', (1610, 1618), True, 'import megengine.functional as F\n')]
|
import os
from fastapi import *
from psycopg2.errors import UndefinedTable
from sqlmodel import Session, select, text
from sqlalchemy.exc import ProgrammingError
from .models.timelog import TimeLog
from .models.calendar import Calendar
from .utils import (
engine,
create_db,
tags_metadata,
execute_sample_sql,
)
from .api import (
user,
timelog,
forecast,
epic,
epic_area,
client,
rate,
team,
role,
sponsor,
capacity,
demand,
)
import csv
app = FastAPI(title="timeflow app API", openapi_tags=tags_metadata)
app.include_router(timelog.router)
app.include_router(forecast.router)
app.include_router(user.router)
app.include_router(epic.router)
app.include_router(epic_area.router)
app.include_router(client.router)
app.include_router(rate.router)
app.include_router(team.router)
app.include_router(role.router)
app.include_router(sponsor.router)
app.include_router(capacity.router)
app.include_router(demand.router)
@app.on_event("startup")
def on_startup():
with Session(engine) as session:
if os.getenv("TIMEFLOW_DEV") == "true":
try:
statement = select(TimeLog)
results = session.exec(statement)
except ProgrammingError:
create_db()
execute_sample_sql()
elif os.getenv("TIMEFLOW_DEV") == "false":
try:
statement = select(TimeLog)
results = session.exec(statement)
except ProgrammingError:
create_db()
@app.on_event("startup")
def implement_calendar_table():
with Session(engine) as session:
try:
statement = select(Calendar.year_name).where(Calendar.id == 1)
result = session.exec(statement).one()
except Exception as e:
print(e)
values_sql = f"""INSERT INTO app_db.calendar (date, year_number, year_name, quarter_number, quarter_name
, month_number, month_name, week_number, week_name, week_day_number, week_day_name)
VALUES """
with open("backend/calendar.csv") as csvfile:
reader = csv.reader(csvfile, delimiter=",", quotechar="|")
            values_list = []
            for index, row in enumerate(reader):
                if index > 0 and row[0] != "":
                    _row = [f"'{item}'" for item in row]
                    row_sql = ", ".join(_row)
                    values_list.append(f"({row_sql})")
            # Join the collected row tuples once and terminate the INSERT statement.
            values_sql += ", ".join(values_list) + ";"
session.execute(text(values_sql))
session.commit()
|
[
"sqlmodel.Session",
"sqlmodel.select",
"sqlmodel.text"
] |
[((1035, 1050), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (1042, 1050), False, 'from sqlmodel import Session, select, text\n'), ((1620, 1635), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (1627, 1635), False, 'from sqlmodel import Session, select, text\n'), ((1074, 1099), 'os.getenv', 'os.getenv', (['"""TIMEFLOW_DEV"""'], {}), "('TIMEFLOW_DEV')\n", (1083, 1099), False, 'import os\n'), ((1156, 1171), 'sqlmodel.select', 'select', (['TimeLog'], {}), '(TimeLog)\n', (1162, 1171), False, 'from sqlmodel import Session, select, text\n'), ((1337, 1362), 'os.getenv', 'os.getenv', (['"""TIMEFLOW_DEV"""'], {}), "('TIMEFLOW_DEV')\n", (1346, 1362), False, 'import os\n'), ((1420, 1435), 'sqlmodel.select', 'select', (['TimeLog'], {}), '(TimeLog)\n', (1426, 1435), False, 'from sqlmodel import Session, select, text\n'), ((1685, 1711), 'sqlmodel.select', 'select', (['Calendar.year_name'], {}), '(Calendar.year_name)\n', (1691, 1711), False, 'from sqlmodel import Session, select, text\n'), ((2182, 2231), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '""","""', 'quotechar': '"""|"""'}), "(csvfile, delimiter=',', quotechar='|')\n", (2192, 2231), False, 'import csv\n'), ((2652, 2668), 'sqlmodel.text', 'text', (['values_sql'], {}), '(values_sql)\n', (2656, 2668), False, 'from sqlmodel import Session, select, text\n')]
|
"""(Basic) Message Tables/Models.
Models of the Traction tables for Messages (layer over AcaPy basic messaging).
"""
import uuid
from datetime import datetime
from typing import List, Optional
from sqlalchemy.orm import selectinload
from sqlmodel import Field, Relationship
from sqlalchemy import (
Column,
func,
String,
select,
desc,
text,
)
from sqlalchemy.dialects.postgresql import UUID, TIMESTAMP, ARRAY
from sqlmodel.ext.asyncio.session import AsyncSession
from api.db.models.base import BaseModel
from api.db.models.v1.contact import Contact
from api.endpoints.models.v1.errors import (
NotFoundError,
)
class Message(BaseModel, table=True):
"""Message.
Model for the Message table (postgresql specific dialects in use).
This will track Messages for the Tenants (between contacts).
Attributes:
message_id: Traction ID for message OR when receiving, it is the AcaPy message_id
tenant_id: Traction Tenant ID
contact_id: Traction Contact ID
        status: Business and Tenant indicator for Message status; independent of AcaPy
          Basic Message Exchange state
        role: sender or recipient
        deleted: Message "soft" delete indicator.
        tags: Set by tenant for arbitrary grouping of Messages
content: actual content of the message
state: The underlying AcaPy message exchange state
sent_time: sent_time data in AcaPy payload
created_at: Timestamp when record was created in Traction
updated_at: Timestamp when record was last modified in Traction
"""
message_id: uuid.UUID = Field(
sa_column=Column(
UUID(as_uuid=True),
primary_key=True,
server_default=text("gen_random_uuid()"),
)
)
tenant_id: uuid.UUID = Field(foreign_key="tenant.id", index=True)
contact_id: uuid.UUID = Field(foreign_key="contact.contact_id", index=True)
status: str = Field(nullable=False)
role: str = Field(nullable=False)
deleted: bool = Field(nullable=False, default=False)
tags: List[str] = Field(sa_column=Column(ARRAY(String)))
content: str = Field(nullable=True)
revocation_comment: str = Field(nullable=True)
# acapy data ---
state: str = Field(nullable=False)
sent_time: datetime = Field(sa_column=Column(TIMESTAMP, nullable=True))
# --- acapy data
# relationships ---
contact: Optional[Contact] = Relationship(back_populates="messages")
# --- relationships
created_at: datetime = Field(
sa_column=Column(TIMESTAMP, nullable=False, server_default=func.now())
)
updated_at: datetime = Field(
sa_column=Column(
TIMESTAMP, nullable=False, server_default=func.now(), onupdate=func.now()
)
)
@classmethod
async def get_by_id(
cls: "Message",
db: AsyncSession,
tenant_id: uuid.UUID,
message_id: uuid.UUID,
deleted: bool | None = False,
) -> "Message":
"""Get Message by id.
Find and return the database Message record
Args:
db: database session
tenant_id: Traction ID of tenant making the call
message_id: Traction ID of Message
Returns: The Traction Message (db) record
Raises:
NotFoundError: if the Message cannot be found by ID and deleted
flag
"""
q = (
select(cls)
.where(cls.tenant_id == tenant_id)
.where(cls.message_id == message_id)
.where(cls.deleted == deleted)
.options(selectinload(cls.contact))
)
q_result = await db.execute(q)
db_rec = q_result.scalar_one_or_none()
if not db_rec:
raise NotFoundError(
code="message.id_not_found",
title="Message does not exist",
detail=f"Message does not exist for id<{message_id}>",
)
return db_rec
@classmethod
async def list_by_contact_id(
cls: "Message",
db: AsyncSession,
tenant_id: uuid.UUID,
contact_id: uuid.UUID,
) -> List["Message"]:
"""List by Contact ID.
Find and return list of Message records for Contact.
tenant_id: Traction ID of tenant making the call
contact_id: Traction ID of Contact
Returns: List of Traction Message (db) records in descending order
"""
q = (
select(cls)
.where(cls.contact_id == contact_id)
.where(cls.tenant_id == tenant_id)
.options(selectinload(cls.contact))
.order_by(desc(cls.updated_at))
)
q_result = await db.execute(q)
db_recs = q_result.scalars()
return db_recs
@classmethod
async def list_by_tenant_id(
cls: "Message",
db: AsyncSession,
tenant_id: uuid.UUID,
) -> List["Message"]:
"""List by Tenant ID.
Find and return list of Message records for Tenant.
tenant_id: Traction ID of tenant making the call
Returns: List of Traction Message (db) records in descending order
"""
q = (
select(cls)
.where(cls.tenant_id == tenant_id)
.options(selectinload(cls.contact))
.order_by(desc(cls.updated_at))
)
q_result = await db.execute(q)
db_recs = q_result.scalars()
return db_recs
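# A minimal usage sketch (the engine and tenant_id values are hypothetical, not
# part of this module):
#     async with AsyncSession(engine) as db:
#         messages = await Message.list_by_tenant_id(db, tenant_id)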
|
[
"sqlmodel.Field",
"sqlmodel.Relationship"
] |
[((1808, 1850), 'sqlmodel.Field', 'Field', ([], {'foreign_key': '"""tenant.id"""', 'index': '(True)'}), "(foreign_key='tenant.id', index=True)\n", (1813, 1850), False, 'from sqlmodel import Field, Relationship\n'), ((1879, 1930), 'sqlmodel.Field', 'Field', ([], {'foreign_key': '"""contact.contact_id"""', 'index': '(True)'}), "(foreign_key='contact.contact_id', index=True)\n", (1884, 1930), False, 'from sqlmodel import Field, Relationship\n'), ((1949, 1970), 'sqlmodel.Field', 'Field', ([], {'nullable': '(False)'}), '(nullable=False)\n', (1954, 1970), False, 'from sqlmodel import Field, Relationship\n'), ((1987, 2008), 'sqlmodel.Field', 'Field', ([], {'nullable': '(False)'}), '(nullable=False)\n', (1992, 2008), False, 'from sqlmodel import Field, Relationship\n'), ((2029, 2065), 'sqlmodel.Field', 'Field', ([], {'nullable': '(False)', 'default': '(False)'}), '(nullable=False, default=False)\n', (2034, 2065), False, 'from sqlmodel import Field, Relationship\n'), ((2146, 2166), 'sqlmodel.Field', 'Field', ([], {'nullable': '(True)'}), '(nullable=True)\n', (2151, 2166), False, 'from sqlmodel import Field, Relationship\n'), ((2197, 2217), 'sqlmodel.Field', 'Field', ([], {'nullable': '(True)'}), '(nullable=True)\n', (2202, 2217), False, 'from sqlmodel import Field, Relationship\n'), ((2257, 2278), 'sqlmodel.Field', 'Field', ([], {'nullable': '(False)'}), '(nullable=False)\n', (2262, 2278), False, 'from sqlmodel import Field, Relationship\n'), ((2434, 2473), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""messages"""'}), "(back_populates='messages')\n", (2446, 2473), False, 'from sqlmodel import Field, Relationship\n'), ((2321, 2353), 'sqlalchemy.Column', 'Column', (['TIMESTAMP'], {'nullable': '(True)'}), '(TIMESTAMP, nullable=True)\n', (2327, 2353), False, 'from sqlalchemy import Column, func, String, select, desc, text\n'), ((3593, 3618), 'sqlalchemy.orm.selectinload', 'selectinload', (['cls.contact'], {}), '(cls.contact)\n', (3605, 3618), False, 'from sqlalchemy.orm import selectinload\n'), ((3757, 3890), 'api.endpoints.models.v1.errors.NotFoundError', 'NotFoundError', ([], {'code': '"""message.id_not_found"""', 'title': '"""Message does not exist"""', 'detail': 'f"""Message does not exist for id<{message_id}>"""'}), "(code='message.id_not_found', title='Message does not exist',\n detail=f'Message does not exist for id<{message_id}>')\n", (3770, 3890), False, 'from api.endpoints.models.v1.errors import NotFoundError\n'), ((4652, 4672), 'sqlalchemy.desc', 'desc', (['cls.updated_at'], {}), '(cls.updated_at)\n', (4656, 4672), False, 'from sqlalchemy import Column, func, String, select, desc, text\n'), ((5335, 5355), 'sqlalchemy.desc', 'desc', (['cls.updated_at'], {}), '(cls.updated_at)\n', (5339, 5355), False, 'from sqlalchemy import Column, func, String, select, desc, text\n'), ((1661, 1679), 'sqlalchemy.dialects.postgresql.UUID', 'UUID', ([], {'as_uuid': '(True)'}), '(as_uuid=True)\n', (1665, 1679), False, 'from sqlalchemy.dialects.postgresql import UUID, TIMESTAMP, ARRAY\n'), ((2111, 2124), 'sqlalchemy.dialects.postgresql.ARRAY', 'ARRAY', (['String'], {}), '(String)\n', (2116, 2124), False, 'from sqlalchemy.dialects.postgresql import UUID, TIMESTAMP, ARRAY\n'), ((1738, 1763), 'sqlalchemy.text', 'text', (['"""gen_random_uuid()"""'], {}), "('gen_random_uuid()')\n", (1742, 1763), False, 'from sqlalchemy import Column, func, String, select, desc, text\n'), ((2600, 2610), 'sqlalchemy.func.now', 'func.now', ([], {}), '()\n', (2608, 2610), False, 'from sqlalchemy import Column, func, String, select, desc, text\n'), ((2732, 2742), 'sqlalchemy.func.now', 'func.now', ([], {}), '()\n', (2740, 2742), False, 'from sqlalchemy import Column, func, String, select, desc, text\n'), ((2753, 2763), 'sqlalchemy.func.now', 'func.now', ([], {}), '()\n', (2761, 2763), False, 'from sqlalchemy import Column, func, String, select, desc, text\n'), ((4603, 4628), 'sqlalchemy.orm.selectinload', 'selectinload', (['cls.contact'], {}), '(cls.contact)\n', (4615, 4628), False, 'from sqlalchemy.orm import selectinload\n'), ((5286, 5311), 'sqlalchemy.orm.selectinload', 'selectinload', (['cls.contact'], {}), '(cls.contact)\n', (5298, 5311), False, 'from sqlalchemy.orm import selectinload\n'), ((5206, 5217), 'sqlalchemy.select', 'select', (['cls'], {}), '(cls)\n', (5212, 5217), False, 'from sqlalchemy import Column, func, String, select, desc, text\n'), ((3421, 3432), 'sqlalchemy.select', 'select', (['cls'], {}), '(cls)\n', (3427, 3432), False, 'from sqlalchemy import Column, func, String, select, desc, text\n'), ((4474, 4485), 'sqlalchemy.select', 'select', (['cls'], {}), '(cls)\n', (4480, 4485), False, 'from sqlalchemy import Column, func, String, select, desc, text\n')]
|
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from functools import partial
from typing import Tuple, Union
import megengine._internal as mgb
from ... import module as Float
from ...core import Parameter
from ...functional import conv_bias_activation
from ...module import Conv2d
from ...quantization.utils import register_method_to_class
class _ConvBnActivation2d(Conv2d):
r"""Applies a 2D convolution over an quantized input tensor, inference only.
The parameter is same with :class: `~.Conv2d`
"""
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: Union[int, Tuple[int, int]],
stride: Union[int, Tuple[int, int]] = 1,
padding: Union[int, Tuple[int, int]] = 0,
dilation: Union[int, Tuple[int, int]] = 1,
groups: int = 1,
conv_mode: str = "CROSS_CORRELATION",
compute_mode: str = "DEFAULT",
):
super().__init__(
in_channels,
out_channels,
kernel_size,
stride,
padding,
dilation,
groups,
True,
conv_mode,
compute_mode,
)
self.scale = 1.0
self.zero_point = 0.0
self.output_dtype = mgb.dtype.qint8(self.scale)
self.weight = self.weight.astype(self.output_dtype)
self.bias = self.bias.astype(mgb.dtype.qint32(self.scale))
def calc_conv_quantized(self, inp, nonlinear_mode="IDENTITY"):
inp_scale = mgb.dtype.get_scale(inp.dtype)
w_scale = mgb.dtype.get_scale(self.weight.dtype)
bias_scale = inp_scale * w_scale
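        # The qint32 bias is scaled by inp_scale * w_scale so it can be added
        # directly to the int8 x int8 accumulator, which carries that same scale.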
return conv_bias_activation(
inp,
self.weight,
self.bias.astype(mgb.dtype.qint32(bias_scale)),
self.output_dtype,
self.stride,
self.padding,
self.dilation,
self.groups,
conv_mode=self.conv_mode,
compute_mode=self.compute_mode,
nonlinear_mode=nonlinear_mode,
)
class ConvBn2d(_ConvBnActivation2d):
def forward(self, inp):
if self.training:
raise ValueError("quantized module only support inference.")
return self.calc_conv_quantized(inp, nonlinear_mode="IDENTITY")
class ConvBnRelu2d(_ConvBnActivation2d):
def forward(self, inp):
if self.training:
raise ValueError("quantized module only support inference.")
return self.calc_conv_quantized(inp, nonlinear_mode="RELU")
def to_quantized(quantized_class, float_module):
qconv = quantized_class(
float_module.conv.in_channels,
float_module.conv.out_channels,
float_module.conv.kernel_size,
float_module.conv.stride,
float_module.conv.padding,
float_module.conv.dilation,
float_module.conv.groups,
)
w_fold, b_fold = float_module.fold_weight_bias(
float_module.bn.running_mean, float_module.bn.running_var
)
weight = w_fold.astype(float_module.weight_observer.get_dtype())
qconv.output_dtype = float_module.act_observer.get_dtype()
qconv.weight = Parameter(weight.numpy())
qconv.bias = Parameter(b_fold.numpy())
qconv.scale, qconv.zero_point = float_module.act_observer.get_qparams()
return qconv
# replace :class:`~.module.QATModule`'s ``to_quantized`` method.
# implemented here to avoid circular import.
register_method_to_class(Float.ConvBn2d)(partial(to_quantized, ConvBn2d))
register_method_to_class(Float.ConvBnRelu2d)(partial(to_quantized, ConvBnRelu2d))
|
[
"megengine._internal.dtype.qint32",
"megengine._internal.dtype.qint8",
"megengine._internal.dtype.get_scale"
] |
[((3763, 3794), 'functools.partial', 'partial', (['to_quantized', 'ConvBn2d'], {}), '(to_quantized, ConvBn2d)\n', (3770, 3794), False, 'from functools import partial\n'), ((3841, 3876), 'functools.partial', 'partial', (['to_quantized', 'ConvBnRelu2d'], {}), '(to_quantized, ConvBnRelu2d)\n', (3848, 3876), False, 'from functools import partial\n'), ((1573, 1600), 'megengine._internal.dtype.qint8', 'mgb.dtype.qint8', (['self.scale'], {}), '(self.scale)\n', (1588, 1600), True, 'import megengine._internal as mgb\n'), ((1816, 1846), 'megengine._internal.dtype.get_scale', 'mgb.dtype.get_scale', (['inp.dtype'], {}), '(inp.dtype)\n', (1835, 1846), True, 'import megengine._internal as mgb\n'), ((1865, 1903), 'megengine._internal.dtype.get_scale', 'mgb.dtype.get_scale', (['self.weight.dtype'], {}), '(self.weight.dtype)\n', (1884, 1903), True, 'import megengine._internal as mgb\n'), ((1698, 1726), 'megengine._internal.dtype.qint32', 'mgb.dtype.qint32', (['self.scale'], {}), '(self.scale)\n', (1714, 1726), True, 'import megengine._internal as mgb\n'), ((2053, 2081), 'megengine._internal.dtype.qint32', 'mgb.dtype.qint32', (['bias_scale'], {}), '(bias_scale)\n', (2069, 2081), True, 'import megengine._internal as mgb\n')]
|