code | apis | extract_api
---|---|---
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import os
import time
import numpy as np
# pylint: disable=import-error
import model as snet_model
import quantizable_model as quantizable_snet_model
import megengine
import megengine.device as device
import megengine.autodiff as autodiff
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.optimizer as optim
import megengine.jit as jit
import megengine.amp as amp
import megengine.quantization as Q
logging = megengine.logger.get_logger()
from dataset import get_dataloader
DEFAULT_QAT_CONFIG = {
    "ema": Q.ema_fakequant_qconfig,
    "ema_lowbit": Q.ema_lowbit_fakequant_qconfig,
    "sync_ema": Q.sync_ema_fakequant_qconfig,
    "min_max": Q.min_max_fakequant_qconfig,
    "tqt": Q.tqt_qconfig,
}
def get_qconfig(config_name: str):
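    """Look up a quantization-aware-training qconfig by its short name."""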
return DEFAULT_QAT_CONFIG[config_name]
def main():
parser = argparse.ArgumentParser(description="shufflenet benchmark")
parser.add_argument(
"-a",
"--arch",
default="shufflenet_v2_x2_0",
help="model architecture (default: shufflenet_v2_x1_0)",
)
parser.add_argument(
"-n",
"--ngpus",
default=1,
type=int,
help="number of GPUs per node (default: None, use all available GPUs)",
)
parser.add_argument(
"-s",
"--steps",
default=200,
type=int,
help="number of train steps (default: 200)",
)
parser.add_argument(
"-b",
"--batch-size",
metavar="SIZE",
default=64,
type=int,
help="batch size for single GPU (default: 128)",
)
parser.add_argument(
"--trace",
action='store_true',
default=False,
help="whether use trace or not (default: False)",
)
parser.add_argument(
"--symbolic",
action='store_true',
default=False,
help="whether use symbolic trace or not (default: False)",
)
parser.add_argument(
"--lr",
metavar="LR",
default=0.001,
help="learning rate for single GPU (default: 0.001)",
)
parser.add_argument("--momentum", default=0.9, help="momentum (default: 0.9)")
    parser.add_argument(
        "--weight-decay", default=4e-5, type=float, help="weight decay (default: 4e-5)"
    )
parser.add_argument(
"-p",
"--print-freq",
default=1,
type=int,
metavar="N",
help="print frequency (default: 1)",
)
parser.add_argument(
"-m",
"--mode",
default="normal",
type=str,
choices=["normal", "mp", "qat"],
help="Quantization Mode\n"
"normal: no quantization, using float32\n"
"mp: input type is fp16\n"
"qat: quantization aware training"
)
parser.add_argument(
"--qat-config",
default="min_max",
type=str,
choices=["min_max", "ema", "ema_lowbit", "sync_ema", "tqt"],
help="quantization aware training config\n"
"min_max: min_max_fakequant_qconfig\n"
"ema: ema_fakequant_qconfig\n"
"ema_lowbit: ema_lowbit_fakequant_qconfig\n"
"sync_ema: sync_ema_fakequant_qconfig\n"
"tqt: tqt_qconfig"
)
parser.add_argument("--dist-addr", default="localhost")
parser.add_argument("--dist-port", type=int, default=0)
parser.add_argument("--world-size", type=int, default=None)
parser.add_argument("--rank", default=0)
parser.add_argument("--loader", default=False, action="store_true", help="whether use loader")
parser.add_argument("--preload", default=False, action="store_true", help="whether use preload")
args = parser.parse_args()
if args.world_size is None:
args.world_size = args.ngpus
if args.world_size > 1:
# launch processes
train_func = dist.launcher(worker, master_ip=args.dist_addr, port=args.dist_port,
world_size=args.world_size, n_gpus=args.ngpus, rank_start=args.rank * args.ngpus)
train_func(args)
else:
worker(args)
def worker(args):
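    """Per-process benchmark body: build (and optionally quantize) the model, then time the training steps."""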
steps = args.steps
# build model
shufflenet = quantizable_snet_model if args.mode == "qat" else snet_model
model = shufflenet.__dict__[args.arch]()
if args.mode == "qat":
if args.qat_config == "sync_ema":
assert args.ngpus > 1, "sync_ema does not support ngpus={}".format(args.ngpus)
        qconfig = get_qconfig(args.qat_config)
        model = Q.quantize_qat(module=model, qconfig=qconfig)
model.train()
Q.enable_observer(model)
Q.enable_fake_quant(model)
# Sync parameters
if args.world_size > 1:
dist.bcast_list_(model.parameters(), dist.WORLD)
# Autodiff gradient manager
gm = autodiff.GradManager().attach(
model.parameters(),
callbacks=dist.make_allreduce_cb("SUM") if args.world_size > 1 else None,
)
# Optimizer
params_wd = []
params_nwd = []
params_scale = []
for n, p in model.named_parameters():
if n.find("weight") >= 0 and len(p.shape) > 1:
params_wd.append(p)
elif n.find("scale") >= 0:
params_scale.append(p)
else:
params_nwd.append(p)
    param_groups = [
        {"params": params_wd},
        {"params": params_nwd, "weight_decay": 0},
    ]
    if params_scale:
        # learnable quantization scales (QAT) would otherwise never be updated; no decay on them
        param_groups.append({"params": params_scale, "weight_decay": 0})
    opt = optim.SGD(
        param_groups,
        lr=args.lr,
        momentum=args.momentum,
        weight_decay=args.weight_decay * args.world_size,  # scale weight decay in "SUM" mode
    )
    # train step func (amp-autocast wrapped; optionally jit-traced below)
@amp.autocast(enabled=args.mode == "mp")
def train_step(image, label):
with gm:
logits = model(image)
loss = F.nn.cross_entropy(logits, label, label_smooth=0.1)
gm.backward(loss)
opt.step().clear_grad()
return loss
if args.trace:
if args.symbolic:
            train_step = jit.trace(
                train_step,
                symbolic=True,
                sublinear_memory_config=jit.SublinearMemoryConfig(genetic_nr_iter=50),
                symbolic_shape=False,
            )
else:
train_step = jit.trace(train_step, symbolic=False, symbolic_shape=False)
else:
        assert not args.symbolic, "invalid arguments: trace=False, symbolic=True"
# start training
objs = AverageMeter("Loss")
clck = AverageMeter("Time")
if args.loader:
dataloader = iter(get_dataloader(args))
        image, label = next(dataloader)
else:
image = np.random.randn(args.batch_size, 3, 224, 224).astype("float32")
label = np.random.randint(0, 1000, size=(args.batch_size,)).astype("int32")
# warm up
for step in range(10):
if args.loader:
            image, label = next(dataloader)
if not args.preload:
image = megengine.tensor(image, dtype="float32")
label = megengine.tensor(label, dtype="int32")
else:
image = megengine.tensor(image, dtype="float32")
label = megengine.tensor(label, dtype="int32")
loss = train_step(image, label)
loss.item()
for step in range(0, steps):
t = time.time()
if args.loader:
            image, label = next(dataloader)
if not args.preload:
image = megengine.tensor(image, dtype="float32")
label = megengine.tensor(label, dtype="int32")
else:
image = megengine.tensor(image, dtype="float32")
label = megengine.tensor(label, dtype="int32")
loss = train_step(image, label)
objs.update(loss.item())
clck.update(time.time() - t)
if step % args.print_freq == 0 and dist.get_rank() == 0:
print(
"Step {}, {}, {}".format(
step,
objs,
clck,
))
objs.reset()
if dist.get_rank() == 0:
print("="*20, "summary", "="*20)
print(" benchmark: shufflent")
if args.trace:
print(" mode: trace(symbolic={})".format("True, sublinear=True" if args.symbolic else "False"))
else:
print(" mode: imperative")
print(" loader: {}".format("" if not args.loader else "--loader"))
if args.loader:
print(" preload: {}".format("" if not args.preload else "--preload"))
print(" arch: {}".format(args.arch))
print("train_mode: {}".format(args.mode))
print(" batchsize: {}".format(args.batch_size))
print(" #GPU: {}".format(args.ngpus))
print(" avg time: {:.3f} seconds".format(clck.avg))
class AverageMeter:
"""Computes and stores the average and current value"""
def __init__(self, name, fmt=":.3f"):
self.name = name
self.fmt = fmt
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __str__(self):
fmtstr = "{name} {val" + self.fmt + "} ({avg" + self.fmt + "})"
return fmtstr.format(**self.__dict__)
if __name__ == "__main__":
main()
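# Example invocations (sketch only; "benchmark.py" is a placeholder for the actual script filename):
#   python3 benchmark.py -a shufflenet_v2_x2_0 -b 64 --steps 200 --trace
#   python3 benchmark.py -m qat --qat-config ema -n 2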
|
[
"megengine.jit.trace",
"megengine.distributed.get_rank",
"megengine.quantization.enable_fake_quant",
"megengine.tensor",
"megengine.functional.nn.cross_entropy",
"megengine.quantization.enable_observer",
"megengine.quantization.quantize_qat",
"megengine.jit.SublinearMemoryConfig",
"megengine.optimizer.SGD",
"megengine.logger.get_logger",
"megengine.distributed.make_allreduce_cb",
"megengine.distributed.launcher",
"megengine.amp.autocast",
"megengine.autodiff.GradManager"
] |
[((908, 937), 'megengine.logger.get_logger', 'megengine.logger.get_logger', ([], {}), '()\n', (935, 937), False, 'import megengine\n'), ((1301, 1360), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""shufflenet benchmark"""'}), "(description='shufflenet benchmark')\n", (1324, 1360), False, 'import argparse\n'), ((5671, 5843), 'megengine.optimizer.SGD', 'optim.SGD', (["[{'params': params_wd}, {'params': params_nwd, 'weight_decay': 0}]"], {'lr': 'args.lr', 'momentum': 'args.momentum', 'weight_decay': '(args.weight_decay * args.world_size)'}), "([{'params': params_wd}, {'params': params_nwd, 'weight_decay': 0}\n ], lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay *\n args.world_size)\n", (5680, 5843), True, 'import megengine.optimizer as optim\n'), ((5962, 6001), 'megengine.amp.autocast', 'amp.autocast', ([], {'enabled': "(args.mode == 'mp')"}), "(enabled=args.mode == 'mp')\n", (5974, 6001), True, 'import megengine.amp as amp\n'), ((4256, 4414), 'megengine.distributed.launcher', 'dist.launcher', (['worker'], {'master_ip': 'args.dist_addr', 'port': 'args.dist_port', 'world_size': 'args.world_size', 'n_gpus': 'args.ngpus', 'rank_start': '(args.rank * args.ngpus)'}), '(worker, master_ip=args.dist_addr, port=args.dist_port,\n world_size=args.world_size, n_gpus=args.ngpus, rank_start=args.rank *\n args.ngpus)\n', (4269, 4414), True, 'import megengine.distributed as dist\n'), ((4903, 4948), 'megengine.quantization.quantize_qat', 'Q.quantize_qat', ([], {'module': 'model', 'qconfig': 'qconfig'}), '(module=model, qconfig=qconfig)\n', (4917, 4948), True, 'import megengine.quantization as Q\n'), ((4980, 5004), 'megengine.quantization.enable_observer', 'Q.enable_observer', (['model'], {}), '(model)\n', (4997, 5004), True, 'import megengine.quantization as Q\n'), ((5013, 5039), 'megengine.quantization.enable_fake_quant', 'Q.enable_fake_quant', (['model'], {}), '(model)\n', (5032, 5039), True, 'import megengine.quantization as Q\n'), ((7515, 7526), 'time.time', 'time.time', ([], {}), '()\n', (7524, 7526), False, 'import time\n'), ((8241, 8256), 'megengine.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (8254, 8256), True, 'import megengine.distributed as dist\n'), ((5190, 5212), 'megengine.autodiff.GradManager', 'autodiff.GradManager', ([], {}), '()\n', (5210, 5212), True, 'import megengine.autodiff as autodiff\n'), ((6106, 6157), 'megengine.functional.nn.cross_entropy', 'F.nn.cross_entropy', (['logits', 'label'], {'label_smooth': '(0.1)'}), '(logits, label, label_smooth=0.1)\n', (6124, 6157), True, 'import megengine.functional as F\n'), ((6480, 6539), 'megengine.jit.trace', 'jit.trace', (['train_step'], {'symbolic': '(False)', 'symbolic_shape': '(False)'}), '(train_step, symbolic=False, symbolic_shape=False)\n', (6489, 6539), True, 'import megengine.jit as jit\n'), ((6768, 6788), 'dataset.get_dataloader', 'get_dataloader', (['args'], {}), '(args)\n', (6782, 6788), False, 'from dataset import get_dataloader\n'), ((7308, 7348), 'megengine.tensor', 'megengine.tensor', (['image'], {'dtype': '"""float32"""'}), "(image, dtype='float32')\n", (7324, 7348), False, 'import megengine\n'), ((7369, 7407), 'megengine.tensor', 'megengine.tensor', (['label'], {'dtype': '"""int32"""'}), "(label, dtype='int32')\n", (7385, 7407), False, 'import megengine\n'), ((7789, 7829), 'megengine.tensor', 'megengine.tensor', (['image'], {'dtype': '"""float32"""'}), "(image, dtype='float32')\n", (7805, 7829), False, 'import megengine\n'), ((7850, 7888), 'megengine.tensor', 
'megengine.tensor', (['label'], {'dtype': '"""int32"""'}), "(label, dtype='int32')\n", (7866, 7888), False, 'import megengine\n'), ((5267, 5296), 'megengine.distributed.make_allreduce_cb', 'dist.make_allreduce_cb', (['"""SUM"""'], {}), "('SUM')\n", (5289, 5296), True, 'import megengine.distributed as dist\n'), ((6855, 6900), 'numpy.random.randn', 'np.random.randn', (['args.batch_size', '(3)', '(224)', '(224)'], {}), '(args.batch_size, 3, 224, 224)\n', (6870, 6900), True, 'import numpy as np\n'), ((6935, 6986), 'numpy.random.randint', 'np.random.randint', (['(0)', '(1000)'], {'size': '(args.batch_size,)'}), '(0, 1000, size=(args.batch_size,))\n', (6952, 6986), True, 'import numpy as np\n'), ((7170, 7210), 'megengine.tensor', 'megengine.tensor', (['image'], {'dtype': '"""float32"""'}), "(image, dtype='float32')\n", (7186, 7210), False, 'import megengine\n'), ((7235, 7273), 'megengine.tensor', 'megengine.tensor', (['label'], {'dtype': '"""int32"""'}), "(label, dtype='int32')\n", (7251, 7273), False, 'import megengine\n'), ((7651, 7691), 'megengine.tensor', 'megengine.tensor', (['image'], {'dtype': '"""float32"""'}), "(image, dtype='float32')\n", (7667, 7691), False, 'import megengine\n'), ((7716, 7754), 'megengine.tensor', 'megengine.tensor', (['label'], {'dtype': '"""int32"""'}), "(label, dtype='int32')\n", (7732, 7754), False, 'import megengine\n'), ((7984, 7995), 'time.time', 'time.time', ([], {}), '()\n', (7993, 7995), False, 'import time\n'), ((8044, 8059), 'megengine.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (8057, 8059), True, 'import megengine.distributed as dist\n'), ((6372, 6417), 'megengine.jit.SublinearMemoryConfig', 'jit.SublinearMemoryConfig', ([], {'genetic_nr_iter': '(50)'}), '(genetic_nr_iter=50)\n', (6397, 6417), True, 'import megengine.jit as jit\n')]
|
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
import megengine as mge
import megengine.module as M
import numpy as np
import pytest
from basecls.models.repvgg import RepVGGBlock
@pytest.mark.parametrize("w_in", [32, 64])
@pytest.mark.parametrize("w_out", [64])
@pytest.mark.parametrize("stride", [1, 2])
@pytest.mark.parametrize("groups", [1, 2, 4])
@pytest.mark.parametrize("se_r", [0.0, 0.25])
@pytest.mark.parametrize("act_name", ["relu"])
def test_block(w_in, w_out, stride, groups, se_r, act_name):
m = RepVGGBlock(w_in, w_out, stride, groups, se_r, act_name, deploy=False)
assert isinstance(m, M.Module)
m.eval()
x = mge.random.uniform(size=(2, w_in, 8, 8))
y0 = m(x)
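    # convert_to_deploy fuses the training-time branches into a single conv (RepVGG re-parameterization)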
m = RepVGGBlock.convert_to_deploy(m)
y1 = m(x)
np.testing.assert_allclose(y1.numpy(), y0.numpy(), rtol=1e-4, atol=1e-6)
|
[
"megengine.random.uniform"
] |
[((218, 259), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""w_in"""', '[32, 64]'], {}), "('w_in', [32, 64])\n", (241, 259), False, 'import pytest\n'), ((261, 299), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""w_out"""', '[64]'], {}), "('w_out', [64])\n", (284, 299), False, 'import pytest\n'), ((301, 342), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""stride"""', '[1, 2]'], {}), "('stride', [1, 2])\n", (324, 342), False, 'import pytest\n'), ((344, 388), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""groups"""', '[1, 2, 4]'], {}), "('groups', [1, 2, 4])\n", (367, 388), False, 'import pytest\n'), ((390, 434), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""se_r"""', '[0.0, 0.25]'], {}), "('se_r', [0.0, 0.25])\n", (413, 434), False, 'import pytest\n'), ((436, 481), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""act_name"""', "['relu']"], {}), "('act_name', ['relu'])\n", (459, 481), False, 'import pytest\n'), ((551, 621), 'basecls.models.repvgg.RepVGGBlock', 'RepVGGBlock', (['w_in', 'w_out', 'stride', 'groups', 'se_r', 'act_name'], {'deploy': '(False)'}), '(w_in, w_out, stride, groups, se_r, act_name, deploy=False)\n', (562, 621), False, 'from basecls.models.repvgg import RepVGGBlock\n'), ((679, 719), 'megengine.random.uniform', 'mge.random.uniform', ([], {'size': '(2, w_in, 8, 8)'}), '(size=(2, w_in, 8, 8))\n', (697, 719), True, 'import megengine as mge\n'), ((743, 775), 'basecls.models.repvgg.RepVGGBlock.convert_to_deploy', 'RepVGGBlock.convert_to_deploy', (['m'], {}), '(m)\n', (772, 775), False, 'from basecls.models.repvgg import RepVGGBlock\n')]
|
from fastapi import Depends
from sqlmodel import select
from joj.horse import models, schemas
from joj.horse.schemas import StandardListResponse
from joj.horse.schemas.auth import Authentication
from joj.horse.utils.parser import parse_ordering_query, parse_pagination_query
from joj.horse.utils.router import MyRouter
router = MyRouter()
router_name = "problem_groups"
router_tag = "problem group"
router_prefix = "/api/v1"
@router.get("")
async def list_problem_groups(
ordering: schemas.OrderingQuery = Depends(parse_ordering_query()),
pagination: schemas.PaginationQuery = Depends(parse_pagination_query),
auth: Authentication = Depends(),
) -> StandardListResponse[schemas.ProblemGroup]:
statement = select(models.ProblemGroup)
problem_groups, count = await models.ProblemGroup.execute_list_statement(
statement, ordering, pagination
)
return StandardListResponse(problem_groups, count)
|
[
"sqlmodel.select"
] |
[((330, 340), 'joj.horse.utils.router.MyRouter', 'MyRouter', ([], {}), '()\n', (338, 340), False, 'from joj.horse.utils.router import MyRouter\n'), ((589, 620), 'fastapi.Depends', 'Depends', (['parse_pagination_query'], {}), '(parse_pagination_query)\n', (596, 620), False, 'from fastapi import Depends\n'), ((649, 658), 'fastapi.Depends', 'Depends', ([], {}), '()\n', (656, 658), False, 'from fastapi import Depends\n'), ((725, 752), 'sqlmodel.select', 'select', (['models.ProblemGroup'], {}), '(models.ProblemGroup)\n', (731, 752), False, 'from sqlmodel import select\n'), ((888, 931), 'joj.horse.schemas.StandardListResponse', 'StandardListResponse', (['problem_groups', 'count'], {}), '(problem_groups, count)\n', (908, 931), False, 'from joj.horse.schemas import StandardListResponse\n'), ((522, 544), 'joj.horse.utils.parser.parse_ordering_query', 'parse_ordering_query', ([], {}), '()\n', (542, 544), False, 'from joj.horse.utils.parser import parse_ordering_query, parse_pagination_query\n'), ((787, 862), 'joj.horse.models.ProblemGroup.execute_list_statement', 'models.ProblemGroup.execute_list_statement', (['statement', 'ordering', 'pagination'], {}), '(statement, ordering, pagination)\n', (829, 862), False, 'from joj.horse import models, schemas\n')]
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as TF
from tabulate import tabulate
import megengine as mge
import megengine.functional as MF
import megengine.module as MM
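# Pre-instantiated (MegEngine module, CUDA-resident PyTorch module) pairs, so
# module construction cost stays out of the timed loops below.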
module_cache = {
"conv2d": (MM.Conv2d(32, 32, 3, 1, 0), nn.Conv2d(32, 32, 3, 1, 0).cuda()),
"dw_conv2d": (
MM.Conv2d(32, 32, 3, 1, 0, groups=32),
nn.Conv2d(32, 32, 3, 1, 0, groups=32).cuda(),
),
"conv3d": (MM.Conv3d(32, 32, 3, 1, 0), nn.Conv3d(32, 32, 3, 1, 0).cuda()),
"ConvTranspose2d": (
MM.ConvTranspose2d(32, 32, 3, 1, 0),
nn.ConvTranspose2d(32, 32, 3, 1, 0).cuda(),
),
"BatchNorm2d": (MM.BatchNorm2d(64), nn.BatchNorm2d(64).cuda()),
"Linear": (MM.Linear(1000, 1000), nn.Linear(1000, 1000).cuda()),
}
test_cases = [
# (mge op, torch op, small inps, large inps, unpack_inps, rep)
(
"adaptive_avg_pool2d",
lambda x: MF.adaptive_avg_pool2d(x, (7, 7)),
lambda x: TF.adaptive_avg_pool2d(x, (7, 7)),
[(2, 32, 16, 16)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"adaptive_max_pool2d",
lambda x: MF.adaptive_max_pool2d(x, (7, 7)),
lambda x: TF.adaptive_max_pool2d(x, (7, 7)),
[(2, 32, 16, 16)],
[(64, 512, 16, 16)],
True,
1000,
),
("argsort", MF.argsort, torch.argsort, [(1000,)], [(1000, 1000),], True, 1000),
(
"avg_pool2d",
lambda x: MF.avg_pool2d(x, 2),
lambda x: TF.avg_pool2d(x, 2),
[(2, 32, 16, 16)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"broadcast",
lambda x: MF.broadcast_to(x, (5,) + x.shape),
lambda x: torch.broadcast_to(x, (5,) + x.shape),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"batchedmatmul",
MF.matmul,
torch.matmul,
[(8, 64, 32), (8, 32, 64)],
[(8, 2048, 512), (8, 512, 2048)],
True,
1000,
),
(
"batchnrom2d",
lambda x: module_cache["BatchNorm2d"][0](x),
lambda x: module_cache["BatchNorm2d"][1](x),
[(2, 64, 16, 16)],
[(64, 64, 128, 128)],
True,
1000,
),
(
"concat",
MF.concat,
torch.cat,
[(20, 100), (50, 100), (30, 100)],
[(64, 512, 16, 16), (64, 512, 16, 16), (64, 512, 16, 16)],
False,
1000,
),
(
"conv2d",
lambda x: module_cache["conv2d"][0](x),
lambda x: module_cache["conv2d"][1](x),
[(2, 32, 16, 16)],
[(32, 32, 128, 128)],
True,
1000,
),
(
"conv3d",
lambda x: module_cache["conv3d"][0](x),
lambda x: module_cache["conv3d"][1](x),
[(2, 32, 8, 8, 8)],
[(32, 32, 16, 16, 16)],
True,
1000,
),
(
"convTranspose2d",
lambda x: module_cache["ConvTranspose2d"][0](x),
lambda x: module_cache["ConvTranspose2d"][1](x),
[(2, 32, 16, 16)],
[(32, 32, 128, 128)],
True,
1000,
),
(
"dropout",
lambda x: MF.dropout(x, 0.5),
TF.dropout,
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"dw_conv2d",
lambda x: module_cache["dw_conv2d"][0](x),
lambda x: module_cache["dw_conv2d"][1](x),
[(2, 32, 16, 16)],
[(32, 32, 128, 128)],
True,
1000,
),
(
"elemwise.unary",
MF.log,
torch.log,
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"elemwise.binary",
MF.add,
torch.add,
[(100, 100), (100, 100)],
[(64, 512, 16, 16), (64, 512, 16, 16)],
True,
1000,
),
(
"expand_dims",
lambda x: MF.expand_dims(x, 0),
lambda x: torch.unsqueeze(x, 0),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
("gelu", MF.gelu, TF.gelu, [(100, 100)], [(64, 512, 16, 16)], True, 1000),
("hswish", MF.hswish, TF.hardswish, [(100, 100)], [(64, 512, 16, 16)], True, 1000),
(
"hsigmoid",
MF.hsigmoid,
TF.hardsigmoid,
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
("isinf", MF.isinf, torch.isinf, [(100, 100)], [(64, 512, 16, 16)], True, 1000),
(
"indeixngMultiAxisVec",
lambda x: x[[1, 3, 5], [1, 3, 5], [1, 3, 5], [1, 3, 5]],
lambda x: x[[1, 3, 5], [1, 3, 5], [1, 3, 5], [1, 3, 5]],
[(10, 10, 10, 10)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"logsigmoid",
MF.logsigmoid,
TF.logsigmoid,
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"leaky_relu",
lambda x: MF.leaky_relu(x, 0.5),
lambda x: TF.leaky_relu(x, 0.5),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"linear",
lambda x: module_cache["Linear"][0](x),
lambda x: module_cache["Linear"][1](x),
[(10, 1000)],
[(64, 128, 1000)],
True,
1000,
),
("matinv", MF.matinv, torch.inverse, [(10, 10)], [(30, 30)], True, 1000),
(
"matmul",
MF.matmul,
torch.matmul,
[(64, 32), (32, 64)],
[(2048, 1024), (1024, 2048)],
True,
1000,
),
(
"max_pool2d",
lambda x: MF.max_pool2d(x, 2),
lambda x: TF.max_pool2d(x, 2),
[(2, 32, 16, 16)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"normal",
lambda x: mge.random.normal(0, 1, x.shape),
lambda x: torch.randn(x.shape, device="cuda"),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"prelu",
MF.prelu,
TF.prelu,
[(100, 100), (1,)],
[(64, 512, 16, 16), (1,)],
True,
1000,
),
(
"reduce.max",
lambda x: MF.max(x, 0),
lambda x: torch.max(x, 0),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"reduce.mean",
lambda x: MF.mean(x, 0),
lambda x: torch.mean(x, 0),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
("relu", MF.relu, TF.relu, [(100, 100)], [(64, 512, 16, 16)], True, 1000),
("relu6", MF.relu6, TF.relu6, [(100, 100)], [(64, 512, 16, 16)], True, 1000),
(
"repeat",
lambda x: MF.repeat(x, 5),
lambda x: torch.repeat_interleave(x, 5),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
("silu", MF.silu, TF.silu, [(100, 100)], [(64, 512, 16, 16)], True, 1000),
(
"split",
lambda x: MF.split(x, 5),
lambda x: torch.split(x, 5),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
("sigmoid", MF.sigmoid, TF.sigmoid, [(100, 100)], [(64, 512, 16, 16)], True, 1000),
(
"softmax",
lambda x: MF.softmax(x, axis=1),
lambda x: TF.softmax(x, dim=1),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"softplus",
MF.softplus,
TF.softplus,
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"squeeze",
lambda x: MF.squeeze(x, 0),
lambda x: torch.squeeze(x, 0),
[(1, 100, 100)],
[(1, 64, 512, 16, 16)],
True,
1000,
),
(
"stack",
MF.stack,
torch.stack,
[(100, 100), (100, 100)],
[(64, 512, 16, 16), (64, 512, 16, 16)],
False,
10000,
),
(
"subtensor",
lambda x: x[0:20, 10:60],
lambda x: x[0:20, 10:60],
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"topk",
lambda x: MF.topk(x, 10),
lambda x: torch.topk(x, 10),
[(100, 100)],
[(1000, 1000)],
True,
1000,
),
(
"tile",
lambda x: MF.tile(x, (2,) * len(x.shape)),
lambda x: torch.tile(x, (2,) * len(x.shape)),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"transpose",
lambda x: MF.transpose(x, list(range(len(x.shape)))[::-1]),
lambda x: torch.permute(x, list(range(len(x.shape)))[::-1]),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"where",
lambda x: MF.where(x > 0.5, x, x),
lambda x: torch.where(x > 0.5, x, x),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"uniform",
lambda x: mge.random.uniform(0, 1, x.shape),
lambda x: torch.rand(x.shape, device="cuda"),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
]
def perf_func(func, inps, reps, unpack_inps, is_mge):
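    """Time `reps` calls of `func`, synchronizing the device before and after the loop."""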
if is_mge:
mge._full_sync()
tik = time.time()
for _ in range(reps):
if unpack_inps:
out = func(*inps)
else:
out = func(inps)
mge._full_sync()
else:
torch.cuda.synchronize()
with torch.no_grad():
tik = time.time()
for _ in range(reps):
if unpack_inps:
out = func(*inps)
else:
out = func(inps)
torch.cuda.synchronize()
return time.time() - tik
def get_avg_time(func, inps, reps, unpack_inps, is_mge):
# warm up
for _ in range(2):
t = perf_func(func, inps, reps, unpack_inps, is_mge)
times = []
for _ in range(5):
t = perf_func(func, inps, reps, unpack_inps, is_mge)
times.append(t)
return np.mean(times)
def get_perf_results(mge_func, torch_func, shapes, unpack_inps, reps):
inps = [np.random.randn(*shape) for shape in shapes]
inps_mge = [mge.tensor(inp, dtype="float32") for inp in inps]
avg_time_mge = get_avg_time(mge_func, inps_mge, reps, unpack_inps, True)
inps_torch = [torch.Tensor(inp).type(torch.float).cuda() for inp in inps]
avg_time_torch = get_avg_time(torch_func, inps_torch, reps, unpack_inps, False)
return avg_time_mge, avg_time_torch
if __name__ == "__main__":
header = [
"opr_name",
"time(mge/pytorch; small input)",
"time(mge/pytorch; large input)",
]
table = []
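    # Each reported value is avg_time_mge / avg_time_torch, so ratios below 1.0 favor MegEngine.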
for case in test_cases:
assert len(case) == 7
name, mge_func, torch_func, small_shapes, large_shapes, unpack_inps, reps = case
data = []
data.append(name)
print("========== op: {}".format(name))
avg_time_mge, avg_time_torch = get_perf_results(
mge_func, torch_func, small_shapes, unpack_inps, reps
)
print("mge time: {}".format(avg_time_mge))
print("torch time: {}".format(avg_time_torch))
data.append("{:.2f}".format(avg_time_mge / avg_time_torch))
avg_time_mge, avg_time_torch = get_perf_results(
mge_func, torch_func, large_shapes, unpack_inps, reps
)
print("mge time: {}".format(avg_time_mge))
print("torch time: {}".format(avg_time_torch))
data.append("{:.2f}".format(avg_time_mge / avg_time_torch))
table.append(data)
print(tabulate(table, header, tablefmt="github"))
|
[
"megengine.random.normal",
"megengine.functional.adaptive_max_pool2d",
"megengine.functional.avg_pool2d",
"megengine.module.Conv2d",
"megengine.functional.repeat",
"megengine.functional.max_pool2d",
"megengine.functional.squeeze",
"megengine.module.BatchNorm2d",
"megengine.functional.leaky_relu",
"megengine.functional.broadcast_to",
"megengine.functional.split",
"megengine.tensor",
"megengine.functional.dropout",
"megengine.functional.adaptive_avg_pool2d",
"megengine.module.ConvTranspose2d",
"megengine.functional.max",
"megengine._full_sync",
"megengine.module.Linear",
"megengine.module.Conv3d",
"megengine.functional.expand_dims",
"megengine.functional.softmax",
"megengine.functional.topk",
"megengine.random.uniform",
"megengine.functional.mean",
"megengine.functional.where"
] |
[((10595, 10609), 'numpy.mean', 'np.mean', (['times'], {}), '(times)\n', (10602, 10609), True, 'import numpy as np\n'), ((629, 655), 'megengine.module.Conv2d', 'MM.Conv2d', (['(32)', '(32)', '(3)', '(1)', '(0)'], {}), '(32, 32, 3, 1, 0)\n', (638, 655), True, 'import megengine.module as MM\n'), ((720, 757), 'megengine.module.Conv2d', 'MM.Conv2d', (['(32)', '(32)', '(3)', '(1)', '(0)'], {'groups': '(32)'}), '(32, 32, 3, 1, 0, groups=32)\n', (729, 757), True, 'import megengine.module as MM\n'), ((835, 861), 'megengine.module.Conv3d', 'MM.Conv3d', (['(32)', '(32)', '(3)', '(1)', '(0)'], {}), '(32, 32, 3, 1, 0)\n', (844, 861), True, 'import megengine.module as MM\n'), ((932, 967), 'megengine.module.ConvTranspose2d', 'MM.ConvTranspose2d', (['(32)', '(32)', '(3)', '(1)', '(0)'], {}), '(32, 32, 3, 1, 0)\n', (950, 967), True, 'import megengine.module as MM\n'), ((1048, 1066), 'megengine.module.BatchNorm2d', 'MM.BatchNorm2d', (['(64)'], {}), '(64)\n', (1062, 1066), True, 'import megengine.module as MM\n'), ((1111, 1132), 'megengine.module.Linear', 'MM.Linear', (['(1000)', '(1000)'], {}), '(1000, 1000)\n', (1120, 1132), True, 'import megengine.module as MM\n'), ((9764, 9780), 'megengine._full_sync', 'mge._full_sync', ([], {}), '()\n', (9778, 9780), True, 'import megengine as mge\n'), ((9795, 9806), 'time.time', 'time.time', ([], {}), '()\n', (9804, 9806), False, 'import time\n'), ((9958, 9974), 'megengine._full_sync', 'mge._full_sync', ([], {}), '()\n', (9972, 9974), True, 'import megengine as mge\n'), ((9993, 10017), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (10015, 10017), False, 'import torch\n'), ((10249, 10273), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (10271, 10273), False, 'import torch\n'), ((10285, 10296), 'time.time', 'time.time', ([], {}), '()\n', (10294, 10296), False, 'import time\n'), ((10695, 10718), 'numpy.random.randn', 'np.random.randn', (['*shape'], {}), '(*shape)\n', (10710, 10718), True, 'import numpy as np\n'), ((10757, 10789), 'megengine.tensor', 'mge.tensor', (['inp'], {'dtype': '"""float32"""'}), "(inp, dtype='float32')\n", (10767, 10789), True, 'import megengine as mge\n'), ((12149, 12191), 'tabulate.tabulate', 'tabulate', (['table', 'header'], {'tablefmt': '"""github"""'}), "(table, header, tablefmt='github')\n", (12157, 12191), False, 'from tabulate import tabulate\n'), ((1305, 1338), 'megengine.functional.adaptive_avg_pool2d', 'MF.adaptive_avg_pool2d', (['x', '(7, 7)'], {}), '(x, (7, 7))\n', (1327, 1338), True, 'import megengine.functional as MF\n'), ((1358, 1391), 'torch.nn.functional.adaptive_avg_pool2d', 'TF.adaptive_avg_pool2d', (['x', '(7, 7)'], {}), '(x, (7, 7))\n', (1380, 1391), True, 'import torch.nn.functional as TF\n'), ((1539, 1572), 'megengine.functional.adaptive_max_pool2d', 'MF.adaptive_max_pool2d', (['x', '(7, 7)'], {}), '(x, (7, 7))\n', (1561, 1572), True, 'import megengine.functional as MF\n'), ((1592, 1625), 'torch.nn.functional.adaptive_max_pool2d', 'TF.adaptive_max_pool2d', (['x', '(7, 7)'], {}), '(x, (7, 7))\n', (1614, 1625), True, 'import torch.nn.functional as TF\n'), ((1848, 1867), 'megengine.functional.avg_pool2d', 'MF.avg_pool2d', (['x', '(2)'], {}), '(x, 2)\n', (1861, 1867), True, 'import megengine.functional as MF\n'), ((1887, 1906), 'torch.nn.functional.avg_pool2d', 'TF.avg_pool2d', (['x', '(2)'], {}), '(x, 2)\n', (1900, 1906), True, 'import torch.nn.functional as TF\n'), ((2044, 2078), 'megengine.functional.broadcast_to', 'MF.broadcast_to', (['x', '((5,) + x.shape)'], {}), '(x, (5,) + 
x.shape)\n', (2059, 2078), True, 'import megengine.functional as MF\n'), ((2098, 2135), 'torch.broadcast_to', 'torch.broadcast_to', (['x', '((5,) + x.shape)'], {}), '(x, (5,) + x.shape)\n', (2116, 2135), False, 'import torch\n'), ((3552, 3570), 'megengine.functional.dropout', 'MF.dropout', (['x', '(0.5)'], {}), '(x, 0.5)\n', (3562, 3570), True, 'import megengine.functional as MF\n'), ((4284, 4304), 'megengine.functional.expand_dims', 'MF.expand_dims', (['x', '(0)'], {}), '(x, 0)\n', (4298, 4304), True, 'import megengine.functional as MF\n'), ((4324, 4345), 'torch.unsqueeze', 'torch.unsqueeze', (['x', '(0)'], {}), '(x, 0)\n', (4339, 4345), False, 'import torch\n'), ((5308, 5329), 'megengine.functional.leaky_relu', 'MF.leaky_relu', (['x', '(0.5)'], {}), '(x, 0.5)\n', (5321, 5329), True, 'import megengine.functional as MF\n'), ((5349, 5370), 'torch.nn.functional.leaky_relu', 'TF.leaky_relu', (['x', '(0.5)'], {}), '(x, 0.5)\n', (5362, 5370), True, 'import torch.nn.functional as TF\n'), ((5954, 5973), 'megengine.functional.max_pool2d', 'MF.max_pool2d', (['x', '(2)'], {}), '(x, 2)\n', (5967, 5973), True, 'import megengine.functional as MF\n'), ((5993, 6012), 'torch.nn.functional.max_pool2d', 'TF.max_pool2d', (['x', '(2)'], {}), '(x, 2)\n', (6006, 6012), True, 'import torch.nn.functional as TF\n'), ((6147, 6179), 'megengine.random.normal', 'mge.random.normal', (['(0)', '(1)', 'x.shape'], {}), '(0, 1, x.shape)\n', (6164, 6179), True, 'import megengine as mge\n'), ((6199, 6234), 'torch.randn', 'torch.randn', (['x.shape'], {'device': '"""cuda"""'}), "(x.shape, device='cuda')\n", (6210, 6234), False, 'import torch\n'), ((6525, 6537), 'megengine.functional.max', 'MF.max', (['x', '(0)'], {}), '(x, 0)\n', (6531, 6537), True, 'import megengine.functional as MF\n'), ((6557, 6572), 'torch.max', 'torch.max', (['x', '(0)'], {}), '(x, 0)\n', (6566, 6572), False, 'import torch\n'), ((6707, 6720), 'megengine.functional.mean', 'MF.mean', (['x', '(0)'], {}), '(x, 0)\n', (6714, 6720), True, 'import megengine.functional as MF\n'), ((6740, 6756), 'torch.mean', 'torch.mean', (['x', '(0)'], {}), '(x, 0)\n', (6750, 6756), False, 'import torch\n'), ((6891, 6904), 'megengine.functional.mean', 'MF.mean', (['x', '(0)'], {}), '(x, 0)\n', (6898, 6904), True, 'import megengine.functional as MF\n'), ((6924, 6940), 'torch.mean', 'torch.mean', (['x', '(0)'], {}), '(x, 0)\n', (6934, 6940), False, 'import torch\n'), ((7231, 7246), 'megengine.functional.repeat', 'MF.repeat', (['x', '(5)'], {}), '(x, 5)\n', (7240, 7246), True, 'import megengine.functional as MF\n'), ((7266, 7295), 'torch.repeat_interleave', 'torch.repeat_interleave', (['x', '(5)'], {}), '(x, 5)\n', (7289, 7295), False, 'import torch\n'), ((7503, 7517), 'megengine.functional.split', 'MF.split', (['x', '(5)'], {}), '(x, 5)\n', (7511, 7517), True, 'import megengine.functional as MF\n'), ((7537, 7554), 'torch.split', 'torch.split', (['x', '(5)'], {}), '(x, 5)\n', (7548, 7554), False, 'import torch\n'), ((7773, 7794), 'megengine.functional.softmax', 'MF.softmax', (['x'], {'axis': '(1)'}), '(x, axis=1)\n', (7783, 7794), True, 'import megengine.functional as MF\n'), ((7814, 7834), 'torch.nn.functional.softmax', 'TF.softmax', (['x'], {'dim': '(1)'}), '(x, dim=1)\n', (7824, 7834), True, 'import torch.nn.functional as TF\n'), ((8119, 8135), 'megengine.functional.squeeze', 'MF.squeeze', (['x', '(0)'], {}), '(x, 0)\n', (8129, 8135), True, 'import megengine.functional as MF\n'), ((8155, 8174), 'torch.squeeze', 'torch.squeeze', (['x', '(0)'], {}), '(x, 0)\n', (8168, 8174), False, 
'import torch\n'), ((8670, 8684), 'megengine.functional.topk', 'MF.topk', (['x', '(10)'], {}), '(x, 10)\n', (8677, 8684), True, 'import megengine.functional as MF\n'), ((8704, 8721), 'torch.topk', 'torch.topk', (['x', '(10)'], {}), '(x, 10)\n', (8714, 8721), False, 'import torch\n'), ((9308, 9331), 'megengine.functional.where', 'MF.where', (['(x > 0.5)', 'x', 'x'], {}), '(x > 0.5, x, x)\n', (9316, 9331), True, 'import megengine.functional as MF\n'), ((9351, 9377), 'torch.where', 'torch.where', (['(x > 0.5)', 'x', 'x'], {}), '(x > 0.5, x, x)\n', (9362, 9377), False, 'import torch\n'), ((9508, 9541), 'megengine.random.uniform', 'mge.random.uniform', (['(0)', '(1)', 'x.shape'], {}), '(0, 1, x.shape)\n', (9526, 9541), True, 'import megengine as mge\n'), ((9561, 9595), 'torch.rand', 'torch.rand', (['x.shape'], {'device': '"""cuda"""'}), "(x.shape, device='cuda')\n", (9571, 9595), False, 'import torch\n'), ((10031, 10046), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (10044, 10046), False, 'import torch\n'), ((10066, 10077), 'time.time', 'time.time', ([], {}), '()\n', (10075, 10077), False, 'import time\n'), ((657, 683), 'torch.nn.Conv2d', 'nn.Conv2d', (['(32)', '(32)', '(3)', '(1)', '(0)'], {}), '(32, 32, 3, 1, 0)\n', (666, 683), True, 'import torch.nn as nn\n'), ((767, 804), 'torch.nn.Conv2d', 'nn.Conv2d', (['(32)', '(32)', '(3)', '(1)', '(0)'], {'groups': '(32)'}), '(32, 32, 3, 1, 0, groups=32)\n', (776, 804), True, 'import torch.nn as nn\n'), ((863, 889), 'torch.nn.Conv3d', 'nn.Conv3d', (['(32)', '(32)', '(3)', '(1)', '(0)'], {}), '(32, 32, 3, 1, 0)\n', (872, 889), True, 'import torch.nn as nn\n'), ((977, 1012), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(32)', '(32)', '(3)', '(1)', '(0)'], {}), '(32, 32, 3, 1, 0)\n', (995, 1012), True, 'import torch.nn as nn\n'), ((1068, 1086), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(64)'], {}), '(64)\n', (1082, 1086), True, 'import torch.nn as nn\n'), ((1134, 1155), 'torch.nn.Linear', 'nn.Linear', (['(1000)', '(1000)'], {}), '(1000, 1000)\n', (1143, 1155), True, 'import torch.nn as nn\n'), ((10903, 10920), 'torch.Tensor', 'torch.Tensor', (['inp'], {}), '(inp)\n', (10915, 10920), False, 'import torch\n')]
|
import datetime
from typing import Optional
from sqlmodel import BigInteger, Column, DateTime, Field, ForeignKey, SQLModel
class HelpSessionBase(SQLModel):
"""A base model for storing information about users."""
claimant_id: int
channel_id: int
opened_at: datetime.datetime
closed_at: Optional[datetime.datetime]
class HelpSessionTable(HelpSessionBase, table=True):
"""A model for storing information about individual help sessions."""
__tablename__ = "help_sessions"
session_id: int = Field(primary_key=True)
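    # BigInteger columns: these IDs can exceed the 32-bit integer range.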
claimant_id: int = Field(
sa_column=Column(
"claimant_id",
BigInteger,
ForeignKey("users.user_id"),
nullable=False
)
)
channel_id: int = Field(
sa_column=Column(
"channel_id",
BigInteger,
index=True,
nullable=False
)
)
opened_at: datetime.datetime = Field(
sa_column=Column(
DateTime(timezone=True),
nullable=False
)
)
closed_at: Optional[datetime.datetime] = Field(
sa_column=Column(
DateTime(timezone=True),
nullable=True
)
)
|
[
"sqlmodel.Field",
"sqlmodel.DateTime",
"sqlmodel.Column",
"sqlmodel.ForeignKey"
] |
[((526, 549), 'sqlmodel.Field', 'Field', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (531, 549), False, 'from sqlmodel import BigInteger, Column, DateTime, Field, ForeignKey, SQLModel\n'), ((788, 848), 'sqlmodel.Column', 'Column', (['"""channel_id"""', 'BigInteger'], {'index': '(True)', 'nullable': '(False)'}), "('channel_id', BigInteger, index=True, nullable=False)\n", (794, 848), False, 'from sqlmodel import BigInteger, Column, DateTime, Field, ForeignKey, SQLModel\n'), ((669, 696), 'sqlmodel.ForeignKey', 'ForeignKey', (['"""users.user_id"""'], {}), "('users.user_id')\n", (679, 696), False, 'from sqlmodel import BigInteger, Column, DateTime, Field, ForeignKey, SQLModel\n'), ((993, 1016), 'sqlmodel.DateTime', 'DateTime', ([], {'timezone': '(True)'}), '(timezone=True)\n', (1001, 1016), False, 'from sqlmodel import BigInteger, Column, DateTime, Field, ForeignKey, SQLModel\n'), ((1151, 1174), 'sqlmodel.DateTime', 'DateTime', ([], {'timezone': '(True)'}), '(timezone=True)\n', (1159, 1174), False, 'from sqlmodel import BigInteger, Column, DateTime, Field, ForeignKey, SQLModel\n')]
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import re
import subprocess
import sys
import numpy as np
import megengine as mge
import megengine.functional as F
from megengine import jit, tensor
from megengine.functional.debug_param import set_conv_execution_strategy
from megengine.module import AvgPool2d, BatchNorm2d, Conv2d, Linear, Module
from megengine.optimizer import SGD
from megengine.test import assertTensorClose
def get_gpu_name():
try:
gpu_info = subprocess.check_output(
["nvidia-smi", "--query-gpu=gpu_name", "--format=csv,noheader"]
)
gpu_info = gpu_info.decode("ascii").split("\n")[0]
    except Exception:
gpu_info = "None"
return gpu_info
def get_cpu_name():
cpu_info = "None"
try:
cpu_info = subprocess.check_output(["cat", "/proc/cpuinfo"]).decode("ascii")
for line in cpu_info.split("\n"):
if "model name" in line:
return re.sub(".*model name.*:", "", line, 1).strip()
    except Exception:
pass
return cpu_info
def get_xpu_name():
if mge.is_cuda_available():
return get_gpu_name()
else:
return get_cpu_name()
class MnistNet(Module):
def __init__(self, has_bn=False):
super().__init__()
self.conv0 = Conv2d(1, 20, kernel_size=5, bias=True)
self.pool0 = AvgPool2d(2)
self.conv1 = Conv2d(20, 20, kernel_size=5, bias=True)
self.pool1 = AvgPool2d(2)
self.fc0 = Linear(20 * 4 * 4, 500, bias=True)
self.fc1 = Linear(500, 10, bias=True)
self.bn0 = None
self.bn1 = None
if has_bn:
self.bn0 = BatchNorm2d(20)
self.bn1 = BatchNorm2d(20)
def forward(self, x):
x = self.conv0(x)
if self.bn0:
x = self.bn0(x)
x = F.relu(x)
x = self.pool0(x)
x = self.conv1(x)
if self.bn1:
x = self.bn1(x)
x = F.relu(x)
x = self.pool1(x)
x = F.flatten(x, 1)
x = self.fc0(x)
x = F.relu(x)
x = self.fc1(x)
return x
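# Note: this file uses the legacy (pre-1.0) MegEngine API, in which the optimizer
# itself performs the backward pass (see opt.backward below).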
def train(data, label, net, opt):
pred = net(data)
loss = F.cross_entropy_with_softmax(pred, label)
opt.backward(loss)
return loss
def update_model(model_path):
"""
Update the dumped model with test cases for new reference values.
The model with pre-trained weights is trained for one iter with the test data attached.
The loss and updated net state dict is dumped.
.. code-block:: python
from test_correctness import update_model
update_model('mnist_model_with_test.mge') # for gpu
update_model('mnist_model_with_test_cpu.mge') # for cpu
"""
net = MnistNet(has_bn=True)
checkpoint = mge.load(model_path)
net.load_state_dict(checkpoint["net_init"])
lr = checkpoint["sgd_lr"]
opt = SGD(net.parameters(), lr=lr)
data = tensor(dtype=np.float32)
label = tensor(dtype=np.int32)
data.set_value(checkpoint["data"])
label.set_value(checkpoint["label"])
opt.zero_grad()
loss = train(data, label, net=net, opt=opt)
opt.step()
xpu_name = get_xpu_name()
checkpoint.update(
{"net_updated": net.state_dict(), "loss": loss.numpy(), "xpu": xpu_name}
)
mge.save(checkpoint, model_path)
def run_test(model_path, use_jit, use_symbolic):
"""
Load the model with test cases and run the training for one iter.
The loss and updated weights are compared with reference value to verify the correctness.
Dump a new file with updated result by calling update_model
if you think the test fails due to numerical rounding errors instead of bugs.
Please think twice before you do so.
"""
net = MnistNet(has_bn=True)
checkpoint = mge.load(model_path)
net.load_state_dict(checkpoint["net_init"])
lr = checkpoint["sgd_lr"]
opt = SGD(net.parameters(), lr=lr)
data = tensor(dtype=np.float32)
label = tensor(dtype=np.int32)
data.set_value(checkpoint["data"])
label.set_value(checkpoint["label"])
max_err = 1e-5
train_func = train
if use_jit:
train_func = jit.trace(train_func, symbolic=use_symbolic)
opt.zero_grad()
loss = train_func(data, label, net=net, opt=opt)
opt.step()
assertTensorClose(loss.numpy(), checkpoint["loss"], max_err=max_err)
for param, param_ref in zip(
net.state_dict().items(), checkpoint["net_updated"].items()
):
assert param[0] == param_ref[0]
assertTensorClose(param[1], param_ref[1], max_err=max_err)
def test_correctness():
if mge.is_cuda_available():
model_name = "mnist_model_with_test.mge"
else:
model_name = "mnist_model_with_test_cpu.mge"
model_path = os.path.join(os.path.dirname(__file__), model_name)
set_conv_execution_strategy("HEURISTIC_REPRODUCIBLE")
run_test(model_path, False, False)
run_test(model_path, True, False)
run_test(model_path, True, True)
|
[
"megengine.test.assertTensorClose",
"megengine.save",
"megengine.tensor",
"megengine.functional.relu",
"megengine.functional.flatten",
"megengine.functional.debug_param.set_conv_execution_strategy",
"megengine.module.Conv2d",
"megengine.module.AvgPool2d",
"megengine.functional.cross_entropy_with_softmax",
"megengine.is_cuda_available",
"megengine.jit.trace",
"megengine.module.Linear",
"megengine.module.BatchNorm2d",
"megengine.load"
] |
[((1407, 1430), 'megengine.is_cuda_available', 'mge.is_cuda_available', ([], {}), '()\n', (1428, 1430), True, 'import megengine as mge\n'), ((2486, 2527), 'megengine.functional.cross_entropy_with_softmax', 'F.cross_entropy_with_softmax', (['pred', 'label'], {}), '(pred, label)\n', (2514, 2527), True, 'import megengine.functional as F\n'), ((3082, 3102), 'megengine.load', 'mge.load', (['model_path'], {}), '(model_path)\n', (3090, 3102), True, 'import megengine as mge\n'), ((3232, 3256), 'megengine.tensor', 'tensor', ([], {'dtype': 'np.float32'}), '(dtype=np.float32)\n', (3238, 3256), False, 'from megengine import jit, tensor\n'), ((3269, 3291), 'megengine.tensor', 'tensor', ([], {'dtype': 'np.int32'}), '(dtype=np.int32)\n', (3275, 3291), False, 'from megengine import jit, tensor\n'), ((3602, 3634), 'megengine.save', 'mge.save', (['checkpoint', 'model_path'], {}), '(checkpoint, model_path)\n', (3610, 3634), True, 'import megengine as mge\n'), ((4105, 4125), 'megengine.load', 'mge.load', (['model_path'], {}), '(model_path)\n', (4113, 4125), True, 'import megengine as mge\n'), ((4255, 4279), 'megengine.tensor', 'tensor', ([], {'dtype': 'np.float32'}), '(dtype=np.float32)\n', (4261, 4279), False, 'from megengine import jit, tensor\n'), ((4292, 4314), 'megengine.tensor', 'tensor', ([], {'dtype': 'np.int32'}), '(dtype=np.int32)\n', (4298, 4314), False, 'from megengine import jit, tensor\n'), ((4934, 4957), 'megengine.is_cuda_available', 'mge.is_cuda_available', ([], {}), '()\n', (4955, 4957), True, 'import megengine as mge\n'), ((5144, 5197), 'megengine.functional.debug_param.set_conv_execution_strategy', 'set_conv_execution_strategy', (['"""HEURISTIC_REPRODUCIBLE"""'], {}), "('HEURISTIC_REPRODUCIBLE')\n", (5171, 5197), False, 'from megengine.functional.debug_param import set_conv_execution_strategy\n'), ((818, 910), 'subprocess.check_output', 'subprocess.check_output', (["['nvidia-smi', '--query-gpu=gpu_name', '--format=csv,noheader']"], {}), "(['nvidia-smi', '--query-gpu=gpu_name',\n '--format=csv,noheader'])\n", (841, 910), False, 'import subprocess\n'), ((1614, 1653), 'megengine.module.Conv2d', 'Conv2d', (['(1)', '(20)'], {'kernel_size': '(5)', 'bias': '(True)'}), '(1, 20, kernel_size=5, bias=True)\n', (1620, 1653), False, 'from megengine.module import AvgPool2d, BatchNorm2d, Conv2d, Linear, Module\n'), ((1675, 1687), 'megengine.module.AvgPool2d', 'AvgPool2d', (['(2)'], {}), '(2)\n', (1684, 1687), False, 'from megengine.module import AvgPool2d, BatchNorm2d, Conv2d, Linear, Module\n'), ((1709, 1749), 'megengine.module.Conv2d', 'Conv2d', (['(20)', '(20)'], {'kernel_size': '(5)', 'bias': '(True)'}), '(20, 20, kernel_size=5, bias=True)\n', (1715, 1749), False, 'from megengine.module import AvgPool2d, BatchNorm2d, Conv2d, Linear, Module\n'), ((1771, 1783), 'megengine.module.AvgPool2d', 'AvgPool2d', (['(2)'], {}), '(2)\n', (1780, 1783), False, 'from megengine.module import AvgPool2d, BatchNorm2d, Conv2d, Linear, Module\n'), ((1803, 1837), 'megengine.module.Linear', 'Linear', (['(20 * 4 * 4)', '(500)'], {'bias': '(True)'}), '(20 * 4 * 4, 500, bias=True)\n', (1809, 1837), False, 'from megengine.module import AvgPool2d, BatchNorm2d, Conv2d, Linear, Module\n'), ((1857, 1883), 'megengine.module.Linear', 'Linear', (['(500)', '(10)'], {'bias': '(True)'}), '(500, 10, bias=True)\n', (1863, 1883), False, 'from megengine.module import AvgPool2d, BatchNorm2d, Conv2d, Linear, Module\n'), ((2143, 2152), 'megengine.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (2149, 2152), True, 'import megengine.functional 
as F\n'), ((2266, 2275), 'megengine.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (2272, 2275), True, 'import megengine.functional as F\n'), ((2314, 2329), 'megengine.functional.flatten', 'F.flatten', (['x', '(1)'], {}), '(x, 1)\n', (2323, 2329), True, 'import megengine.functional as F\n'), ((2366, 2375), 'megengine.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (2372, 2375), True, 'import megengine.functional as F\n'), ((4476, 4520), 'megengine.jit.trace', 'jit.trace', (['train_func'], {'symbolic': 'use_symbolic'}), '(train_func, symbolic=use_symbolic)\n', (4485, 4520), False, 'from megengine import jit, tensor\n'), ((4841, 4899), 'megengine.test.assertTensorClose', 'assertTensorClose', (['param[1]', 'param_ref[1]'], {'max_err': 'max_err'}), '(param[1], param_ref[1], max_err=max_err)\n', (4858, 4899), False, 'from megengine.test import assertTensorClose\n'), ((5101, 5126), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (5116, 5126), False, 'import os\n'), ((1974, 1989), 'megengine.module.BatchNorm2d', 'BatchNorm2d', (['(20)'], {}), '(20)\n', (1985, 1989), False, 'from megengine.module import AvgPool2d, BatchNorm2d, Conv2d, Linear, Module\n'), ((2013, 2028), 'megengine.module.BatchNorm2d', 'BatchNorm2d', (['(20)'], {}), '(20)\n', (2024, 2028), False, 'from megengine.module import AvgPool2d, BatchNorm2d, Conv2d, Linear, Module\n'), ((1118, 1167), 'subprocess.check_output', 'subprocess.check_output', (["['cat', '/proc/cpuinfo']"], {}), "(['cat', '/proc/cpuinfo'])\n", (1141, 1167), False, 'import subprocess\n'), ((1286, 1324), 're.sub', 're.sub', (['""".*model name.*:"""', '""""""', 'line', '(1)'], {}), "('.*model name.*:', '', line, 1)\n", (1292, 1324), False, 'import re\n')]
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine as mge
import megengine.functional as F
import megengine.hub as hub
import megengine.module as M
import math
import official.vision.classification.resnet.model as resnet
import numpy as np
class ResnetBody(M.Module):
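    """ResNet-style trunk whose forward pass returns the feature maps of all four stages."""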
def __init__(
self,
block,
init_channel,
layers,
channels,
zero_init_residual=False,
norm=M.BatchNorm2d,
):
super(ResnetBody, self).__init__()
self.in_channels = init_channel
self.layer1 = self._make_layer(
block, channels[0], layers[0], stride=1, norm=norm
)
self.layer2 = self._make_layer(
block, channels[1], layers[1], stride=2, norm=norm
)
self.layer3 = self._make_layer(
block, channels[2], layers[2], stride=2, norm=norm,
)
self.layer4 = self._make_layer(
block, channels[3], layers[3], stride=2, norm=norm,
)
for m in self.modules():
if isinstance(m, M.Conv2d):
M.init.msra_normal_(m.weight, mode="fan_out", nonlinearity="relu")
if m.bias is not None:
fan_in, _ = M.init.calculate_fan_in_and_fan_out(m.weight)
bound = 1 / math.sqrt(fan_in)
M.init.uniform_(m.bias, -bound, bound)
elif isinstance(m, M.BatchNorm2d):
M.init.ones_(m.weight)
M.init.zeros_(m.bias)
elif isinstance(m, M.Linear):
M.init.msra_uniform_(m.weight, a=math.sqrt(5))
if m.bias is not None:
fan_in, _ = M.init.calculate_fan_in_and_fan_out(m.weight)
bound = 1 / math.sqrt(fan_in)
M.init.uniform_(m.bias, -bound, bound)
def _make_layer(self, block, channels, blocks, stride=1, norm=M.BatchNorm2d):
layers = []
layers.append(block(self.in_channels, channels, stride, norm=norm))
self.in_channels = channels * block.expansion
for _ in range(1, blocks):
layers.append(block(self.in_channels, channels, norm=norm))
return M.Sequential(*layers)
def forward(self, x):
outputs = []
x = self.layer1(x)
outputs.append(x)
x = self.layer2(x)
outputs.append(x)
x = self.layer3(x)
outputs.append(x)
x = self.layer4(x)
outputs.append(x)
return outputs
class SingleStage(M.Module):
def __init__(
self, block, init_channel, layers, channels, mid_channel, norm=M.BatchNorm2d
):
super(SingleStage, self).__init__()
self.down = ResnetBody(block, init_channel, layers, channels, norm)
channel = block.expansion * channels[-1]
self.up1 = M.Sequential(
M.Conv2d(channel, mid_channel, 1, 1, 0), norm(mid_channel)
)
self.deconv1 = M.Sequential(
M.ConvTranspose2d(mid_channel, mid_channel, 4, 2, 1), norm(mid_channel)
)
channel = block.expansion * channels[-2]
self.up2 = M.Sequential(
M.Conv2d(channel, mid_channel, 1, 1, 0), norm(mid_channel)
)
self.deconv2 = M.Sequential(
M.ConvTranspose2d(mid_channel, mid_channel, 4, 2, 1), norm(mid_channel)
)
channel = block.expansion * channels[-3]
self.up3 = M.Sequential(
M.Conv2d(channel, mid_channel, 1, 1, 0), norm(mid_channel)
)
self.deconv3 = M.Sequential(
M.ConvTranspose2d(mid_channel, mid_channel, 4, 2, 1), norm(mid_channel)
)
channel = block.expansion * channels[-4]
self.up4 = M.Sequential(
M.Conv2d(channel, mid_channel, 1, 1, 0), norm(mid_channel)
)
def forward(self, x):
branches = self.down(x)
branches = list(reversed(branches))
outputs = []
f_up = F.relu(self.up1(branches[0]))
outputs.append(f_up)
f = self.up2(branches[1])
f_up = F.relu(self.deconv1(f_up) + f)
outputs.append(f_up)
f = self.up3(branches[2])
f_up = F.relu(self.deconv2(f_up) + f)
outputs.append(f_up)
f = self.up4(branches[3])
f_up = F.relu(self.deconv3(f_up) + f)
outputs.append(f_up)
return outputs
class MSPN(M.Module):
def __init__(self, block, layers, channels, mid_channel, keypoint_num, nr_stg):
super(MSPN, self).__init__()
block = getattr(resnet, block)
norm = M.BatchNorm2d
self.nr_stg = nr_stg
self.keypoint_num = keypoint_num
self.head = M.Sequential(
M.Conv2d(3, 64, 3, 2, 1),
norm(64),
M.ReLU(),
M.Conv2d(64, 64, 3, 1, 1),
norm(64),
M.ReLU(),
M.Conv2d(64, 64, 3, 2, 1),
norm(64),
M.ReLU(),
)
self.stages = {}
for i in range(nr_stg):
init_channel = 64
self.stages["Stage_{}_body".format(i)] = SingleStage(
block, init_channel, layers, channels, mid_channel, norm
)
tail = {}
for j in range(4):
tail["tail_{}".format(j)] = M.Conv2d(mid_channel, keypoint_num, 3, 1, 1)
self.stages["Stage_{}_tail".format(i)] = tail
if i < nr_stg - 1:
self.stages["Stage_{}_next".format(i)] = M.Sequential(
M.Conv2d(mid_channel, 64, 1, 1, 0), norm(64), M.ReLU()
)
self.inputs = {
"image": mge.tensor(dtype="float32"),
"heatmap": mge.tensor(dtype="float32"),
"heat_valid": mge.tensor(dtype="float32"),
}
def calc_loss(self):
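        """Average the per-scale heatmap MSE across stages, plus an OHKM term on each stage's largest heatmap."""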
outs = self.forward(self.inputs["image"])
loss = 0
for stage_out in outs:
for ind, scale_out in enumerate(stage_out[:-1]):
label = (
self.inputs["heatmap"][:, ind]
* (self.inputs["heat_valid"] > 1.1)[:, :, None, None]
)
tmp = F.square_loss(scale_out, label)
loss += tmp / 4 / len(outs)
            # OHKM (online hard keypoint mining) loss for the largest heatmap: keep only the hardest half of the keypoints
tmp = ((stage_out[-1] - self.inputs["heatmap"][:, -1]) ** 2).mean(3).mean(
2
) * (self.inputs["heat_valid"] > 0.1)
ohkm_loss = 0
for i in range(tmp.shape[0]):
selected_loss, _ = F.top_k(
tmp[i], self.keypoint_num // 2, descending=True
)
ohkm_loss += selected_loss.mean()
ohkm_loss /= tmp.shape[0]
loss += ohkm_loss
return loss
def predict(self):
outputs = self.forward(self.inputs["image"])
pred = outputs[-1][-1]
return pred
def forward(self, x):
f = self.head(x)
outputs = []
for i in range(self.nr_stg):
multi_scale_features = self.stages["Stage_{}_body".format(i)](f)
multi_scale_heatmaps = []
for j in range(4):
out = self.stages["Stage_{}_tail".format(i)]["tail_{}".format(j)](
multi_scale_features[j]
)
out = F.interpolate(out, scale_factor=2 ** (3 - j))
multi_scale_heatmaps.append(out)
if i < self.nr_stg - 1:
f = self.stages["Stage_{}_next".format(i)](multi_scale_features[-1])
outputs.append(multi_scale_heatmaps)
return outputs
@hub.pretrained(
"https://data.megengine.org.cn/models/weights/mspn_4stage_256x192_0_255_75_2.pkl"
)
def mspn_4stage(**kwargs):
model = MSPN(
block="Bottleneck",
layers=[5, 5, 6, 3],
channels=[64, 128, 192, 384],
nr_stg=4,
mid_channel=256,
keypoint_num=17,
**kwargs
)
return model
|
[
"megengine.module.ReLU",
"megengine.tensor",
"megengine.functional.top_k",
"megengine.module.init.zeros_",
"megengine.module.init.msra_normal_",
"megengine.module.init.calculate_fan_in_and_fan_out",
"megengine.module.ConvTranspose2d",
"megengine.module.Sequential",
"megengine.module.Conv2d",
"megengine.functional.square_loss",
"megengine.functional.interpolate",
"megengine.hub.pretrained",
"megengine.module.init.uniform_",
"megengine.module.init.ones_"
] |
[((7936, 8043), 'megengine.hub.pretrained', 'hub.pretrained', (['"""https://data.megengine.org.cn/models/weights/mspn_4stage_256x192_0_255_75_2.pkl"""'], {}), "(\n 'https://data.megengine.org.cn/models/weights/mspn_4stage_256x192_0_255_75_2.pkl'\n )\n", (7950, 8043), True, 'import megengine.hub as hub\n'), ((2520, 2541), 'megengine.module.Sequential', 'M.Sequential', (['*layers'], {}), '(*layers)\n', (2532, 2541), True, 'import megengine.module as M\n'), ((3182, 3221), 'megengine.module.Conv2d', 'M.Conv2d', (['channel', 'mid_channel', '(1)', '(1)', '(0)'], {}), '(channel, mid_channel, 1, 1, 0)\n', (3190, 3221), True, 'import megengine.module as M\n'), ((3300, 3352), 'megengine.module.ConvTranspose2d', 'M.ConvTranspose2d', (['mid_channel', 'mid_channel', '(4)', '(2)', '(1)'], {}), '(mid_channel, mid_channel, 4, 2, 1)\n', (3317, 3352), True, 'import megengine.module as M\n'), ((3477, 3516), 'megengine.module.Conv2d', 'M.Conv2d', (['channel', 'mid_channel', '(1)', '(1)', '(0)'], {}), '(channel, mid_channel, 1, 1, 0)\n', (3485, 3516), True, 'import megengine.module as M\n'), ((3595, 3647), 'megengine.module.ConvTranspose2d', 'M.ConvTranspose2d', (['mid_channel', 'mid_channel', '(4)', '(2)', '(1)'], {}), '(mid_channel, mid_channel, 4, 2, 1)\n', (3612, 3647), True, 'import megengine.module as M\n'), ((3772, 3811), 'megengine.module.Conv2d', 'M.Conv2d', (['channel', 'mid_channel', '(1)', '(1)', '(0)'], {}), '(channel, mid_channel, 1, 1, 0)\n', (3780, 3811), True, 'import megengine.module as M\n'), ((3890, 3942), 'megengine.module.ConvTranspose2d', 'M.ConvTranspose2d', (['mid_channel', 'mid_channel', '(4)', '(2)', '(1)'], {}), '(mid_channel, mid_channel, 4, 2, 1)\n', (3907, 3942), True, 'import megengine.module as M\n'), ((4067, 4106), 'megengine.module.Conv2d', 'M.Conv2d', (['channel', 'mid_channel', '(1)', '(1)', '(0)'], {}), '(channel, mid_channel, 1, 1, 0)\n', (4075, 4106), True, 'import megengine.module as M\n'), ((5022, 5046), 'megengine.module.Conv2d', 'M.Conv2d', (['(3)', '(64)', '(3)', '(2)', '(1)'], {}), '(3, 64, 3, 2, 1)\n', (5030, 5046), True, 'import megengine.module as M\n'), ((5082, 5090), 'megengine.module.ReLU', 'M.ReLU', ([], {}), '()\n', (5088, 5090), True, 'import megengine.module as M\n'), ((5104, 5129), 'megengine.module.Conv2d', 'M.Conv2d', (['(64)', '(64)', '(3)', '(1)', '(1)'], {}), '(64, 64, 3, 1, 1)\n', (5112, 5129), True, 'import megengine.module as M\n'), ((5165, 5173), 'megengine.module.ReLU', 'M.ReLU', ([], {}), '()\n', (5171, 5173), True, 'import megengine.module as M\n'), ((5187, 5212), 'megengine.module.Conv2d', 'M.Conv2d', (['(64)', '(64)', '(3)', '(2)', '(1)'], {}), '(64, 64, 3, 2, 1)\n', (5195, 5212), True, 'import megengine.module as M\n'), ((5248, 5256), 'megengine.module.ReLU', 'M.ReLU', ([], {}), '()\n', (5254, 5256), True, 'import megengine.module as M\n'), ((5951, 5978), 'megengine.tensor', 'mge.tensor', ([], {'dtype': '"""float32"""'}), "(dtype='float32')\n", (5961, 5978), True, 'import megengine as mge\n'), ((6003, 6030), 'megengine.tensor', 'mge.tensor', ([], {'dtype': '"""float32"""'}), "(dtype='float32')\n", (6013, 6030), True, 'import megengine as mge\n'), ((6058, 6085), 'megengine.tensor', 'mge.tensor', ([], {'dtype': '"""float32"""'}), "(dtype='float32')\n", (6068, 6085), True, 'import megengine as mge\n'), ((1416, 1482), 'megengine.module.init.msra_normal_', 'M.init.msra_normal_', (['m.weight'], {'mode': '"""fan_out"""', 'nonlinearity': '"""relu"""'}), "(m.weight, mode='fan_out', nonlinearity='relu')\n", (1435, 1482), True, 'import megengine.module as M\n'), ((5606, 5650), 'megengine.module.Conv2d', 'M.Conv2d', (['mid_channel', 'keypoint_num', '(3)', '(1)', '(1)'], {}), '(mid_channel, keypoint_num, 3, 1, 1)\n', (5614, 5650), True, 'import megengine.module as M\n'), ((6474, 6505), 'megengine.functional.square_loss', 'F.square_loss', (['scale_out', 'label'], {}), '(scale_out, label)\n', (6487, 6505), True, 'import megengine.functional as F\n'), ((6857, 6913), 'megengine.functional.top_k', 'F.top_k', (['tmp[i]', '(self.keypoint_num // 2)'], {'descending': '(True)'}), '(tmp[i], self.keypoint_num // 2, descending=True)\n', (6864, 6913), True, 'import megengine.functional as F\n'), ((7643, 7688), 'megengine.functional.interpolate', 'F.interpolate', (['out'], {'scale_factor': '(2 ** (3 - j))'}), '(out, scale_factor=2 ** (3 - j))\n', (7656, 7688), True, 'import megengine.functional as F\n'), ((1554, 1599), 'megengine.module.init.calculate_fan_in_and_fan_out', 'M.init.calculate_fan_in_and_fan_out', (['m.weight'], {}), '(m.weight)\n', (1589, 1599), True, 'import megengine.module as M\n'), ((1670, 1708), 'megengine.module.init.uniform_', 'M.init.uniform_', (['m.bias', '(-bound)', 'bound'], {}), '(m.bias, -bound, bound)\n', (1685, 1708), True, 'import megengine.module as M\n'), ((1772, 1794), 'megengine.module.init.ones_', 'M.init.ones_', (['m.weight'], {}), '(m.weight)\n', (1784, 1794), True, 'import megengine.module as M\n'), ((1811, 1832), 'megengine.module.init.zeros_', 'M.init.zeros_', (['m.bias'], {}), '(m.bias)\n', (1824, 1832), True, 'import megengine.module as M\n'), ((5832, 5866), 'megengine.module.Conv2d', 'M.Conv2d', (['mid_channel', '(64)', '(1)', '(1)', '(0)'], {}), '(mid_channel, 64, 1, 1, 0)\n', (5840, 5866), True, 'import megengine.module as M\n'), ((5878, 5886), 'megengine.module.ReLU', 'M.ReLU', ([], {}), '()\n', (5884, 5886), True, 'import megengine.module as M\n'), ((1632, 1649), 'math.sqrt', 'math.sqrt', (['fan_in'], {}), '(fan_in)\n', (1641, 1649), False, 'import math\n'), ((2009, 2054), 'megengine.module.init.calculate_fan_in_and_fan_out', 'M.init.calculate_fan_in_and_fan_out', (['m.weight'], {}), '(m.weight)\n', (2044, 2054), True, 'import megengine.module as M\n'), ((2125, 2163), 'megengine.module.init.uniform_', 'M.init.uniform_', (['m.bias', '(-bound)', 'bound'], {}), '(m.bias, -bound, bound)\n', (2140, 2163), True, 'import megengine.module as M\n'), ((1924, 1936), 'math.sqrt', 'math.sqrt', (['(5)'], {}), '(5)\n', (1933, 1936), False, 'import math\n'), ((2087, 2104), 'math.sqrt', 'math.sqrt', (['fan_in'], {}), '(fan_in)\n', (2096, 2104), False, 'import math\n')]
|
import os.path as op
import numpy as nm
from sfepy.base.conf import transform_variables
from sfepy.base.testing import TestCommon
variables = {
'u' : ('unknown field', 'f', 0),
'v' : ('test field', 'f', 'u'),
}
def in_dir(adir):
return lambda x: op.join(adir, x)
def gen_datas(meshes):
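    """Build centred scalar (sine) and vector (sine/cosine) nodal data for
    each mesh, keyed as 'scalar_<key>' and 'vector_<key>'."""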
datas = {}
    for key, mesh in meshes.items():
bbox = mesh.get_bounding_box()
nx = bbox[1,0] - bbox[0,0]
centre = 0.5 * bbox.sum(axis=0)
mesh.coors -= centre
data = nm.sin(4.0 * nm.pi * mesh.coors[:,0:1] / nx)
datas['scalar_' + key] = data
data = nm.zeros_like(mesh.coors)
data[:,0] = 0.05 * nx * nm.sin(4.0 * nm.pi * mesh.coors[:,0] / nx)
data[:,2] = 0.05 * nx * nm.cos(4.0 * nm.pi * mesh.coors[:,0] / nx)
datas['vector_' + key] = data
return datas
def do_interpolation(m2, m1, data, field_name, force=False):
"""Interpolate data from m1 to m2. """
from sfepy.discrete import Variables
from sfepy.discrete.fem import FEDomain, Field
fields = {
'scalar_si' : ((1,1), 'Omega', 2),
'vector_si' : ((3,1), 'Omega', 2),
'scalar_tp' : ((1,1), 'Omega', 1),
'vector_tp' : ((3,1), 'Omega', 1),
}
d1 = FEDomain('d1', m1)
omega1 = d1.create_region('Omega', 'all')
f = fields[field_name]
field1 = Field.from_args('f', nm.float64, f[0], d1.regions[f[1]],
approx_order=f[2])
ff = {field1.name : field1}
vv = Variables.from_conf(transform_variables(variables), ff)
u1 = vv['u']
u1.set_from_mesh_vertices(data)
d2 = FEDomain('d2', m2)
omega2 = d2.create_region('Omega', 'all')
field2 = Field.from_args('f', nm.float64, f[0], d2.regions[f[1]],
approx_order=f[2])
ff2 = {field2.name : field2}
vv2 = Variables.from_conf(transform_variables(variables), ff2)
u2 = vv2['u']
if not force:
# Performs interpolation, if other field differs from self.field
# or, in particular, is defined on a different mesh.
u2.set_from_other(u1, strategy='interpolation', close_limit=0.5)
else:
coors = u2.field.get_coor()
vals = u1.evaluate_at(coors, close_limit=0.5)
u2.set_data(vals)
return u1, u2
class Test(TestCommon):
@staticmethod
def from_conf(conf, options):
test = Test(conf=conf, options=options)
return test
def test_interpolation(self):
from sfepy import data_dir
from sfepy.discrete.fem import Mesh
from sfepy.linalg import make_axis_rotation_matrix
fname = in_dir(self.options.out_dir)
meshes = {
'tp' : Mesh('original mesh', data_dir + '/meshes/3d/block.mesh'),
'si' : Mesh('original mesh', data_dir + '/meshes/3d/cylinder.mesh'),
}
datas = gen_datas(meshes)
for field_name in ['scalar_si', 'vector_si', 'scalar_tp', 'vector_tp']:
m1 = meshes[field_name[-2:]]
for ia, angle in enumerate(nm.linspace(0.0, nm.pi, 11)):
self.report('%s: %d. angle: %f' % (field_name, ia, angle))
shift = [0.0, 0.0, 0.0]
mtx = make_axis_rotation_matrix([0, 1, 0], angle)
m2 = m1.copy('rotated mesh')
m2.transform_coors(mtx)
data = datas[field_name]
u1, u2 = do_interpolation(m2, m1, data, field_name)
if ia == 0:
u1.save_as_mesh(fname('test_mesh_interp_%s_u1.vtk'
% field_name))
u2.save_as_mesh(fname('test_mesh_interp_%s_u2.%03d.vtk'
% (field_name, ia)))
return True
def test_interpolation_two_meshes(self):
from sfepy import data_dir
from sfepy.discrete import Variables
from sfepy.discrete.fem import Mesh, FEDomain, Field
m1 = Mesh('source mesh', data_dir + '/meshes/3d/block.mesh')
m2 = Mesh('target mesh', data_dir + '/meshes/3d/cube_medium_tetra.mesh')
m2.coors *= 2.0
bbox = m1.get_bounding_box()
dd = bbox[1,:] - bbox[0,:]
data = nm.sin(4.0 * nm.pi * m1.coors[:,0:1] / dd[0]) \
* nm.cos(4.0 * nm.pi * m1.coors[:,1:2] / dd[1])
variables1 = {
'u' : ('unknown field', 'scalar_tp', 0),
'v' : ('test field', 'scalar_tp', 'u'),
}
variables2 = {
'u' : ('unknown field', 'scalar_si', 0),
'v' : ('test field', 'scalar_si', 'u'),
}
d1 = FEDomain('d1', m1)
omega1 = d1.create_region('Omega', 'all')
field1 = Field.from_args('scalar_tp', nm.float64, (1,1), omega1,
approx_order=1)
ff1 = {field1.name : field1}
d2 = FEDomain('d2', m2)
omega2 = d2.create_region('Omega', 'all')
field2 = Field.from_args('scalar_si', nm.float64, (1,1), omega2,
approx_order=0)
ff2 = {field2.name : field2}
vv1 = Variables.from_conf(transform_variables(variables1), ff1)
u1 = vv1['u']
u1.set_from_mesh_vertices(data)
vv2 = Variables.from_conf(transform_variables(variables2), ff2)
u2 = vv2['u']
# Performs interpolation, if other field differs from self.field
# or, in particular, is defined on a different mesh.
u2.set_from_other(u1, strategy='interpolation', close_limit=0.1)
fname = in_dir(self.options.out_dir)
u1.save_as_mesh(fname('test_mesh_interp_block_scalar.vtk'))
u2.save_as_mesh(fname('test_mesh_interp_cube_scalar.vtk'))
return True
def test_invariance(self):
from sfepy import data_dir
from sfepy.discrete.fem import Mesh
meshes = {
'tp' : Mesh('original mesh', data_dir + '/meshes/3d/block.mesh'),
'si' : Mesh('original mesh', data_dir + '/meshes/3d/cylinder.mesh'),
}
datas = gen_datas(meshes)
ok = True
for field_name in ['scalar_si', 'vector_si', 'scalar_tp', 'vector_tp']:
m1 = meshes[field_name[-2:]]
data = datas[field_name]
u1, u2 = do_interpolation(m1, m1, data, field_name, force=True)
self.report('max. difference:', nm.abs(u1() - u2()).max())
_ok = nm.allclose(u1(), u2(), rtol=0.0, atol=1e-12)
self.report('invariance for %s field: %s' % (field_name, _ok))
ok = ok and _ok
return ok
def test_invariance_qp(self):
from sfepy import data_dir
from sfepy.discrete import Variables, Integral
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.terms import Term
from sfepy.discrete.common.mappings import get_physical_qps
mesh = Mesh('source mesh', data_dir + '/meshes/3d/block.mesh')
bbox = mesh.get_bounding_box()
dd = bbox[1,:] - bbox[0,:]
data = nm.sin(4.0 * nm.pi * mesh.coors[:,0:1] / dd[0]) \
* nm.cos(4.0 * nm.pi * mesh.coors[:,1:2] / dd[1])
variables = {
'u' : ('unknown field', 'scalar_tp', 0),
'v' : ('test field', 'scalar_tp', 'u'),
}
domain = FEDomain('domain', mesh)
omega = domain.create_region('Omega', 'all')
field = Field.from_args('scalar_tp', nm.float64, 1, omega,
approx_order=1)
ff = {field.name : field}
vv = Variables.from_conf(transform_variables(variables), ff)
u = vv['u']
u.set_from_mesh_vertices(data)
integral = Integral('i', order=2)
term = Term.new('ev_volume_integrate(u)', integral, omega, u=u)
term.setup()
val1, _ = term.evaluate(mode='qp')
val1 = val1.ravel()
qps = get_physical_qps(omega, integral)
coors = qps.get_merged_values()
val2 = u.evaluate_at(coors).ravel()
self.report('max. difference:', nm.abs(val1 - val2).max())
ok = nm.allclose(val1, val2, rtol=0.0, atol=1e-12)
self.report('invariance in qp: %s' % ok)
return ok
|
[
"sfepy.discrete.common.mappings.get_physical_qps",
"sfepy.discrete.fem.Mesh",
"sfepy.terms.Term.new",
"sfepy.base.conf.transform_variables",
"sfepy.discrete.Integral",
"sfepy.discrete.fem.Field.from_args",
"sfepy.discrete.fem.FEDomain",
"sfepy.linalg.make_axis_rotation_matrix"
] |
[((1266, 1284), 'sfepy.discrete.fem.FEDomain', 'FEDomain', (['"""d1"""', 'm1'], {}), "('d1', m1)\n", (1274, 1284), False, 'from sfepy.discrete.fem import Mesh, FEDomain, Field\n'), ((1374, 1449), 'sfepy.discrete.fem.Field.from_args', 'Field.from_args', (['"""f"""', 'nm.float64', 'f[0]', 'd1.regions[f[1]]'], {'approx_order': 'f[2]'}), "('f', nm.float64, f[0], d1.regions[f[1]], approx_order=f[2])\n", (1389, 1449), False, 'from sfepy.discrete.fem import Mesh, FEDomain, Field\n'), ((1640, 1658), 'sfepy.discrete.fem.FEDomain', 'FEDomain', (['"""d2"""', 'm2'], {}), "('d2', m2)\n", (1648, 1658), False, 'from sfepy.discrete.fem import Mesh, FEDomain, Field\n'), ((1719, 1794), 'sfepy.discrete.fem.Field.from_args', 'Field.from_args', (['"""f"""', 'nm.float64', 'f[0]', 'd2.regions[f[1]]'], {'approx_order': 'f[2]'}), "('f', nm.float64, f[0], d2.regions[f[1]], approx_order=f[2])\n", (1734, 1794), False, 'from sfepy.discrete.fem import Mesh, FEDomain, Field\n'), ((277, 293), 'os.path.join', 'op.join', (['adir', 'x'], {}), '(adir, x)\n', (284, 293), True, 'import os.path as op\n'), ((534, 579), 'numpy.sin', 'nm.sin', (['(4.0 * nm.pi * mesh.coors[:, 0:1] / nx)'], {}), '(4.0 * nm.pi * mesh.coors[:, 0:1] / nx)\n', (540, 579), True, 'import numpy as nm\n'), ((633, 658), 'numpy.zeros_like', 'nm.zeros_like', (['mesh.coors'], {}), '(mesh.coors)\n', (646, 658), True, 'import numpy as nm\n'), ((1541, 1571), 'sfepy.base.conf.transform_variables', 'transform_variables', (['variables'], {}), '(variables)\n', (1560, 1571), False, 'from sfepy.base.conf import transform_variables\n'), ((1888, 1918), 'sfepy.base.conf.transform_variables', 'transform_variables', (['variables'], {}), '(variables)\n', (1907, 1918), False, 'from sfepy.base.conf import transform_variables\n'), ((3984, 4039), 'sfepy.discrete.fem.Mesh', 'Mesh', (['"""source mesh"""', "(data_dir + '/meshes/3d/block.mesh')"], {}), "('source mesh', data_dir + '/meshes/3d/block.mesh')\n", (3988, 4039), False, 'from sfepy.discrete.fem import Mesh, FEDomain, Field\n'), ((4054, 4121), 'sfepy.discrete.fem.Mesh', 'Mesh', (['"""target mesh"""', "(data_dir + '/meshes/3d/cube_medium_tetra.mesh')"], {}), "('target mesh', data_dir + '/meshes/3d/cube_medium_tetra.mesh')\n", (4058, 4121), False, 'from sfepy.discrete.fem import Mesh, FEDomain, Field\n'), ((4667, 4685), 'sfepy.discrete.fem.FEDomain', 'FEDomain', (['"""d1"""', 'm1'], {}), "('d1', m1)\n", (4675, 4685), False, 'from sfepy.discrete.fem import Mesh, FEDomain, Field\n'), ((4753, 4825), 'sfepy.discrete.fem.Field.from_args', 'Field.from_args', (['"""scalar_tp"""', 'nm.float64', '(1, 1)', 'omega1'], {'approx_order': '(1)'}), "('scalar_tp', nm.float64, (1, 1), omega1, approx_order=1)\n", (4768, 4825), False, 'from sfepy.discrete.fem import Mesh, FEDomain, Field\n'), ((4909, 4927), 'sfepy.discrete.fem.FEDomain', 'FEDomain', (['"""d2"""', 'm2'], {}), "('d2', m2)\n", (4917, 4927), False, 'from sfepy.discrete.fem import Mesh, FEDomain, Field\n'), ((4995, 5067), 'sfepy.discrete.fem.Field.from_args', 'Field.from_args', (['"""scalar_si"""', 'nm.float64', '(1, 1)', 'omega2'], {'approx_order': '(0)'}), "('scalar_si', nm.float64, (1, 1), omega2, approx_order=0)\n", (5010, 5067), False, 'from sfepy.discrete.fem import Mesh, FEDomain, Field\n'), ((6931, 6986), 'sfepy.discrete.fem.Mesh', 'Mesh', (['"""source mesh"""', "(data_dir + '/meshes/3d/block.mesh')"], {}), "('source mesh', data_dir + '/meshes/3d/block.mesh')\n", (6935, 6986), False, 'from sfepy.discrete.fem import Mesh, FEDomain, Field\n'), ((7363, 7387), 'sfepy.discrete.fem.FEDomain', 'FEDomain', (['"""domain"""', 'mesh'], {}), "('domain', mesh)\n", (7371, 7387), False, 'from sfepy.discrete.fem import Mesh, FEDomain, Field\n'), ((7457, 7523), 'sfepy.discrete.fem.Field.from_args', 'Field.from_args', (['"""scalar_tp"""', 'nm.float64', '(1)', 'omega'], {'approx_order': '(1)'}), "('scalar_tp', nm.float64, 1, omega, approx_order=1)\n", (7472, 7523), False, 'from sfepy.discrete.fem import Mesh, FEDomain, Field\n'), ((7739, 7761), 'sfepy.discrete.Integral', 'Integral', (['"""i"""'], {'order': '(2)'}), "('i', order=2)\n", (7747, 7761), False, 'from sfepy.discrete import Variables, Integral\n'), ((7777, 7833), 'sfepy.terms.Term.new', 'Term.new', (['"""ev_volume_integrate(u)"""', 'integral', 'omega'], {'u': 'u'}), "('ev_volume_integrate(u)', integral, omega, u=u)\n", (7785, 7833), False, 'from sfepy.terms import Term\n'), ((7941, 7974), 'sfepy.discrete.common.mappings.get_physical_qps', 'get_physical_qps', (['omega', 'integral'], {}), '(omega, integral)\n', (7957, 7974), False, 'from sfepy.discrete.common.mappings import get_physical_qps\n'), ((8141, 8186), 'numpy.allclose', 'nm.allclose', (['val1', 'val2'], {'rtol': '(0.0)', 'atol': '(1e-12)'}), '(val1, val2, rtol=0.0, atol=1e-12)\n', (8152, 8186), True, 'import numpy as nm\n'), ((691, 734), 'numpy.sin', 'nm.sin', (['(4.0 * nm.pi * mesh.coors[:, 0] / nx)'], {}), '(4.0 * nm.pi * mesh.coors[:, 0] / nx)\n', (697, 734), True, 'import numpy as nm\n'), ((766, 809), 'numpy.cos', 'nm.cos', (['(4.0 * nm.pi * mesh.coors[:, 0] / nx)'], {}), '(4.0 * nm.pi * mesh.coors[:, 0] / nx)\n', (772, 809), True, 'import numpy as nm\n'), ((2719, 2776), 'sfepy.discrete.fem.Mesh', 'Mesh', (['"""original mesh"""', "(data_dir + '/meshes/3d/block.mesh')"], {}), "('original mesh', data_dir + '/meshes/3d/block.mesh')\n", (2723, 2776), False, 'from sfepy.discrete.fem import Mesh, FEDomain, Field\n'), ((2797, 2857), 'sfepy.discrete.fem.Mesh', 'Mesh', (['"""original mesh"""', "(data_dir + '/meshes/3d/cylinder.mesh')"], {}), "('original mesh', data_dir + '/meshes/3d/cylinder.mesh')\n", (2801, 2857), False, 'from sfepy.discrete.fem import Mesh, FEDomain, Field\n'), ((4234, 4280), 'numpy.sin', 'nm.sin', (['(4.0 * nm.pi * m1.coors[:, 0:1] / dd[0])'], {}), '(4.0 * nm.pi * m1.coors[:, 0:1] / dd[0])\n', (4240, 4280), True, 'import numpy as nm\n'), ((4299, 4345), 'numpy.cos', 'nm.cos', (['(4.0 * nm.pi * m1.coors[:, 1:2] / dd[1])'], {}), '(4.0 * nm.pi * m1.coors[:, 1:2] / dd[1])\n', (4305, 4345), True, 'import numpy as nm\n'), ((5172, 5203), 'sfepy.base.conf.transform_variables', 'transform_variables', (['variables1'], {}), '(variables1)\n', (5191, 5203), False, 'from sfepy.base.conf import transform_variables\n'), ((5307, 5338), 'sfepy.base.conf.transform_variables', 'transform_variables', (['variables2'], {}), '(variables2)\n', (5326, 5338), False, 'from sfepy.base.conf import transform_variables\n'), ((5927, 5984), 'sfepy.discrete.fem.Mesh', 'Mesh', (['"""original mesh"""', "(data_dir + '/meshes/3d/block.mesh')"], {}), "('original mesh', data_dir + '/meshes/3d/block.mesh')\n", (5931, 5984), False, 'from sfepy.discrete.fem import Mesh, FEDomain, Field\n'), ((6005, 6065), 'sfepy.discrete.fem.Mesh', 'Mesh', (['"""original mesh"""', "(data_dir + '/meshes/3d/cylinder.mesh')"], {}), "('original mesh', data_dir + '/meshes/3d/cylinder.mesh')\n", (6009, 6065), False, 'from sfepy.discrete.fem import Mesh, FEDomain, Field\n'), ((7077, 7125), 'numpy.sin', 'nm.sin', (['(4.0 * nm.pi * mesh.coors[:, 0:1] / dd[0])'], {}), '(4.0 * nm.pi * mesh.coors[:, 0:1] / dd[0])\n', (7083, 7125), True, 'import numpy as nm\n'), ((7144, 7192), 'numpy.cos', 'nm.cos', (['(4.0 * nm.pi * mesh.coors[:, 1:2] / dd[1])'], {}), '(4.0 * nm.pi * mesh.coors[:, 1:2] / dd[1])\n', (7150, 7192), True, 'import numpy as nm\n'), ((7624, 7654), 'sfepy.base.conf.transform_variables', 'transform_variables', (['variables'], {}), '(variables)\n', (7643, 7654), False, 'from sfepy.base.conf import transform_variables\n'), ((3066, 3093), 'numpy.linspace', 'nm.linspace', (['(0.0)', 'nm.pi', '(11)'], {}), '(0.0, nm.pi, 11)\n', (3077, 3093), True, 'import numpy as nm\n'), ((3233, 3276), 'sfepy.linalg.make_axis_rotation_matrix', 'make_axis_rotation_matrix', (['[0, 1, 0]', 'angle'], {}), '([0, 1, 0], angle)\n', (3258, 3276), False, 'from sfepy.linalg import make_axis_rotation_matrix\n'), ((8101, 8120), 'numpy.abs', 'nm.abs', (['(val1 - val2)'], {}), '(val1 - val2)\n', (8107, 8120), True, 'import numpy as nm\n')]
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import contextlib
import os
import tempfile
import numpy as np
import pytest
import megengine as mge
import megengine._internal as mgb
import megengine.module as M
from megengine import jit, tensor
from megengine.core.tensor import Tensor
from megengine.jit import SublinearMemoryConfig
from megengine.test import assertTensorClose
@contextlib.contextmanager
def mkstemp():
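    """Yield the path of a fresh temporary file and delete it on exit."""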
fd, path = tempfile.mkstemp()
try:
os.close(fd)
yield path
finally:
os.remove(path)
def load_and_compile(fpath):
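    """Load a dumped computing graph, sort its Host2DeviceCopy inputs by
    name and compile it into a callable returning the copied outputs."""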
cg, _, outputs = mgb.load_comp_graph_from_file(fpath)
inputs = mgb.cgtools.get_dep_vars(outputs, "Host2DeviceCopy")
inputs = sorted(inputs, key=lambda i: i.name)
outputs = list(map(mgb.copy_output, outputs))
if len(outputs) == 1:
(outputs,) = outputs
return cg.compile(inputs, outputs)
def test_symbolic():
@jit.trace(symbolic=False)
def f(x):
return Tensor(mgb.opr.assert_equal(x._symvar, x._symvar + 1))
with pytest.raises(mgb.exc.MegBrainError):
f.trace(0)
@jit.trace(symbolic=True)
def f(x):
return Tensor(mgb.opr.assert_equal(x._symvar, x._symvar + 1))
f.trace(0)
def test_dump():
@jit.trace(symbolic=True)
def f(x, y):
return x * y
f.trace(0, 0)
with mkstemp() as out:
f.dump(out)
g = load_and_compile(out)
np.testing.assert_allclose(g([1, 2, 3], [1, 2, 3]), [1, 4, 9])
def test_goptions():
@jit.trace(symbolic=True, opt_level=0)
def f(x):
return x / x
@jit.trace(symbolic=True, opt_level=1)
def g(x):
return x / x
out = f([0.0]).numpy()
    # without graph optimization, 0.0 / 0.0 yields nan, and nan != nan
    if out == out:
        raise AssertionError("expected nan")
# with gopt, x / x returns 1
out = g([0.0]).numpy()
assert out == 1
def test_json_prof():
@jit.trace(symbolic=True, profiling=True)
def f(x):
return x * x
f([0.0])
out = f.get_profile()
assert out.get("profiler")
def test_capture_dump():
p = tensor(7)
@jit.trace(symbolic=True)
def f(x):
return x * p
f.trace(0)
with mkstemp() as out:
f.dump(out)
g = load_and_compile(out)
np.testing.assert_allclose(g([1, 2, 3]), [7, 14, 21])
def test_dump_volatile():
p = tensor(7)
@jit.trace(symbolic=True)
def f(x):
return x * p
f.trace(0)
with mkstemp() as out:
f.dump(out)
cg, _, outputs = mgb.load_comp_graph_from_file(out)
(out,) = outputs
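    # the tensor `p` captured from the enclosing scope is baked into the dump as a shared device tensor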
assert mgb.cgtools.get_type(mgb.cgtools.get_inputs(out)[1]) == "SharedDeviceTensor"
def test_shape_tracing():
for symbolic in [False, True]:
@jit.trace(symbolic=symbolic)
def f(x):
a, b = x.shape
return a * b
assert f(np.zeros([4, 3], dtype="float32")).item() == 12
assert f(np.zeros([6, 4], dtype="float32")).item() == 24
def test_shape_infer():
@jit.trace(symbolic=True)
def f(x):
a, b = x.shape
return sum(x[i] for i in range(a))
x = np.random.randn(3, 10).astype("float32")
assertTensorClose(f(x), x.sum(0))
x = np.random.randn(4, 10).astype("float32")
assertTensorClose(f(x), x[:3].sum(0))
def test_dump_bn_fused():
class ConvBNReLU(M.Sequential):
def __init__(self):
super(ConvBNReLU, self).__init__(
M.Conv2d(3, 4, 3, 1, 1, groups=1, bias=False),
M.BatchNorm2d(4),
M.ReLU(),
)
net = ConvBNReLU()
net.eval()
@jit.trace(symbolic=True)
def fun(data):
return net(data)
data = np.random.random([1, 3, 224, 224]).astype(np.float32)
fun.trace(data)
with mkstemp() as out:
fun.dump(out, optimize_for_inference=True)
cg, _, outputs = mgb.load_comp_graph_from_file(out)
(out,) = outputs
inputs = mgb.cgtools.get_inputs(out)
assert len(inputs) == 2 and (
mgb.cgtools.get_type(inputs[0]) == "MultipleDeviceTensorHolder"
and mgb.cgtools.get_type(inputs[1]) == "ConvolutionForward"
)
# Simply verify the options passed down
def test_sublinear():
config = SublinearMemoryConfig(genetic_nr_iter=10)
@jit.trace(symbolic=True, sublinear_memory_config=config)
def f(x):
return x + x
f([0.0])
|
[
"megengine._internal.cgtools.get_dep_vars",
"megengine.tensor",
"megengine.module.ReLU",
"megengine._internal.cgtools.get_inputs",
"megengine.module.BatchNorm2d",
"megengine._internal.load_comp_graph_from_file",
"megengine.module.Conv2d",
"megengine._internal.opr.assert_equal",
"megengine.jit.trace",
"megengine.jit.SublinearMemoryConfig",
"megengine._internal.cgtools.get_type"
] |
[((770, 788), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {}), '()\n', (786, 788), False, 'import tempfile\n'), ((927, 963), 'megengine._internal.load_comp_graph_from_file', 'mgb.load_comp_graph_from_file', (['fpath'], {}), '(fpath)\n', (956, 963), True, 'import megengine._internal as mgb\n'), ((977, 1029), 'megengine._internal.cgtools.get_dep_vars', 'mgb.cgtools.get_dep_vars', (['outputs', '"""Host2DeviceCopy"""'], {}), "(outputs, 'Host2DeviceCopy')\n", (1001, 1029), True, 'import megengine._internal as mgb\n'), ((1252, 1277), 'megengine.jit.trace', 'jit.trace', ([], {'symbolic': '(False)'}), '(symbolic=False)\n', (1261, 1277), False, 'from megengine import jit, tensor\n'), ((1435, 1459), 'megengine.jit.trace', 'jit.trace', ([], {'symbolic': '(True)'}), '(symbolic=True)\n', (1444, 1459), False, 'from megengine import jit, tensor\n'), ((1584, 1608), 'megengine.jit.trace', 'jit.trace', ([], {'symbolic': '(True)'}), '(symbolic=True)\n', (1593, 1608), False, 'from megengine import jit, tensor\n'), ((1844, 1881), 'megengine.jit.trace', 'jit.trace', ([], {'symbolic': '(True)', 'opt_level': '(0)'}), '(symbolic=True, opt_level=0)\n', (1853, 1881), False, 'from megengine import jit, tensor\n'), ((1923, 1960), 'megengine.jit.trace', 'jit.trace', ([], {'symbolic': '(True)', 'opt_level': '(1)'}), '(symbolic=True, opt_level=1)\n', (1932, 1960), False, 'from megengine import jit, tensor\n'), ((2184, 2224), 'megengine.jit.trace', 'jit.trace', ([], {'symbolic': '(True)', 'profiling': '(True)'}), '(symbolic=True, profiling=True)\n', (2193, 2224), False, 'from megengine import jit, tensor\n'), ((2367, 2376), 'megengine.tensor', 'tensor', (['(7)'], {}), '(7)\n', (2373, 2376), False, 'from megengine import jit, tensor\n'), ((2383, 2407), 'megengine.jit.trace', 'jit.trace', ([], {'symbolic': '(True)'}), '(symbolic=True)\n', (2392, 2407), False, 'from megengine import jit, tensor\n'), ((2636, 2645), 'megengine.tensor', 'tensor', (['(7)'], {}), '(7)\n', (2642, 2645), False, 'from megengine import jit, tensor\n'), ((2652, 2676), 'megengine.jit.trace', 'jit.trace', ([], {'symbolic': '(True)'}), '(symbolic=True)\n', (2661, 2676), False, 'from megengine import jit, tensor\n'), ((3280, 3304), 'megengine.jit.trace', 'jit.trace', ([], {'symbolic': '(True)'}), '(symbolic=True)\n', (3289, 3304), False, 'from megengine import jit, tensor\n'), ((3884, 3908), 'megengine.jit.trace', 'jit.trace', ([], {'symbolic': '(True)'}), '(symbolic=True)\n', (3893, 3908), False, 'from megengine import jit, tensor\n'), ((4212, 4239), 'megengine._internal.cgtools.get_inputs', 'mgb.cgtools.get_inputs', (['out'], {}), '(out)\n', (4234, 4239), True, 'import megengine._internal as mgb\n'), ((4497, 4538), 'megengine.jit.SublinearMemoryConfig', 'SublinearMemoryConfig', ([], {'genetic_nr_iter': '(10)'}), '(genetic_nr_iter=10)\n', (4518, 4538), False, 'from megengine.jit import SublinearMemoryConfig\n'), ((4545, 4601), 'megengine.jit.trace', 'jit.trace', ([], {'symbolic': '(True)', 'sublinear_memory_config': 'config'}), '(symbolic=True, sublinear_memory_config=config)\n', (4554, 4601), False, 'from megengine import jit, tensor\n'), ((806, 818), 'os.close', 'os.close', (['fd'], {}), '(fd)\n', (814, 818), False, 'import os\n'), ((859, 874), 'os.remove', 'os.remove', (['path'], {}), '(path)\n', (868, 874), False, 'import os\n'), ((1372, 1408), 'pytest.raises', 'pytest.raises', (['mgb.exc.MegBrainError'], {}), '(mgb.exc.MegBrainError)\n', (1385, 1408), False, 'import pytest\n'), ((2801, 2835), 'megengine._internal.load_comp_graph_from_file', 'mgb.load_comp_graph_from_file', (['out'], {}), '(out)\n', (2830, 2835), True, 'import megengine._internal as mgb\n'), ((3019, 3047), 'megengine.jit.trace', 'jit.trace', ([], {'symbolic': 'symbolic'}), '(symbolic=symbolic)\n', (3028, 3047), False, 'from megengine import jit, tensor\n'), ((4142, 4176), 'megengine._internal.load_comp_graph_from_file', 'mgb.load_comp_graph_from_file', (['out'], {}), '(out)\n', (4171, 4176), True, 'import megengine._internal as mgb\n'), ((1314, 1360), 'megengine._internal.opr.assert_equal', 'mgb.opr.assert_equal', (['x._symvar', '(x._symvar + 1)'], {}), '(x._symvar, x._symvar + 1)\n', (1334, 1360), True, 'import megengine._internal as mgb\n'), ((1496, 1542), 'megengine._internal.opr.assert_equal', 'mgb.opr.assert_equal', (['x._symvar', '(x._symvar + 1)'], {}), '(x._symvar, x._symvar + 1)\n', (1516, 1542), True, 'import megengine._internal as mgb\n'), ((3394, 3416), 'numpy.random.randn', 'np.random.randn', (['(3)', '(10)'], {}), '(3, 10)\n', (3409, 3416), True, 'import numpy as np\n'), ((3481, 3503), 'numpy.random.randn', 'np.random.randn', (['(4)', '(10)'], {}), '(4, 10)\n', (3496, 3503), True, 'import numpy as np\n'), ((3965, 3999), 'numpy.random.random', 'np.random.random', (['[1, 3, 224, 224]'], {}), '([1, 3, 224, 224])\n', (3981, 3999), True, 'import numpy as np\n'), ((2890, 2917), 'megengine._internal.cgtools.get_inputs', 'mgb.cgtools.get_inputs', (['out'], {}), '(out)\n', (2912, 2917), True, 'import megengine._internal as mgb\n'), ((3718, 3763), 'megengine.module.Conv2d', 'M.Conv2d', (['(3)', '(4)', '(3)', '(1)', '(1)'], {'groups': '(1)', 'bias': '(False)'}), '(3, 4, 3, 1, 1, groups=1, bias=False)\n', (3726, 3763), True, 'import megengine.module as M\n'), ((3781, 3797), 'megengine.module.BatchNorm2d', 'M.BatchNorm2d', (['(4)'], {}), '(4)\n', (3794, 3797), True, 'import megengine.module as M\n'), ((3815, 3823), 'megengine.module.ReLU', 'M.ReLU', ([], {}), '()\n', (3821, 3823), True, 'import megengine.module as M\n'), ((4282, 4313), 'megengine._internal.cgtools.get_type', 'mgb.cgtools.get_type', (['inputs[0]'], {}), '(inputs[0])\n', (4302, 4313), True, 'import megengine._internal as mgb\n'), ((4358, 4389), 'megengine._internal.cgtools.get_type', 'mgb.cgtools.get_type', (['inputs[1]'], {}), '(inputs[1])\n', (4378, 4389), True, 'import megengine._internal as mgb\n'), ((3136, 3169), 'numpy.zeros', 'np.zeros', (['[4, 3]'], {'dtype': '"""float32"""'}), "([4, 3], dtype='float32')\n", (3144, 3169), True, 'import numpy as np\n'), ((3201, 3234), 'numpy.zeros', 'np.zeros', (['[6, 4]'], {'dtype': '"""float32"""'}), "([6, 4], dtype='float32')\n", (3209, 3234), True, 'import numpy as np\n')]
|
# 30.05.2007, c
# last revision: 25.02.2008
from __future__ import absolute_import
from sfepy import data_dir
import six
filename_mesh = data_dir + '/meshes/2d/square_unit_tri.mesh'
material_1 = {
'name' : 'coef',
'values' : {
'val' : 1.0,
},
}
material_2 = {
'name' : 'm',
'values' : {
'K' : [[1.0, 0.0], [0.0, 1.0]],
},
}
field_1 = {
'name' : 'a_harmonic_field',
'dtype' : 'real',
'shape' : 'scalar',
'region' : 'Omega',
'approx_order' : 2,
}
variable_1 = {
'name' : 't',
'kind' : 'unknown field',
'field' : 'a_harmonic_field',
'order' : 0,
}
variable_2 = {
'name' : 's',
'kind' : 'test field',
'field' : 'a_harmonic_field',
'dual' : 't',
}
region_1000 = {
'name' : 'Omega',
'select' : 'all',
}
region_1 = {
'name' : 'Left',
'select' : 'vertices in (x < -0.499)',
'kind' : 'facet',
}
region_2 = {
'name' : 'Right',
'select' : 'vertices in (x > 0.499)',
'kind' : 'facet',
}
region_3 = {
'name' : 'Gamma',
'select' : 'vertices of surface',
'kind' : 'facet',
}
ebc_1 = {
'name' : 't_left',
'region' : 'Left',
'dofs' : {'t.0' : 5.0},
}
ebc_2 = {
'name' : 't_right',
'region' : 'Right',
'dofs' : {'t.0' : 0.0},
}
# 'Left' : ('T3', (30,), 'linear_y'),
integral_1 = {
'name' : 'i',
'order' : 2,
}
equations = {
'Temperature' : """dw_laplace.i.Omega( coef.val, s, t ) = 0"""
}
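# The analytical solution below is the linear ramp fixed by the EBCs:
# t = 5 on 'Left' (x = -0.5) and t = 0 on 'Right' (x = 0.5).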
solution = {
't' : '- 5.0 * (x - 0.5)',
}
solver_0 = {
'name' : 'ls',
'kind' : 'ls.scipy_direct',
}
solver_1 = {
'name' : 'newton',
'kind' : 'nls.newton',
'i_max' : 1,
'eps_a' : 1e-10,
}
lin_min, lin_max = 0.0, 2.0
##
# 31.05.2007, c
def linear( bc, ts, coor, which ):
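    """Map the `which`-th coordinate component linearly onto [lin_min, lin_max]."""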
vals = coor[:,which]
min_val, max_val = vals.min(), vals.max()
vals = (vals - min_val) / (max_val - min_val) * (lin_max - lin_min) + lin_min
return vals
##
# 31.05.2007, c
def linear_x( bc, ts, coor ):
return linear( bc, ts, coor, 0 )
def linear_y( bc, ts, coor ):
return linear( bc, ts, coor, 1 )
def linear_z( bc, ts, coor ):
return linear( bc, ts, coor, 2 )
from sfepy.base.testing import TestCommon
##
# 30.05.2007, c
class Test( TestCommon ):
##
# 30.05.2007, c
def from_conf( conf, options ):
from sfepy.applications import solve_pde
problem, state = solve_pde(conf, save_results=False)
test = Test(problem=problem, state=state, conf=conf, options=options)
return test
from_conf = staticmethod( from_conf )
##
# 30.05.2007, c
def test_solution( self ):
sol = self.conf.solution
vec = self.state()
problem = self.problem
variables = problem.get_variables()
ok = True
for var_name, expression in six.iteritems(sol):
coor = variables[var_name].field.get_coor()
ana_sol = self.eval_coor_expression( expression, coor )
num_sol = variables.get_state_part_view( vec, var_name )
ret = self.compare_vectors( ana_sol, num_sol,
label1 = 'analytical %s' % var_name,
label2 = 'numerical %s' % var_name )
if not ret:
self.report( 'variable %s: failed' % var_name )
ok = ok and ret
return ok
##
# c: 30.05.2007, r: 19.02.2008
def test_boundary_fluxes( self ):
import os.path as op
from sfepy.linalg import rotation_matrix2d
from sfepy.discrete.evaluate import BasicEvaluator
from sfepy.discrete import Material
problem = self.problem
angles = [0, 30, 45]
region_names = ['Left', 'Right', 'Gamma']
values = [5.0, -5.0, 0.0]
variables = problem.get_variables()
get_state = variables.get_state_part_view
state = self.state.copy(deep=True)
problem.time_update(ebcs={}, epbcs={})
# problem.save_ebc( 'aux.vtk' )
state.apply_ebc()
ev = BasicEvaluator( problem )
aux = ev.eval_residual(state())
field = variables['t'].field
conf_m = problem.conf.get_item_by_name('materials', 'm')
m = Material.from_conf(conf_m, problem.functions)
name = op.join( self.options.out_dir,
op.split( problem.domain.mesh.name )[1] + '_%02d.mesh' )
orig_coors = problem.get_mesh_coors().copy()
ok = True
for ia, angle in enumerate( angles ):
self.report( '%d: mesh rotation %d degrees' % (ia, angle) )
problem.domain.mesh.transform_coors( rotation_matrix2d( angle ),
ref_coors = orig_coors )
problem.set_mesh_coors(problem.domain.mesh.coors,
update_fields=True)
problem.domain.mesh.write( name % angle, io = 'auto' )
for ii, region_name in enumerate( region_names ):
flux_term = 'd_surface_flux.i.%s( m.K, t )' % region_name
val1 = problem.evaluate(flux_term, t=variables['t'], m=m)
rvec = get_state( aux, 't', True )
reg = problem.domain.regions[region_name]
nods = field.get_dofs_in_region(reg, merge=True)
val2 = rvec[nods].sum() # Assume 1 dof per node.
ok = ok and ((abs( val1 - values[ii] ) < 1e-10) and
(abs( val2 - values[ii] ) < 1e-10))
self.report( ' %d. %s: %e == %e == %e'\
% (ii, region_name, val1, val2, values[ii]) )
# Restore original coordinates.
problem.domain.mesh.transform_coors(rotation_matrix2d(0),
ref_coors=orig_coors)
problem.set_mesh_coors(problem.domain.mesh.coors,
update_fields=True)
return ok
|
[
"sfepy.applications.solve_pde",
"sfepy.linalg.rotation_matrix2d",
"sfepy.discrete.Material.from_conf",
"sfepy.discrete.evaluate.BasicEvaluator"
] |
[((2386, 2421), 'sfepy.applications.solve_pde', 'solve_pde', (['conf'], {'save_results': '(False)'}), '(conf, save_results=False)\n', (2395, 2421), False, 'from sfepy.applications import solve_pde\n'), ((2813, 2831), 'six.iteritems', 'six.iteritems', (['sol'], {}), '(sol)\n', (2826, 2831), False, 'import six\n'), ((4046, 4069), 'sfepy.discrete.evaluate.BasicEvaluator', 'BasicEvaluator', (['problem'], {}), '(problem)\n', (4060, 4069), False, 'from sfepy.discrete.evaluate import BasicEvaluator\n'), ((4228, 4273), 'sfepy.discrete.Material.from_conf', 'Material.from_conf', (['conf_m', 'problem.functions'], {}), '(conf_m, problem.functions)\n', (4246, 4273), False, 'from sfepy.discrete import Material\n'), ((5729, 5749), 'sfepy.linalg.rotation_matrix2d', 'rotation_matrix2d', (['(0)'], {}), '(0)\n', (5746, 5749), False, 'from sfepy.linalg import rotation_matrix2d\n'), ((4642, 4666), 'sfepy.linalg.rotation_matrix2d', 'rotation_matrix2d', (['angle'], {}), '(angle)\n', (4659, 4666), False, 'from sfepy.linalg import rotation_matrix2d\n'), ((4345, 4379), 'os.path.split', 'op.split', (['problem.domain.mesh.name'], {}), '(problem.domain.mesh.name)\n', (4353, 4379), True, 'import os.path as op\n')]
|
"""add school abbreviations and acronyms
Revision ID: 41f361ac6a74
Revises: c1b1ed99e50d
Create Date: 2022-06-07 03:48:15.445488+00:00
"""
import sqlalchemy as sa
import sqlmodel
from alembic import op
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = "41f361ac6a74"
down_revision = "c1b1ed99e50d"
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
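    # server_default backfills existing rows with an empty varchar[], so the
    # new NOT NULL array columns can be added without a separate data migration.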
op.add_column(
"schools",
sa.Column(
"abbreviations",
postgresql.ARRAY(sqlmodel.sql.sqltypes.AutoString()),
nullable=False,
server_default=sa.text("array[]::varchar[]"),
),
)
op.add_column(
"schools",
sa.Column(
"alternatives",
postgresql.ARRAY(sqlmodel.sql.sqltypes.AutoString()),
nullable=False,
server_default=sa.text("array[]::varchar[]"),
),
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column("schools", "alternatives")
op.drop_column("schools", "abbreviations")
# ### end Alembic commands ###
|
[
"sqlmodel.sql.sqltypes.AutoString"
] |
[((1100, 1141), 'alembic.op.drop_column', 'op.drop_column', (['"""schools"""', '"""alternatives"""'], {}), "('schools', 'alternatives')\n", (1114, 1141), False, 'from alembic import op\n'), ((1146, 1188), 'alembic.op.drop_column', 'op.drop_column', (['"""schools"""', '"""abbreviations"""'], {}), "('schools', 'abbreviations')\n", (1160, 1188), False, 'from alembic import op\n'), ((582, 616), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (614, 616), False, 'import sqlmodel\n'), ((674, 703), 'sqlalchemy.text', 'sa.text', (['"""array[]::varchar[]"""'], {}), "('array[]::varchar[]')\n", (681, 703), True, 'import sqlalchemy as sa\n'), ((836, 870), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (868, 870), False, 'import sqlmodel\n'), ((928, 957), 'sqlalchemy.text', 'sa.text', (['"""array[]::varchar[]"""'], {}), "('array[]::varchar[]')\n", (935, 957), True, 'import sqlalchemy as sa\n')]
|
from typing import Optional
from pydantic import EmailStr
from sqlmodel import Field, SQLModel
# define your database tables (models) here
class User(SQLModel, table=True):
id: Optional[int] = Field(default=None, nullable=False, primary_key=True)
name: str = Field(nullable=False)
email: EmailStr = Field(
nullable=False,
)
password: str = Field(nullable=False)
|
[
"sqlmodel.Field"
] |
[((200, 253), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'nullable': '(False)', 'primary_key': '(True)'}), '(default=None, nullable=False, primary_key=True)\n', (205, 253), False, 'from sqlmodel import Field, SQLModel\n'), ((270, 291), 'sqlmodel.Field', 'Field', ([], {'nullable': '(False)'}), '(nullable=False)\n', (275, 291), False, 'from sqlmodel import Field, SQLModel\n'), ((314, 335), 'sqlmodel.Field', 'Field', ([], {'nullable': '(False)'}), '(nullable=False)\n', (319, 335), False, 'from sqlmodel import Field, SQLModel\n'), ((371, 392), 'sqlmodel.Field', 'Field', ([], {'nullable': '(False)'}), '(nullable=False)\n', (376, 392), False, 'from sqlmodel import Field, SQLModel\n')]
|
# -*- coding: utf-8 -*-
# MIT License
#
# Copyright (c) 2019 Megvii Technology
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# ------------------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2019 Megvii Inc. All rights reserved.
# ------------------------------------------------------------------------------
import argparse
import multiprocessing as mp
import os
import time
import sys
import logging
import megengine.distributed as dist
import torch
import torch.optim as optim
import torch.nn.functional as F
import datasets
import torchvision.transforms as transforms
import shufflenet_v2_pytorch as M
from tensorboardX import SummaryWriter
from devkit.core import (init_dist, broadcast_params, average_gradients, load_state_ckpt, load_state, save_checkpoint, LRScheduler, CrossEntropyLoss)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-a", "--arch", default="shufflenet_v2_x0_5", type=str)
parser.add_argument("-d", "--data", default=None, type=str)
parser.add_argument("-s", "--save", default="./models", type=str)
parser.add_argument("-m", "--model", default=None, type=str)
    parser.add_argument('-o', '--output', type=str, required=True, help='set path for checkpoints with tensorboard')
parser.add_argument("-b", "--batch-size", default=128, type=int)
parser.add_argument("--learning-rate", default=0.0625, type=float)
parser.add_argument("--momentum", default=0.9, type=float)
parser.add_argument("--weight-decay", default=4e-5, type=float)
parser.add_argument("--steps", default=300000, type=int)
parser.add_argument("-n", "--ngpus", default=None, type=int)
parser.add_argument("-w", "--workers", default=4, type=int)
parser.add_argument("--report-freq", default=50, type=int)
parser.add_argument(
'--port', default=29500, type=int, help='port of server')
args = parser.parse_args()
rank, world_size = init_dist(
backend='nccl', port=args.port)
if not os.path.exists(args.output):
os.makedirs(args.output)
if world_size > 1:
# scale learning rate by number of gpus
args.learning_rate *= world_size
# start distributed training, dispatch sub-processes
mp.set_start_method("spawn")
processes = []
for rank in range(world_size):
p = mp.Process(target=worker, args=(rank, world_size, args))
p.start()
processes.append(p)
for p in processes:
p.join()
else:
worker(0, 1, args)
def get_parameters(model):
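    """Split trainable parameters into an optimizer group with weight decay
    (multi-dimensional weights) and one without (biases, BN parameters)."""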
group_no_weight_decay = []
group_weight_decay = []
for pname, p in model.named_parameters():
if p.requires_grad:
if pname.find("weight") >= 0 and len(p.shape) > 1:
# print("include ", pname, p.shape)
group_weight_decay.append(p)
else:
# print("not include ", pname, p.shape)
group_no_weight_decay.append(p)
assert len(list(model.parameters())) == len(group_weight_decay) + len(
group_no_weight_decay
)
groups = [
dict(params=group_weight_decay),
dict(params=group_no_weight_decay, weight_decay=0.0),
]
return groups
def worker(rank, world_size, args):
# pylint: disable=too-many-statements
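    """Per-process training entry: set up logging (rank 0), the optional
    distributed group, the data pipelines, and run the train/eval loop."""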
if rank == 0:
save_dir = os.path.join(args.save, args.arch, "b{}".format(args.batch_size * world_size))
if not os.path.exists(save_dir):
os.makedirs(save_dir)
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
format=log_format, datefmt='%m/%d %I:%M:%S %p')
fh = logging.FileHandler(os.path.join(save_dir, 'log.txt'))
fh.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(fh)
if world_size > 1:
# Initialize distributed process group
logging.info("init distributed process group {} / {}".format(rank, world_size))
dist.init_process_group(
master_ip="localhost",
master_port=23456,
world_size=world_size,
rank=rank,
dev=rank,
)
save_dir = os.path.join(args.save, args.arch)
if rank == 0:
prefixs=['train', 'valid']
writers = {prefix: SummaryWriter(os.path.join(args.output, prefix)) for prefix in prefixs}
model = getattr(M, args.arch)()
step_start = 0
# if args.model:
# logging.info("load weights from %s", args.model)
# model.load_state_dict(mge.load(args.model))
# step_start = int(args.model.split("-")[1].split(".")[0])
optimizer = optim.SGD(
get_parameters(model),
lr=args.learning_rate,
momentum=args.momentum,
weight_decay=args.weight_decay,
)
# Define train and valid graph
def train_func(image, label):
model.train()
logits = model(image)
loss = F.cross_entropy_with_softmax(logits, label, label_smooth=0.1)
acc1, acc5 = F.accuracy(logits, label, (1, 5))
        loss.backward()  # compute gradients (torch optimizers have no .backward())
if dist.is_distributed(): # all_reduce_mean
loss = dist.all_reduce_sum(loss) / dist.get_world_size()
acc1 = dist.all_reduce_sum(acc1) / dist.get_world_size()
acc5 = dist.all_reduce_sum(acc5) / dist.get_world_size()
return loss, acc1, acc5
def valid_func(image, label):
model.eval()
logits = model(image)
loss = F.cross_entropy_with_softmax(logits, label, label_smooth=0.1)
acc1, acc5 = F.accuracy(logits, label, (1, 5))
if dist.is_distributed(): # all_reduce_mean
loss = dist.all_reduce_sum(loss) / dist.get_world_size()
acc1 = dist.all_reduce_sum(acc1) / dist.get_world_size()
acc5 = dist.all_reduce_sum(acc5) / dist.get_world_size()
return loss, acc1, acc5
# Build train and valid datasets
logging.info("preparing dataset..")
transform = transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
train_dataset = datasets.ImageNet(split='train', transform=transform)
train_sampler = torch.utils.data.RandomSampler(train_dataset)
train_queue = torch.utils.data.DataLoader(
train_dataset,
batch_size=args.batch_size,
sampler=train_sampler,
shuffle=False,
drop_last=True,
pin_memory=True,
num_workers=args.workers
)
train_queue = iter(train_queue)
transform = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
valid_dataset = datasets.ImageNet(split='val', transform=transform)
valid_sampler = torch.utils.data.SequentialSampler(valid_dataset)
valid_queue = torch.utils.data.DataLoader(
valid_dataset,
batch_size=100,
sampler=valid_sampler,
shuffle=False,
drop_last=False,
num_workers=args.workers
)
# Start training
objs = AverageMeter("Loss")
top1 = AverageMeter("Acc@1")
top5 = AverageMeter("Acc@5")
total_time = AverageMeter("Time")
t = time.time()
best_valid_acc = 0
for step in range(step_start, args.steps + 1):
# Linear learning rate decay
        decay = 1 - float(step) / args.steps if step < args.steps else 0
for param_group in optimizer.param_groups:
param_group["lr"] = args.learning_rate * decay
image, label = next(train_queue)
time_data=time.time()-t
# image = image.astype("float32")
# label = label.astype("int32")
n = image.shape[0]
optimizer.zero_grad()
loss, acc1, acc5 = train_func(image, label)
optimizer.step()
top1.update(100 * acc1.numpy()[0], n)
top5.update(100 * acc5.numpy()[0], n)
objs.update(loss.numpy()[0], n)
total_time.update(time.time() - t)
time_iter=time.time()-t
t = time.time()
if step % args.report_freq == 0 and rank == 0:
logging.info(
"TRAIN Iter %06d: lr = %f,\tloss = %f,\twc_loss = 1,\tTop-1 err = %f,\tTop-5 err = %f,\tdata_time = %f,\ttrain_time = %f,\tremain_hours=%f",
step,
args.learning_rate * decay,
float(objs.__str__().split()[1]),
1-float(top1.__str__().split()[1])/100,
1-float(top5.__str__().split()[1])/100,
time_data,
time_iter - time_data,
time_iter * (args.steps - step) / 3600,
)
writers['train'].add_scalar('loss', float(objs.__str__().split()[1]), global_step=step)
writers['train'].add_scalar('top1_err', 1-float(top1.__str__().split()[1])/100, global_step=step)
writers['train'].add_scalar('top5_err', 1-float(top5.__str__().split()[1])/100, global_step=step)
objs.reset()
top1.reset()
top5.reset()
total_time.reset()
if step % 10000 == 0 and step != 0:
loss, valid_acc, valid_acc5 = infer(valid_func, valid_queue, args)
logging.info("TEST Iter %06d: loss = %f,\tTop-1 err = %f,\tTop-5 err = %f", step, loss, 1-valid_acc/100, 1-valid_acc5/100)
is_best = valid_acc > best_valid_acc
best_valid_acc = max(valid_acc, best_valid_acc)
if rank == 0:
writers['valid'].add_scalar('loss', loss, global_step=step)
writers['valid'].add_scalar('top1_err', 1-valid_acc/100, global_step=step)
writers['valid'].add_scalar('top5_err', 1-valid_acc5/100, global_step=step)
logging.info("SAVING %06d", step)
save_checkpoint(save_dir, {
'step': step + 1,
'model': args.arch,
'state_dict': model.state_dict(),
'best_prec1': best_valid_acc,
'optimizer': optimizer.state_dict(),
}, is_best)
def infer(model, data_queue, args):
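    """Run one full pass over `data_queue`; return average loss, top-1 and
    top-5 accuracy."""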
objs = AverageMeter("Loss")
top1 = AverageMeter("Acc@1")
top5 = AverageMeter("Acc@5")
total_time = AverageMeter("Time")
t = time.time()
for step, (image, label) in enumerate(data_queue):
n = image.shape[0]
image = image.astype("float32") # convert np.uint8 to float32
label = label.astype("int32")
loss, acc1, acc5 = model(image, label)
objs.update(loss.numpy()[0], n)
top1.update(100 * acc1.numpy()[0], n)
top5.update(100 * acc5.numpy()[0], n)
total_time.update(time.time() - t)
t = time.time()
if step % args.report_freq == 0 and dist.get_rank() == 0:
logging.info(
"Step %d, %s %s %s %s",
step,
objs,
top1,
top5,
total_time,
)
return objs.avg, top1.avg, top5.avg
class AverageMeter:
"""Computes and stores the average and current value"""
def __init__(self, name, fmt=":.3f"):
self.name = name
self.fmt = fmt
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __str__(self):
fmtstr = "{name} {val" + self.fmt + "} ({avg" + self.fmt + "})"
return fmtstr.format(**self.__dict__)
if __name__ == "__main__":
main()
|
[
"megengine.distributed.is_distributed",
"megengine.distributed.get_rank",
"megengine.distributed.get_world_size",
"megengine.distributed.all_reduce_sum",
"megengine.distributed.init_process_group"
] |
[((2327, 2352), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2350, 2352), False, 'import argparse\n'), ((3419, 3460), 'devkit.core.init_dist', 'init_dist', ([], {'backend': '"""nccl"""', 'port': 'args.port'}), "(backend='nccl', port=args.port)\n", (3428, 3460), False, 'from devkit.core import init_dist, broadcast_params, average_gradients, load_state_ckpt, load_state, save_checkpoint, LRScheduler, CrossEntropyLoss\n'), ((5720, 5754), 'os.path.join', 'os.path.join', (['args.save', 'args.arch'], {}), '(args.save, args.arch)\n', (5732, 5754), False, 'import os\n'), ((7485, 7520), 'logging.info', 'logging.info', (['"""preparing dataset.."""'], {}), "('preparing dataset..')\n", (7497, 7520), False, 'import logging\n'), ((7865, 7918), 'datasets.ImageNet', 'datasets.ImageNet', ([], {'split': '"""train"""', 'transform': 'transform'}), "(split='train', transform=transform)\n", (7882, 7918), False, 'import datasets\n'), ((7939, 7984), 'torch.utils.data.RandomSampler', 'torch.utils.data.RandomSampler', (['train_dataset'], {}), '(train_dataset)\n', (7969, 7984), False, 'import torch\n'), ((8003, 8178), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['train_dataset'], {'batch_size': 'args.batch_size', 'sampler': 'train_sampler', 'shuffle': '(False)', 'drop_last': '(True)', 'pin_memory': '(True)', 'num_workers': 'args.workers'}), '(train_dataset, batch_size=args.batch_size,\n sampler=train_sampler, shuffle=False, drop_last=True, pin_memory=True,\n num_workers=args.workers)\n', (8030, 8178), False, 'import torch\n'), ((8518, 8569), 'datasets.ImageNet', 'datasets.ImageNet', ([], {'split': '"""val"""', 'transform': 'transform'}), "(split='val', transform=transform)\n", (8535, 8569), False, 'import datasets\n'), ((8590, 8639), 'torch.utils.data.SequentialSampler', 'torch.utils.data.SequentialSampler', (['valid_dataset'], {}), '(valid_dataset)\n', (8624, 8639), False, 'import torch\n'), ((8658, 8802), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['valid_dataset'], {'batch_size': '(100)', 'sampler': 'valid_sampler', 'shuffle': '(False)', 'drop_last': '(False)', 'num_workers': 'args.workers'}), '(valid_dataset, batch_size=100, sampler=\n valid_sampler, shuffle=False, drop_last=False, num_workers=args.workers)\n', (8685, 8802), False, 'import torch\n'), ((9019, 9030), 'time.time', 'time.time', ([], {}), '()\n', (9028, 9030), False, 'import time\n'), ((12105, 12116), 'time.time', 'time.time', ([], {}), '()\n', (12114, 12116), False, 'import time\n'), ((3482, 3509), 'os.path.exists', 'os.path.exists', (['args.output'], {}), '(args.output)\n', (3496, 3509), False, 'import os\n'), ((3519, 3543), 'os.makedirs', 'os.makedirs', (['args.output'], {}), '(args.output)\n', (3530, 3543), False, 'import os\n'), ((3726, 3754), 'multiprocessing.set_start_method', 'mp.set_start_method', (['"""spawn"""'], {}), "('spawn')\n", (3745, 3754), True, 'import multiprocessing as mp\n'), ((5055, 5166), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO', 'format': 'log_format', 'datefmt': '"""%m/%d %I:%M:%S %p"""'}), "(stream=sys.stdout, level=logging.INFO, format=\n log_format, datefmt='%m/%d %I:%M:%S %p')\n", (5074, 5166), False, 'import logging\n'), ((5523, 5636), 'megengine.distributed.init_process_group', 'dist.init_process_group', ([], {'master_ip': '"""localhost"""', 'master_port': '(23456)', 'world_size': 'world_size', 'rank': 'rank', 'dev': 'rank'}), "(master_ip='localhost', master_port=23456,\n world_size=world_size, 
rank=rank, dev=rank)\n", (5546, 5636), True, 'import megengine.distributed as dist\n'), ((6470, 6531), 'torch.nn.functional.cross_entropy_with_softmax', 'F.cross_entropy_with_softmax', (['logits', 'label'], {'label_smooth': '(0.1)'}), '(logits, label, label_smooth=0.1)\n', (6498, 6531), True, 'import torch.nn.functional as F\n'), ((6553, 6586), 'torch.nn.functional.accuracy', 'F.accuracy', (['logits', 'label', '(1, 5)'], {}), '(logits, label, (1, 5))\n', (6563, 6586), True, 'import torch.nn.functional as F\n'), ((6652, 6673), 'megengine.distributed.is_distributed', 'dist.is_distributed', ([], {}), '()\n', (6671, 6673), True, 'import megengine.distributed as dist\n'), ((7034, 7095), 'torch.nn.functional.cross_entropy_with_softmax', 'F.cross_entropy_with_softmax', (['logits', 'label'], {'label_smooth': '(0.1)'}), '(logits, label, label_smooth=0.1)\n', (7062, 7095), True, 'import torch.nn.functional as F\n'), ((7117, 7150), 'torch.nn.functional.accuracy', 'F.accuracy', (['logits', 'label', '(1, 5)'], {}), '(logits, label, (1, 5))\n', (7127, 7150), True, 'import torch.nn.functional as F\n'), ((7162, 7183), 'megengine.distributed.is_distributed', 'dist.is_distributed', ([], {}), '()\n', (7181, 7183), True, 'import megengine.distributed as dist\n'), ((9858, 9869), 'time.time', 'time.time', ([], {}), '()\n', (9867, 9869), False, 'import time\n'), ((12544, 12555), 'time.time', 'time.time', ([], {}), '()\n', (12553, 12555), False, 'import time\n'), ((3833, 3889), 'multiprocessing.Process', 'mp.Process', ([], {'target': 'worker', 'args': '(rank, world_size, args)'}), '(target=worker, args=(rank, world_size, args))\n', (3843, 3889), True, 'import multiprocessing as mp\n'), ((4939, 4963), 'os.path.exists', 'os.path.exists', (['save_dir'], {}), '(save_dir)\n', (4953, 4963), False, 'import os\n'), ((4977, 4998), 'os.makedirs', 'os.makedirs', (['save_dir'], {}), '(save_dir)\n', (4988, 4998), False, 'import os\n'), ((5223, 5256), 'os.path.join', 'os.path.join', (['save_dir', '"""log.txt"""'], {}), "(save_dir, 'log.txt')\n", (5235, 5256), False, 'import os\n'), ((5282, 5311), 'logging.Formatter', 'logging.Formatter', (['log_format'], {}), '(log_format)\n', (5299, 5311), False, 'import logging\n'), ((7567, 7600), 'torchvision.transforms.RandomResizedCrop', 'transforms.RandomResizedCrop', (['(224)'], {}), '(224)\n', (7595, 7600), True, 'import torchvision.transforms as transforms\n'), ((7610, 7643), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (7641, 7643), True, 'import torchvision.transforms as transforms\n'), ((7653, 7721), 'torchvision.transforms.ColorJitter', 'transforms.ColorJitter', ([], {'brightness': '(0.4)', 'contrast': '(0.4)', 'saturation': '(0.4)'}), '(brightness=0.4, contrast=0.4, saturation=0.4)\n', (7675, 7721), True, 'import torchvision.transforms as transforms\n'), ((7731, 7752), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (7750, 7752), True, 'import torchvision.transforms as transforms\n'), ((7762, 7837), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (7782, 7837), True, 'import torchvision.transforms as transforms\n'), ((8316, 8338), 'torchvision.transforms.Resize', 'transforms.Resize', (['(256)'], {}), '(256)\n', (8333, 8338), True, 'import torchvision.transforms as transforms\n'), ((8348, 8374), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', 
(['(224)'], {}), '(224)\n', (8369, 8374), True, 'import torchvision.transforms as transforms\n'), ((8384, 8405), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (8403, 8405), True, 'import torchvision.transforms as transforms\n'), ((8415, 8490), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (8435, 8490), True, 'import torchvision.transforms as transforms\n'), ((9406, 9417), 'time.time', 'time.time', ([], {}), '()\n', (9415, 9417), False, 'import time\n'), ((9832, 9843), 'time.time', 'time.time', ([], {}), '()\n', (9841, 9843), False, 'import time\n'), ((11038, 11172), 'logging.info', 'logging.info', (['"""TEST Iter %06d: loss = %f,\tTop-1 err = %f,\tTop-5 err = %f"""', 'step', 'loss', '(1 - valid_acc / 100)', '(1 - valid_acc5 / 100)'], {}), "('TEST Iter %06d: loss = %f,\\tTop-1 err = %f,\\tTop-5 err = %f',\n step, loss, 1 - valid_acc / 100, 1 - valid_acc5 / 100)\n", (11050, 11172), False, 'import logging\n'), ((12635, 12707), 'logging.info', 'logging.info', (['"""Step %d, %s %s %s %s"""', 'step', 'objs', 'top1', 'top5', 'total_time'], {}), "('Step %d, %s %s %s %s', step, objs, top1, top5, total_time)\n", (12647, 12707), False, 'import logging\n'), ((5321, 5340), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (5338, 5340), False, 'import logging\n'), ((5850, 5883), 'os.path.join', 'os.path.join', (['args.output', 'prefix'], {}), '(args.output, prefix)\n', (5862, 5883), False, 'import os\n'), ((6713, 6738), 'megengine.distributed.all_reduce_sum', 'dist.all_reduce_sum', (['loss'], {}), '(loss)\n', (6732, 6738), True, 'import megengine.distributed as dist\n'), ((6741, 6762), 'megengine.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (6760, 6762), True, 'import megengine.distributed as dist\n'), ((6782, 6807), 'megengine.distributed.all_reduce_sum', 'dist.all_reduce_sum', (['acc1'], {}), '(acc1)\n', (6801, 6807), True, 'import megengine.distributed as dist\n'), ((6810, 6831), 'megengine.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (6829, 6831), True, 'import megengine.distributed as dist\n'), ((6851, 6876), 'megengine.distributed.all_reduce_sum', 'dist.all_reduce_sum', (['acc5'], {}), '(acc5)\n', (6870, 6876), True, 'import megengine.distributed as dist\n'), ((6879, 6900), 'megengine.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (6898, 6900), True, 'import megengine.distributed as dist\n'), ((7223, 7248), 'megengine.distributed.all_reduce_sum', 'dist.all_reduce_sum', (['loss'], {}), '(loss)\n', (7242, 7248), True, 'import megengine.distributed as dist\n'), ((7251, 7272), 'megengine.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (7270, 7272), True, 'import megengine.distributed as dist\n'), ((7292, 7317), 'megengine.distributed.all_reduce_sum', 'dist.all_reduce_sum', (['acc1'], {}), '(acc1)\n', (7311, 7317), True, 'import megengine.distributed as dist\n'), ((7320, 7341), 'megengine.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (7339, 7341), True, 'import megengine.distributed as dist\n'), ((7361, 7386), 'megengine.distributed.all_reduce_sum', 'dist.all_reduce_sum', (['acc5'], {}), '(acc5)\n', (7380, 7386), True, 'import megengine.distributed as dist\n'), ((7389, 7410), 'megengine.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (7408, 7410), True, 'import 
megengine.distributed as dist\n'), ((9797, 9808), 'time.time', 'time.time', ([], {}), '()\n', (9806, 9808), False, 'import time\n'), ((11574, 11607), 'logging.info', 'logging.info', (['"""SAVING %06d"""', 'step'], {}), "('SAVING %06d', step)\n", (11586, 11607), False, 'import logging\n'), ((12515, 12526), 'time.time', 'time.time', ([], {}), '()\n', (12524, 12526), False, 'import time\n'), ((12601, 12616), 'megengine.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (12614, 12616), True, 'import megengine.distributed as dist\n')]
|
from typing import Optional
from datetime import datetime
from sqlalchemy import DateTime, String
from sqlalchemy.sql.schema import Column
from sqlmodel import Field, SQLModel
class Test(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str
class DataStorage(SQLModel, table=True):
test_id: int = Field(foreign_key="test.id")
distance: int
created: Optional[datetime] = Field(
default=None,
sa_column=Column("created", DateTime),
)
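# Minimal usage sketch (assumes an in-memory SQLite database; not part of any
# application wiring):
if __name__ == "__main__":
    from sqlmodel import Session, create_engine

    engine = create_engine("sqlite://")
    SQLModel.metadata.create_all(engine)
    with Session(engine) as session:
        test = Test(name="probe")
        session.add(test)
        session.commit()
        session.add(DataStorage(test_id=test.id, distance=42, created=datetime.utcnow()))
        session.commit()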
|
[
"sqlmodel.Field"
] |
[((237, 274), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (242, 274), False, 'from sqlmodel import Field, SQLModel\n'), ((351, 379), 'sqlmodel.Field', 'Field', ([], {'foreign_key': '"""test.id"""'}), "(foreign_key='test.id')\n", (356, 379), False, 'from sqlmodel import Field, SQLModel\n'), ((479, 506), 'sqlalchemy.sql.schema.Column', 'Column', (['"""created"""', 'DateTime'], {}), "('created', DateTime)\n", (485, 506), False, 'from sqlalchemy.sql.schema import Column\n')]
|
import os
from sqlmodel import create_engine, Session, select, update
from functools import lru_cache
from typing import Union
from sqlalchemy.exc import NoResultFound
engine = create_engine(os.environ.get('DB_CONN'))
# Grim hack to get the imports working with crawler and main.
# TODO: Split poke models and other common functions out into a separate package the api+crawler can share.
# TODO: Afterwards, split the crawler code out into a separate part of the repo and create an individual Docker image for it.
try:
from poke.poke_model import pokemon as pokemon_model
except ImportError:
from poke_model import pokemon as pokemon_model
@lru_cache(maxsize=16)
def get_pokemon(poke_id: int) -> pokemon_model:
""" Get a pokemon's data from the database from its ID.
Args:
poke_id: ID of the pokemon you want the data for.
Returns:
pokemon_model object containing the data for the pokemon found in the DB.
Raises:
NoResultFound: If there isn't a pokemon in the DB with the passed in ID.
"""
with Session(engine) as session:
poke = session.exec(select(pokemon_model).where(pokemon_model.id == poke_id)).one()
return poke
def get_total_pokemon() -> int:
""" Get the total number of pokemon in the database.
Returns:
int: Number of pokemon in the database.
"""
with Session(engine) as session:
return session.query(pokemon_model).count()
def upsert_pokemon(pokemon: Union[pokemon_model, list[pokemon_model]]) -> None:
""" Takes an individual, or list of pokemon_models that are to be added or updated in place.
Args:
pokemon: pokemon_model objects that are to be created/updated in place in the DB
"""
with Session(engine) as session:
if isinstance(pokemon, list):
# TODO: add bulk inserts
raise NotImplementedError
p = session.exec(select(pokemon_model).where(pokemon_model.id == pokemon.id))
try:
p.one() # see if there was a result for that poke_id
# TODO: Only update if the values are different than in the DB.
            # The statement must actually be executed; building it alone is a no-op.
            session.exec(
                update(pokemon_model)
                .where(pokemon_model.id == pokemon.id)
                .values(**pokemon.dict())
            )
except NoResultFound:
session.add(pokemon)
session.commit()
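# Smoke-test sketch, assuming DB_CONN points at a database that already
# contains pokemon rows:
if __name__ == "__main__":
    print(f"total pokemon: {get_total_pokemon()}")
    try:
        print(get_pokemon(1))
    except NoResultFound:
        print("no pokemon with id 1")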
|
[
"sqlmodel.Session",
"sqlmodel.select",
"sqlmodel.update"
] |
[((633, 654), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(16)'}), '(maxsize=16)\n', (642, 654), False, 'from functools import lru_cache\n'), ((193, 218), 'os.environ.get', 'os.environ.get', (['"""DB_CONN"""'], {}), "('DB_CONN')\n", (207, 218), False, 'import os\n'), ((1039, 1054), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (1046, 1054), False, 'from sqlmodel import create_engine, Session, select, update\n'), ((1349, 1364), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (1356, 1364), False, 'from sqlmodel import create_engine, Session, select, update\n'), ((1726, 1741), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (1733, 1741), False, 'from sqlmodel import create_engine, Session, select, update\n'), ((1894, 1915), 'sqlmodel.select', 'select', (['pokemon_model'], {}), '(pokemon_model)\n', (1900, 1915), False, 'from sqlmodel import create_engine, Session, select, update\n'), ((1095, 1116), 'sqlmodel.select', 'select', (['pokemon_model'], {}), '(pokemon_model)\n', (1101, 1116), False, 'from sqlmodel import create_engine, Session, select, update\n'), ((2123, 2144), 'sqlmodel.update', 'update', (['pokemon_model'], {}), '(pokemon_model)\n', (2129, 2144), False, 'from sqlmodel import create_engine, Session, select, update\n')]
|
"""
Acoustic band gaps in a strongly heterogeneous elastic body, detected using
homogenization techniques.
A reference periodic cell contains two domains: the stiff matrix :math:`Y_m`
and the soft (but heavy) inclusion :math:`Y_c`.
"""
from sfepy import data_dir
from sfepy.base.base import Struct
from sfepy.base.ioutils import InDir
from sfepy.homogenization.coefficients import Coefficients
from band_gaps_conf import BandGapsConf, get_pars, clip_sqrt, normalize
clip_sqrt, normalize # Make pyflakes happy...
incwd = InDir(__file__)
filename = data_dir + '/meshes/2d/special/circle_in_square.mesh'
output_dir = incwd('output/band_gaps')
# aluminium, in 1e+10 Pa
D_m = get_pars(2, 5.898, 2.681)
density_m = 0.2799 # in 1e4 kg/m3
# epoxy, in 1e+10 Pa
D_c = get_pars(2, 0.1798, 0.148)
density_c = 0.1142 # in 1e4 kg/m3
mat_pars = Coefficients(D_m=D_m, density_m=density_m,
D_c=D_c, density_c=density_c)
region_selects = Struct(matrix='cells of group 1',
inclusion='cells of group 2')
corrs_save_names = {'evp' : 'evp', 'corrs_rs' : 'corrs_rs'}
options = {
'plot_transform_angle' : None,
'plot_transform_wave' : ('clip_sqrt', (0, 30)),
'plot_transform' : ('normalize', (-2, 2)),
'fig_name' : 'band_gaps',
'fig_name_angle' : 'band_gaps_angle',
'fig_name_wave' : 'band_gaps_wave',
'fig_suffix' : '.pdf',
'coefs_filename' : 'coefs.txt',
'incident_wave_dir' : [1.0, 1.0],
'plot_options' : {
'show' : True,
'legend' : True,
},
'plot_labels' : {
'band_gaps' : {
'resonance' : r'$\lambda^r$',
'masked' : r'masked $\lambda^r$',
'eig_min' : r'min eig($M$)',
'eig_max' : r'max eig($M$)',
'x_axis' : r'$\sqrt{\lambda}$, $\omega$',
'y_axis' : r'eigenvalues of mass matrix $M$',
},
},
'plot_rsc' : {
'params' : {'axes.labelsize': 'x-large',
'text.fontsize': 'large',
'legend.fontsize': 'large',
'legend.loc': 1,
'xtick.labelsize': 'large',
'ytick.labelsize': 'large',
'text.usetex': True},
},
'multiprocessing' : False,
}
evp_options = {
'eigensolver' : 'eig.sgscipy',
'save_eig_vectors' : (12, 0),
'scale_epsilon' : 1.0,
'elasticity_contrast' : 1.0,
}
eigenmomenta_options = {
# eigenmomentum threshold,
'threshold' : 1e-2,
# eigenmomentum threshold is relative w.r.t. largest one,
'threshold_is_relative' : True,
}
band_gaps_options = {
'eig_range' : (0, 30), # -> freq_range
# = sqrt(eigs[slice(*eig_range)][[0, -1]])
'freq_margins' : (10, 10), # % of freq_range
'freq_eps' : 1e-12, # frequency
'zezo_eps' : 1e-12, # zero finding
'freq_step' : 0.0001, # % of freq_range
'log_save_name' : 'band_gaps.log',
}
conf = BandGapsConf(filename, 1, region_selects, mat_pars, options,
evp_options, eigenmomenta_options, band_gaps_options,
corrs_save_names=corrs_save_names, incwd=incwd,
output_dir=output_dir)
define = lambda: conf.conf.to_dict()
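# sfepy consumes problem description files through their ``define()`` callable:
# the band-gaps application imports this module and calls ``define()`` to
# obtain the configuration dictionary assembled above.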
|
[
"sfepy.base.base.Struct",
"sfepy.homogenization.coefficients.Coefficients",
"sfepy.base.ioutils.InDir"
] |
[((524, 539), 'sfepy.base.ioutils.InDir', 'InDir', (['__file__'], {}), '(__file__)\n', (529, 539), False, 'from sfepy.base.ioutils import InDir\n'), ((678, 703), 'band_gaps_conf.get_pars', 'get_pars', (['(2)', '(5.898)', '(2.681)'], {}), '(2, 5.898, 2.681)\n', (686, 703), False, 'from band_gaps_conf import BandGapsConf, get_pars, clip_sqrt, normalize\n'), ((766, 792), 'band_gaps_conf.get_pars', 'get_pars', (['(2)', '(0.1798)', '(0.148)'], {}), '(2, 0.1798, 0.148)\n', (774, 792), False, 'from band_gaps_conf import BandGapsConf, get_pars, clip_sqrt, normalize\n'), ((839, 911), 'sfepy.homogenization.coefficients.Coefficients', 'Coefficients', ([], {'D_m': 'D_m', 'density_m': 'density_m', 'D_c': 'D_c', 'density_c': 'density_c'}), '(D_m=D_m, density_m=density_m, D_c=D_c, density_c=density_c)\n', (851, 911), False, 'from sfepy.homogenization.coefficients import Coefficients\n'), ((954, 1017), 'sfepy.base.base.Struct', 'Struct', ([], {'matrix': '"""cells of group 1"""', 'inclusion': '"""cells of group 2"""'}), "(matrix='cells of group 1', inclusion='cells of group 2')\n", (960, 1017), False, 'from sfepy.base.base import Struct\n'), ((2951, 3145), 'band_gaps_conf.BandGapsConf', 'BandGapsConf', (['filename', '(1)', 'region_selects', 'mat_pars', 'options', 'evp_options', 'eigenmomenta_options', 'band_gaps_options'], {'corrs_save_names': 'corrs_save_names', 'incwd': 'incwd', 'output_dir': 'output_dir'}), '(filename, 1, region_selects, mat_pars, options, evp_options,\n eigenmomenta_options, band_gaps_options, corrs_save_names=\n corrs_save_names, incwd=incwd, output_dir=output_dir)\n', (2963, 3145), False, 'from band_gaps_conf import BandGapsConf, get_pars, clip_sqrt, normalize\n')]
|
import pytest
from typing import Optional
from sqlmodel import Field, Session, SQLModel, create_engine
from sqlalchemy.exc import IntegrityError
def test_should_allow_duplicate_row_if_unique_constraint_is_not_passed(clear_sqlmodel):
class Hero(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str
secret_name: str
age: Optional[int] = None
hero_1 = Hero(name="Deadpond", secret_name="<NAME>")
hero_2 = Hero(name="Deadpond", secret_name="<NAME>")
engine = create_engine("sqlite://")
SQLModel.metadata.create_all(engine)
with Session(engine) as session:
session.add(hero_1)
session.commit()
session.refresh(hero_1)
with Session(engine) as session:
session.add(hero_2)
session.commit()
session.refresh(hero_2)
with Session(engine) as session:
heroes = session.query(Hero).all()
assert len(heroes) == 2
assert heroes[0].name == heroes[1].name
def test_should_allow_duplicate_row_if_unique_constraint_is_false(clear_sqlmodel):
class Hero(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str
secret_name: str = Field(unique=False)
age: Optional[int] = None
hero_1 = Hero(name="Deadpond", secret_name="<NAME>")
hero_2 = Hero(name="Deadpond", secret_name="<NAME>")
engine = create_engine("sqlite://")
SQLModel.metadata.create_all(engine)
with Session(engine) as session:
session.add(hero_1)
session.commit()
session.refresh(hero_1)
with Session(engine) as session:
session.add(hero_2)
session.commit()
session.refresh(hero_2)
with Session(engine) as session:
heroes = session.query(Hero).all()
assert len(heroes) == 2
assert heroes[0].name == heroes[1].name
def test_should_raise_exception_when_try_to_duplicate_row_if_unique_constraint_is_true(clear_sqlmodel):
class Hero(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str
secret_name: str = Field(unique=True)
age: Optional[int] = None
hero_1 = Hero(name="Deadpond", secret_name="<NAME>")
hero_2 = Hero(name="Deadpond", secret_name="<NAME>")
engine = create_engine("sqlite://")
SQLModel.metadata.create_all(engine)
with Session(engine) as session:
session.add(hero_1)
session.commit()
session.refresh(hero_1)
with pytest.raises(IntegrityError):
with Session(engine) as session:
session.add(hero_2)
session.commit()
session.refresh(hero_2)
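# Note: in all three tests the UNIQUE constraint is only enforced when the
# INSERT is flushed to the database, so the IntegrityError surfaces on
# session.commit(), not on session.add().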
|
[
"sqlmodel.create_engine",
"sqlmodel.Session",
"sqlmodel.SQLModel.metadata.create_all",
"sqlmodel.Field"
] |
[((563, 589), 'sqlmodel.create_engine', 'create_engine', (['"""sqlite://"""'], {}), "('sqlite://')\n", (576, 589), False, 'from sqlmodel import Field, Session, SQLModel, create_engine\n'), ((597, 633), 'sqlmodel.SQLModel.metadata.create_all', 'SQLModel.metadata.create_all', (['engine'], {}), '(engine)\n', (625, 633), False, 'from sqlmodel import Field, Session, SQLModel, create_engine\n'), ((1486, 1512), 'sqlmodel.create_engine', 'create_engine', (['"""sqlite://"""'], {}), "('sqlite://')\n", (1499, 1512), False, 'from sqlmodel import Field, Session, SQLModel, create_engine\n'), ((1520, 1556), 'sqlmodel.SQLModel.metadata.create_all', 'SQLModel.metadata.create_all', (['engine'], {}), '(engine)\n', (1548, 1556), False, 'from sqlmodel import Field, Session, SQLModel, create_engine\n'), ((2429, 2455), 'sqlmodel.create_engine', 'create_engine', (['"""sqlite://"""'], {}), "('sqlite://')\n", (2442, 2455), False, 'from sqlmodel import Field, Session, SQLModel, create_engine\n'), ((2463, 2499), 'sqlmodel.SQLModel.metadata.create_all', 'SQLModel.metadata.create_all', (['engine'], {}), '(engine)\n', (2491, 2499), False, 'from sqlmodel import Field, Session, SQLModel, create_engine\n'), ((311, 348), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (316, 348), False, 'from sqlmodel import Field, Session, SQLModel, create_engine\n'), ((646, 661), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (653, 661), False, 'from sqlmodel import Field, Session, SQLModel, create_engine\n'), ((774, 789), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (781, 789), False, 'from sqlmodel import Field, Session, SQLModel, create_engine\n'), ((902, 917), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (909, 917), False, 'from sqlmodel import Field, Session, SQLModel, create_engine\n'), ((1212, 1249), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (1217, 1249), False, 'from sqlmodel import Field, Session, SQLModel, create_engine\n'), ((1297, 1316), 'sqlmodel.Field', 'Field', ([], {'unique': '(False)'}), '(unique=False)\n', (1302, 1316), False, 'from sqlmodel import Field, Session, SQLModel, create_engine\n'), ((1569, 1584), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (1576, 1584), False, 'from sqlmodel import Field, Session, SQLModel, create_engine\n'), ((1697, 1712), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (1704, 1712), False, 'from sqlmodel import Field, Session, SQLModel, create_engine\n'), ((1825, 1840), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (1832, 1840), False, 'from sqlmodel import Field, Session, SQLModel, create_engine\n'), ((2156, 2193), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (2161, 2193), False, 'from sqlmodel import Field, Session, SQLModel, create_engine\n'), ((2241, 2259), 'sqlmodel.Field', 'Field', ([], {'unique': '(True)'}), '(unique=True)\n', (2246, 2259), False, 'from sqlmodel import Field, Session, SQLModel, create_engine\n'), ((2512, 2527), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (2519, 2527), False, 'from sqlmodel import Field, Session, SQLModel, create_engine\n'), ((2640, 2669), 'pytest.raises', 'pytest.raises', (['IntegrityError'], {}), '(IntegrityError)\n', (2653, 2669), False, 'import pytest\n'), ((2685, 2700), 'sqlmodel.Session', 'Session', (['engine'], 
{}), '(engine)\n', (2692, 2700), False, 'from sqlmodel import Field, Session, SQLModel, create_engine\n')]
|
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
import megengine as mge
import megengine.module as M
import pytest
from basecls.models.regnet import RegBottleneckBlock
from basecls.models.resnet import (
AnyStage,
ResBasicBlock,
ResBottleneckBlock,
ResDeepStem,
ResStem,
SimpleStem,
)
@pytest.mark.parametrize("Block", [RegBottleneckBlock, ResBasicBlock, ResBottleneckBlock])
@pytest.mark.parametrize("w_in", [32])
@pytest.mark.parametrize("w_out", [32, 64])
@pytest.mark.parametrize("stride", [1, 2])
@pytest.mark.parametrize("bot_mul", [1.0, 0.25])
@pytest.mark.parametrize("group_w", [8])
@pytest.mark.parametrize("se_r", [0.0, 0.25])
@pytest.mark.parametrize("avg_down", [True, False])
@pytest.mark.parametrize("drop_path_prob", [0.05, 0.1])
@pytest.mark.parametrize("norm_name", ["BN"])
@pytest.mark.parametrize("act_name", ["relu"])
def test_block(
Block,
w_in,
w_out,
stride,
bot_mul,
group_w,
se_r,
avg_down,
drop_path_prob,
norm_name,
act_name,
):
m = Block(
w_in,
w_out,
stride,
bot_mul=bot_mul,
group_w=group_w,
se_r=se_r,
avg_down=avg_down,
drop_path_prob=drop_path_prob,
norm_name=norm_name,
act_name=act_name,
)
assert isinstance(m, M.Module)
m(mge.random.normal(size=(2, 32, 8, 8)))
@pytest.mark.parametrize("Stem", [ResDeepStem, ResStem, SimpleStem])
@pytest.mark.parametrize("w_in", [3])
@pytest.mark.parametrize("w_out", [8, 16])
@pytest.mark.parametrize("norm_name", ["BN"])
@pytest.mark.parametrize("act_name", ["relu"])
def test_stem(Stem, w_in, w_out, norm_name, act_name):
m = Stem(w_in, w_out, norm_name=norm_name, act_name=act_name)
assert isinstance(m, M.Module)
m(mge.random.normal(size=(2, 3, 8, 8)))
@pytest.mark.parametrize("w_in", [4])
@pytest.mark.parametrize("w_out", [4, 8])
@pytest.mark.parametrize("stride", [1, 2])
@pytest.mark.parametrize("depth", [2])
@pytest.mark.parametrize("block_func", [RegBottleneckBlock, ResBasicBlock, ResBottleneckBlock])
@pytest.mark.parametrize("drop_path_prob", [[0.05, 0.1]])
def test_any_stage(w_in, w_out, stride, depth, block_func, drop_path_prob):
m = AnyStage(
w_in,
w_out,
stride,
depth,
block_func,
drop_path_prob,
bot_mul=1.0,
group_w=4,
se_r=0.0,
avg_down=False,
norm_name="BN",
act_name="relu",
)
assert isinstance(m, M.Module)
assert len(m) == depth
m(mge.random.normal(size=(2, 4, 8, 8)))
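# Note: Block takes drop_path_prob as a single probability, while AnyStage
# takes one probability per block in the stage, hence the nested
# [[0.05, 0.1]] parametrization matching depth=2.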
|
[
"megengine.random.normal"
] |
[((347, 440), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""Block"""', '[RegBottleneckBlock, ResBasicBlock, ResBottleneckBlock]'], {}), "('Block', [RegBottleneckBlock, ResBasicBlock,\n ResBottleneckBlock])\n", (370, 440), False, 'import pytest\n'), ((438, 475), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""w_in"""', '[32]'], {}), "('w_in', [32])\n", (461, 475), False, 'import pytest\n'), ((477, 519), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""w_out"""', '[32, 64]'], {}), "('w_out', [32, 64])\n", (500, 519), False, 'import pytest\n'), ((521, 562), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""stride"""', '[1, 2]'], {}), "('stride', [1, 2])\n", (544, 562), False, 'import pytest\n'), ((564, 611), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""bot_mul"""', '[1.0, 0.25]'], {}), "('bot_mul', [1.0, 0.25])\n", (587, 611), False, 'import pytest\n'), ((613, 652), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""group_w"""', '[8]'], {}), "('group_w', [8])\n", (636, 652), False, 'import pytest\n'), ((654, 698), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""se_r"""', '[0.0, 0.25]'], {}), "('se_r', [0.0, 0.25])\n", (677, 698), False, 'import pytest\n'), ((700, 750), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""avg_down"""', '[True, False]'], {}), "('avg_down', [True, False])\n", (723, 750), False, 'import pytest\n'), ((752, 806), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""drop_path_prob"""', '[0.05, 0.1]'], {}), "('drop_path_prob', [0.05, 0.1])\n", (775, 806), False, 'import pytest\n'), ((808, 852), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""norm_name"""', "['BN']"], {}), "('norm_name', ['BN'])\n", (831, 852), False, 'import pytest\n'), ((854, 899), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""act_name"""', "['relu']"], {}), "('act_name', ['relu'])\n", (877, 899), False, 'import pytest\n'), ((1403, 1470), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""Stem"""', '[ResDeepStem, ResStem, SimpleStem]'], {}), "('Stem', [ResDeepStem, ResStem, SimpleStem])\n", (1426, 1470), False, 'import pytest\n'), ((1472, 1508), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""w_in"""', '[3]'], {}), "('w_in', [3])\n", (1495, 1508), False, 'import pytest\n'), ((1510, 1551), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""w_out"""', '[8, 16]'], {}), "('w_out', [8, 16])\n", (1533, 1551), False, 'import pytest\n'), ((1553, 1597), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""norm_name"""', "['BN']"], {}), "('norm_name', ['BN'])\n", (1576, 1597), False, 'import pytest\n'), ((1599, 1644), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""act_name"""', "['relu']"], {}), "('act_name', ['relu'])\n", (1622, 1644), False, 'import pytest\n'), ((1849, 1885), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""w_in"""', '[4]'], {}), "('w_in', [4])\n", (1872, 1885), False, 'import pytest\n'), ((1887, 1927), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""w_out"""', '[4, 8]'], {}), "('w_out', [4, 8])\n", (1910, 1927), False, 'import pytest\n'), ((1929, 1970), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""stride"""', '[1, 2]'], {}), "('stride', [1, 2])\n", (1952, 1970), False, 'import pytest\n'), ((1972, 2009), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""depth"""', '[2]'], {}), "('depth', [2])\n", (1995, 2009), False, 'import pytest\n'), ((2011, 2109), 
'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""block_func"""', '[RegBottleneckBlock, ResBasicBlock, ResBottleneckBlock]'], {}), "('block_func', [RegBottleneckBlock, ResBasicBlock,\n ResBottleneckBlock])\n", (2034, 2109), False, 'import pytest\n'), ((2107, 2163), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""drop_path_prob"""', '[[0.05, 0.1]]'], {}), "('drop_path_prob', [[0.05, 0.1]])\n", (2130, 2163), False, 'import pytest\n'), ((2248, 2400), 'basecls.models.resnet.AnyStage', 'AnyStage', (['w_in', 'w_out', 'stride', 'depth', 'block_func', 'drop_path_prob'], {'bot_mul': '(1.0)', 'group_w': '(4)', 'se_r': '(0.0)', 'avg_down': '(False)', 'norm_name': '"""BN"""', 'act_name': '"""relu"""'}), "(w_in, w_out, stride, depth, block_func, drop_path_prob, bot_mul=\n 1.0, group_w=4, se_r=0.0, avg_down=False, norm_name='BN', act_name='relu')\n", (2256, 2400), False, 'from basecls.models.resnet import AnyStage, ResBasicBlock, ResBottleneckBlock, ResDeepStem, ResStem, SimpleStem\n'), ((1361, 1398), 'megengine.random.normal', 'mge.random.normal', ([], {'size': '(2, 32, 8, 8)'}), '(size=(2, 32, 8, 8))\n', (1378, 1398), True, 'import megengine as mge\n'), ((1808, 1844), 'megengine.random.normal', 'mge.random.normal', ([], {'size': '(2, 3, 8, 8)'}), '(size=(2, 3, 8, 8))\n', (1825, 1844), True, 'import megengine as mge\n'), ((2568, 2604), 'megengine.random.normal', 'mge.random.normal', ([], {'size': '(2, 4, 8, 8)'}), '(size=(2, 4, 8, 8))\n', (2585, 2604), True, 'import megengine as mge\n')]
|
from datetime import datetime
import logging
from typing import List, Optional
from pydantic import BaseConfig
from sqlmodel import Field, SQLModel, Session
from sqlalchemy import text
import shortuuid
import random
from faker import Faker
# Line items that would be on a receipt
# Each line item has an id, sku, price, quantity, and transaction_id
class LineItem(SQLModel, table=True):
id: str = Field(default=None, primary_key=True)
sku: str = Field(foreign_key="product.sku")
price: float
quantity: int
transaction_id: int = Field(foreign_key="transaction.id")
# Each transaction has an id, store_id, date, and total
class Transaction(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
store_id: str = Field(foreign_key="store.id")
date: datetime
total: float
# TODO extract real sales data, transform it, and load into db
# for now random data in the correct format will do.
def run(engine):
    # Allow arbitrary types for Pydantic validation
BaseConfig.arbitrary_types_allowed = True
# Create a fake data generator
fake = Faker()
# Create a session to interact with the database
with Session(engine) as session:
# Get all of the store ids
        store_ids = session.exec(text('SELECT id FROM store')).fetchall()  # raw SQL must be wrapped in text()
logging.debug(store_ids)
# Get all of the products
        products = session.exec(text('SELECT * FROM product')).fetchall()
        logging.debug([p.name for p in products])
        # Define the lists of transactions and sales
        # (typing.List cannot be instantiated, so plain list literals are used)
        transactions: List[Transaction] = []
        sales: List[LineItem] = []
# generate 100k random transactions
for i in range(0, 100000):
# lineitems is a temp list to hold the line items for this transaction
            lineitems: List[LineItem] = []
# temp_products is a temp copy of the products list to prevent picking the same product twice in the same transaction
            # [:] is used to make a copy and not a reference
temp_products = products[:]
# shuffle the temp products list to make it random
random.shuffle(temp_products)
            # add a random amount of line items to the transaction (no more than the total amount of products to prevent index out of range)
for j in range(0, random.randint(0, len(products)-1)):
# pick the next product from the temp products list and remove it from the list (pop)
p = temp_products.pop()
# create a new line item with the current transaction id, product p, and a random quantity
lineitems.append(LineItem(transaction_id=i, id=shortuuid.uuid(), price=p.price, quantity=random.randint(1, 10), sku=p.sku))
# add the line items for this transaction to the sales list
sales.extend(lineitems)
# create a new transaction with a random store id (from the list of store ids), date, and total
transactions.append(Transaction(
store_id=random.choice(store_ids)[0],
date=fake.date_time_between(start_date="-1y", end_date="now"),
total=sum(item.price * item.quantity for item in lineitems),
id=i
))
# insert the transactions into the database
session.add_all(transactions)
session.commit()
# insert the sales into the database
session.add_all(sales)
session.commit()
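# Example entry point (a sketch: the SQLite URL is a placeholder, and run()
# expects the store and product tables to be populated beforehand):
if __name__ == "__main__":
    from sqlmodel import create_engine

    run(create_engine("sqlite:///sales.db"))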
|
[
"sqlmodel.Session",
"sqlmodel.Field"
] |
[((374, 411), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (379, 411), False, 'from sqlmodel import Field, SQLModel, Session\n'), ((427, 459), 'sqlmodel.Field', 'Field', ([], {'foreign_key': '"""product.sku"""'}), "(foreign_key='product.sku')\n", (432, 459), False, 'from sqlmodel import Field, SQLModel, Session\n'), ((521, 556), 'sqlmodel.Field', 'Field', ([], {'foreign_key': '"""transaction.id"""'}), "(foreign_key='transaction.id')\n", (526, 556), False, 'from sqlmodel import Field, SQLModel, Session\n'), ((679, 716), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (684, 716), False, 'from sqlmodel import Field, SQLModel, Session\n'), ((737, 766), 'sqlmodel.Field', 'Field', ([], {'foreign_key': '"""store.id"""'}), "(foreign_key='store.id')\n", (742, 766), False, 'from sqlmodel import Field, SQLModel, Session\n'), ((1082, 1089), 'faker.Faker', 'Faker', ([], {}), '()\n', (1087, 1089), False, 'from faker import Faker\n'), ((1153, 1168), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (1160, 1168), False, 'from sqlmodel import Field, SQLModel, Session\n'), ((1293, 1317), 'logging.debug', 'logging.debug', (['store_ids'], {}), '(store_ids)\n', (1306, 1317), False, 'import logging\n'), ((1429, 1468), 'logging.debug', 'logging.debug', (['(p.name for p in products)'], {}), '(p.name for p in products)\n', (1442, 1468), False, 'import logging\n'), ((1542, 1559), 'typing.List', 'List', (['Transaction'], {}), '(Transaction)\n', (1546, 1559), False, 'from typing import List, Optional\n'), ((1576, 1590), 'typing.List', 'List', (['LineItem'], {}), '(LineItem)\n', (1580, 1590), False, 'from typing import List, Optional\n'), ((1777, 1791), 'typing.List', 'List', (['LineItem'], {}), '(LineItem)\n', (1781, 1791), False, 'from typing import List, Optional\n'), ((2093, 2122), 'random.shuffle', 'random.shuffle', (['temp_products'], {}), '(temp_products)\n', (2107, 2122), False, 'import random\n'), ((2642, 2658), 'shortuuid.uuid', 'shortuuid.uuid', ([], {}), '()\n', (2656, 2658), False, 'import shortuuid\n'), ((2684, 2705), 'random.randint', 'random.randint', (['(1)', '(10)'], {}), '(1, 10)\n', (2698, 2705), False, 'import random\n'), ((3005, 3029), 'random.choice', 'random.choice', (['store_ids'], {}), '(store_ids)\n', (3018, 3029), False, 'import random\n')]
|
"""Patron CRUD controller."""
from typing import Any, Dict
import sqlmodel
from sqlmodel.ext.asyncio import session as aio_session
from app.core import security
from app.crud import base
from app.models import patron
class PatronCRUD(base.BaseCRUD[patron.Patron, patron.PatronCreate,
patron.PatronUpdate]):
"""CRUD controller for patrons.
It contains Create, Read, Update, and Delete methods and additional
methods for authentication and read by username.
"""
@classmethod
async def update(
cls, session: aio_session.AsyncSession, *, model_db: patron.Patron,
model_in: patron.PatronUpdate | Dict[str, Any]) -> patron.Patron:
"""Updates a patron.
Args:
session: The database session.
            model_db: The current patron's data.
            model_in: The updated patron's data, as a model or a plain dict.
Returns:
The updated patron.
"""
if isinstance(model_in, dict):
update_data = model_in
else:
update_data = model_in.dict(exclude_unset=True)
if update_data.get("password"):
hashed_password = security.get_password_hash(
update_data["password"])
del update_data["password"]
update_data["hashed_password"] = hashed_password
return await super().update(session,
model_db=model_db,
model_in=update_data)
@classmethod
async def get_by_username(cls, session: aio_session.AsyncSession,
username: str) -> patron.Patron | None:
"""Gets a patron by their username.
Args:
session: The database session.
username: The patron's username.
Returns:
            The patron with the given username, or None if no such patron exists.
"""
patrons = await session.exec(
sqlmodel.select(
patron.Patron).where(patron.Patron.username == username))
return patrons.first()
@classmethod
async def authenticate(cls, session: aio_session.AsyncSession, *,
                           username: str, password: str) -> patron.Patron | None:
"""Authenticates the patron with given username and password.
Args:
session: The database session.
username: The patron's username.
password: The <PASSWORD>.
Returns:
            The authenticated patron, or None if the username or password is invalid.
"""
user = await PatronCRUD.get_by_username(session, username)
if not user:
return None
if not security.verify_password(password, user.hashed_password):
return None
return user
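# Usage sketch, assuming an AsyncSession bound to the application engine:
#
#     patron_ = await PatronCRUD.authenticate(
#         session, username="alice", password="secret")
#     if patron_ is None:
#         ...  # reject the login attempt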
|
[
"sqlmodel.select"
] |
[((1179, 1230), 'app.core.security.get_password_hash', 'security.get_password_hash', (["update_data['password']"], {}), "(update_data['password'])\n", (1205, 1230), False, 'from app.core import security\n'), ((2634, 2690), 'app.core.security.verify_password', 'security.verify_password', (['password', 'user.hashed_password'], {}), '(password, user.hashed_password)\n', (2658, 2690), False, 'from app.core import security\n'), ((1941, 1971), 'sqlmodel.select', 'sqlmodel.select', (['patron.Patron'], {}), '(patron.Patron)\n', (1956, 1971), False, 'import sqlmodel\n')]
|
"""init_db
Revision ID: 23799b5136c5
Revises:
Create Date: 2021-12-11 00:49:58.116933
"""
from alembic import op
import sqlalchemy as sa
import sqlmodel
# revision identifiers, used by Alembic.
revision = '23799b5136c5'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('user',
sa.Column('id', sa.Integer(), nullable=True),
sa.Column('full_name', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('email', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('hashed_password', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('is_active', sa.Boolean(), nullable=True),
sa.Column('is_superuser', sa.Boolean(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_user_email'), 'user', ['email'], unique=False)
op.create_index(op.f('ix_user_full_name'), 'user', ['full_name'], unique=False)
op.create_index(op.f('ix_user_hashed_password'), 'user', ['hashed_password'], unique=False)
op.create_index(op.f('ix_user_id'), 'user', ['id'], unique=False)
op.create_index(op.f('ix_user_is_active'), 'user', ['is_active'], unique=False)
op.create_index(op.f('ix_user_is_superuser'), 'user', ['is_superuser'], unique=False)
op.create_table('task',
sa.Column('status', sa.Enum('draft', 'in_process', 'delete', 'done', name='taskstatus'), nullable=True),
sa.Column('id', sa.Integer(), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('title', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_task_created_at'), 'task', ['created_at'], unique=False)
op.create_index(op.f('ix_task_id'), 'task', ['id'], unique=False)
op.create_index(op.f('ix_task_title'), 'task', ['title'], unique=False)
op.create_index(op.f('ix_task_user_id'), 'task', ['user_id'], unique=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_task_user_id'), table_name='task')
op.drop_index(op.f('ix_task_title'), table_name='task')
op.drop_index(op.f('ix_task_id'), table_name='task')
op.drop_index(op.f('ix_task_created_at'), table_name='task')
op.drop_table('task')
op.drop_index(op.f('ix_user_is_superuser'), table_name='user')
op.drop_index(op.f('ix_user_is_active'), table_name='user')
op.drop_index(op.f('ix_user_id'), table_name='user')
op.drop_index(op.f('ix_user_hashed_password'), table_name='user')
op.drop_index(op.f('ix_user_full_name'), table_name='user')
op.drop_index(op.f('ix_user_email'), table_name='user')
op.drop_table('user')
# ### end Alembic commands ###
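# Typical invocations: apply this revision with
#   alembic upgrade 23799b5136c5    (or: alembic upgrade head)
# and roll it back with
#   alembic downgrade -1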
|
[
"sqlmodel.sql.sqltypes.AutoString"
] |
[((2498, 2519), 'alembic.op.drop_table', 'op.drop_table', (['"""task"""'], {}), "('task')\n", (2511, 2519), False, 'from alembic import op\n'), ((2906, 2927), 'alembic.op.drop_table', 'op.drop_table', (['"""user"""'], {}), "('user')\n", (2919, 2927), False, 'from alembic import op\n'), ((808, 837), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""'], {}), "('id')\n", (831, 837), True, 'import sqlalchemy as sa\n'), ((864, 885), 'alembic.op.f', 'op.f', (['"""ix_user_email"""'], {}), "('ix_user_email')\n", (868, 885), False, 'from alembic import op\n'), ((940, 965), 'alembic.op.f', 'op.f', (['"""ix_user_full_name"""'], {}), "('ix_user_full_name')\n", (944, 965), False, 'from alembic import op\n'), ((1024, 1055), 'alembic.op.f', 'op.f', (['"""ix_user_hashed_password"""'], {}), "('ix_user_hashed_password')\n", (1028, 1055), False, 'from alembic import op\n'), ((1120, 1138), 'alembic.op.f', 'op.f', (['"""ix_user_id"""'], {}), "('ix_user_id')\n", (1124, 1138), False, 'from alembic import op\n'), ((1190, 1215), 'alembic.op.f', 'op.f', (['"""ix_user_is_active"""'], {}), "('ix_user_is_active')\n", (1194, 1215), False, 'from alembic import op\n'), ((1274, 1302), 'alembic.op.f', 'op.f', (['"""ix_user_is_superuser"""'], {}), "('ix_user_is_superuser')\n", (1278, 1302), False, 'from alembic import op\n'), ((1725, 1774), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['user_id']", "['user.id']"], {}), "(['user_id'], ['user.id'])\n", (1748, 1774), True, 'import sqlalchemy as sa\n'), ((1782, 1811), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""'], {}), "('id')\n", (1805, 1811), True, 'import sqlalchemy as sa\n'), ((1838, 1864), 'alembic.op.f', 'op.f', (['"""ix_task_created_at"""'], {}), "('ix_task_created_at')\n", (1842, 1864), False, 'from alembic import op\n'), ((1924, 1942), 'alembic.op.f', 'op.f', (['"""ix_task_id"""'], {}), "('ix_task_id')\n", (1928, 1942), False, 'from alembic import op\n'), ((1994, 2015), 'alembic.op.f', 'op.f', (['"""ix_task_title"""'], {}), "('ix_task_title')\n", (1998, 2015), False, 'from alembic import op\n'), ((2070, 2093), 'alembic.op.f', 'op.f', (['"""ix_task_user_id"""'], {}), "('ix_task_user_id')\n", (2074, 2093), False, 'from alembic import op\n'), ((2268, 2291), 'alembic.op.f', 'op.f', (['"""ix_task_user_id"""'], {}), "('ix_task_user_id')\n", (2272, 2291), False, 'from alembic import op\n'), ((2330, 2351), 'alembic.op.f', 'op.f', (['"""ix_task_title"""'], {}), "('ix_task_title')\n", (2334, 2351), False, 'from alembic import op\n'), ((2390, 2408), 'alembic.op.f', 'op.f', (['"""ix_task_id"""'], {}), "('ix_task_id')\n", (2394, 2408), False, 'from alembic import op\n'), ((2447, 2473), 'alembic.op.f', 'op.f', (['"""ix_task_created_at"""'], {}), "('ix_task_created_at')\n", (2451, 2473), False, 'from alembic import op\n'), ((2538, 2566), 'alembic.op.f', 'op.f', (['"""ix_user_is_superuser"""'], {}), "('ix_user_is_superuser')\n", (2542, 2566), False, 'from alembic import op\n'), ((2605, 2630), 'alembic.op.f', 'op.f', (['"""ix_user_is_active"""'], {}), "('ix_user_is_active')\n", (2609, 2630), False, 'from alembic import op\n'), ((2669, 2687), 'alembic.op.f', 'op.f', (['"""ix_user_id"""'], {}), "('ix_user_id')\n", (2673, 2687), False, 'from alembic import op\n'), ((2726, 2757), 'alembic.op.f', 'op.f', (['"""ix_user_hashed_password"""'], {}), "('ix_user_hashed_password')\n", (2730, 2757), False, 'from alembic import op\n'), ((2796, 2821), 'alembic.op.f', 'op.f', (['"""ix_user_full_name"""'], {}), 
"('ix_user_full_name')\n", (2800, 2821), False, 'from alembic import op\n'), ((2860, 2881), 'alembic.op.f', 'op.f', (['"""ix_user_email"""'], {}), "('ix_user_email')\n", (2864, 2881), False, 'from alembic import op\n'), ((416, 428), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (426, 428), True, 'import sqlalchemy as sa\n'), ((473, 507), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (505, 507), False, 'import sqlmodel\n'), ((549, 583), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (581, 583), False, 'import sqlmodel\n'), ((634, 668), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (666, 668), False, 'import sqlmodel\n'), ((714, 726), 'sqlalchemy.Boolean', 'sa.Boolean', ([], {}), '()\n', (724, 726), True, 'import sqlalchemy as sa\n'), ((774, 786), 'sqlalchemy.Boolean', 'sa.Boolean', ([], {}), '()\n', (784, 786), True, 'import sqlalchemy as sa\n'), ((1396, 1463), 'sqlalchemy.Enum', 'sa.Enum', (['"""draft"""', '"""in_process"""', '"""delete"""', '"""done"""'], {'name': '"""taskstatus"""'}), "('draft', 'in_process', 'delete', 'done', name='taskstatus')\n", (1403, 1463), True, 'import sqlalchemy as sa\n'), ((1501, 1513), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (1511, 1513), True, 'import sqlalchemy as sa\n'), ((1559, 1572), 'sqlalchemy.DateTime', 'sa.DateTime', ([], {}), '()\n', (1570, 1572), True, 'import sqlalchemy as sa\n'), ((1613, 1647), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (1645, 1647), False, 'import sqlmodel\n'), ((1691, 1703), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (1701, 1703), True, 'import sqlalchemy as sa\n')]
|
import uuid
from datetime import datetime
from sqlmodel import Field
from api.db.models.base import BaseModel, BaseTable
class IssueCredentialBase(BaseModel):
tenant_id: uuid.UUID = Field(nullable=False)
wallet_id: uuid.UUID = Field(nullable=False)
connection_id: uuid.UUID = Field(nullable=False)
cred_type: str = Field(nullable=False)
cred_protocol: str = Field(nullable=False)
cred_def_id: str = Field(nullable=True, default=None)
credential: str = Field(nullable=False)
issue_role: str = Field(nullable=False)
issue_state: str = Field(nullable=False)
    # workflow_id will be null until the tenant kicks it off
workflow_id: uuid.UUID = Field(nullable=True, default=None)
cred_exch_id: uuid.UUID = Field(nullable=True, default=None)
rev_reg_id: str = Field(nullable=True, default=None)
cred_rev_id: str = Field(nullable=True, default=None)
class IssueCredential(IssueCredentialBase, BaseTable, table=True):
# This is the class that represents the table
pass
class IssueCredentialCreate(IssueCredentialBase):
# This is the class that represents interface for creating a tenant
# we must set all the required fields,
# but do not need to set optional (and shouldn't)
pass
class IssueCredentialRead(IssueCredentialBase):
# This is the class that represents interface for reading a tenant
# here we indicate id, created_at and updated_at must be included
id: uuid.UUID
created_at: datetime
updated_at: datetime
class IssueCredentialUpdate(BaseModel):
# This is our update interface
# This does NOT inherit from IssueCredentialBase,
# so no need to worry about accidentally updating id or other fields
id: uuid.UUID
issue_state: str = Field(nullable=False)
workflow_id: uuid.UUID = Field(nullable=True, default=None)
cred_exch_id: uuid.UUID = Field(nullable=True, default=None)
rev_reg_id: str = Field(nullable=True, default=None)
cred_rev_id: str = Field(nullable=True, default=None)
|
[
"sqlmodel.Field"
] |
[((190, 211), 'sqlmodel.Field', 'Field', ([], {'nullable': '(False)'}), '(nullable=False)\n', (195, 211), False, 'from sqlmodel import Field\n'), ((239, 260), 'sqlmodel.Field', 'Field', ([], {'nullable': '(False)'}), '(nullable=False)\n', (244, 260), False, 'from sqlmodel import Field\n'), ((292, 313), 'sqlmodel.Field', 'Field', ([], {'nullable': '(False)'}), '(nullable=False)\n', (297, 313), False, 'from sqlmodel import Field\n'), ((335, 356), 'sqlmodel.Field', 'Field', ([], {'nullable': '(False)'}), '(nullable=False)\n', (340, 356), False, 'from sqlmodel import Field\n'), ((382, 403), 'sqlmodel.Field', 'Field', ([], {'nullable': '(False)'}), '(nullable=False)\n', (387, 403), False, 'from sqlmodel import Field\n'), ((427, 461), 'sqlmodel.Field', 'Field', ([], {'nullable': '(True)', 'default': 'None'}), '(nullable=True, default=None)\n', (432, 461), False, 'from sqlmodel import Field\n'), ((484, 505), 'sqlmodel.Field', 'Field', ([], {'nullable': '(False)'}), '(nullable=False)\n', (489, 505), False, 'from sqlmodel import Field\n'), ((528, 549), 'sqlmodel.Field', 'Field', ([], {'nullable': '(False)'}), '(nullable=False)\n', (533, 549), False, 'from sqlmodel import Field\n'), ((573, 594), 'sqlmodel.Field', 'Field', ([], {'nullable': '(False)'}), '(nullable=False)\n', (578, 594), False, 'from sqlmodel import Field\n'), ((684, 718), 'sqlmodel.Field', 'Field', ([], {'nullable': '(True)', 'default': 'None'}), '(nullable=True, default=None)\n', (689, 718), False, 'from sqlmodel import Field\n'), ((749, 783), 'sqlmodel.Field', 'Field', ([], {'nullable': '(True)', 'default': 'None'}), '(nullable=True, default=None)\n', (754, 783), False, 'from sqlmodel import Field\n'), ((806, 840), 'sqlmodel.Field', 'Field', ([], {'nullable': '(True)', 'default': 'None'}), '(nullable=True, default=None)\n', (811, 840), False, 'from sqlmodel import Field\n'), ((864, 898), 'sqlmodel.Field', 'Field', ([], {'nullable': '(True)', 'default': 'None'}), '(nullable=True, default=None)\n', (869, 898), False, 'from sqlmodel import Field\n'), ((1761, 1782), 'sqlmodel.Field', 'Field', ([], {'nullable': '(False)'}), '(nullable=False)\n', (1766, 1782), False, 'from sqlmodel import Field\n'), ((1812, 1846), 'sqlmodel.Field', 'Field', ([], {'nullable': '(True)', 'default': 'None'}), '(nullable=True, default=None)\n', (1817, 1846), False, 'from sqlmodel import Field\n'), ((1877, 1911), 'sqlmodel.Field', 'Field', ([], {'nullable': '(True)', 'default': 'None'}), '(nullable=True, default=None)\n', (1882, 1911), False, 'from sqlmodel import Field\n'), ((1934, 1968), 'sqlmodel.Field', 'Field', ([], {'nullable': '(True)', 'default': 'None'}), '(nullable=True, default=None)\n', (1939, 1968), False, 'from sqlmodel import Field\n'), ((1992, 2026), 'sqlmodel.Field', 'Field', ([], {'nullable': '(True)', 'default': 'None'}), '(nullable=True, default=None)\n', (1997, 2026), False, 'from sqlmodel import Field\n')]
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import numpy as np
import yaml
from megengine import jit
from megengine.module.external import ExternOprSubgraph
# "1,3,224,224" -> (1,3,224,224)
def str2tuple(x):
x = x.split(",")
x = [int(a) for a in x]
x = tuple(x)
return x
def main():
parser = argparse.ArgumentParser(
description="load a .pb model and convert to corresponding "
"load-and-run model"
)
parser.add_argument("input", help="mace model file")
parser.add_argument("param", help="mace param file")
parser.add_argument(
"output", help="converted model that can be fed to dump_with_testcase_mge.py"
)
parser.add_argument("config", help="config file with yaml format")
args = parser.parse_args()
with open(args.config, "r") as f:
        configs = yaml.load(f, Loader=yaml.SafeLoader)  # newer PyYAML requires an explicit Loader
for model_name in configs["models"]:
# ignore several sub models currently
sub_model = configs["models"][model_name]["subgraphs"][0]
# input/output shapes
isizes = [str2tuple(x) for x in sub_model["input_shapes"]]
# input/output names
input_names = sub_model["input_tensors"]
if "check_tensors" in sub_model:
output_names = sub_model["check_tensors"]
osizes = [str2tuple(x) for x in sub_model["check_shapes"]]
else:
output_names = sub_model["output_tensors"]
osizes = [str2tuple(x) for x in sub_model["output_shapes"]]
with open(args.input, "rb") as fin:
raw_model = fin.read()
with open(args.param, "rb") as fin:
raw_param = fin.read()
model_size = (len(raw_model)).to_bytes(4, byteorder="little")
param_size = (len(raw_param)).to_bytes(4, byteorder="little")
n_inputs = (len(input_names)).to_bytes(4, byteorder="little")
n_outputs = (len(output_names)).to_bytes(4, byteorder="little")
names_buffer = n_inputs + n_outputs
for iname in input_names:
names_buffer += (len(iname)).to_bytes(4, byteorder="little")
names_buffer += str.encode(iname)
for oname in output_names:
names_buffer += (len(oname)).to_bytes(4, byteorder="little")
names_buffer += str.encode(oname)
shapes_buffer = n_outputs
for oshape in osizes:
shapes_buffer += (len(oshape)).to_bytes(4, byteorder="little")
for oi in oshape:
shapes_buffer += oi.to_bytes(4, byteorder="little")
# raw content contains:
# input/output names + output shapes + model buffer + param buffer
wk_raw_content = (
names_buffer
+ shapes_buffer
+ model_size
+ raw_model
+ param_size
+ raw_param
)
net = ExternOprSubgraph(wk_raw_content, "mace", osizes)
net.eval()
@jit.trace(symbolic=True)
def inference(inputs):
return net(inputs)
inputs = [
np.random.random(isizes[i]).astype(np.float32) for i in range(len(isizes))
]
inference.trace(*inputs)
inference.dump(args.output)
if __name__ == "__main__":
main()
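# Example invocation (the script and file names are placeholders):
#   python3 convert_mace.py model.pb model.data converted.out config.yml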
|
[
"megengine.jit.trace",
"megengine.module.external.ExternOprSubgraph"
] |
[((666, 774), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""load a .pb model and convert to corresponding load-and-run model"""'}), "(description=\n 'load a .pb model and convert to corresponding load-and-run model')\n", (689, 774), False, 'import argparse\n'), ((1185, 1197), 'yaml.load', 'yaml.load', (['f'], {}), '(f)\n', (1194, 1197), False, 'import yaml\n'), ((3180, 3229), 'megengine.module.external.ExternOprSubgraph', 'ExternOprSubgraph', (['wk_raw_content', '"""mace"""', 'osizes'], {}), "(wk_raw_content, 'mace', osizes)\n", (3197, 3229), False, 'from megengine.module.external import ExternOprSubgraph\n'), ((3259, 3283), 'megengine.jit.trace', 'jit.trace', ([], {'symbolic': '(True)'}), '(symbolic=True)\n', (3268, 3283), False, 'from megengine import jit\n'), ((3378, 3405), 'numpy.random.random', 'np.random.random', (['isizes[i]'], {}), '(isizes[i])\n', (3394, 3405), True, 'import numpy as np\n')]
|
from sqlmodel import Field
from taskana_api.entities.tasks import TaskBase
class Task(TaskBase, table=True):
id: int = Field(primary_key=True)
|
[
"sqlmodel.Field"
] |
[((126, 149), 'sqlmodel.Field', 'Field', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (131, 149), False, 'from sqlmodel import Field\n')]
|
from datetime import date, datetime
from typing import Any, Dict, Optional
from uuid import UUID, uuid4
from pydantic.class_validators import root_validator
from sqlmodel import Column, Enum, Field, SQLModel
from sqlmodel.sql.sqltypes import GUID
from ...utils.date import now_datetime
from ..constants import OperationType, PaymentType, SaleType
class BaseBalance(SQLModel):
value: float = Field(description="Value of operation")
operation: OperationType = Field(
description="Type of operation", sa_column=Column(Enum(OperationType), nullable=False)
)
description: str = Field(description="Description of operation", min_length=1)
created_at: datetime = Field(default_factory=now_datetime)
class CreateBalance(BaseBalance):
@root_validator()
    def normalize_value(cls, values: Dict[str, Any]) -> Dict[str, Any]:
operation_type = values.get("operation")
value = values.get("value")
if not operation_type or not value:
return values
if any(operation_type.name == payment_type.name for payment_type in PaymentType) and value > 0:
values["value"] = value * -1
if any(operation_type.name == sale_type.name for sale_type in SaleType) and value < 0:
values["value"] = value * -1
return values
class QueryBalance(SQLModel):
start_date: Optional[date] = Field(description="Initial date for query")
end_date: Optional[date] = Field(description="End date for query")
class Balance(BaseBalance, table=True):
id: UUID = Field(default_factory=uuid4, sa_column=Column("id", GUID(), primary_key=True))
owner_id: UUID = Field(description="User ID that owns the balance", foreign_key="users.id")
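# Sketch of the validator's effect (assuming OperationType shares member names
# with PaymentType and SaleType): a payment created with value=10.0 is stored
# as value == -10.0, and a sale created with a negative value is flipped to
# positive.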
|
[
"sqlmodel.sql.sqltypes.GUID",
"sqlmodel.Enum",
"sqlmodel.Field"
] |
[((399, 438), 'sqlmodel.Field', 'Field', ([], {'description': '"""Value of operation"""'}), "(description='Value of operation')\n", (404, 438), False, 'from sqlmodel import Column, Enum, Field, SQLModel\n'), ((601, 660), 'sqlmodel.Field', 'Field', ([], {'description': '"""Description of operation"""', 'min_length': '(1)'}), "(description='Description of operation', min_length=1)\n", (606, 660), False, 'from sqlmodel import Column, Enum, Field, SQLModel\n'), ((688, 723), 'sqlmodel.Field', 'Field', ([], {'default_factory': 'now_datetime'}), '(default_factory=now_datetime)\n', (693, 723), False, 'from sqlmodel import Column, Enum, Field, SQLModel\n'), ((765, 781), 'pydantic.class_validators.root_validator', 'root_validator', ([], {}), '()\n', (779, 781), False, 'from pydantic.class_validators import root_validator\n'), ((1372, 1415), 'sqlmodel.Field', 'Field', ([], {'description': '"""Initial date for query"""'}), "(description='Initial date for query')\n", (1377, 1415), False, 'from sqlmodel import Column, Enum, Field, SQLModel\n'), ((1447, 1486), 'sqlmodel.Field', 'Field', ([], {'description': '"""End date for query"""'}), "(description='End date for query')\n", (1452, 1486), False, 'from sqlmodel import Column, Enum, Field, SQLModel\n'), ((1644, 1718), 'sqlmodel.Field', 'Field', ([], {'description': '"""User ID that owns the balance"""', 'foreign_key': '"""users.id"""'}), "(description='User ID that owns the balance', foreign_key='users.id')\n", (1649, 1718), False, 'from sqlmodel import Column, Enum, Field, SQLModel\n'), ((535, 554), 'sqlmodel.Enum', 'Enum', (['OperationType'], {}), '(OperationType)\n', (539, 554), False, 'from sqlmodel import Column, Enum, Field, SQLModel\n'), ((1596, 1602), 'sqlmodel.sql.sqltypes.GUID', 'GUID', ([], {}), '()\n', (1600, 1602), False, 'from sqlmodel.sql.sqltypes import GUID\n')]
|
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine.core.tensor import dtype
from megengine.device import get_cuda_compute_capability, get_device_count
from megengine.functional.elemwise import _elemwise_multi_type, _elwise
from megengine.module.quantized.conv import ConvTranspose2d
from megengine.quantization import QuantMode, create_qparams
def quant(x, scale):
x_dtype = dtype.qint8(scale)
return x.astype(x_dtype)
def fake_quant(x, scale):
x = x / scale
x = F.round(x)
x = F.clip(x, -128, 127)
x = x * scale
return x
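# quant() returns a real qint8 tensor, while fake_quant() keeps float32 values
# but snaps them onto the same int8 grid, scale * clip(round(x / scale), -128, 127),
# so the quantized and fake-quantized paths are directly comparable below.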
@pytest.mark.parametrize("kind", ["abs", "sin", "sub", "mul", "fuse_add_tanh"])
def test_elemwise(kind):
x1 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x1_scale = np.float32(np.random.rand() + 1)
x1 = fake_quant(x1, x1_scale)
x1.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x1_scale))
x1_int8 = quant(x1, x1_scale)
x2 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x2_scale = np.float32(np.random.rand() + 1)
x2 = fake_quant(x2, x2_scale)
x2.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x2_scale))
x2_int8 = quant(x2, x2_scale)
output_scale = np.float32(np.random.rand() + 1)
output_dtype = dtype.qint8(output_scale)
quantized_kind = "q" + kind
if kind in ("abs", "sin"):
desired_out = fake_quant(_elwise(x1, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
else:
desired_out = fake_quant(_elwise(x1, x2, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, x2_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
np.testing.assert_allclose(actual_out, desired_out.numpy())
@pytest.mark.skipif(
get_device_count("gpu") > 0, reason="cuda does not support nchw int8"
)
def test_conv_bias():
inp_scale = np.float32(np.random.rand() + 1)
w_scale = np.float32(np.random.rand() + 1)
outp_scale = np.float32(np.random.rand() + 1)
inp_dtype = dtype.qint8(inp_scale)
w_dtype = dtype.qint8(w_scale)
b_dtype = dtype.qint32(inp_scale * w_scale)
out_dtype = dtype.qint8(outp_scale)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="identity",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = dtype.get_scale(inp_dtype)
w_scale = dtype.get_scale(w_dtype)
b_scale = dtype.get_scale(b_dtype)
inpv = dtype.convert_to_qint8(inp_v * inp_scale, inp_dtype)
wv = dtype.convert_to_qint8(w_v * w_scale, w_dtype)
bv = dtype.convert_to_qint32(b_v * b_scale, b_dtype)
inp_int8 = mge.tensor(inpv, dtype=inp_dtype)
w_int8 = mge.Parameter(wv, dtype=w_dtype)
b_int32 = mge.Parameter(bv, dtype=b_dtype)
inp_fp32 = inp_int8.astype("float32")
w_fp32 = w_int8.astype("float32")
b_fp32 = b_int32.astype("float32")
def convert_to_nchw4(var):
var = F.reshape(
var, (var.shape[0], var.shape[1] // 4, 4, var.shape[2], var.shape[3])
)
var = F.transpose(var, (0, 1, 3, 4, 2))
return var
def run_conv2d(inp, w, b):
O = F.conv2d(
inp, w, b if has_bias else None, stride=(SH, SW), padding=(PH, PW),
)
if nonlinear_mode == "relu":
return F.relu(O)
else:
return O
def run_conv_bias(inp, w, b, format="NCHW"):
b = b if has_bias else mge.Parameter(np.zeros_like(b.numpy()))
if format == "NCHW4":
inp = convert_to_nchw4(inp)
w = convert_to_nchw4(w)
b = convert_to_nchw4(b)
return F.quantized.conv_bias_activation(
inp,
w,
b,
stride=(SH, SW),
padding=(PH, PW),
dtype=out_dtype,
nonlinear_mode=nonlinear_mode,
)
format = "NCHW4" if mge.is_cuda_available() else "NCHW"
expected = run_conv2d(inp_fp32, w_fp32, b_fp32)
expected = expected.astype(out_dtype).astype("float32")
result = run_conv_bias(inp_int8, w_int8, b_int32, format=format).astype(
"float32"
)
if format == "NCHW4":
result = F.transpose(result, (0, 1, 4, 2, 3))
expected = F.flatten(expected)
result = F.flatten(result)
np.testing.assert_allclose(result.numpy(), expected.numpy(), atol=outp_scale)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1, False)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1, False)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False, "relu")
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, True, "relu")
@pytest.mark.skip(reason="does not support int4 when cuda version is lower than 10.2")
def test_conv_bias_int4():
inp_scale = 1.5
w_scale = 2.5
outp_scale = 1.5
inp_dtype = dtype.quint4(inp_scale, 0)
w_dtype = dtype.qint4(w_scale)
b_dtype = dtype.qint32(inp_scale * w_scale)
out_dtype = dtype.quint4(outp_scale, 0)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="identity",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = dtype.get_scale(inp_dtype)
w_scale = dtype.get_scale(w_dtype)
b_scale = dtype.get_scale(b_dtype)
inpv = dtype.convert_to_quint4(inp_v * inp_scale, inp_dtype)
wv = dtype.convert_to_qint4(w_v * w_scale, w_dtype)
bv = dtype.convert_to_qint32(b_v * b_scale, b_dtype)
inp_uint4 = mge.Tensor(inpv, dtype=inp_dtype)
w_int4 = mge.Parameter(wv, dtype=w_dtype)
b_int32 = mge.Parameter(bv, dtype=b_dtype)
inp_fp32 = inp_uint4.astype("float32")
w_fp32 = w_int4.astype("float32")
b_fp32 = b_int32.astype("float32")
def run_conv2d(inp, w, b):
O = F.conv2d(
inp, w, b if has_bias else None, stride=(SH, SW), padding=(PH, PW),
)
if nonlinear_mode == "relu":
return F.relu(O)
else:
return O
def run_conv_bias(inp, w, b):
b = b if has_bias else mge.Parameter(np.zeros_like(b.numpy()))
return F.quantized.conv_bias_activation(
inp,
w,
b,
stride=(SH, SW),
padding=(PH, PW),
dtype=out_dtype,
nonlinear_mode=nonlinear_mode,
)
expected = run_conv2d(inp_fp32, w_fp32, b_fp32)
expected = expected.astype(out_dtype).astype("float32")
result = run_conv_bias(inp_uint4, w_int4, b_int32).astype("float32")
expected = F.flatten(expected)
result = F.flatten(result)
np.testing.assert_allclose(result.numpy(), expected.numpy(), atol=outp_scale)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1, False)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1, False)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False, "relu")
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, True, "relu")
@pytest.mark.skipif(
get_device_count("gpu") > 0 and get_cuda_compute_capability(0) < 61,
reason="does not support int8 when gpu compute capability less than 6.1",
)
def test_conv_transpose2d():
rng = np.random.RandomState(seed=2021)
def test_func(
N,
IC,
IH,
IW,
OC,
KH,
KW,
SH,
SW,
PH,
PW,
DH,
DW,
groups=1,
has_bias=True,
conv_mode: str = "cross_correlation",
compute_mode: str = "default",
):
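        # Build a float32 conv_transpose2d reference, requantize it, and
        # compare it with the quantized ConvTranspose2d module on int8 inputs.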
inp_scale = np.float32(rng.uniform(low=0.04, high=0.06))
weight_scale = np.float32(rng.uniform(low=0.04, high=0.06))
bias_scale = inp_scale * weight_scale
out_scale = np.float32(rng.uniform(low=0.04, high=0.06))
inp_dtype = dtype.qint8(inp_scale)
weight_dtype = dtype.qint8(weight_scale)
bias_dtype = dtype.qint32(bias_scale)
out_dtype = dtype.qint8(out_scale)
inp_fp32 = rng.uniform(low=-1, high=1, size=(N, IC, IH, IW)).astype(np.float32)
weight_fp32 = rng.uniform(low=-1, high=1, size=(IC, OC, KH, KW)).astype(
np.float32
)
bias_fp32 = rng.uniform(low=-1, high=1, size=(1, OC, 1, 1)).astype(np.float32)
inp_int8 = dtype.convert_to_qint8(inp_fp32, inp_dtype)
weight_int8 = dtype.convert_to_qint8(weight_fp32, weight_dtype)
bias_int32 = dtype.convert_to_qint32(bias_fp32, bias_dtype)
inp_int8 = mge.tensor(inp_int8, dtype=inp_dtype)
weight_int8 = mge.Parameter(weight_int8, dtype=weight_dtype)
bias_int32 = mge.Parameter(bias_int32, dtype=bias_dtype)
inp_fp32 = inp_int8.astype("float32")
weight_fp32 = weight_int8.astype("float32")
bias_fp32 = bias_int32.astype("float32")
expected = F.conv_transpose2d(
inp_fp32,
weight_fp32,
bias_fp32 if has_bias else None,
stride=(SH, SW),
padding=(PH, PW),
dilation=(DH, DW),
groups=groups,
conv_mode=conv_mode,
compute_mode=compute_mode,
)
expected = dtype.convert_to_qint8(expected.numpy(), out_dtype)
expected = dtype.convert_from_qint8(expected)
conv_transpose2d = ConvTranspose2d(
in_channels=IC,
out_channels=OC,
kernel_size=(KH, KW),
stride=(SH, SW),
padding=(PH, PW),
dilation=(DH, DW),
groups=groups,
bias=has_bias,
conv_mode=conv_mode,
compute_mode=compute_mode,
dtype=out_dtype,
)
conv_transpose2d.weight = mge.Parameter(weight_int8)
if has_bias:
conv_transpose2d.bias = mge.Parameter(bias_int32)
result = conv_transpose2d.forward(inp_int8).numpy()
result = dtype.convert_from_qint8(result)
np.testing.assert_allclose(result, expected, atol=out_scale)
test_func(1, 4, 1, 1, 4, 1, 1, 1, 1, 0, 0, 1, 1, 1, False)
test_func(2, 4, 3, 1, 8, 1, 1, 1, 1, 0, 0, 1, 1, 1, False)
test_func(4, 4, 16, 16, 8, 3, 3, 1, 1, 1, 1, 1, 1, 1, False)
test_func(32, 64, 36, 28, 16, 3, 2, 1, 3, 1, 0, 1, 1, 1, False)
|
[
"megengine.functional.conv2d",
"megengine.functional.reshape",
"megengine.core.tensor.dtype.convert_to_qint4",
"megengine.functional.transpose",
"megengine.core.tensor.dtype.qint32",
"megengine.functional.clip",
"megengine.Tensor",
"megengine.core.tensor.dtype.qint4",
"megengine.device.get_device_count",
"megengine.core.tensor.dtype.quint4",
"megengine.functional.quantized.conv_bias_activation",
"megengine.tensor",
"megengine.functional.elemwise._elwise",
"megengine.core.tensor.dtype.convert_to_qint32",
"megengine.core.tensor.dtype.qint8",
"megengine.functional.elemwise._elemwise_multi_type",
"megengine.is_cuda_available",
"megengine.quantization.create_qparams",
"megengine.functional.relu",
"megengine.core.tensor.dtype.get_scale",
"megengine.functional.flatten",
"megengine.core.tensor.dtype.convert_from_qint8",
"megengine.device.get_cuda_compute_capability",
"megengine.functional.conv_transpose2d",
"megengine.Parameter",
"megengine.core.tensor.dtype.convert_to_quint4",
"megengine.module.quantized.conv.ConvTranspose2d",
"megengine.functional.round",
"megengine.core.tensor.dtype.convert_to_qint8"
] |
[((966, 1044), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""kind"""', "['abs', 'sin', 'sub', 'mul', 'fuse_add_tanh']"], {}), "('kind', ['abs', 'sin', 'sub', 'mul', 'fuse_add_tanh'])\n", (989, 1044), False, 'import pytest\n'), ((5797, 5887), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""does not support int4 when cuda version is lower than 10.2"""'}), "(reason=\n 'does not support int4 when cuda version is lower than 10.2')\n", (5813, 5887), False, 'import pytest\n'), ((790, 808), 'megengine.core.tensor.dtype.qint8', 'dtype.qint8', (['scale'], {}), '(scale)\n', (801, 808), False, 'from megengine.core.tensor import dtype\n'), ((892, 902), 'megengine.functional.round', 'F.round', (['x'], {}), '(x)\n', (899, 902), True, 'import megengine.functional as F\n'), ((911, 931), 'megengine.functional.clip', 'F.clip', (['x', '(-128)', '(127)'], {}), '(x, -128, 127)\n', (917, 931), True, 'import megengine.functional as F\n'), ((1669, 1694), 'megengine.core.tensor.dtype.qint8', 'dtype.qint8', (['output_scale'], {}), '(output_scale)\n', (1680, 1694), False, 'from megengine.core.tensor import dtype\n'), ((2633, 2655), 'megengine.core.tensor.dtype.qint8', 'dtype.qint8', (['inp_scale'], {}), '(inp_scale)\n', (2644, 2655), False, 'from megengine.core.tensor import dtype\n'), ((2670, 2690), 'megengine.core.tensor.dtype.qint8', 'dtype.qint8', (['w_scale'], {}), '(w_scale)\n', (2681, 2690), False, 'from megengine.core.tensor import dtype\n'), ((2705, 2738), 'megengine.core.tensor.dtype.qint32', 'dtype.qint32', (['(inp_scale * w_scale)'], {}), '(inp_scale * w_scale)\n', (2717, 2738), False, 'from megengine.core.tensor import dtype\n'), ((2755, 2778), 'megengine.core.tensor.dtype.qint8', 'dtype.qint8', (['outp_scale'], {}), '(outp_scale)\n', (2766, 2778), False, 'from megengine.core.tensor import dtype\n'), ((5985, 6011), 'megengine.core.tensor.dtype.quint4', 'dtype.quint4', (['inp_scale', '(0)'], {}), '(inp_scale, 0)\n', (5997, 6011), False, 'from megengine.core.tensor import dtype\n'), ((6026, 6046), 'megengine.core.tensor.dtype.qint4', 'dtype.qint4', (['w_scale'], {}), '(w_scale)\n', (6037, 6046), False, 'from megengine.core.tensor import dtype\n'), ((6061, 6094), 'megengine.core.tensor.dtype.qint32', 'dtype.qint32', (['(inp_scale * w_scale)'], {}), '(inp_scale * w_scale)\n', (6073, 6094), False, 'from megengine.core.tensor import dtype\n'), ((6111, 6138), 'megengine.core.tensor.dtype.quint4', 'dtype.quint4', (['outp_scale', '(0)'], {}), '(outp_scale, 0)\n', (6123, 6138), False, 'from megengine.core.tensor import dtype\n'), ((8770, 8802), 'numpy.random.RandomState', 'np.random.RandomState', ([], {'seed': '(2021)'}), '(seed=2021)\n', (8791, 8802), True, 'import numpy as np\n'), ((1243, 1297), 'megengine.quantization.create_qparams', 'create_qparams', (['QuantMode.SYMMERTIC', '"""qint8"""', 'x1_scale'], {}), "(QuantMode.SYMMERTIC, 'qint8', x1_scale)\n", (1257, 1297), False, 'from megengine.quantization import QuantMode, create_qparams\n'), ((1507, 1561), 'megengine.quantization.create_qparams', 'create_qparams', (['QuantMode.SYMMERTIC', '"""qint8"""', 'x2_scale'], {}), "(QuantMode.SYMMERTIC, 'qint8', x2_scale)\n", (1521, 1561), False, 'from megengine.quantization import QuantMode, create_qparams\n'), ((3005, 3043), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(N, IC, IH, IW)'}), '(size=(N, IC, IH, IW))\n', (3021, 3043), True, 'import numpy as np\n'), ((3058, 3097), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(OC, IC, KH, KW)'}), '(size=(OC, IC, KH, KW))\n', 
(3074, 3097), True, 'import numpy as np\n'), ((3112, 3148), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(1, OC, 1, 1)'}), '(size=(1, OC, 1, 1))\n', (3128, 3148), True, 'import numpy as np\n'), ((3169, 3195), 'megengine.core.tensor.dtype.get_scale', 'dtype.get_scale', (['inp_dtype'], {}), '(inp_dtype)\n', (3184, 3195), False, 'from megengine.core.tensor import dtype\n'), ((3214, 3238), 'megengine.core.tensor.dtype.get_scale', 'dtype.get_scale', (['w_dtype'], {}), '(w_dtype)\n', (3229, 3238), False, 'from megengine.core.tensor import dtype\n'), ((3257, 3281), 'megengine.core.tensor.dtype.get_scale', 'dtype.get_scale', (['b_dtype'], {}), '(b_dtype)\n', (3272, 3281), False, 'from megengine.core.tensor import dtype\n'), ((3298, 3350), 'megengine.core.tensor.dtype.convert_to_qint8', 'dtype.convert_to_qint8', (['(inp_v * inp_scale)', 'inp_dtype'], {}), '(inp_v * inp_scale, inp_dtype)\n', (3320, 3350), False, 'from megengine.core.tensor import dtype\n'), ((3364, 3410), 'megengine.core.tensor.dtype.convert_to_qint8', 'dtype.convert_to_qint8', (['(w_v * w_scale)', 'w_dtype'], {}), '(w_v * w_scale, w_dtype)\n', (3386, 3410), False, 'from megengine.core.tensor import dtype\n'), ((3424, 3471), 'megengine.core.tensor.dtype.convert_to_qint32', 'dtype.convert_to_qint32', (['(b_v * b_scale)', 'b_dtype'], {}), '(b_v * b_scale, b_dtype)\n', (3447, 3471), False, 'from megengine.core.tensor import dtype\n'), ((3492, 3525), 'megengine.tensor', 'mge.tensor', (['inpv'], {'dtype': 'inp_dtype'}), '(inpv, dtype=inp_dtype)\n', (3502, 3525), True, 'import megengine as mge\n'), ((3543, 3575), 'megengine.Parameter', 'mge.Parameter', (['wv'], {'dtype': 'w_dtype'}), '(wv, dtype=w_dtype)\n', (3556, 3575), True, 'import megengine as mge\n'), ((3594, 3626), 'megengine.Parameter', 'mge.Parameter', (['bv'], {'dtype': 'b_dtype'}), '(bv, dtype=b_dtype)\n', (3607, 3626), True, 'import megengine as mge\n'), ((5242, 5261), 'megengine.functional.flatten', 'F.flatten', (['expected'], {}), '(expected)\n', (5251, 5261), True, 'import megengine.functional as F\n'), ((5279, 5296), 'megengine.functional.flatten', 'F.flatten', (['result'], {}), '(result)\n', (5288, 5296), True, 'import megengine.functional as F\n'), ((2377, 2400), 'megengine.device.get_device_count', 'get_device_count', (['"""gpu"""'], {}), "('gpu')\n", (2393, 2400), False, 'from megengine.device import get_cuda_compute_capability, get_device_count\n'), ((6365, 6403), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(N, IC, IH, IW)'}), '(size=(N, IC, IH, IW))\n', (6381, 6403), True, 'import numpy as np\n'), ((6418, 6457), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(OC, IC, KH, KW)'}), '(size=(OC, IC, KH, KW))\n', (6434, 6457), True, 'import numpy as np\n'), ((6472, 6508), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(1, OC, 1, 1)'}), '(size=(1, OC, 1, 1))\n', (6488, 6508), True, 'import numpy as np\n'), ((6529, 6555), 'megengine.core.tensor.dtype.get_scale', 'dtype.get_scale', (['inp_dtype'], {}), '(inp_dtype)\n', (6544, 6555), False, 'from megengine.core.tensor import dtype\n'), ((6574, 6598), 'megengine.core.tensor.dtype.get_scale', 'dtype.get_scale', (['w_dtype'], {}), '(w_dtype)\n', (6589, 6598), False, 'from megengine.core.tensor import dtype\n'), ((6617, 6641), 'megengine.core.tensor.dtype.get_scale', 'dtype.get_scale', (['b_dtype'], {}), '(b_dtype)\n', (6632, 6641), False, 'from megengine.core.tensor import dtype\n'), ((6658, 6711), 'megengine.core.tensor.dtype.convert_to_quint4', 'dtype.convert_to_quint4', 
(['(inp_v * inp_scale)', 'inp_dtype'], {}), '(inp_v * inp_scale, inp_dtype)\n', (6681, 6711), False, 'from megengine.core.tensor import dtype\n'), ((6725, 6771), 'megengine.core.tensor.dtype.convert_to_qint4', 'dtype.convert_to_qint4', (['(w_v * w_scale)', 'w_dtype'], {}), '(w_v * w_scale, w_dtype)\n', (6747, 6771), False, 'from megengine.core.tensor import dtype\n'), ((6785, 6832), 'megengine.core.tensor.dtype.convert_to_qint32', 'dtype.convert_to_qint32', (['(b_v * b_scale)', 'b_dtype'], {}), '(b_v * b_scale, b_dtype)\n', (6808, 6832), False, 'from megengine.core.tensor import dtype\n'), ((6854, 6887), 'megengine.Tensor', 'mge.Tensor', (['inpv'], {'dtype': 'inp_dtype'}), '(inpv, dtype=inp_dtype)\n', (6864, 6887), True, 'import megengine as mge\n'), ((6905, 6937), 'megengine.Parameter', 'mge.Parameter', (['wv'], {'dtype': 'w_dtype'}), '(wv, dtype=w_dtype)\n', (6918, 6937), True, 'import megengine as mge\n'), ((6956, 6988), 'megengine.Parameter', 'mge.Parameter', (['bv'], {'dtype': 'b_dtype'}), '(bv, dtype=b_dtype)\n', (6969, 6988), True, 'import megengine as mge\n'), ((8003, 8022), 'megengine.functional.flatten', 'F.flatten', (['expected'], {}), '(expected)\n', (8012, 8022), True, 'import megengine.functional as F\n'), ((8040, 8057), 'megengine.functional.flatten', 'F.flatten', (['result'], {}), '(result)\n', (8049, 8057), True, 'import megengine.functional as F\n'), ((9376, 9398), 'megengine.core.tensor.dtype.qint8', 'dtype.qint8', (['inp_scale'], {}), '(inp_scale)\n', (9387, 9398), False, 'from megengine.core.tensor import dtype\n'), ((9422, 9447), 'megengine.core.tensor.dtype.qint8', 'dtype.qint8', (['weight_scale'], {}), '(weight_scale)\n', (9433, 9447), False, 'from megengine.core.tensor import dtype\n'), ((9469, 9493), 'megengine.core.tensor.dtype.qint32', 'dtype.qint32', (['bias_scale'], {}), '(bias_scale)\n', (9481, 9493), False, 'from megengine.core.tensor import dtype\n'), ((9514, 9536), 'megengine.core.tensor.dtype.qint8', 'dtype.qint8', (['out_scale'], {}), '(out_scale)\n', (9525, 9536), False, 'from megengine.core.tensor import dtype\n'), ((9847, 9890), 'megengine.core.tensor.dtype.convert_to_qint8', 'dtype.convert_to_qint8', (['inp_fp32', 'inp_dtype'], {}), '(inp_fp32, inp_dtype)\n', (9869, 9890), False, 'from megengine.core.tensor import dtype\n'), ((9913, 9962), 'megengine.core.tensor.dtype.convert_to_qint8', 'dtype.convert_to_qint8', (['weight_fp32', 'weight_dtype'], {}), '(weight_fp32, weight_dtype)\n', (9935, 9962), False, 'from megengine.core.tensor import dtype\n'), ((9984, 10030), 'megengine.core.tensor.dtype.convert_to_qint32', 'dtype.convert_to_qint32', (['bias_fp32', 'bias_dtype'], {}), '(bias_fp32, bias_dtype)\n', (10007, 10030), False, 'from megengine.core.tensor import dtype\n'), ((10051, 10088), 'megengine.tensor', 'mge.tensor', (['inp_int8'], {'dtype': 'inp_dtype'}), '(inp_int8, dtype=inp_dtype)\n', (10061, 10088), True, 'import megengine as mge\n'), ((10111, 10157), 'megengine.Parameter', 'mge.Parameter', (['weight_int8'], {'dtype': 'weight_dtype'}), '(weight_int8, dtype=weight_dtype)\n', (10124, 10157), True, 'import megengine as mge\n'), ((10179, 10222), 'megengine.Parameter', 'mge.Parameter', (['bias_int32'], {'dtype': 'bias_dtype'}), '(bias_int32, dtype=bias_dtype)\n', (10192, 10222), True, 'import megengine as mge\n'), ((10391, 10590), 'megengine.functional.conv_transpose2d', 'F.conv_transpose2d', (['inp_fp32', 'weight_fp32', '(bias_fp32 if has_bias else None)'], {'stride': '(SH, SW)', 'padding': '(PH, PW)', 'dilation': '(DH, DW)', 'groups': 'groups', 
'conv_mode': 'conv_mode', 'compute_mode': 'compute_mode'}), '(inp_fp32, weight_fp32, bias_fp32 if has_bias else None,\n stride=(SH, SW), padding=(PH, PW), dilation=(DH, DW), groups=groups,\n conv_mode=conv_mode, compute_mode=compute_mode)\n', (10409, 10590), True, 'import megengine.functional as F\n'), ((10792, 10826), 'megengine.core.tensor.dtype.convert_from_qint8', 'dtype.convert_from_qint8', (['expected'], {}), '(expected)\n', (10816, 10826), False, 'from megengine.core.tensor import dtype\n'), ((10855, 11087), 'megengine.module.quantized.conv.ConvTranspose2d', 'ConvTranspose2d', ([], {'in_channels': 'IC', 'out_channels': 'OC', 'kernel_size': '(KH, KW)', 'stride': '(SH, SW)', 'padding': '(PH, PW)', 'dilation': '(DH, DW)', 'groups': 'groups', 'bias': 'has_bias', 'conv_mode': 'conv_mode', 'compute_mode': 'compute_mode', 'dtype': 'out_dtype'}), '(in_channels=IC, out_channels=OC, kernel_size=(KH, KW),\n stride=(SH, SW), padding=(PH, PW), dilation=(DH, DW), groups=groups,\n bias=has_bias, conv_mode=conv_mode, compute_mode=compute_mode, dtype=\n out_dtype)\n', (10870, 11087), False, 'from megengine.module.quantized.conv import ConvTranspose2d\n'), ((11253, 11279), 'megengine.Parameter', 'mge.Parameter', (['weight_int8'], {}), '(weight_int8)\n', (11266, 11279), True, 'import megengine as mge\n'), ((11440, 11472), 'megengine.core.tensor.dtype.convert_from_qint8', 'dtype.convert_from_qint8', (['result'], {}), '(result)\n', (11464, 11472), False, 'from megengine.core.tensor import dtype\n'), ((11481, 11541), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['result', 'expected'], {'atol': 'out_scale'}), '(result, expected, atol=out_scale)\n', (11507, 11541), True, 'import numpy as np\n'), ((1165, 1181), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (1179, 1181), True, 'import numpy as np\n'), ((1429, 1445), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (1443, 1445), True, 'import numpy as np\n'), ((1628, 1644), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (1642, 1644), True, 'import numpy as np\n'), ((1792, 1814), 'megengine.functional.elemwise._elwise', '_elwise', (['x1'], {'mode': 'kind'}), '(x1, mode=kind)\n', (1799, 1814), False, 'from megengine.functional.elemwise import _elemwise_multi_type, _elwise\n'), ((2054, 2080), 'megengine.functional.elemwise._elwise', '_elwise', (['x1', 'x2'], {'mode': 'kind'}), '(x1, x2, mode=kind)\n', (2061, 2080), False, 'from megengine.functional.elemwise import _elemwise_multi_type, _elwise\n'), ((2498, 2514), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (2512, 2514), True, 'import numpy as np\n'), ((2545, 2561), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (2559, 2561), True, 'import numpy as np\n'), ((2595, 2611), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (2609, 2611), True, 'import numpy as np\n'), ((3813, 3898), 'megengine.functional.reshape', 'F.reshape', (['var', '(var.shape[0], var.shape[1] // 4, 4, var.shape[2], var.shape[3])'], {}), '(var, (var.shape[0], var.shape[1] // 4, 4, var.shape[2], var.shape[3])\n )\n', (3822, 3898), True, 'import megengine.functional as F\n'), ((3942, 3975), 'megengine.functional.transpose', 'F.transpose', (['var', '(0, 1, 3, 4, 2)'], {}), '(var, (0, 1, 3, 4, 2))\n', (3953, 3975), True, 'import megengine.functional as F\n'), ((4051, 4127), 'megengine.functional.conv2d', 'F.conv2d', (['inp', 'w', '(b if has_bias else None)'], {'stride': '(SH, SW)', 'padding': '(PH, PW)'}), '(inp, w, b if has_bias else None, stride=(SH, SW), padding=(PH, 
PW))\n', (4059, 4127), True, 'import megengine.functional as F\n'), ((4582, 4712), 'megengine.functional.quantized.conv_bias_activation', 'F.quantized.conv_bias_activation', (['inp', 'w', 'b'], {'stride': '(SH, SW)', 'padding': '(PH, PW)', 'dtype': 'out_dtype', 'nonlinear_mode': 'nonlinear_mode'}), '(inp, w, b, stride=(SH, SW), padding=(PH,\n PW), dtype=out_dtype, nonlinear_mode=nonlinear_mode)\n', (4614, 4712), True, 'import megengine.functional as F\n'), ((4865, 4888), 'megengine.is_cuda_available', 'mge.is_cuda_available', ([], {}), '()\n', (4886, 4888), True, 'import megengine as mge\n'), ((5186, 5222), 'megengine.functional.transpose', 'F.transpose', (['result', '(0, 1, 4, 2, 3)'], {}), '(result, (0, 1, 4, 2, 3))\n', (5197, 5222), True, 'import megengine.functional as F\n'), ((7174, 7250), 'megengine.functional.conv2d', 'F.conv2d', (['inp', 'w', '(b if has_bias else None)'], {'stride': '(SH, SW)', 'padding': '(PH, PW)'}), '(inp, w, b if has_bias else None, stride=(SH, SW), padding=(PH, PW))\n', (7182, 7250), True, 'import megengine.functional as F\n'), ((7532, 7662), 'megengine.functional.quantized.conv_bias_activation', 'F.quantized.conv_bias_activation', (['inp', 'w', 'b'], {'stride': '(SH, SW)', 'padding': '(PH, PW)', 'dtype': 'out_dtype', 'nonlinear_mode': 'nonlinear_mode'}), '(inp, w, b, stride=(SH, SW), padding=(PH,\n PW), dtype=out_dtype, nonlinear_mode=nonlinear_mode)\n', (7564, 7662), True, 'import megengine.functional as F\n'), ((11337, 11362), 'megengine.Parameter', 'mge.Parameter', (['bias_int32'], {}), '(bias_int32)\n', (11350, 11362), True, 'import megengine as mge\n'), ((8582, 8605), 'megengine.device.get_device_count', 'get_device_count', (['"""gpu"""'], {}), "('gpu')\n", (8598, 8605), False, 'from megengine.device import get_cuda_compute_capability, get_device_count\n'), ((8614, 8644), 'megengine.device.get_cuda_compute_capability', 'get_cuda_compute_capability', (['(0)'], {}), '(0)\n', (8641, 8644), False, 'from megengine.device import get_cuda_compute_capability, get_device_count\n'), ((1090, 1119), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(3, 3)'}), '(size=(3, 3))\n', (1106, 1119), True, 'import numpy as np\n'), ((1354, 1383), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(3, 3)'}), '(size=(3, 3))\n', (1370, 1383), True, 'import numpy as np\n'), ((4223, 4232), 'megengine.functional.relu', 'F.relu', (['O'], {}), '(O)\n', (4229, 4232), True, 'import megengine.functional as F\n'), ((7346, 7355), 'megengine.functional.relu', 'F.relu', (['O'], {}), '(O)\n', (7352, 7355), True, 'import megengine.functional as F\n'), ((1865, 1935), 'megengine.functional.elemwise._elemwise_multi_type', '_elemwise_multi_type', (['x1_int8'], {'mode': 'quantized_kind', 'dtype': 'output_dtype'}), '(x1_int8, mode=quantized_kind, dtype=output_dtype)\n', (1885, 1935), False, 'from megengine.functional.elemwise import _elemwise_multi_type, _elwise\n'), ((2131, 2210), 'megengine.functional.elemwise._elemwise_multi_type', '_elemwise_multi_type', (['x1_int8', 'x2_int8'], {'mode': 'quantized_kind', 'dtype': 'output_dtype'}), '(x1_int8, x2_int8, mode=quantized_kind, dtype=output_dtype)\n', (2151, 2210), False, 'from megengine.functional.elemwise import _elemwise_multi_type, _elwise\n')]
|
#!/usr/bin/env python
r"""
Parallel assembling and solving of a Biot problem (deformable porous medium),
using commands for interactive use.
Find :math:`\ul{u}`, :math:`p` such that:
.. math::
\int_{\Omega} D_{ijkl}\ e_{ij}(\ul{v}) e_{kl}(\ul{u})
- \int_{\Omega} p\ \alpha_{ij} e_{ij}(\ul{v})
= 0
\;, \quad \forall \ul{v} \;,
\int_{\Omega} q\ \alpha_{ij} e_{ij}(\ul{u})
+ \int_{\Omega} K_{ij} \nabla_i q \nabla_j p
= 0
\;, \quad \forall q \;,
where
.. math::
D_{ijkl} = \mu (\delta_{ik} \delta_{jl}+\delta_{il} \delta_{jk}) +
\lambda \ \delta_{ij} \delta_{kl}
\;.
Important Notes
---------------
- This example requires petsc4py, mpi4py and (optionally) pymetis with their
dependencies installed!
- This example generates a number of files - do not use an existing non-empty
directory for the ``output_dir`` argument.
- Use the ``--clear`` option with care!
Notes
-----
- Each task is responsible for a subdomain consisting of a set of cells (a cell
region).
- Each subdomain owns PETSc DOFs within a consecutive range.
- When both global and task-local variables exist, the task-local
variables have ``_i`` suffix.
- This example shows how to use a nonlinear solver from PETSc.
- This example can serve as a template for solving a (non)linear multi-field
problem - just replace the equations in :func:`create_local_problem()`.
- The material parameter :math:`\alpha_{ij}` is artificially high to be able to
see the pressure influence on displacements.
- The command line options are saved into <output_dir>/options.txt file.
Usage Examples
--------------
See all options::
$ python examples/multi_physics/biot_parallel_interactive.py -h
See PETSc options::
$ python examples/multi_physics/biot_parallel_interactive.py -help
Single process run useful for debugging with :func:`debug()
<sfepy.base.base.debug>`::
$ python examples/multi_physics/biot_parallel_interactive.py output-parallel
Parallel runs::
$ mpiexec -n 3 python examples/multi_physics/biot_parallel_interactive.py output-parallel -2 --shape=101,101
$ mpiexec -n 3 python examples/multi_physics/biot_parallel_interactive.py output-parallel -2 --shape=101,101 --metis
$ mpiexec -n 8 python examples/multi_physics/biot_parallel_interactive.py output-parallel -2 --shape 101,101 --metis -snes_monitor -snes_view -snes_converged_reason -ksp_monitor
Using FieldSplit preconditioner::
$ mpiexec -n 2 python examples/multi_physics/biot_parallel_interactive.py output-parallel --shape=101,101 -snes_monitor -snes_converged_reason -ksp_monitor -pc_type fieldsplit
$ mpiexec -n 8 python examples/multi_physics/biot_parallel_interactive.py output-parallel --shape=1001,1001 --metis -snes_monitor -snes_converged_reason -ksp_monitor -pc_type fieldsplit -pc_fieldsplit_type additive
View the results (with strip linearization or approximation orders of one) using::
$ python postproc.py output-parallel/sol.h5 --wireframe -b -d'p,plot_warp_scalar:u,plot_displacements'
View the results (with adaptive linearization) using::
$ python postproc.py output-parallel/sol_u.h5 --wireframe -b -d'u,plot_displacements'
$ python postproc.py output-parallel/sol_p.h5 --wireframe -b -d'p,plot_warp_scalar'
"""
from __future__ import absolute_import
from argparse import RawDescriptionHelpFormatter, ArgumentParser
import os
import time
import numpy as nm
from sfepy.base.base import output, Struct
from sfepy.base.ioutils import ensure_path, remove_files_patterns, save_options
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.discrete.common.region import Region
from sfepy.discrete import (FieldVariable, Material, Integral, Function,
Equation, Equations, Problem, State)
from sfepy.discrete.conditions import Conditions, EssentialBC
from sfepy.terms import Term
from sfepy.solvers.ls import PETScKrylovSolver
from sfepy.solvers.nls import PETScNonlinearSolver
from sfepy.mechanics.matcoefs import stiffness_from_lame
import sfepy.parallel.parallel as pl
from sfepy.parallel.evaluate import PETScParallelEvaluator
def create_local_problem(omega_gi, orders):
"""
Local problem definition using a domain corresponding to the global region
`omega_gi`.
"""
order_u, order_p = orders
mesh = omega_gi.domain.mesh
# All tasks have the whole mesh.
bbox = mesh.get_bounding_box()
min_x, max_x = bbox[:, 0]
eps_x = 1e-8 * (max_x - min_x)
min_y, max_y = bbox[:, 1]
eps_y = 1e-8 * (max_y - min_y)
mesh_i = Mesh.from_region(omega_gi, mesh, localize=True)
domain_i = FEDomain('domain_i', mesh_i)
omega_i = domain_i.create_region('Omega', 'all')
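    # Boundary facet regions are selected within a small tolerance of the
    # global bounding box; allow_empty covers subdomains not touching a side.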
gamma1_i = domain_i.create_region('Gamma1',
'vertices in (x < %.10f)'
% (min_x + eps_x),
'facet', allow_empty=True)
gamma2_i = domain_i.create_region('Gamma2',
'vertices in (x > %.10f)'
% (max_x - eps_x),
'facet', allow_empty=True)
gamma3_i = domain_i.create_region('Gamma3',
'vertices in (y < %.10f)'
% (min_y + eps_y),
'facet', allow_empty=True)
field1_i = Field.from_args('fu', nm.float64, mesh.dim, omega_i,
approx_order=order_u)
field2_i = Field.from_args('fp', nm.float64, 1, omega_i,
approx_order=order_p)
output('field 1: number of local DOFs:', field1_i.n_nod)
output('field 2: number of local DOFs:', field2_i.n_nod)
u_i = FieldVariable('u_i', 'unknown', field1_i, order=0)
v_i = FieldVariable('v_i', 'test', field1_i, primary_var_name='u_i')
p_i = FieldVariable('p_i', 'unknown', field2_i, order=1)
q_i = FieldVariable('q_i', 'test', field2_i, primary_var_name='p_i')
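    # Biot coupling coefficient alpha in vector (Voigt-like) storage: 3
    # components in 2D, 6 in 3D; artificially large, see the module notes.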
if mesh.dim == 2:
alpha = 1e2 * nm.array([[0.132], [0.132], [0.092]])
else:
alpha = 1e2 * nm.array([[0.132], [0.132], [0.132],
[0.092], [0.092], [0.092]])
mat = Material('m', D=stiffness_from_lame(mesh.dim, lam=10, mu=5),
k=1, alpha=alpha)
integral = Integral('i', order=2*(max(order_u, order_p)))
t11 = Term.new('dw_lin_elastic(m.D, v_i, u_i)',
integral, omega_i, m=mat, v_i=v_i, u_i=u_i)
t12 = Term.new('dw_biot(m.alpha, v_i, p_i)',
integral, omega_i, m=mat, v_i=v_i, p_i=p_i)
t21 = Term.new('dw_biot(m.alpha, u_i, q_i)',
integral, omega_i, m=mat, u_i=u_i, q_i=q_i)
t22 = Term.new('dw_laplace(m.k, q_i, p_i)',
integral, omega_i, m=mat, q_i=q_i, p_i=p_i)
eq1 = Equation('eq1', t11 - t12)
    eq2 = Equation('eq2', t21 + t22)
eqs = Equations([eq1, eq2])
ebc1 = EssentialBC('ebc1', gamma1_i, {'u_i.all' : 0.0})
ebc2 = EssentialBC('ebc2', gamma2_i, {'u_i.0' : 0.05})
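    # Pressure BC on Gamma3: two full sine periods along the global x-extent.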
def bc_fun(ts, coors, **kwargs):
val = 0.3 * nm.sin(4 * nm.pi * (coors[:, 0] - min_x) / (max_x - min_x))
return val
fun = Function('bc_fun', bc_fun)
ebc3 = EssentialBC('ebc3', gamma3_i, {'p_i.all' : fun})
pb = Problem('problem_i', equations=eqs, active_only=False)
pb.time_update(ebcs=Conditions([ebc1, ebc2, ebc3]))
pb.update_materials()
return pb
def solve_problem(mesh_filename, options, comm):
order_u = options.order_u
order_p = options.order_p
rank, size = comm.Get_rank(), comm.Get_size()
output('rank', rank, 'of', size)
mesh = Mesh.from_file(mesh_filename)
if rank == 0:
cell_tasks = pl.partition_mesh(mesh, size, use_metis=options.metis,
verbose=True)
else:
cell_tasks = None
output('creating global domain and fields...')
tt = time.clock()
domain = FEDomain('domain', mesh)
omega = domain.create_region('Omega', 'all')
field1 = Field.from_args('fu', nm.float64, mesh.dim, omega,
approx_order=order_u)
field2 = Field.from_args('fp', nm.float64, 1, omega,
approx_order=order_p)
fields = [field1, field2]
output('...done in', time.clock() - tt)
output('distributing fields...')
tt = time.clock()
distribute = pl.distribute_fields_dofs
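    # Partition the field DOFs among tasks; is_overlap=True also sets up the
    # overlap cell layers shared between neighbouring subdomains.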
lfds, gfds = distribute(fields, cell_tasks,
is_overlap=True,
use_expand_dofs=True,
save_inter_regions=options.save_inter_regions,
output_dir=options.output_dir,
comm=comm, verbose=True)
output('...done in', time.clock() - tt)
output('creating local problem...')
tt = time.clock()
cells = lfds[0].cells
omega_gi = Region.from_cells(cells, domain)
omega_gi.finalize()
omega_gi.update_shape()
pb = create_local_problem(omega_gi, [order_u, order_p])
variables = pb.get_variables()
state = State(variables)
state.fill(0.0)
state.apply_ebc()
output('...done in', time.clock() - tt)
output('allocating global system...')
tt = time.clock()
sizes, drange, pdofs = pl.setup_composite_dofs(lfds, fields, variables,
verbose=True)
pmtx, psol, prhs = pl.create_petsc_system(pb.mtx_a, sizes, pdofs, drange,
is_overlap=True, comm=comm,
verbose=True)
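    # pmtx, psol, prhs are the global PETSc matrix and vectors; drange is this
    # task's range of owned PETSc DOFs.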
output('...done in', time.clock() - tt)
output('creating solver...')
tt = time.clock()
conf = Struct(method='bcgsl', precond='jacobi', sub_precond='none',
i_max=10000, eps_a=1e-50, eps_r=1e-6, eps_d=1e4,
verbose=True)
status = {}
ls = PETScKrylovSolver(conf, comm=comm, mtx=pmtx, status=status)
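    # Register the per-field PETSc DOF ranges so the fieldsplit preconditioner
    # (-pc_type fieldsplit) can split the u/p blocks.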
field_ranges = {}
for ii, variable in enumerate(variables.iter_state(ordered=True)):
field_ranges[variable.name] = lfds[ii].petsc_dofs_range
ls.set_field_split(field_ranges, comm=comm)
ev = PETScParallelEvaluator(pb, pdofs, drange, True,
psol, comm, verbose=True)
nls_status = {}
conf = Struct(method='newtonls',
i_max=5, eps_a=0, eps_r=1e-5, eps_s=0.0,
verbose=True)
nls = PETScNonlinearSolver(conf, pmtx=pmtx, prhs=prhs, comm=comm,
fun=ev.eval_residual,
fun_grad=ev.eval_tangent_matrix,
lin_solver=ls, status=nls_status)
output('...done in', time.clock() - tt)
output('solving...')
tt = time.clock()
state = pb.create_state()
state.apply_ebc()
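    # The evaluator's gather/scatter move data between the task-local vector
    # psol_i and the distributed PETSc vector psol.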
ev.psol_i[...] = state()
ev.gather(psol, ev.psol_i)
psol = nls(psol)
ev.scatter(ev.psol_i, psol)
sol0_i = ev.psol_i[...]
output('...done in', time.clock() - tt)
output('saving solution...')
tt = time.clock()
state.set_full(sol0_i)
out = state.create_output_dict()
filename = os.path.join(options.output_dir, 'sol_%02d.h5' % comm.rank)
pb.domain.mesh.write(filename, io='auto', out=out)
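    # Collect the full distributed solution on rank 0 to write global outputs.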
gather_to_zero = pl.create_gather_to_zero(psol)
psol_full = gather_to_zero(psol)
if comm.rank == 0:
sol = psol_full[...].copy()
u = FieldVariable('u', 'parameter', field1,
primary_var_name='(set-to-None)')
remap = gfds[0].id_map
ug = sol[remap]
p = FieldVariable('p', 'parameter', field2,
primary_var_name='(set-to-None)')
remap = gfds[1].id_map
pg = sol[remap]
if (((order_u == 1) and (order_p == 1))
or (options.linearization == 'strip')):
out = u.create_output(ug)
out.update(p.create_output(pg))
filename = os.path.join(options.output_dir, 'sol.h5')
mesh.write(filename, io='auto', out=out)
else:
out = u.create_output(ug, linearization=Struct(kind='adaptive',
min_level=0,
max_level=order_u,
eps=1e-3))
filename = os.path.join(options.output_dir, 'sol_u.h5')
out['u'].mesh.write(filename, io='auto', out=out)
out = p.create_output(pg, linearization=Struct(kind='adaptive',
min_level=0,
max_level=order_p,
eps=1e-3))
filename = os.path.join(options.output_dir, 'sol_p.h5')
out['p'].mesh.write(filename, io='auto', out=out)
output('...done in', time.clock() - tt)
helps = {
'output_dir' :
'output directory',
'dims' :
'dimensions of the block [default: %(default)s]',
'shape' :
'shape (counts of nodes in x, y, z) of the block [default: %(default)s]',
'centre' :
'centre of the block [default: %(default)s]',
'2d' :
'generate a 2D rectangle, the third components of the above'
' options are ignored',
'u-order' :
'displacement field approximation order',
'p-order' :
'pressure field approximation order',
'linearization' :
'linearization used for storing the results with approximation order > 1'
' [default: %(default)s]',
'metis' :
'use metis for domain partitioning',
'save_inter_regions' :
'save inter-task regions for debugging partitioning problems',
'silent' : 'do not print messages to screen',
'clear' :
'clear old solution files from output directory'
' (DANGEROUS - use with care!)',
}
def main():
parser = ArgumentParser(description=__doc__.rstrip(),
formatter_class=RawDescriptionHelpFormatter)
parser.add_argument('output_dir', help=helps['output_dir'])
parser.add_argument('--dims', metavar='dims',
action='store', dest='dims',
default='1.0,1.0,1.0', help=helps['dims'])
parser.add_argument('--shape', metavar='shape',
action='store', dest='shape',
default='11,11,11', help=helps['shape'])
parser.add_argument('--centre', metavar='centre',
action='store', dest='centre',
default='0.0,0.0,0.0', help=helps['centre'])
parser.add_argument('-2', '--2d',
action='store_true', dest='is_2d',
default=False, help=helps['2d'])
parser.add_argument('--u-order', metavar='int', type=int,
action='store', dest='order_u',
default=1, help=helps['u-order'])
parser.add_argument('--p-order', metavar='int', type=int,
action='store', dest='order_p',
default=1, help=helps['p-order'])
parser.add_argument('--linearization', choices=['strip', 'adaptive'],
action='store', dest='linearization',
default='strip', help=helps['linearization'])
parser.add_argument('--metis',
action='store_true', dest='metis',
default=False, help=helps['metis'])
parser.add_argument('--save-inter-regions',
action='store_true', dest='save_inter_regions',
default=False, help=helps['save_inter_regions'])
parser.add_argument('--silent',
action='store_true', dest='silent',
default=False, help=helps['silent'])
parser.add_argument('--clear',
action='store_true', dest='clear',
default=False, help=helps['clear'])
options, petsc_opts = parser.parse_known_args()
comm = pl.PETSc.COMM_WORLD
output_dir = options.output_dir
filename = os.path.join(output_dir, 'output_log_%02d.txt' % comm.rank)
if comm.rank == 0:
ensure_path(filename)
comm.barrier()
output.prefix = 'sfepy_%02d:' % comm.rank
    output.set_output(filename=filename, combined=not options.silent)
output('petsc options:', petsc_opts)
mesh_filename = os.path.join(options.output_dir, 'para.h5')
if comm.rank == 0:
from sfepy.mesh.mesh_generators import gen_block_mesh
if options.clear:
remove_files_patterns(output_dir,
['*.h5', '*.mesh', '*.txt'],
ignores=['output_log_%02d.txt' % ii
for ii in range(comm.size)],
verbose=True)
save_options(os.path.join(output_dir, 'options.txt'),
[('options', vars(options))])
dim = 2 if options.is_2d else 3
dims = nm.array(eval(options.dims), dtype=nm.float64)[:dim]
shape = nm.array(eval(options.shape), dtype=nm.int32)[:dim]
centre = nm.array(eval(options.centre), dtype=nm.float64)[:dim]
output('dimensions:', dims)
output('shape: ', shape)
output('centre: ', centre)
mesh = gen_block_mesh(dims, shape, centre, name='block-fem',
verbose=True)
mesh.write(mesh_filename, io='auto')
comm.barrier()
output('field u order:', options.order_u)
output('field p order:', options.order_p)
solve_problem(mesh_filename, options, comm)
if __name__ == '__main__':
main()
|
[
"sfepy.parallel.parallel.partition_mesh",
"sfepy.discrete.Equation",
"sfepy.parallel.evaluate.PETScParallelEvaluator",
"sfepy.discrete.conditions.EssentialBC",
"sfepy.base.base.Struct",
"sfepy.discrete.fem.Mesh.from_file",
"sfepy.parallel.parallel.create_gather_to_zero",
"sfepy.mesh.mesh_generators.gen_block_mesh",
"sfepy.discrete.fem.Mesh.from_region",
"sfepy.mechanics.matcoefs.stiffness_from_lame",
"sfepy.discrete.conditions.Conditions",
"sfepy.parallel.parallel.setup_composite_dofs",
"sfepy.discrete.fem.Field.from_args",
"sfepy.base.base.output.set_output",
"sfepy.discrete.common.region.Region.from_cells",
"sfepy.discrete.Function",
"sfepy.terms.Term.new",
"sfepy.solvers.nls.PETScNonlinearSolver",
"sfepy.discrete.Equations",
"sfepy.discrete.fem.FEDomain",
"sfepy.discrete.State",
"sfepy.solvers.ls.PETScKrylovSolver",
"sfepy.discrete.Problem",
"sfepy.base.ioutils.ensure_path",
"sfepy.discrete.FieldVariable",
"sfepy.parallel.parallel.create_petsc_system",
"sfepy.base.base.output"
] |
[((4531, 4578), 'sfepy.discrete.fem.Mesh.from_region', 'Mesh.from_region', (['omega_gi', 'mesh'], {'localize': '(True)'}), '(omega_gi, mesh, localize=True)\n', (4547, 4578), False, 'from sfepy.discrete.fem import Mesh, FEDomain, Field\n'), ((4594, 4622), 'sfepy.discrete.fem.FEDomain', 'FEDomain', (['"""domain_i"""', 'mesh_i'], {}), "('domain_i', mesh_i)\n", (4602, 4622), False, 'from sfepy.discrete.fem import Mesh, FEDomain, Field\n'), ((5395, 5469), 'sfepy.discrete.fem.Field.from_args', 'Field.from_args', (['"""fu"""', 'nm.float64', 'mesh.dim', 'omega_i'], {'approx_order': 'order_u'}), "('fu', nm.float64, mesh.dim, omega_i, approx_order=order_u)\n", (5410, 5469), False, 'from sfepy.discrete.fem import Mesh, FEDomain, Field\n'), ((5517, 5584), 'sfepy.discrete.fem.Field.from_args', 'Field.from_args', (['"""fp"""', 'nm.float64', '(1)', 'omega_i'], {'approx_order': 'order_p'}), "('fp', nm.float64, 1, omega_i, approx_order=order_p)\n", (5532, 5584), False, 'from sfepy.discrete.fem import Mesh, FEDomain, Field\n'), ((5621, 5677), 'sfepy.base.base.output', 'output', (['"""field 1: number of local DOFs:"""', 'field1_i.n_nod'], {}), "('field 1: number of local DOFs:', field1_i.n_nod)\n", (5627, 5677), False, 'from sfepy.base.base import output, Struct\n'), ((5682, 5738), 'sfepy.base.base.output', 'output', (['"""field 2: number of local DOFs:"""', 'field2_i.n_nod'], {}), "('field 2: number of local DOFs:', field2_i.n_nod)\n", (5688, 5738), False, 'from sfepy.base.base import output, Struct\n'), ((5750, 5800), 'sfepy.discrete.FieldVariable', 'FieldVariable', (['"""u_i"""', '"""unknown"""', 'field1_i'], {'order': '(0)'}), "('u_i', 'unknown', field1_i, order=0)\n", (5763, 5800), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem, State\n'), ((5811, 5873), 'sfepy.discrete.FieldVariable', 'FieldVariable', (['"""v_i"""', '"""test"""', 'field1_i'], {'primary_var_name': '"""u_i"""'}), "('v_i', 'test', field1_i, primary_var_name='u_i')\n", (5824, 5873), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem, State\n'), ((5884, 5934), 'sfepy.discrete.FieldVariable', 'FieldVariable', (['"""p_i"""', '"""unknown"""', 'field2_i'], {'order': '(1)'}), "('p_i', 'unknown', field2_i, order=1)\n", (5897, 5934), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem, State\n'), ((5945, 6007), 'sfepy.discrete.FieldVariable', 'FieldVariable', (['"""q_i"""', '"""test"""', 'field2_i'], {'primary_var_name': '"""p_i"""'}), "('q_i', 'test', field2_i, primary_var_name='p_i')\n", (5958, 6007), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem, State\n'), ((6403, 6492), 'sfepy.terms.Term.new', 'Term.new', (['"""dw_lin_elastic(m.D, v_i, u_i)"""', 'integral', 'omega_i'], {'m': 'mat', 'v_i': 'v_i', 'u_i': 'u_i'}), "('dw_lin_elastic(m.D, v_i, u_i)', integral, omega_i, m=mat, v_i=v_i,\n u_i=u_i)\n", (6411, 6492), False, 'from sfepy.terms import Term\n'), ((6518, 6604), 'sfepy.terms.Term.new', 'Term.new', (['"""dw_biot(m.alpha, v_i, p_i)"""', 'integral', 'omega_i'], {'m': 'mat', 'v_i': 'v_i', 'p_i': 'p_i'}), "('dw_biot(m.alpha, v_i, p_i)', integral, omega_i, m=mat, v_i=v_i,\n p_i=p_i)\n", (6526, 6604), False, 'from sfepy.terms import Term\n'), ((6630, 6716), 'sfepy.terms.Term.new', 'Term.new', (['"""dw_biot(m.alpha, u_i, q_i)"""', 'integral', 'omega_i'], {'m': 'mat', 'u_i': 'u_i', 'q_i': 'q_i'}), 
"('dw_biot(m.alpha, u_i, q_i)', integral, omega_i, m=mat, u_i=u_i,\n q_i=q_i)\n", (6638, 6716), False, 'from sfepy.terms import Term\n'), ((6742, 6827), 'sfepy.terms.Term.new', 'Term.new', (['"""dw_laplace(m.k, q_i, p_i)"""', 'integral', 'omega_i'], {'m': 'mat', 'q_i': 'q_i', 'p_i': 'p_i'}), "('dw_laplace(m.k, q_i, p_i)', integral, omega_i, m=mat, q_i=q_i,\n p_i=p_i)\n", (6750, 6827), False, 'from sfepy.terms import Term\n'), ((6854, 6880), 'sfepy.discrete.Equation', 'Equation', (['"""eq1"""', '(t11 - t12)'], {}), "('eq1', t11 - t12)\n", (6862, 6880), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem, State\n'), ((6891, 6917), 'sfepy.discrete.Equation', 'Equation', (['"""eq1"""', '(t21 + t22)'], {}), "('eq1', t21 + t22)\n", (6899, 6917), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem, State\n'), ((6928, 6949), 'sfepy.discrete.Equations', 'Equations', (['[eq1, eq2]'], {}), '([eq1, eq2])\n', (6937, 6949), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem, State\n'), ((6962, 7009), 'sfepy.discrete.conditions.EssentialBC', 'EssentialBC', (['"""ebc1"""', 'gamma1_i', "{'u_i.all': 0.0}"], {}), "('ebc1', gamma1_i, {'u_i.all': 0.0})\n", (6973, 7009), False, 'from sfepy.discrete.conditions import Conditions, EssentialBC\n'), ((7022, 7068), 'sfepy.discrete.conditions.EssentialBC', 'EssentialBC', (['"""ebc2"""', 'gamma2_i', "{'u_i.0': 0.05}"], {}), "('ebc2', gamma2_i, {'u_i.0': 0.05})\n", (7033, 7068), False, 'from sfepy.discrete.conditions import Conditions, EssentialBC\n'), ((7217, 7243), 'sfepy.discrete.Function', 'Function', (['"""bc_fun"""', 'bc_fun'], {}), "('bc_fun', bc_fun)\n", (7225, 7243), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem, State\n'), ((7255, 7302), 'sfepy.discrete.conditions.EssentialBC', 'EssentialBC', (['"""ebc3"""', 'gamma3_i', "{'p_i.all': fun}"], {}), "('ebc3', gamma3_i, {'p_i.all': fun})\n", (7266, 7302), False, 'from sfepy.discrete.conditions import Conditions, EssentialBC\n'), ((7314, 7368), 'sfepy.discrete.Problem', 'Problem', (['"""problem_i"""'], {'equations': 'eqs', 'active_only': '(False)'}), "('problem_i', equations=eqs, active_only=False)\n", (7321, 7368), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem, State\n'), ((7632, 7664), 'sfepy.base.base.output', 'output', (['"""rank"""', 'rank', '"""of"""', 'size'], {}), "('rank', rank, 'of', size)\n", (7638, 7664), False, 'from sfepy.base.base import output, Struct\n'), ((7677, 7706), 'sfepy.discrete.fem.Mesh.from_file', 'Mesh.from_file', (['mesh_filename'], {}), '(mesh_filename)\n', (7691, 7706), False, 'from sfepy.discrete.fem import Mesh, FEDomain, Field\n'), ((7897, 7943), 'sfepy.base.base.output', 'output', (['"""creating global domain and fields..."""'], {}), "('creating global domain and fields...')\n", (7903, 7943), False, 'from sfepy.base.base import output, Struct\n'), ((7953, 7965), 'time.clock', 'time.clock', ([], {}), '()\n', (7963, 7965), False, 'import time\n'), ((7979, 8003), 'sfepy.discrete.fem.FEDomain', 'FEDomain', (['"""domain"""', 'mesh'], {}), "('domain', mesh)\n", (7987, 8003), False, 'from sfepy.discrete.fem import Mesh, FEDomain, Field\n'), ((8066, 8138), 'sfepy.discrete.fem.Field.from_args', 'Field.from_args', (['"""fu"""', 'nm.float64', 'mesh.dim', 'omega'], {'approx_order': 'order_u'}), "('fu', 
nm.float64, mesh.dim, omega, approx_order=order_u)\n", (8081, 8138), False, 'from sfepy.discrete.fem import Mesh, FEDomain, Field\n'), ((8181, 8246), 'sfepy.discrete.fem.Field.from_args', 'Field.from_args', (['"""fp"""', 'nm.float64', '(1)', 'omega'], {'approx_order': 'order_p'}), "('fp', nm.float64, 1, omega, approx_order=order_p)\n", (8196, 8246), False, 'from sfepy.discrete.fem import Mesh, FEDomain, Field\n'), ((8355, 8387), 'sfepy.base.base.output', 'output', (['"""distributing fields..."""'], {}), "('distributing fields...')\n", (8361, 8387), False, 'from sfepy.base.base import output, Struct\n'), ((8397, 8409), 'time.clock', 'time.clock', ([], {}), '()\n', (8407, 8409), False, 'import time\n'), ((8834, 8869), 'sfepy.base.base.output', 'output', (['"""creating local problem..."""'], {}), "('creating local problem...')\n", (8840, 8869), False, 'from sfepy.base.base import output, Struct\n'), ((8879, 8891), 'time.clock', 'time.clock', ([], {}), '()\n', (8889, 8891), False, 'import time\n'), ((8935, 8967), 'sfepy.discrete.common.region.Region.from_cells', 'Region.from_cells', (['cells', 'domain'], {}), '(cells, domain)\n', (8952, 8967), False, 'from sfepy.discrete.common.region import Region\n'), ((9130, 9146), 'sfepy.discrete.State', 'State', (['variables'], {}), '(variables)\n', (9135, 9146), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem, State\n'), ((9240, 9277), 'sfepy.base.base.output', 'output', (['"""allocating global system..."""'], {}), "('allocating global system...')\n", (9246, 9277), False, 'from sfepy.base.base import output, Struct\n'), ((9287, 9299), 'time.clock', 'time.clock', ([], {}), '()\n', (9297, 9299), False, 'import time\n'), ((9328, 9390), 'sfepy.parallel.parallel.setup_composite_dofs', 'pl.setup_composite_dofs', (['lfds', 'fields', 'variables'], {'verbose': '(True)'}), '(lfds, fields, variables, verbose=True)\n', (9351, 9390), True, 'import sfepy.parallel.parallel as pl\n'), ((9465, 9565), 'sfepy.parallel.parallel.create_petsc_system', 'pl.create_petsc_system', (['pb.mtx_a', 'sizes', 'pdofs', 'drange'], {'is_overlap': '(True)', 'comm': 'comm', 'verbose': '(True)'}), '(pb.mtx_a, sizes, pdofs, drange, is_overlap=True,\n comm=comm, verbose=True)\n', (9487, 9565), True, 'import sfepy.parallel.parallel as pl\n'), ((9704, 9732), 'sfepy.base.base.output', 'output', (['"""creating solver..."""'], {}), "('creating solver...')\n", (9710, 9732), False, 'from sfepy.base.base import output, Struct\n'), ((9742, 9754), 'time.clock', 'time.clock', ([], {}), '()\n', (9752, 9754), False, 'import time\n'), ((9767, 9899), 'sfepy.base.base.Struct', 'Struct', ([], {'method': '"""bcgsl"""', 'precond': '"""jacobi"""', 'sub_precond': '"""none"""', 'i_max': '(10000)', 'eps_a': '(1e-50)', 'eps_r': '(1e-06)', 'eps_d': '(10000.0)', 'verbose': '(True)'}), "(method='bcgsl', precond='jacobi', sub_precond='none', i_max=10000,\n eps_a=1e-50, eps_r=1e-06, eps_d=10000.0, verbose=True)\n", (9773, 9899), False, 'from sfepy.base.base import output, Struct\n'), ((9952, 10011), 'sfepy.solvers.ls.PETScKrylovSolver', 'PETScKrylovSolver', (['conf'], {'comm': 'comm', 'mtx': 'pmtx', 'status': 'status'}), '(conf, comm=comm, mtx=pmtx, status=status)\n', (9969, 10011), False, 'from sfepy.solvers.ls import PETScKrylovSolver\n'), ((10229, 10302), 'sfepy.parallel.evaluate.PETScParallelEvaluator', 'PETScParallelEvaluator', (['pb', 'pdofs', 'drange', '(True)', 'psol', 'comm'], {'verbose': '(True)'}), '(pb, pdofs, drange, True, psol, comm, 
verbose=True)\n', (10251, 10302), False, 'from sfepy.parallel.evaluate import PETScParallelEvaluator\n'), ((10367, 10453), 'sfepy.base.base.Struct', 'Struct', ([], {'method': '"""newtonls"""', 'i_max': '(5)', 'eps_a': '(0)', 'eps_r': '(1e-05)', 'eps_s': '(0.0)', 'verbose': '(True)'}), "(method='newtonls', i_max=5, eps_a=0, eps_r=1e-05, eps_s=0.0, verbose\n =True)\n", (10373, 10453), False, 'from sfepy.base.base import output, Struct\n'), ((10494, 10652), 'sfepy.solvers.nls.PETScNonlinearSolver', 'PETScNonlinearSolver', (['conf'], {'pmtx': 'pmtx', 'prhs': 'prhs', 'comm': 'comm', 'fun': 'ev.eval_residual', 'fun_grad': 'ev.eval_tangent_matrix', 'lin_solver': 'ls', 'status': 'nls_status'}), '(conf, pmtx=pmtx, prhs=prhs, comm=comm, fun=ev.\n eval_residual, fun_grad=ev.eval_tangent_matrix, lin_solver=ls, status=\n nls_status)\n', (10514, 10652), False, 'from sfepy.solvers.nls import PETScNonlinearSolver\n'), ((10786, 10806), 'sfepy.base.base.output', 'output', (['"""solving..."""'], {}), "('solving...')\n", (10792, 10806), False, 'from sfepy.base.base import output, Struct\n'), ((10816, 10828), 'time.clock', 'time.clock', ([], {}), '()\n', (10826, 10828), False, 'import time\n'), ((11076, 11104), 'sfepy.base.base.output', 'output', (['"""saving solution..."""'], {}), "('saving solution...')\n", (11082, 11104), False, 'from sfepy.base.base import output, Struct\n'), ((11114, 11126), 'time.clock', 'time.clock', ([], {}), '()\n', (11124, 11126), False, 'import time\n'), ((11208, 11267), 'os.path.join', 'os.path.join', (['options.output_dir', "('sol_%02d.h5' % comm.rank)"], {}), "(options.output_dir, 'sol_%02d.h5' % comm.rank)\n", (11220, 11267), False, 'import os\n'), ((11345, 11375), 'sfepy.parallel.parallel.create_gather_to_zero', 'pl.create_gather_to_zero', (['psol'], {}), '(psol)\n', (11369, 11375), True, 'import sfepy.parallel.parallel as pl\n'), ((16199, 16258), 'os.path.join', 'os.path.join', (['output_dir', "('output_log_%02d.txt' % comm.rank)"], {}), "(output_dir, 'output_log_%02d.txt' % comm.rank)\n", (16211, 16258), False, 'import os\n'), ((16382, 16452), 'sfepy.base.base.output.set_output', 'output.set_output', ([], {'filename': 'filename', 'combined': '(options.silent == False)'}), '(filename=filename, combined=options.silent == False)\n', (16399, 16452), False, 'from sfepy.base.base import output, Struct\n'), ((16458, 16494), 'sfepy.base.base.output', 'output', (['"""petsc options:"""', 'petsc_opts'], {}), "('petsc options:', petsc_opts)\n", (16464, 16494), False, 'from sfepy.base.base import output, Struct\n'), ((16516, 16559), 'os.path.join', 'os.path.join', (['options.output_dir', '"""para.h5"""'], {}), "(options.output_dir, 'para.h5')\n", (16528, 16559), False, 'import os\n'), ((17631, 17672), 'sfepy.base.base.output', 'output', (['"""field u order:"""', 'options.order_u'], {}), "('field u order:', options.order_u)\n", (17637, 17672), False, 'from sfepy.base.base import output, Struct\n'), ((17677, 17718), 'sfepy.base.base.output', 'output', (['"""field p order:"""', 'options.order_p'], {}), "('field p order:', options.order_p)\n", (17683, 17718), False, 'from sfepy.base.base import output, Struct\n'), ((7747, 7815), 'sfepy.parallel.parallel.partition_mesh', 'pl.partition_mesh', (['mesh', 'size'], {'use_metis': 'options.metis', 'verbose': '(True)'}), '(mesh, size, use_metis=options.metis, verbose=True)\n', (7764, 7815), True, 'import sfepy.parallel.parallel as pl\n'), ((11487, 11560), 'sfepy.discrete.FieldVariable', 'FieldVariable', (['"""u"""', '"""parameter"""', 'field1'], 
{'primary_var_name': '"""(set-to-None)"""'}), "('u', 'parameter', field1, primary_var_name='(set-to-None)')\n", (11500, 11560), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem, State\n'), ((11655, 11728), 'sfepy.discrete.FieldVariable', 'FieldVariable', (['"""p"""', '"""parameter"""', 'field2'], {'primary_var_name': '"""(set-to-None)"""'}), "('p', 'parameter', field2, primary_var_name='(set-to-None)')\n", (11668, 11728), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem, State\n'), ((16290, 16311), 'sfepy.base.ioutils.ensure_path', 'ensure_path', (['filename'], {}), '(filename)\n', (16301, 16311), False, 'from sfepy.base.ioutils import ensure_path, remove_files_patterns, save_options\n'), ((17344, 17371), 'sfepy.base.base.output', 'output', (['"""dimensions:"""', 'dims'], {}), "('dimensions:', dims)\n", (17350, 17371), False, 'from sfepy.base.base import output, Struct\n'), ((17380, 17408), 'sfepy.base.base.output', 'output', (['"""shape: """', 'shape'], {}), "('shape: ', shape)\n", (17386, 17408), False, 'from sfepy.base.base import output, Struct\n'), ((17417, 17446), 'sfepy.base.base.output', 'output', (['"""centre: """', 'centre'], {}), "('centre: ', centre)\n", (17423, 17446), False, 'from sfepy.base.base import output, Struct\n'), ((17463, 17530), 'sfepy.mesh.mesh_generators.gen_block_mesh', 'gen_block_mesh', (['dims', 'shape', 'centre'], {'name': '"""block-fem"""', 'verbose': '(True)'}), "(dims, shape, centre, name='block-fem', verbose=True)\n", (17477, 17530), False, 'from sfepy.mesh.mesh_generators import gen_block_mesh\n'), ((6053, 6090), 'numpy.array', 'nm.array', (['[[0.132], [0.132], [0.092]]'], {}), '([[0.132], [0.132], [0.092]])\n', (6061, 6090), True, 'import numpy as nm\n'), ((6124, 6188), 'numpy.array', 'nm.array', (['[[0.132], [0.132], [0.132], [0.092], [0.092], [0.092]]'], {}), '([[0.132], [0.132], [0.132], [0.092], [0.092], [0.092]])\n', (6132, 6188), True, 'import numpy as nm\n'), ((6248, 6291), 'sfepy.mechanics.matcoefs.stiffness_from_lame', 'stiffness_from_lame', (['mesh.dim'], {'lam': '(10)', 'mu': '(5)'}), '(mesh.dim, lam=10, mu=5)\n', (6267, 6291), False, 'from sfepy.mechanics.matcoefs import stiffness_from_lame\n'), ((7127, 7186), 'numpy.sin', 'nm.sin', (['(4 * nm.pi * (coors[:, 0] - min_x) / (max_x - min_x))'], {}), '(4 * nm.pi * (coors[:, 0] - min_x) / (max_x - min_x))\n', (7133, 7186), True, 'import numpy as nm\n'), ((7393, 7423), 'sfepy.discrete.conditions.Conditions', 'Conditions', (['[ebc1, ebc2, ebc3]'], {}), '([ebc1, ebc2, ebc3])\n', (7403, 7423), False, 'from sfepy.discrete.conditions import Conditions, EssentialBC\n'), ((8331, 8343), 'time.clock', 'time.clock', ([], {}), '()\n', (8341, 8343), False, 'import time\n'), ((8810, 8822), 'time.clock', 'time.clock', ([], {}), '()\n', (8820, 8822), False, 'import time\n'), ((9215, 9227), 'time.clock', 'time.clock', ([], {}), '()\n', (9225, 9227), False, 'import time\n'), ((9680, 9692), 'time.clock', 'time.clock', ([], {}), '()\n', (9690, 9692), False, 'import time\n'), ((10762, 10774), 'time.clock', 'time.clock', ([], {}), '()\n', (10772, 10774), False, 'import time\n'), ((11052, 11064), 'time.clock', 'time.clock', ([], {}), '()\n', (11062, 11064), False, 'import time\n'), ((12016, 12058), 'os.path.join', 'os.path.join', (['options.output_dir', '"""sol.h5"""'], {}), "(options.output_dir, 'sol.h5')\n", (12028, 12058), False, 'import os\n'), ((12447, 12491), 'os.path.join', 'os.path.join', 
(['options.output_dir', '"""sol_u.h5"""'], {}), "(options.output_dir, 'sol_u.h5')\n", (12459, 12491), False, 'import os\n'), ((12875, 12919), 'os.path.join', 'os.path.join', (['options.output_dir', '"""sol_p.h5"""'], {}), "(options.output_dir, 'sol_p.h5')\n", (12887, 12919), False, 'import os\n'), ((13008, 13020), 'time.clock', 'time.clock', ([], {}), '()\n', (13018, 13020), False, 'import time\n'), ((16994, 17033), 'os.path.join', 'os.path.join', (['output_dir', '"""options.txt"""'], {}), "(output_dir, 'options.txt')\n", (17006, 17033), False, 'import os\n'), ((12179, 12245), 'sfepy.base.base.Struct', 'Struct', ([], {'kind': '"""adaptive"""', 'min_level': '(0)', 'max_level': 'order_u', 'eps': '(0.001)'}), "(kind='adaptive', min_level=0, max_level=order_u, eps=0.001)\n", (12185, 12245), False, 'from sfepy.base.base import output, Struct\n'), ((12607, 12673), 'sfepy.base.base.Struct', 'Struct', ([], {'kind': '"""adaptive"""', 'min_level': '(0)', 'max_level': 'order_p', 'eps': '(0.001)'}), "(kind='adaptive', min_level=0, max_level=order_p, eps=0.001)\n", (12613, 12673), False, 'from sfepy.base.base import output, Struct\n')]
|
"""
Reference-physical domain mappings.
"""
import numpy as nm
from sfepy.base.base import Struct
class PhysicalQPs(Struct):
"""
Physical quadrature points in a region.
"""
def __init__(self, igs, n_total=0, is_uniform=True):
Struct.__init__(self, igs=igs, n_total=n_total, indx={}, rindx={},
n_per_group={}, shape={}, values={},
is_uniform=is_uniform)
for ig in self.igs:
self.indx[ig] = slice(None)
self.rindx[ig] = slice(None)
self.n_per_group[ig] = 0
self.shape[ig] = (0, 0, 0)
self.values[ig] = nm.empty(self.shape[ig], dtype=nm.float64)
def get_merged_values(self):
qps = nm.concatenate([self.values[ig] for ig in self.igs], axis=0)
return qps
def get_shape(self, rshape, ig=None):
"""
Get shape from raveled shape.
"""
if ig is None:
if self.is_uniform:
n_qp = self.shape[self.igs[0]][1]
else:
msg = 'ig argument must be given for non-uniform QPs!'
raise ValueError(msg)
else:
n_qp = self.shape[ig][1]
        if (rshape[0] % n_qp) != 0:
            raise ValueError('incompatible shapes! (n_qp: %d, %s)'
                             % (n_qp, rshape))
        shape = (rshape[0] // n_qp, n_qp) + rshape[1:]
return shape
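    # Illustrative arithmetic (values assumed, not taken from this module):
    # with a uniform n_qp = 4, a raveled array of shape (32, 3) maps to
    # get_shape((32, 3)) == (8, 4, 3), i.e. 8 elements times 4 QPs each.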
class Mapping(Struct):
"""
Base class for mappings.
"""
@staticmethod
def from_args(region, kind='v', ig=None):
"""
Create mapping from reference to physical entities in a given
region, given the integration kind ('v' or 's').
This mapping can be used to compute the physical quadrature
points.
Parameters
----------
region : Region instance
The region defining the entities.
kind : 'v' or 's'
The kind of the entities: 'v' - cells, 's' - facets.
ig : int, optional
The group index.
Returns
-------
mapping : VolumeMapping or SurfaceMapping instance
The requested mapping.
"""
from sfepy.discrete.fem.domain import FEDomain
from sfepy.discrete.iga.domain import IGDomain
if isinstance(region.domain, FEDomain):
import sfepy.discrete.fem.mappings as mm
coors = region.domain.get_mesh_coors()
if kind == 's':
coors = coors[region.vertices]
gel = region.domain.groups[ig].gel
conn = region.domain.groups[ig].conn
if kind == 'v':
cells = region.get_cells(ig)
mapping = mm.VolumeMapping(coors, conn[cells], gel=gel)
elif kind == 's':
from sfepy.discrete.fem.fe_surface import FESurface
aux = FESurface('aux', region, gel.get_surface_entities(),
                                conn, ig)
mapping = mm.SurfaceMapping(coors, aux.leconn,
gel=gel.surface_facet)
elif isinstance(region.domain, IGDomain):
import sfepy.discrete.iga.mappings as mm
mapping = mm.IGMapping(region.domain, region.cells)
else:
raise ValueError('unknown domain class! (%s)' % type(region.domain))
return mapping
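# A minimal usage sketch for Mapping.from_args(); `region` and `qp_coors` are
# assumed to come from an existing problem setup (they are not defined here):
#
#     mapping = Mapping.from_args(region, kind='v', ig=0)
#     gel = mapping.get_geometry()
#     qps = mapping.get_physical_qps(qp_coors)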
def get_physical_qps(region, integral, map_kind=None):
"""
Get physical quadrature points corresponding to the given region
and integral.
"""
phys_qps = PhysicalQPs(region.igs)
if map_kind is None:
map_kind = 'v' if region.can_cells else 's'
ii = 0
for ig in region.igs:
gmap = Mapping.from_args(region, map_kind, ig)
gel = gmap.get_geometry()
qp_coors, _ = integral.get_qp(gel.name)
qps = gmap.get_physical_qps(qp_coors)
n_el, n_qp = qps.shape[0], qps.shape[1]
phys_qps.n_per_group[ig] = n_per_group = n_el * n_qp
phys_qps.shape[ig] = qps.shape
phys_qps.indx[ig] = slice(ii, ii + n_el)
phys_qps.rindx[ig] = slice(ii * n_qp, (ii + n_el) * n_qp)
ii += qps.shape[0]
qps.shape = (n_per_group, qps.shape[2])
phys_qps.values[ig] = qps
phys_qps.n_total += n_el * n_qp
return phys_qps
def get_mapping_data(name, field, integral, region=None, integration='volume'):
"""
General helper function for accessing reference mapping data.
Get data attribute `name` from reference mapping corresponding to
`field` in `region` in quadrature points of the given `integral` and
`integration` type.
Parameters
----------
name : str
The reference mapping attribute name.
field : Field instance
The field defining the reference mapping.
integral : Integral instance
The integral defining quadrature points.
region : Region instance, optional
If given, use the given region instead of `field` region.
integration : one of ('volume', 'surface', 'surface_extra')
The integration type.
Returns
-------
data : array
The required data merged for all element groups.
Notes
-----
Assumes the same element geometry in all element groups of the field!
"""
data = None
if region is None:
region = field.region
for ig in region.igs:
geo, _ = field.get_mapping(ig, region, integral, integration)
_data = getattr(geo, name)
if data is None:
data = _data
else:
data = nm.concatenate((data, _data), axis=0)
return data
def get_jacobian(field, integral, region=None, integration='volume'):
"""
Get the jacobian of reference mapping corresponding to `field`.
Parameters
----------
field : Field instance
The field defining the reference mapping.
integral : Integral instance
The integral defining quadrature points.
region : Region instance, optional
If given, use the given region instead of `field` region.
integration : one of ('volume', 'surface', 'surface_extra')
The integration type.
Returns
-------
jac : array
The jacobian merged for all element groups.
See Also
--------
get_mapping_data()
Notes
-----
Assumes the same element geometry in all element groups of the field!
"""
jac = get_mapping_data('det', field, integral, region=region,
integration=integration)
return jac
def get_normals(field, integral, region):
"""
Get the normals of element faces in `region`.
Parameters
----------
field : Field instance
The field defining the reference mapping.
integral : Integral instance
The integral defining quadrature points.
region : Region instance
        The region of the element faces.
Returns
-------
normals : array
The normals merged for all element groups.
See Also
--------
get_mapping_data()
Notes
-----
Assumes the same element geometry in all element groups of the field!
"""
normals = get_mapping_data('normal', field, integral, region=region,
integration='surface')
return normals
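# A hedged end-to-end sketch combining the helpers above; `field`, `integral`
# and `region` stand for objects built elsewhere in a problem setup:
#
#     from sfepy.discrete import Integral
#
#     integral = Integral('i', order=2)
#     phys_qps = get_physical_qps(region, integral)  # PhysicalQPs instance
#     coors = phys_qps.get_merged_values()           # (n_total, dim) array
#     jac = get_jacobian(field, integral)            # mapping determinants
#     # phys_qps.get_shape(jac.shape) then recovers the per-element layout
#     # from the raveled array.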
|
[
"sfepy.discrete.iga.mappings.SurfaceMapping",
"sfepy.discrete.iga.mappings.VolumeMapping",
"sfepy.discrete.iga.mappings.IGMapping",
"sfepy.base.base.Struct.__init__"
] |
[((253, 383), 'sfepy.base.base.Struct.__init__', 'Struct.__init__', (['self'], {'igs': 'igs', 'n_total': 'n_total', 'indx': '{}', 'rindx': '{}', 'n_per_group': '{}', 'shape': '{}', 'values': '{}', 'is_uniform': 'is_uniform'}), '(self, igs=igs, n_total=n_total, indx={}, rindx={},\n n_per_group={}, shape={}, values={}, is_uniform=is_uniform)\n', (268, 383), False, 'from sfepy.base.base import Struct\n'), ((734, 794), 'numpy.concatenate', 'nm.concatenate', (['[self.values[ig] for ig in self.igs]'], {'axis': '(0)'}), '([self.values[ig] for ig in self.igs], axis=0)\n', (748, 794), True, 'import numpy as nm\n'), ((643, 685), 'numpy.empty', 'nm.empty', (['self.shape[ig]'], {'dtype': 'nm.float64'}), '(self.shape[ig], dtype=nm.float64)\n', (651, 685), True, 'import numpy as nm\n'), ((5623, 5660), 'numpy.concatenate', 'nm.concatenate', (['(data, _data)'], {'axis': '(0)'}), '((data, _data), axis=0)\n', (5637, 5660), True, 'import numpy as nm\n'), ((2746, 2791), 'sfepy.discrete.iga.mappings.VolumeMapping', 'mm.VolumeMapping', (['coors', 'conn[cells]'], {'gel': 'gel'}), '(coors, conn[cells], gel=gel)\n', (2762, 2791), True, 'import sfepy.discrete.iga.mappings as mm\n'), ((3266, 3307), 'sfepy.discrete.iga.mappings.IGMapping', 'mm.IGMapping', (['region.domain', 'region.cells'], {}), '(region.domain, region.cells)\n', (3278, 3307), True, 'import sfepy.discrete.iga.mappings as mm\n'), ((3036, 3095), 'sfepy.discrete.iga.mappings.SurfaceMapping', 'mm.SurfaceMapping', (['coors', 'aux.leconn'], {'gel': 'gel.surface_facet'}), '(coors, aux.leconn, gel=gel.surface_facet)\n', (3053, 3095), True, 'import sfepy.discrete.iga.mappings as mm\n')]
|
from decouple import config
from sqlmodel import Session, SQLModel, create_engine
DATABASE_URL = config("DATABASE_URL")
DEBUG = config("DEBUG", default=False, cast=bool)
engine = create_engine(DATABASE_URL, echo=DEBUG)
def get_session():
with Session(engine) as session:
yield session
def create_db_and_tables():
SQLModel.metadata.create_all(engine)
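# Hedged usage sketch: `get_session` is written as a generator so it can serve
# as a FastAPI dependency (an assumption -- no web framework appears in this
# module):
#
#     from fastapi import Depends, FastAPI
#
#     app = FastAPI()
#
#     @app.on_event("startup")
#     def on_startup():
#         create_db_and_tables()
#
#     @app.get("/ping")
#     def ping(session: Session = Depends(get_session)):
#         return {"ok": session.is_active}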
|
[
"sqlmodel.SQLModel.metadata.create_all",
"sqlmodel.Session",
"sqlmodel.create_engine"
] |
[((98, 120), 'decouple.config', 'config', (['"""DATABASE_URL"""'], {}), "('DATABASE_URL')\n", (104, 120), False, 'from decouple import config\n'), ((129, 170), 'decouple.config', 'config', (['"""DEBUG"""'], {'default': '(False)', 'cast': 'bool'}), "('DEBUG', default=False, cast=bool)\n", (135, 170), False, 'from decouple import config\n'), ((182, 221), 'sqlmodel.create_engine', 'create_engine', (['DATABASE_URL'], {'echo': 'DEBUG'}), '(DATABASE_URL, echo=DEBUG)\n', (195, 221), False, 'from sqlmodel import Session, SQLModel, create_engine\n'), ((336, 372), 'sqlmodel.SQLModel.metadata.create_all', 'SQLModel.metadata.create_all', (['engine'], {}), '(engine)\n', (364, 372), False, 'from sqlmodel import Session, SQLModel, create_engine\n'), ((252, 267), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (259, 267), False, 'from sqlmodel import Session, SQLModel, create_engine\n')]
|
"""init database
Revision ID: 60e58d3a26fa
Revises:
Create Date: 2021-11-24 18:06:53.935899
"""
from alembic import op
import sqlalchemy as sa
import sqlmodel
# revision identifiers, used by Alembic.
revision = '60e58d3a26fa'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('address',
sa.Column('street_name', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('house_number', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('city', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('zip_code', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('id', sa.Integer(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_address_city'), 'address', ['city'], unique=False)
op.create_index(op.f('ix_address_house_number'), 'address', ['house_number'], unique=False)
op.create_index(op.f('ix_address_id'), 'address', ['id'], unique=False)
op.create_index(op.f('ix_address_street_name'), 'address', ['street_name'], unique=False)
op.create_index(op.f('ix_address_zip_code'), 'address', ['zip_code'], unique=False)
op.create_table('product',
sa.Column('name', sa.String(), nullable=True),
sa.Column('id', sa.Integer(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name')
)
op.create_index(op.f('ix_product_id'), 'product', ['id'], unique=False)
op.create_table('customer',
sa.Column('mobile_number', sa.String(), nullable=True),
sa.Column('email', sa.String(), nullable=True),
sa.Column('first_name', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('last_name', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('birth_date', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('gender', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('id', sa.Integer(), nullable=True),
sa.Column('address_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['address_id'], ['address.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('email'),
sa.UniqueConstraint('mobile_number')
)
op.create_index(op.f('ix_customer_address_id'), 'customer', ['address_id'], unique=False)
op.create_index(op.f('ix_customer_birth_date'), 'customer', ['birth_date'], unique=False)
op.create_index(op.f('ix_customer_first_name'), 'customer', ['first_name'], unique=False)
op.create_index(op.f('ix_customer_gender'), 'customer', ['gender'], unique=False)
op.create_index(op.f('ix_customer_id'), 'customer', ['id'], unique=False)
op.create_index(op.f('ix_customer_last_name'), 'customer', ['last_name'], unique=False)
op.create_table('customerproductlink',
sa.Column('customer_id', sa.Integer(), nullable=True),
sa.Column('product_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['customer_id'], ['customer.id'], ),
sa.ForeignKeyConstraint(['product_id'], ['product.id'], ),
sa.PrimaryKeyConstraint('customer_id', 'product_id')
)
op.create_index(op.f('ix_customerproductlink_customer_id'), 'customerproductlink', ['customer_id'], unique=False)
op.create_index(op.f('ix_customerproductlink_product_id'), 'customerproductlink', ['product_id'], unique=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_customerproductlink_product_id'), table_name='customerproductlink')
op.drop_index(op.f('ix_customerproductlink_customer_id'), table_name='customerproductlink')
op.drop_table('customerproductlink')
op.drop_index(op.f('ix_customer_last_name'), table_name='customer')
op.drop_index(op.f('ix_customer_id'), table_name='customer')
op.drop_index(op.f('ix_customer_gender'), table_name='customer')
op.drop_index(op.f('ix_customer_first_name'), table_name='customer')
op.drop_index(op.f('ix_customer_birth_date'), table_name='customer')
op.drop_index(op.f('ix_customer_address_id'), table_name='customer')
op.drop_table('customer')
op.drop_index(op.f('ix_product_id'), table_name='product')
op.drop_table('product')
op.drop_index(op.f('ix_address_zip_code'), table_name='address')
op.drop_index(op.f('ix_address_street_name'), table_name='address')
op.drop_index(op.f('ix_address_id'), table_name='address')
op.drop_index(op.f('ix_address_house_number'), table_name='address')
op.drop_index(op.f('ix_address_city'), table_name='address')
op.drop_table('address')
# ### end Alembic commands ###
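# To apply or revert this revision with the standard Alembic CLI (assuming a
# configured alembic.ini pointing at the target database):
#
#     alembic upgrade 60e58d3a26fa   # or: alembic upgrade head
#     alembic downgrade base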
|
[
"sqlmodel.sql.sqltypes.AutoString"
] |
[((3717, 3753), 'alembic.op.drop_table', 'op.drop_table', (['"""customerproductlink"""'], {}), "('customerproductlink')\n", (3730, 3753), False, 'from alembic import op\n'), ((4183, 4208), 'alembic.op.drop_table', 'op.drop_table', (['"""customer"""'], {}), "('customer')\n", (4196, 4208), False, 'from alembic import op\n'), ((4276, 4300), 'alembic.op.drop_table', 'op.drop_table', (['"""product"""'], {}), "('product')\n", (4289, 4300), False, 'from alembic import op\n'), ((4647, 4671), 'alembic.op.drop_table', 'op.drop_table', (['"""address"""'], {}), "('address')\n", (4660, 4671), False, 'from alembic import op\n'), ((778, 807), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""'], {}), "('id')\n", (801, 807), True, 'import sqlalchemy as sa\n'), ((834, 857), 'alembic.op.f', 'op.f', (['"""ix_address_city"""'], {}), "('ix_address_city')\n", (838, 857), False, 'from alembic import op\n'), ((914, 945), 'alembic.op.f', 'op.f', (['"""ix_address_house_number"""'], {}), "('ix_address_house_number')\n", (918, 945), False, 'from alembic import op\n'), ((1010, 1031), 'alembic.op.f', 'op.f', (['"""ix_address_id"""'], {}), "('ix_address_id')\n", (1014, 1031), False, 'from alembic import op\n'), ((1086, 1116), 'alembic.op.f', 'op.f', (['"""ix_address_street_name"""'], {}), "('ix_address_street_name')\n", (1090, 1116), False, 'from alembic import op\n'), ((1180, 1207), 'alembic.op.f', 'op.f', (['"""ix_address_zip_code"""'], {}), "('ix_address_zip_code')\n", (1184, 1207), False, 'from alembic import op\n'), ((1384, 1413), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""'], {}), "('id')\n", (1407, 1413), True, 'import sqlalchemy as sa\n'), ((1419, 1446), 'sqlalchemy.UniqueConstraint', 'sa.UniqueConstraint', (['"""name"""'], {}), "('name')\n", (1438, 1446), True, 'import sqlalchemy as sa\n'), ((1473, 1494), 'alembic.op.f', 'op.f', (['"""ix_product_id"""'], {}), "('ix_product_id')\n", (1477, 1494), False, 'from alembic import op\n'), ((2104, 2159), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['address_id']", "['address.id']"], {}), "(['address_id'], ['address.id'])\n", (2127, 2159), True, 'import sqlalchemy as sa\n'), ((2167, 2196), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""'], {}), "('id')\n", (2190, 2196), True, 'import sqlalchemy as sa\n'), ((2202, 2230), 'sqlalchemy.UniqueConstraint', 'sa.UniqueConstraint', (['"""email"""'], {}), "('email')\n", (2221, 2230), True, 'import sqlalchemy as sa\n'), ((2236, 2272), 'sqlalchemy.UniqueConstraint', 'sa.UniqueConstraint', (['"""mobile_number"""'], {}), "('mobile_number')\n", (2255, 2272), True, 'import sqlalchemy as sa\n'), ((2299, 2329), 'alembic.op.f', 'op.f', (['"""ix_customer_address_id"""'], {}), "('ix_customer_address_id')\n", (2303, 2329), False, 'from alembic import op\n'), ((2393, 2423), 'alembic.op.f', 'op.f', (['"""ix_customer_birth_date"""'], {}), "('ix_customer_birth_date')\n", (2397, 2423), False, 'from alembic import op\n'), ((2487, 2517), 'alembic.op.f', 'op.f', (['"""ix_customer_first_name"""'], {}), "('ix_customer_first_name')\n", (2491, 2517), False, 'from alembic import op\n'), ((2581, 2607), 'alembic.op.f', 'op.f', (['"""ix_customer_gender"""'], {}), "('ix_customer_gender')\n", (2585, 2607), False, 'from alembic import op\n'), ((2667, 2689), 'alembic.op.f', 'op.f', (['"""ix_customer_id"""'], {}), "('ix_customer_id')\n", (2671, 2689), False, 'from alembic import op\n'), ((2745, 2774), 'alembic.op.f', 'op.f', (['"""ix_customer_last_name"""'], {}), 
"('ix_customer_last_name')\n", (2749, 2774), False, 'from alembic import op\n'), ((2981, 3038), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['customer_id']", "['customer.id']"], {}), "(['customer_id'], ['customer.id'])\n", (3004, 3038), True, 'import sqlalchemy as sa\n'), ((3046, 3101), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['product_id']", "['product.id']"], {}), "(['product_id'], ['product.id'])\n", (3069, 3101), True, 'import sqlalchemy as sa\n'), ((3109, 3161), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""customer_id"""', '"""product_id"""'], {}), "('customer_id', 'product_id')\n", (3132, 3161), True, 'import sqlalchemy as sa\n'), ((3188, 3230), 'alembic.op.f', 'op.f', (['"""ix_customerproductlink_customer_id"""'], {}), "('ix_customerproductlink_customer_id')\n", (3192, 3230), False, 'from alembic import op\n'), ((3306, 3347), 'alembic.op.f', 'op.f', (['"""ix_customerproductlink_product_id"""'], {}), "('ix_customerproductlink_product_id')\n", (3310, 3347), False, 'from alembic import op\n'), ((3540, 3581), 'alembic.op.f', 'op.f', (['"""ix_customerproductlink_product_id"""'], {}), "('ix_customerproductlink_product_id')\n", (3544, 3581), False, 'from alembic import op\n'), ((3635, 3677), 'alembic.op.f', 'op.f', (['"""ix_customerproductlink_customer_id"""'], {}), "('ix_customerproductlink_customer_id')\n", (3639, 3677), False, 'from alembic import op\n'), ((3772, 3801), 'alembic.op.f', 'op.f', (['"""ix_customer_last_name"""'], {}), "('ix_customer_last_name')\n", (3776, 3801), False, 'from alembic import op\n'), ((3844, 3866), 'alembic.op.f', 'op.f', (['"""ix_customer_id"""'], {}), "('ix_customer_id')\n", (3848, 3866), False, 'from alembic import op\n'), ((3909, 3935), 'alembic.op.f', 'op.f', (['"""ix_customer_gender"""'], {}), "('ix_customer_gender')\n", (3913, 3935), False, 'from alembic import op\n'), ((3978, 4008), 'alembic.op.f', 'op.f', (['"""ix_customer_first_name"""'], {}), "('ix_customer_first_name')\n", (3982, 4008), False, 'from alembic import op\n'), ((4051, 4081), 'alembic.op.f', 'op.f', (['"""ix_customer_birth_date"""'], {}), "('ix_customer_birth_date')\n", (4055, 4081), False, 'from alembic import op\n'), ((4124, 4154), 'alembic.op.f', 'op.f', (['"""ix_customer_address_id"""'], {}), "('ix_customer_address_id')\n", (4128, 4154), False, 'from alembic import op\n'), ((4227, 4248), 'alembic.op.f', 'op.f', (['"""ix_product_id"""'], {}), "('ix_product_id')\n", (4231, 4248), False, 'from alembic import op\n'), ((4319, 4346), 'alembic.op.f', 'op.f', (['"""ix_address_zip_code"""'], {}), "('ix_address_zip_code')\n", (4323, 4346), False, 'from alembic import op\n'), ((4388, 4418), 'alembic.op.f', 'op.f', (['"""ix_address_street_name"""'], {}), "('ix_address_street_name')\n", (4392, 4418), False, 'from alembic import op\n'), ((4460, 4481), 'alembic.op.f', 'op.f', (['"""ix_address_id"""'], {}), "('ix_address_id')\n", (4464, 4481), False, 'from alembic import op\n'), ((4523, 4554), 'alembic.op.f', 'op.f', (['"""ix_address_house_number"""'], {}), "('ix_address_house_number')\n", (4527, 4554), False, 'from alembic import op\n'), ((4596, 4619), 'alembic.op.f', 'op.f', (['"""ix_address_city"""'], {}), "('ix_address_city')\n", (4600, 4619), False, 'from alembic import op\n'), ((434, 468), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (466, 468), False, 'import sqlmodel\n'), ((517, 551), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (549, 
551), False, 'import sqlmodel\n'), ((592, 626), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (624, 626), False, 'import sqlmodel\n'), ((671, 705), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (703, 705), False, 'import sqlmodel\n'), ((744, 756), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (754, 756), True, 'import sqlalchemy as sa\n'), ((1301, 1312), 'sqlalchemy.String', 'sa.String', ([], {}), '()\n', (1310, 1312), True, 'import sqlalchemy as sa\n'), ((1350, 1362), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (1360, 1362), True, 'import sqlalchemy as sa\n'), ((1592, 1603), 'sqlalchemy.String', 'sa.String', ([], {}), '()\n', (1601, 1603), True, 'import sqlalchemy as sa\n'), ((1644, 1655), 'sqlalchemy.String', 'sa.String', ([], {}), '()\n', (1653, 1655), True, 'import sqlalchemy as sa\n'), ((1701, 1735), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (1733, 1735), False, 'import sqlmodel\n'), ((1781, 1815), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (1813, 1815), False, 'import sqlmodel\n'), ((1862, 1896), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (1894, 1896), False, 'import sqlmodel\n'), ((1939, 1973), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (1971, 1973), False, 'import sqlmodel\n'), ((2012, 2024), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (2022, 2024), True, 'import sqlalchemy as sa\n'), ((2070, 2082), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (2080, 2082), True, 'import sqlalchemy as sa\n'), ((2889, 2901), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (2899, 2901), True, 'import sqlalchemy as sa\n'), ((2947, 2959), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (2957, 2959), True, 'import sqlalchemy as sa\n')]
|
import numpy as np
import megengine.functional as F
import megengine.module as M
from config import config
from .anchors_generator import AnchorGenerator
from .find_top_rpn_proposals import find_top_rpn_proposals
from .fpn_anchor_target import fpn_anchor_target, fpn_rpn_reshape
from det_opr.loss_opr import softmax_loss, smooth_l1_loss_rpn
class RPN(M.Module):
def __init__(self, rpn_channel=256):
super().__init__()
self.anchors_generator = AnchorGenerator(
config.anchor_base_size,
config.anchor_aspect_ratios,
config.anchor_base_scale)
self.rpn_conv = M.Conv2d(256, rpn_channel, kernel_size=3, stride=1, padding=1)
self.rpn_cls_score = M.Conv2d(rpn_channel, config.num_cell_anchors * 2, kernel_size=1, stride=1)
self.rpn_bbox_offsets = M.Conv2d(rpn_channel, config.num_cell_anchors * 4, kernel_size=1, stride=1)
for l in [self.rpn_conv, self.rpn_cls_score, self.rpn_bbox_offsets]:
M.init.normal_(l.weight, std=0.01)
M.init.fill_(l.bias, 0)
def forward(self, features, im_info, boxes=None):
# prediction
pred_cls_score_list = []
pred_bbox_offsets_list = []
for x in features:
t = F.relu(self.rpn_conv(x))
pred_cls_score_list.append(self.rpn_cls_score(t))
pred_bbox_offsets_list.append(self.rpn_bbox_offsets(t))
# get anchors
all_anchors_list = []
fm_stride = 2 ** (len(features) + 1)
for fm in features:
layer_anchors = self.anchors_generator(fm, fm_stride)
fm_stride = fm_stride // 2
all_anchors_list.append(layer_anchors)
# sample from the predictions
rpn_rois, rpn_probs = find_top_rpn_proposals(
self.training, pred_bbox_offsets_list, pred_cls_score_list,
all_anchors_list, im_info)
if self.training:
rpn_labels, rpn_bbox_targets = fpn_anchor_target(
boxes, im_info, all_anchors_list)
#rpn_labels = rpn_labels.astype(np.int32)
pred_cls_score, pred_bbox_offsets = fpn_rpn_reshape(
pred_cls_score_list, pred_bbox_offsets_list)
# rpn loss
rpn_cls_loss = softmax_loss(pred_cls_score, rpn_labels)
rpn_bbox_loss = smooth_l1_loss_rpn(pred_bbox_offsets, rpn_bbox_targets, \
rpn_labels, config.rpn_smooth_l1_beta)
loss_dict = {}
loss_dict['loss_rpn_cls'] = rpn_cls_loss
loss_dict['loss_rpn_loc'] = rpn_bbox_loss
return rpn_rois, loss_dict
else:
return rpn_rois
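# A hedged usage sketch; feature-map and ground-truth shapes are assumptions
# based on the (unshown) config module, not facts from this file:
#
#     rpn = RPN(rpn_channel=256)
#     # features: list of FPN levels, e.g. tensors of shape (N, 256, H_i, W_i)
#     # im_info:  per-image (height, width, ...) metadata tensor
#     rois = rpn(features, im_info)                    # inference path
#     rois, loss_dict = rpn(features, im_info, boxes)  # training path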
|
[
"megengine.module.init.fill_",
"megengine.module.init.normal_",
"megengine.module.Conv2d"
] |
[((640, 702), 'megengine.module.Conv2d', 'M.Conv2d', (['(256)', 'rpn_channel'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(256, rpn_channel, kernel_size=3, stride=1, padding=1)\n', (648, 702), True, 'import megengine.module as M\n'), ((732, 807), 'megengine.module.Conv2d', 'M.Conv2d', (['rpn_channel', '(config.num_cell_anchors * 2)'], {'kernel_size': '(1)', 'stride': '(1)'}), '(rpn_channel, config.num_cell_anchors * 2, kernel_size=1, stride=1)\n', (740, 807), True, 'import megengine.module as M\n'), ((840, 915), 'megengine.module.Conv2d', 'M.Conv2d', (['rpn_channel', '(config.num_cell_anchors * 4)'], {'kernel_size': '(1)', 'stride': '(1)'}), '(rpn_channel, config.num_cell_anchors * 4, kernel_size=1, stride=1)\n', (848, 915), True, 'import megengine.module as M\n'), ((1006, 1040), 'megengine.module.init.normal_', 'M.init.normal_', (['l.weight'], {'std': '(0.01)'}), '(l.weight, std=0.01)\n', (1020, 1040), True, 'import megengine.module as M\n'), ((1053, 1076), 'megengine.module.init.fill_', 'M.init.fill_', (['l.bias', '(0)'], {}), '(l.bias, 0)\n', (1065, 1076), True, 'import megengine.module as M\n'), ((2286, 2326), 'det_opr.loss_opr.softmax_loss', 'softmax_loss', (['pred_cls_score', 'rpn_labels'], {}), '(pred_cls_score, rpn_labels)\n', (2298, 2326), False, 'from det_opr.loss_opr import softmax_loss, smooth_l1_loss_rpn\n'), ((2355, 2454), 'det_opr.loss_opr.smooth_l1_loss_rpn', 'smooth_l1_loss_rpn', (['pred_bbox_offsets', 'rpn_bbox_targets', 'rpn_labels', 'config.rpn_smooth_l1_beta'], {}), '(pred_bbox_offsets, rpn_bbox_targets, rpn_labels, config.\n rpn_smooth_l1_beta)\n', (2373, 2454), False, 'from det_opr.loss_opr import softmax_loss, smooth_l1_loss_rpn\n')]
|
r"""
Thermo-elasticity with a given temperature distribution.
Uses `dw_biot` term with an isotropic coefficient for thermo-elastic coupling.
For given body temperature :math:`T` and background temperature
:math:`T_0` find :math:`\ul{u}` such that:
.. math::
\int_{\Omega} D_{ijkl}\ e_{ij}(\ul{v}) e_{kl}(\ul{u})
- \int_{\Omega} (T - T_0)\ \alpha_{ij} e_{ij}(\ul{v})
= 0
\;, \quad \forall \ul{v} \;,
where
.. math::
D_{ijkl} = \mu (\delta_{ik} \delta_{jl}+\delta_{il} \delta_{jk}) +
\lambda \ \delta_{ij} \delta_{kl}
\;, \\
\alpha_{ij} = (3 \lambda + 2 \mu) \alpha \delta_{ij}
and :math:`\alpha` is the thermal expansion coefficient.
"""
from __future__ import absolute_import
import numpy as np
from sfepy.base.base import Struct
from sfepy.mechanics.matcoefs import stiffness_from_lame
from sfepy.mechanics.tensors import get_von_mises_stress
from sfepy import data_dir
# Material parameters.
lam = 10.0
mu = 5.0
thermal_expandability = 1.25e-5
T0 = 20.0 # Background temperature.
filename_mesh = data_dir + '/meshes/3d/block.mesh'
def get_temperature_load(ts, coors, region=None):
"""
Temperature load depends on the `x` coordinate.
"""
x = coors[:, 0]
return (x - x.min())**2 - T0
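# Illustrative numbers: the 'Left' region test (x < -4.99) below suggests the
# block mesh spans roughly x in [-5, 5], so this load runs from -T0 = -20 at
# the left face up to about 10**2 - T0 = 80 at the right face (mesh-dependent).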
def post_process(out, pb, state, extend=False):
"""
Compute derived quantities: strain, stresses. Store also the loading
temperature.
"""
ev = pb.evaluate
strain = ev('ev_cauchy_strain.2.Omega( u )', mode='el_avg')
out['cauchy_strain'] = Struct(name='output_data',
mode='cell', data=strain,
dofs=None)
e_stress = ev('ev_cauchy_stress.2.Omega( solid.D, u )', mode='el_avg')
out['elastic_stress'] = Struct(name='output_data',
mode='cell', data=e_stress,
dofs=None)
t_stress = ev('ev_biot_stress.2.Omega( solid.alpha, T )', mode='el_avg')
out['thermal_stress'] = Struct(name='output_data',
mode='cell', data=t_stress,
dofs=None)
out['total_stress'] = Struct(name='output_data',
mode='cell', data=e_stress + t_stress,
dofs=None)
out['von_mises_stress'] = aux = out['total_stress'].copy()
vms = get_von_mises_stress(aux.data.squeeze())
vms.shape = (vms.shape[0], 1, 1, 1)
out['von_mises_stress'].data = vms
val = pb.get_variables()['T']()
val.shape = (val.shape[0], 1)
out['T'] = Struct(name='output_data',
mode='vertex', data=val + T0,
dofs=None)
return out
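# For reference, the von Mises stress extracted above follows the standard
# definition sigma_vm = sqrt(3/2 * s_ij s_ij), with s the deviatoric part of
# the total (elastic + thermal) stress; see get_von_mises_stress().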
options = {
'post_process_hook' : 'post_process',
'nls' : 'newton',
'ls' : 'ls',
}
functions = {
'get_temperature_load' : (get_temperature_load,),
}
regions = {
'Omega' : 'all',
'Left' : ('vertices in (x < -4.99)', 'facet'),
}
fields = {
'displacement': ('real', 3, 'Omega', 1),
'temperature': ('real', 1, 'Omega', 1),
}
variables = {
'u' : ('unknown field', 'displacement', 0),
'v' : ('test field', 'displacement', 'u'),
'T' : ('parameter field', 'temperature',
{'setter' : 'get_temperature_load'}),
}
ebcs = {
'fix_u' : ('Left', {'u.all' : 0.0}),
}
eye_sym = np.array([[1], [1], [1], [0], [0], [0]], dtype=np.float64)
materials = {
'solid' : ({
'D' : stiffness_from_lame(3, lam=lam, mu=mu),
'alpha' : (3.0 * lam + 2.0 * mu) * thermal_expandability * eye_sym
},),
}
equations = {
'balance_of_forces' :
"""dw_lin_elastic.2.Omega( solid.D, v, u )
- dw_biot.2.Omega( solid.alpha, v, T )
= 0""",
}
solvers = {
'ls' : ('ls.scipy_direct', {}),
'newton' : ('nls.newton', {
'i_max' : 1,
'eps_a' : 1e-10,
}),
}
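# This is a declarative problem description, meant to be run through sfepy's
# standard driver script (usual sfepy workflow; the file name is hypothetical):
#
#     python simple.py thermo_elasticity.py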
|
[
"sfepy.base.base.Struct",
"sfepy.mechanics.matcoefs.stiffness_from_lame"
] |
[((3335, 3393), 'numpy.array', 'np.array', (['[[1], [1], [1], [0], [0], [0]]'], {'dtype': 'np.float64'}), '([[1], [1], [1], [0], [0], [0]], dtype=np.float64)\n', (3343, 3393), True, 'import numpy as np\n'), ((1516, 1579), 'sfepy.base.base.Struct', 'Struct', ([], {'name': '"""output_data"""', 'mode': '"""cell"""', 'data': 'strain', 'dofs': 'None'}), "(name='output_data', mode='cell', data=strain, dofs=None)\n", (1522, 1579), False, 'from sfepy.base.base import Struct\n'), ((1752, 1817), 'sfepy.base.base.Struct', 'Struct', ([], {'name': '"""output_data"""', 'mode': '"""cell"""', 'data': 'e_stress', 'dofs': 'None'}), "(name='output_data', mode='cell', data=e_stress, dofs=None)\n", (1758, 1817), False, 'from sfepy.base.base import Struct\n'), ((1994, 2059), 'sfepy.base.base.Struct', 'Struct', ([], {'name': '"""output_data"""', 'mode': '"""cell"""', 'data': 't_stress', 'dofs': 'None'}), "(name='output_data', mode='cell', data=t_stress, dofs=None)\n", (2000, 2059), False, 'from sfepy.base.base import Struct\n'), ((2157, 2233), 'sfepy.base.base.Struct', 'Struct', ([], {'name': '"""output_data"""', 'mode': '"""cell"""', 'data': '(e_stress + t_stress)', 'dofs': 'None'}), "(name='output_data', mode='cell', data=e_stress + t_stress, dofs=None)\n", (2163, 2233), False, 'from sfepy.base.base import Struct\n'), ((2580, 2647), 'sfepy.base.base.Struct', 'Struct', ([], {'name': '"""output_data"""', 'mode': '"""vertex"""', 'data': '(val + T0)', 'dofs': 'None'}), "(name='output_data', mode='vertex', data=val + T0, dofs=None)\n", (2586, 2647), False, 'from sfepy.base.base import Struct\n'), ((3439, 3477), 'sfepy.mechanics.matcoefs.stiffness_from_lame', 'stiffness_from_lame', (['(3)'], {'lam': 'lam', 'mu': 'mu'}), '(3, lam=lam, mu=mu)\n', (3458, 3477), False, 'from sfepy.mechanics.matcoefs import stiffness_from_lame\n')]
|
from typing import Optional
from sqlalchemy import UniqueConstraint
from sqlmodel import Field, Relationship, SQLModel
from db.base import BaseDBModel
from model.item import Item
from model.warehouse import Warehouse
class InventoryEditableFields(SQLModel):
item_id: int = Field(foreign_key="item.id")
warehouse_id: int = Field(foreign_key="warehouse.id")
quantity: float
class Inventory(BaseDBModel, InventoryEditableFields, table=True):
__table_args__ = (UniqueConstraint("id", "item_id", "warehouse_id"),)
item: Optional[Item] = Relationship(back_populates="item_inventories")
warehouse: Optional[Warehouse] = Relationship(back_populates="warehouse_inventories")
class InventoryRead(SQLModel):
    quantity: float
item: Optional[Item] = None
warehouse: Optional[Warehouse] = None
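# Hedged usage sketch (engine/session setup is assumed to live elsewhere, and
# the ids below are placeholders):
#
#     from sqlmodel import Session
#
#     with Session(engine) as session:
#         inv = Inventory(item_id=1, warehouse_id=2, quantity=10.0)
#         session.add(inv)
#         session.commit()
#         session.refresh(inv)
#         inv.item, inv.warehouse  # relationships resolve lazily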
|
[
"sqlmodel.Relationship",
"sqlmodel.Field"
] |
[((281, 309), 'sqlmodel.Field', 'Field', ([], {'foreign_key': '"""item.id"""'}), "(foreign_key='item.id')\n", (286, 309), False, 'from sqlmodel import Field, Relationship, SQLModel\n'), ((334, 367), 'sqlmodel.Field', 'Field', ([], {'foreign_key': '"""warehouse.id"""'}), "(foreign_key='warehouse.id')\n", (339, 367), False, 'from sqlmodel import Field, Relationship, SQLModel\n'), ((558, 605), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""item_inventories"""'}), "(back_populates='item_inventories')\n", (570, 605), False, 'from sqlmodel import Field, Relationship, SQLModel\n'), ((643, 695), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""warehouse_inventories"""'}), "(back_populates='warehouse_inventories')\n", (655, 695), False, 'from sqlmodel import Field, Relationship, SQLModel\n'), ((479, 528), 'sqlalchemy.UniqueConstraint', 'UniqueConstraint', (['"""id"""', '"""item_id"""', '"""warehouse_id"""'], {}), "('id', 'item_id', 'warehouse_id')\n", (495, 528), False, 'from sqlalchemy import UniqueConstraint\n')]
|
"""
Models for columns.
"""
from typing import TYPE_CHECKING, Optional, TypedDict
from sqlalchemy.sql.schema import Column as SqlaColumn
from sqlalchemy.types import Enum
from sqlmodel import Field, Relationship, SQLModel
from datajunction.typing import ColumnType
if TYPE_CHECKING:
from datajunction.models.node import Node
class ColumnYAML(TypedDict, total=False):
"""
Schema of a column in the YAML file.
"""
type: str
dimension: str
class Column(SQLModel, table=True): # type: ignore
"""
A column.
Columns can be physical (associated with ``Table`` objects) or abstract (associated
with ``Node`` objects).
"""
id: Optional[int] = Field(default=None, primary_key=True)
name: str
type: ColumnType = Field(sa_column=SqlaColumn(Enum(ColumnType)))
dimension_id: Optional[int] = Field(default=None, foreign_key="node.id")
dimension: "Node" = Relationship()
dimension_column: Optional[str] = None
def to_yaml(self) -> ColumnYAML:
"""
Serialize the column for YAML.
"""
return {
"type": self.type.value, # pylint: disable=no-member
}
def __hash__(self) -> int:
return hash(self.id)
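# Illustrative only -- the concrete ColumnType members live in
# datajunction.typing, which is not shown here:
#
#     col = Column(name="ds", type=ColumnType.STR)  # hypothetical member
#     col.to_yaml()  # -> {"type": <ColumnType.STR.value>}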
|
[
"sqlmodel.Relationship",
"sqlmodel.Field"
] |
[((694, 731), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (699, 731), False, 'from sqlmodel import Field, Relationship, SQLModel\n'), ((850, 892), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'foreign_key': '"""node.id"""'}), "(default=None, foreign_key='node.id')\n", (855, 892), False, 'from sqlmodel import Field, Relationship, SQLModel\n'), ((917, 931), 'sqlmodel.Relationship', 'Relationship', ([], {}), '()\n', (929, 931), False, 'from sqlmodel import Field, Relationship, SQLModel\n'), ((796, 812), 'sqlalchemy.types.Enum', 'Enum', (['ColumnType'], {}), '(ColumnType)\n', (800, 812), False, 'from sqlalchemy.types import Enum\n')]
|
from typing import Optional
from sqlmodel import Field, SQLModel, Relationship, Column
from sqlalchemy_utils.types import TSVectorType
from .db import stand_by_models, stand_by_db
stand_by_models()
class Hero(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str
content: str
age: Optional[int] = None
search_vector: Optional[str] = Field(
sa_column=Column(
TSVectorType(
"name",
"content",
# weights={"name": "A", "secret_name": "B", "age": "D"},
)
)
)
class Parents(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str
# children = orm.relationship("Children")
class Children(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str
parent_id: Optional[int] = Field(default=None, foreign_key="parents.id")
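# Hedged sketch: the TSVector column above enables Postgres full-text search;
# one way to query it is raw SQL through SQLModel (an assumption -- no query
# code ships in this module, and `engine` is assumed to come from .db):
#
#     from sqlalchemy import text
#     from sqlmodel import Session, select
#
#     with Session(engine) as session:
#         stmt = select(Hero).where(
#             text("search_vector @@ plainto_tsquery('bob')"))
#         heroes = session.exec(stmt).all()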
stand_by_db()
|
[
"sqlmodel.Field"
] |
[((260, 297), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (265, 297), False, 'from sqlmodel import Field, SQLModel, Relationship, Column\n'), ((670, 707), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (675, 707), False, 'from sqlmodel import Field, SQLModel, Relationship, Column\n'), ((832, 869), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (837, 869), False, 'from sqlmodel import Field, SQLModel, Relationship, Column\n'), ((915, 960), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'foreign_key': '"""parents.id"""'}), "(default=None, foreign_key='parents.id')\n", (920, 960), False, 'from sqlmodel import Field, SQLModel, Relationship, Column\n'), ((439, 470), 'sqlalchemy_utils.types.TSVectorType', 'TSVectorType', (['"""name"""', '"""content"""'], {}), "('name', 'content')\n", (451, 470), False, 'from sqlalchemy_utils.types import TSVectorType\n')]
|
#!/usr/bin/env python
r"""
Parallel assembling and solving of a Poisson's equation, using commands for
interactive use.
Find :math:`u` such that:
.. math::
\int_{\Omega} \nabla v \cdot \nabla u
= \int_{\Omega} v f
    \;, \quad \forall v \;.
Important Notes
---------------
- This example requires petsc4py, mpi4py and (optionally) pymetis with their
dependencies installed!
- This example generates a number of files - do not use an existing non-empty
directory for the ``output_dir`` argument.
- Use the ``--clear`` option with care!
Notes
-----
- Each task is responsible for a subdomain consisting of a set of cells (a cell
region).
- Each subdomain owns PETSc DOFs within a consecutive range.
- When both global and task-local variables exist, the task-local
variables have ``_i`` suffix.
- This example does not use a nonlinear solver.
- This example can serve as a template for solving a linear single-field scalar
problem - just replace the equations in :func:`create_local_problem()`.
- The command line options are saved into <output_dir>/options.txt file.
Usage Examples
--------------
See all options::
$ python examples/diffusion/poisson_parallel_interactive.py -h
See PETSc options::
$ python examples/diffusion/poisson_parallel_interactive.py -help
Single process run useful for debugging with :func:`debug()
<sfepy.base.base.debug>`::
$ python examples/diffusion/poisson_parallel_interactive.py output-parallel
Parallel runs::
$ mpiexec -n 3 python examples/diffusion/poisson_parallel_interactive.py output-parallel -2 --shape=101,101
$ mpiexec -n 3 python examples/diffusion/poisson_parallel_interactive.py output-parallel -2 --shape=101,101 --metis
$ mpiexec -n 5 python examples/diffusion/poisson_parallel_interactive.py output-parallel -2 --shape=101,101 --verify --metis -ksp_monitor -ksp_converged_reason
View the results using::
$ python postproc.py output-parallel/sol.h5 --wireframe -b -d'u,plot_warp_scalar'
"""
from __future__ import absolute_import
from argparse import RawDescriptionHelpFormatter, ArgumentParser
import os
import sys
sys.path.append('.')
import csv
import numpy as nm
import matplotlib.pyplot as plt
from sfepy.base.base import output, Struct
from sfepy.base.ioutils import ensure_path, remove_files_patterns, save_options
from sfepy.base.timing import Timer
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.discrete.common.region import Region
from sfepy.discrete import (FieldVariable, Material, Integral, Function,
Equation, Equations, Problem, State)
from sfepy.discrete.conditions import Conditions, EssentialBC
from sfepy.discrete.evaluate import apply_ebc_to_matrix
from sfepy.terms import Term
from sfepy.solvers.ls import PETScKrylovSolver
import sfepy.parallel.parallel as pl
import sfepy.parallel.plot_parallel_dofs as ppd
def create_local_problem(omega_gi, order):
"""
Local problem definition using a domain corresponding to the global region
`omega_gi`.
"""
mesh = omega_gi.domain.mesh
# All tasks have the whole mesh.
bbox = mesh.get_bounding_box()
min_x, max_x = bbox[:, 0]
eps_x = 1e-8 * (max_x - min_x)
mesh_i = Mesh.from_region(omega_gi, mesh, localize=True)
domain_i = FEDomain('domain_i', mesh_i)
omega_i = domain_i.create_region('Omega', 'all')
gamma1_i = domain_i.create_region('Gamma1',
'vertices in (x < %.10f)'
% (min_x + eps_x),
'facet', allow_empty=True)
gamma2_i = domain_i.create_region('Gamma2',
'vertices in (x > %.10f)'
% (max_x - eps_x),
'facet', allow_empty=True)
field_i = Field.from_args('fu', nm.float64, 1, omega_i,
approx_order=order)
output('number of local field DOFs:', field_i.n_nod)
u_i = FieldVariable('u_i', 'unknown', field_i)
v_i = FieldVariable('v_i', 'test', field_i, primary_var_name='u_i')
integral = Integral('i', order=2*order)
mat = Material('m', lam=10, mu=5)
t1 = Term.new('dw_laplace(m.lam, v_i, u_i)',
integral, omega_i, m=mat, v_i=v_i, u_i=u_i)
def _get_load(coors):
val = nm.ones_like(coors[:, 0])
for coor in coors.T:
val *= nm.sin(4 * nm.pi * coor)
return val
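    # For a 2D point (x, y) the load is sin(4*pi*x) * sin(4*pi*y); it enters
    # the balance equation assembled below scaled by a factor of 100.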
def get_load(ts, coors, mode=None, **kwargs):
if mode == 'qp':
return {'val' : _get_load(coors).reshape(coors.shape[0], 1, 1)}
load = Material('load', function=Function('get_load', get_load))
t2 = Term.new('dw_volume_lvf(load.val, v_i)',
integral, omega_i, load=load, v_i=v_i)
eq = Equation('balance', t1 - 100 * t2)
eqs = Equations([eq])
ebc1 = EssentialBC('ebc1', gamma1_i, {'u_i.all' : 0.0})
ebc2 = EssentialBC('ebc2', gamma2_i, {'u_i.all' : 0.1})
pb = Problem('problem_i', equations=eqs, active_only=False)
pb.time_update(ebcs=Conditions([ebc1, ebc2]))
pb.update_materials()
return pb
def verify_save_dof_maps(field, cell_tasks, dof_maps, id_map, options,
verbose=False):
vec = pl.verify_task_dof_maps(dof_maps, id_map, field, verbose=verbose)
order = options.order
mesh = field.domain.mesh
sfield = Field.from_args('aux', nm.float64, 'scalar', field.region,
approx_order=order)
aux = FieldVariable('aux', 'parameter', sfield,
primary_var_name='(set-to-None)')
out = aux.create_output(vec,
linearization=Struct(kind='adaptive',
min_level=order-1,
max_level=order-1,
eps=1e-8))
filename = os.path.join(options.output_dir,
'para-domains-dofs.h5')
if field.is_higher_order():
out['aux'].mesh.write(filename, out=out)
else:
mesh.write(filename, out=out)
out = Struct(name='cells', mode='cell',
data=cell_tasks[:, None, None, None])
filename = os.path.join(options.output_dir,
'para-domains-cells.h5')
mesh.write(filename, out={'cells' : out})
def solve_problem(mesh_filename, options, comm):
order = options.order
rank, size = comm.Get_rank(), comm.Get_size()
output('rank', rank, 'of', size)
stats = Struct()
timer = Timer('solve_timer')
timer.start()
mesh = Mesh.from_file(mesh_filename)
stats.t_read_mesh = timer.stop()
timer.start()
if rank == 0:
cell_tasks = pl.partition_mesh(mesh, size, use_metis=options.metis,
verbose=True)
else:
cell_tasks = None
stats.t_partition_mesh = timer.stop()
output('creating global domain and field...')
timer.start()
domain = FEDomain('domain', mesh)
omega = domain.create_region('Omega', 'all')
field = Field.from_args('fu', nm.float64, 1, omega, approx_order=order)
stats.t_create_global_fields = timer.stop()
output('...done in', timer.dt)
output('distributing field %s...' % field.name)
timer.start()
distribute = pl.distribute_fields_dofs
lfds, gfds = distribute([field], cell_tasks,
is_overlap=True,
save_inter_regions=options.save_inter_regions,
output_dir=options.output_dir,
comm=comm, verbose=True)
lfd = lfds[0]
stats.t_distribute_fields_dofs = timer.stop()
output('...done in', timer.dt)
if rank == 0:
dof_maps = gfds[0].dof_maps
id_map = gfds[0].id_map
if options.verify:
verify_save_dof_maps(field, cell_tasks,
dof_maps, id_map, options, verbose=True)
if options.plot:
ppd.plot_partitioning([None, None], field, cell_tasks, gfds[0],
options.output_dir, size)
output('creating local problem...')
timer.start()
omega_gi = Region.from_cells(lfd.cells, field.domain)
omega_gi.finalize()
omega_gi.update_shape()
pb = create_local_problem(omega_gi, order)
variables = pb.get_variables()
eqs = pb.equations
u_i = variables['u_i']
field_i = u_i.field
stats.t_create_local_problem = timer.stop()
output('...done in', timer.dt)
if options.plot:
ppd.plot_local_dofs([None, None], field, field_i, omega_gi,
options.output_dir, rank)
output('allocating global system...')
timer.start()
sizes, drange = pl.get_sizes(lfd.petsc_dofs_range, field.n_nod, 1)
output('sizes:', sizes)
output('drange:', drange)
pdofs = pl.get_local_ordering(field_i, lfd.petsc_dofs_conn)
output('pdofs:', pdofs)
pmtx, psol, prhs = pl.create_petsc_system(pb.mtx_a, sizes, pdofs, drange,
is_overlap=True, comm=comm,
verbose=True)
stats.t_allocate_global_system = timer.stop()
output('...done in', timer.dt)
output('evaluating local problem...')
timer.start()
state = State(variables)
state.fill(0.0)
state.apply_ebc()
rhs_i = eqs.eval_residuals(state())
# This must be after pl.create_petsc_system() call!
mtx_i = eqs.eval_tangent_matrices(state(), pb.mtx_a)
stats.t_evaluate_local_problem = timer.stop()
output('...done in', timer.dt)
output('assembling global system...')
timer.start()
apply_ebc_to_matrix(mtx_i, u_i.eq_map.eq_ebc)
pl.assemble_rhs_to_petsc(prhs, rhs_i, pdofs, drange, is_overlap=True,
comm=comm, verbose=True)
pl.assemble_mtx_to_petsc(pmtx, mtx_i, pdofs, drange, is_overlap=True,
comm=comm, verbose=True)
stats.t_assemble_global_system = timer.stop()
output('...done in', timer.dt)
output('creating solver...')
timer.start()
conf = Struct(method='cg', precond='gamg', sub_precond='none',
i_max=10000, eps_a=1e-50, eps_r=1e-5, eps_d=1e4, verbose=True)
status = {}
ls = PETScKrylovSolver(conf, comm=comm, mtx=pmtx, status=status)
stats.t_create_solver = timer.stop()
output('...done in', timer.dt)
output('solving...')
timer.start()
psol = ls(prhs, psol)
psol_i = pl.create_local_petsc_vector(pdofs)
gather, scatter = pl.create_gather_scatter(pdofs, psol_i, psol, comm=comm)
scatter(psol_i, psol)
sol0_i = state() - psol_i[...]
psol_i[...] = sol0_i
gather(psol, psol_i)
stats.t_solve = timer.stop()
output('...done in', timer.dt)
output('saving solution...')
timer.start()
u_i.set_data(sol0_i)
out = u_i.create_output()
filename = os.path.join(options.output_dir, 'sol_%02d.h5' % comm.rank)
pb.domain.mesh.write(filename, io='auto', out=out)
gather_to_zero = pl.create_gather_to_zero(psol)
psol_full = gather_to_zero(psol)
if comm.rank == 0:
sol = psol_full[...].copy()[id_map]
u = FieldVariable('u', 'parameter', field,
primary_var_name='(set-to-None)')
filename = os.path.join(options.output_dir, 'sol.h5')
if (order == 1) or (options.linearization == 'strip'):
out = u.create_output(sol)
mesh.write(filename, io='auto', out=out)
else:
out = u.create_output(sol, linearization=Struct(kind='adaptive',
min_level=0,
max_level=order,
eps=1e-3))
out['u'].mesh.write(filename, io='auto', out=out)
stats.t_save_solution = timer.stop()
output('...done in', timer.dt)
stats.t_total = timer.total
stats.n_dof = sizes[1]
stats.n_dof_local = sizes[0]
stats.n_cell = omega.shape.n_cell
stats.n_cell_local = omega_gi.shape.n_cell
if options.show:
plt.show()
return stats
def save_stats(filename, pars, stats, overwrite, rank, comm=None):
out = stats.to_dict()
names = sorted(out.keys())
shape_dict = {'n%d' % ii : pars.shape[ii] for ii in range(pars.dim)}
keys = ['size', 'rank', 'dim'] + list(shape_dict.keys()) + ['order'] + names
out['size'] = comm.size
out['rank'] = rank
out['dim'] = pars.dim
out.update(shape_dict)
out['order'] = pars.order
if rank == 0 and overwrite:
with open(filename, 'w') as fd:
writer = csv.DictWriter(fd, fieldnames=keys)
writer.writeheader()
writer.writerow(out)
else:
with open(filename, 'a') as fd:
writer = csv.DictWriter(fd, fieldnames=keys)
writer.writerow(out)
helps = {
'output_dir' :
'output directory',
'dims' :
'dimensions of the block [default: %(default)s]',
'shape' :
'shape (counts of nodes in x, y, z) of the block [default: %(default)s]',
'centre' :
'centre of the block [default: %(default)s]',
'2d' :
'generate a 2D rectangle, the third components of the above'
' options are ignored',
'order' :
'field approximation order',
'linearization' :
'linearization used for storing the results with approximation order > 1'
' [default: %(default)s]',
'metis' :
'use metis for domain partitioning',
'verify' :
'verify domain partitioning, save cells and DOFs of tasks'
' for visualization',
'plot' :
'make partitioning plots',
'save_inter_regions' :
'save inter-task regions for debugging partitioning problems',
'show' :
'show partitioning plots (implies --plot)',
'stats_filename' :
'name of the stats file for storing elapsed time statistics',
'new_stats' :
'create a new stats file with a header line (overwrites existing!)',
'silent' : 'do not print messages to screen',
'clear' :
'clear old solution files from output directory'
' (DANGEROUS - use with care!)',
}
def main():
parser = ArgumentParser(description=__doc__.rstrip(),
formatter_class=RawDescriptionHelpFormatter)
parser.add_argument('output_dir', help=helps['output_dir'])
parser.add_argument('--dims', metavar='dims',
action='store', dest='dims',
default='1.0,1.0,1.0', help=helps['dims'])
parser.add_argument('--shape', metavar='shape',
action='store', dest='shape',
default='11,11,11', help=helps['shape'])
parser.add_argument('--centre', metavar='centre',
action='store', dest='centre',
default='0.0,0.0,0.0', help=helps['centre'])
parser.add_argument('-2', '--2d',
action='store_true', dest='is_2d',
default=False, help=helps['2d'])
parser.add_argument('--order', metavar='int', type=int,
action='store', dest='order',
default=1, help=helps['order'])
parser.add_argument('--linearization', choices=['strip', 'adaptive'],
action='store', dest='linearization',
default='strip', help=helps['linearization'])
parser.add_argument('--metis',
action='store_true', dest='metis',
default=False, help=helps['metis'])
parser.add_argument('--verify',
action='store_true', dest='verify',
default=False, help=helps['verify'])
parser.add_argument('--plot',
action='store_true', dest='plot',
default=False, help=helps['plot'])
parser.add_argument('--show',
action='store_true', dest='show',
default=False, help=helps['show'])
parser.add_argument('--save-inter-regions',
action='store_true', dest='save_inter_regions',
default=False, help=helps['save_inter_regions'])
parser.add_argument('--stats', metavar='filename',
action='store', dest='stats_filename',
default=None, help=helps['stats_filename'])
parser.add_argument('--new-stats',
action='store_true', dest='new_stats',
default=False, help=helps['new_stats'])
parser.add_argument('--silent',
action='store_true', dest='silent',
default=False, help=helps['silent'])
parser.add_argument('--clear',
action='store_true', dest='clear',
default=False, help=helps['clear'])
options, petsc_opts = parser.parse_known_args()
if options.show:
options.plot = True
comm = pl.PETSc.COMM_WORLD
output_dir = options.output_dir
filename = os.path.join(output_dir, 'output_log_%02d.txt' % comm.rank)
if comm.rank == 0:
ensure_path(filename)
comm.barrier()
output.prefix = 'sfepy_%02d:' % comm.rank
    output.set_output(filename=filename, combined=not options.silent)
output('petsc options:', petsc_opts)
mesh_filename = os.path.join(options.output_dir, 'para.h5')
dim = 2 if options.is_2d else 3
dims = nm.array(eval(options.dims), dtype=nm.float64)[:dim]
shape = nm.array(eval(options.shape), dtype=nm.int32)[:dim]
centre = nm.array(eval(options.centre), dtype=nm.float64)[:dim]
output('dimensions:', dims)
output('shape: ', shape)
output('centre: ', centre)
if comm.rank == 0:
from sfepy.mesh.mesh_generators import gen_block_mesh
if options.clear:
remove_files_patterns(output_dir,
['*.h5', '*.mesh', '*.txt', '*.png'],
ignores=['output_log_%02d.txt' % ii
for ii in range(comm.size)],
verbose=True)
save_options(os.path.join(output_dir, 'options.txt'),
[('options', vars(options))])
mesh = gen_block_mesh(dims, shape, centre, name='block-fem',
verbose=True)
mesh.write(mesh_filename, io='auto')
comm.barrier()
output('field order:', options.order)
stats = solve_problem(mesh_filename, options, comm)
output(stats)
if options.stats_filename:
if comm.rank == 0:
ensure_path(options.stats_filename)
comm.barrier()
pars = Struct(dim=dim, shape=shape, order=options.order)
pl.call_in_rank_order(
lambda rank, comm:
save_stats(options.stats_filename, pars, stats, options.new_stats,
rank, comm),
comm
)
if __name__ == '__main__':
main()
|
[
"sfepy.parallel.parallel.assemble_rhs_to_petsc",
"sfepy.mesh.mesh_generators.gen_block_mesh",
"sfepy.parallel.parallel.create_gather_scatter",
"sfepy.base.ioutils.ensure_path",
"sfepy.parallel.parallel.create_local_petsc_vector",
"sfepy.discrete.fem.Mesh.from_region",
"sfepy.discrete.fem.Mesh.from_file",
"sfepy.discrete.Integral",
"sfepy.discrete.fem.Field.from_args",
"sfepy.base.base.Struct",
"sfepy.solvers.ls.PETScKrylovSolver",
"sfepy.discrete.State",
"sfepy.parallel.plot_parallel_dofs.plot_local_dofs",
"sfepy.discrete.fem.FEDomain",
"sfepy.base.base.output",
"sfepy.discrete.conditions.EssentialBC",
"sfepy.discrete.conditions.Conditions",
"sfepy.parallel.parallel.verify_task_dof_maps",
"sfepy.discrete.common.region.Region.from_cells",
"sfepy.parallel.parallel.create_petsc_system",
"sfepy.parallel.parallel.partition_mesh",
"sfepy.parallel.parallel.get_sizes",
"sfepy.discrete.evaluate.apply_ebc_to_matrix",
"sfepy.parallel.parallel.create_gather_to_zero",
"sfepy.discrete.Equation",
"sfepy.parallel.parallel.get_local_ordering",
"sfepy.base.base.output.set_output",
"sfepy.terms.Term.new",
"sfepy.base.timing.Timer",
"sfepy.parallel.parallel.assemble_mtx_to_petsc",
"sfepy.discrete.FieldVariable",
"sfepy.discrete.Function",
"sfepy.discrete.Material",
"sfepy.discrete.Problem",
"sfepy.discrete.Equations",
"sfepy.parallel.plot_parallel_dofs.plot_partitioning"
] |
[((2114, 2134), 'sys.path.append', 'sys.path.append', (['"""."""'], {}), "('.')\n", (2129, 2134), False, 'import sys\n'), ((3216, 3263), 'sfepy.discrete.fem.Mesh.from_region', 'Mesh.from_region', (['omega_gi', 'mesh'], {'localize': '(True)'}), '(omega_gi, mesh, localize=True)\n', (3232, 3263), False, 'from sfepy.discrete.fem import Mesh, FEDomain, Field\n'), ((3279, 3307), 'sfepy.discrete.fem.FEDomain', 'FEDomain', (['"""domain_i"""', 'mesh_i'], {}), "('domain_i', mesh_i)\n", (3287, 3307), False, 'from sfepy.discrete.fem import Mesh, FEDomain, Field\n'), ((3845, 3910), 'sfepy.discrete.fem.Field.from_args', 'Field.from_args', (['"""fu"""', 'nm.float64', '(1)', 'omega_i'], {'approx_order': 'order'}), "('fu', nm.float64, 1, omega_i, approx_order=order)\n", (3860, 3910), False, 'from sfepy.discrete.fem import Mesh, FEDomain, Field\n'), ((3946, 3998), 'sfepy.base.base.output', 'output', (['"""number of local field DOFs:"""', 'field_i.n_nod'], {}), "('number of local field DOFs:', field_i.n_nod)\n", (3952, 3998), False, 'from sfepy.base.base import output, Struct\n'), ((4010, 4050), 'sfepy.discrete.FieldVariable', 'FieldVariable', (['"""u_i"""', '"""unknown"""', 'field_i'], {}), "('u_i', 'unknown', field_i)\n", (4023, 4050), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem, State\n'), ((4061, 4122), 'sfepy.discrete.FieldVariable', 'FieldVariable', (['"""v_i"""', '"""test"""', 'field_i'], {'primary_var_name': '"""u_i"""'}), "('v_i', 'test', field_i, primary_var_name='u_i')\n", (4074, 4122), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem, State\n'), ((4139, 4169), 'sfepy.discrete.Integral', 'Integral', (['"""i"""'], {'order': '(2 * order)'}), "('i', order=2 * order)\n", (4147, 4169), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem, State\n'), ((4179, 4206), 'sfepy.discrete.Material', 'Material', (['"""m"""'], {'lam': '(10)', 'mu': '(5)'}), "('m', lam=10, mu=5)\n", (4187, 4206), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem, State\n'), ((4216, 4303), 'sfepy.terms.Term.new', 'Term.new', (['"""dw_laplace(m.lam, v_i, u_i)"""', 'integral', 'omega_i'], {'m': 'mat', 'v_i': 'v_i', 'u_i': 'u_i'}), "('dw_laplace(m.lam, v_i, u_i)', integral, omega_i, m=mat, v_i=v_i,\n u_i=u_i)\n", (4224, 4303), False, 'from sfepy.terms import Term\n'), ((4709, 4788), 'sfepy.terms.Term.new', 'Term.new', (['"""dw_volume_lvf(load.val, v_i)"""', 'integral', 'omega_i'], {'load': 'load', 'v_i': 'v_i'}), "('dw_volume_lvf(load.val, v_i)', integral, omega_i, load=load, v_i=v_i)\n", (4717, 4788), False, 'from sfepy.terms import Term\n'), ((4817, 4851), 'sfepy.discrete.Equation', 'Equation', (['"""balance"""', '(t1 - 100 * t2)'], {}), "('balance', t1 - 100 * t2)\n", (4825, 4851), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem, State\n'), ((4862, 4877), 'sfepy.discrete.Equations', 'Equations', (['[eq]'], {}), '([eq])\n', (4871, 4877), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem, State\n'), ((4890, 4937), 'sfepy.discrete.conditions.EssentialBC', 'EssentialBC', (['"""ebc1"""', 'gamma1_i', "{'u_i.all': 0.0}"], {}), "('ebc1', gamma1_i, {'u_i.all': 0.0})\n", (4901, 4937), False, 'from sfepy.discrete.conditions import Conditions, EssentialBC\n'), ((4950, 4997), 
'sfepy.discrete.conditions.EssentialBC', 'EssentialBC', (['"""ebc2"""', 'gamma2_i', "{'u_i.all': 0.1}"], {}), "('ebc2', gamma2_i, {'u_i.all': 0.1})\n", (4961, 4997), False, 'from sfepy.discrete.conditions import Conditions, EssentialBC\n'), ((5009, 5063), 'sfepy.discrete.Problem', 'Problem', (['"""problem_i"""'], {'equations': 'eqs', 'active_only': '(False)'}), "('problem_i', equations=eqs, active_only=False)\n", (5016, 5063), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem, State\n'), ((5278, 5343), 'sfepy.parallel.parallel.verify_task_dof_maps', 'pl.verify_task_dof_maps', (['dof_maps', 'id_map', 'field'], {'verbose': 'verbose'}), '(dof_maps, id_map, field, verbose=verbose)\n', (5301, 5343), True, 'import sfepy.parallel.parallel as pl\n'), ((5414, 5492), 'sfepy.discrete.fem.Field.from_args', 'Field.from_args', (['"""aux"""', 'nm.float64', '"""scalar"""', 'field.region'], {'approx_order': 'order'}), "('aux', nm.float64, 'scalar', field.region, approx_order=order)\n", (5429, 5492), False, 'from sfepy.discrete.fem import Mesh, FEDomain, Field\n'), ((5532, 5607), 'sfepy.discrete.FieldVariable', 'FieldVariable', (['"""aux"""', '"""parameter"""', 'sfield'], {'primary_var_name': '"""(set-to-None)"""'}), "('aux', 'parameter', sfield, primary_var_name='(set-to-None)')\n", (5545, 5607), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem, State\n'), ((5943, 5999), 'os.path.join', 'os.path.join', (['options.output_dir', '"""para-domains-dofs.h5"""'], {}), "(options.output_dir, 'para-domains-dofs.h5')\n", (5955, 5999), False, 'import os\n'), ((6169, 6240), 'sfepy.base.base.Struct', 'Struct', ([], {'name': '"""cells"""', 'mode': '"""cell"""', 'data': 'cell_tasks[:, None, None, None]'}), "(name='cells', mode='cell', data=cell_tasks[:, None, None, None])\n", (6175, 6240), False, 'from sfepy.base.base import output, Struct\n'), ((6273, 6330), 'os.path.join', 'os.path.join', (['options.output_dir', '"""para-domains-cells.h5"""'], {}), "(options.output_dir, 'para-domains-cells.h5')\n", (6285, 6330), False, 'import os\n'), ((6537, 6569), 'sfepy.base.base.output', 'output', (['"""rank"""', 'rank', '"""of"""', 'size'], {}), "('rank', rank, 'of', size)\n", (6543, 6569), False, 'from sfepy.base.base import output, Struct\n'), ((6583, 6591), 'sfepy.base.base.Struct', 'Struct', ([], {}), '()\n', (6589, 6591), False, 'from sfepy.base.base import output, Struct\n'), ((6604, 6624), 'sfepy.base.timing.Timer', 'Timer', (['"""solve_timer"""'], {}), "('solve_timer')\n", (6609, 6624), False, 'from sfepy.base.timing import Timer\n'), ((6655, 6684), 'sfepy.discrete.fem.Mesh.from_file', 'Mesh.from_file', (['mesh_filename'], {}), '(mesh_filename)\n', (6669, 6684), False, 'from sfepy.discrete.fem import Mesh, FEDomain, Field\n'), ((6973, 7018), 'sfepy.base.base.output', 'output', (['"""creating global domain and field..."""'], {}), "('creating global domain and field...')\n", (6979, 7018), False, 'from sfepy.base.base import output, Struct\n'), ((7051, 7075), 'sfepy.discrete.fem.FEDomain', 'FEDomain', (['"""domain"""', 'mesh'], {}), "('domain', mesh)\n", (7059, 7075), False, 'from sfepy.discrete.fem import Mesh, FEDomain, Field\n'), ((7137, 7200), 'sfepy.discrete.fem.Field.from_args', 'Field.from_args', (['"""fu"""', 'nm.float64', '(1)', 'omega'], {'approx_order': 'order'}), "('fu', nm.float64, 1, omega, approx_order=order)\n", (7152, 7200), False, 'from sfepy.discrete.fem import Mesh, FEDomain, Field\n'), 
((7254, 7284), 'sfepy.base.base.output', 'output', (['"""...done in"""', 'timer.dt'], {}), "('...done in', timer.dt)\n", (7260, 7284), False, 'from sfepy.base.base import output, Struct\n'), ((7290, 7337), 'sfepy.base.base.output', 'output', (["('distributing field %s...' % field.name)"], {}), "('distributing field %s...' % field.name)\n", (7296, 7337), False, 'from sfepy.base.base import output, Struct\n'), ((7754, 7784), 'sfepy.base.base.output', 'output', (['"""...done in"""', 'timer.dt'], {}), "('...done in', timer.dt)\n", (7760, 7784), False, 'from sfepy.base.base import output, Struct\n'), ((8193, 8228), 'sfepy.base.base.output', 'output', (['"""creating local problem..."""'], {}), "('creating local problem...')\n", (8199, 8228), False, 'from sfepy.base.base import output, Struct\n'), ((8263, 8305), 'sfepy.discrete.common.region.Region.from_cells', 'Region.from_cells', (['lfd.cells', 'field.domain'], {}), '(lfd.cells, field.domain)\n', (8280, 8305), False, 'from sfepy.discrete.common.region import Region\n'), ((8570, 8600), 'sfepy.base.base.output', 'output', (['"""...done in"""', 'timer.dt'], {}), "('...done in', timer.dt)\n", (8576, 8600), False, 'from sfepy.base.base import output, Struct\n'), ((8750, 8787), 'sfepy.base.base.output', 'output', (['"""allocating global system..."""'], {}), "('allocating global system...')\n", (8756, 8787), False, 'from sfepy.base.base import output, Struct\n'), ((8827, 8877), 'sfepy.parallel.parallel.get_sizes', 'pl.get_sizes', (['lfd.petsc_dofs_range', 'field.n_nod', '(1)'], {}), '(lfd.petsc_dofs_range, field.n_nod, 1)\n', (8839, 8877), True, 'import sfepy.parallel.parallel as pl\n'), ((8882, 8905), 'sfepy.base.base.output', 'output', (['"""sizes:"""', 'sizes'], {}), "('sizes:', sizes)\n", (8888, 8905), False, 'from sfepy.base.base import output, Struct\n'), ((8910, 8935), 'sfepy.base.base.output', 'output', (['"""drange:"""', 'drange'], {}), "('drange:', drange)\n", (8916, 8935), False, 'from sfepy.base.base import output, Struct\n'), ((8949, 9000), 'sfepy.parallel.parallel.get_local_ordering', 'pl.get_local_ordering', (['field_i', 'lfd.petsc_dofs_conn'], {}), '(field_i, lfd.petsc_dofs_conn)\n', (8970, 9000), True, 'import sfepy.parallel.parallel as pl\n'), ((9006, 9029), 'sfepy.base.base.output', 'output', (['"""pdofs:"""', 'pdofs'], {}), "('pdofs:', pdofs)\n", (9012, 9029), False, 'from sfepy.base.base import output, Struct\n'), ((9054, 9154), 'sfepy.parallel.parallel.create_petsc_system', 'pl.create_petsc_system', (['pb.mtx_a', 'sizes', 'pdofs', 'drange'], {'is_overlap': '(True)', 'comm': 'comm', 'verbose': '(True)'}), '(pb.mtx_a, sizes, pdofs, drange, is_overlap=True,\n comm=comm, verbose=True)\n', (9076, 9154), True, 'import sfepy.parallel.parallel as pl\n'), ((9298, 9328), 'sfepy.base.base.output', 'output', (['"""...done in"""', 'timer.dt'], {}), "('...done in', timer.dt)\n", (9304, 9328), False, 'from sfepy.base.base import output, Struct\n'), ((9334, 9371), 'sfepy.base.base.output', 'output', (['"""evaluating local problem..."""'], {}), "('evaluating local problem...')\n", (9340, 9371), False, 'from sfepy.base.base import output, Struct\n'), ((9403, 9419), 'sfepy.discrete.State', 'State', (['variables'], {}), '(variables)\n', (9408, 9419), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem, State\n'), ((9671, 9701), 'sfepy.base.base.output', 'output', (['"""...done in"""', 'timer.dt'], {}), "('...done in', timer.dt)\n", (9677, 9701), False, 'from sfepy.base.base import output, 
Struct\n'), ((9707, 9744), 'sfepy.base.base.output', 'output', (['"""assembling global system..."""'], {}), "('assembling global system...')\n", (9713, 9744), False, 'from sfepy.base.base import output, Struct\n'), ((9768, 9813), 'sfepy.discrete.evaluate.apply_ebc_to_matrix', 'apply_ebc_to_matrix', (['mtx_i', 'u_i.eq_map.eq_ebc'], {}), '(mtx_i, u_i.eq_map.eq_ebc)\n', (9787, 9813), False, 'from sfepy.discrete.evaluate import apply_ebc_to_matrix\n'), ((9818, 9917), 'sfepy.parallel.parallel.assemble_rhs_to_petsc', 'pl.assemble_rhs_to_petsc', (['prhs', 'rhs_i', 'pdofs', 'drange'], {'is_overlap': '(True)', 'comm': 'comm', 'verbose': '(True)'}), '(prhs, rhs_i, pdofs, drange, is_overlap=True, comm=\n comm, verbose=True)\n', (9842, 9917), True, 'import sfepy.parallel.parallel as pl\n'), ((9946, 10045), 'sfepy.parallel.parallel.assemble_mtx_to_petsc', 'pl.assemble_mtx_to_petsc', (['pmtx', 'mtx_i', 'pdofs', 'drange'], {'is_overlap': '(True)', 'comm': 'comm', 'verbose': '(True)'}), '(pmtx, mtx_i, pdofs, drange, is_overlap=True, comm=\n comm, verbose=True)\n', (9970, 10045), True, 'import sfepy.parallel.parallel as pl\n'), ((10125, 10155), 'sfepy.base.base.output', 'output', (['"""...done in"""', 'timer.dt'], {}), "('...done in', timer.dt)\n", (10131, 10155), False, 'from sfepy.base.base import output, Struct\n'), ((10161, 10189), 'sfepy.base.base.output', 'output', (['"""creating solver..."""'], {}), "('creating solver...')\n", (10167, 10189), False, 'from sfepy.base.base import output, Struct\n'), ((10220, 10348), 'sfepy.base.base.Struct', 'Struct', ([], {'method': '"""cg"""', 'precond': '"""gamg"""', 'sub_precond': '"""none"""', 'i_max': '(10000)', 'eps_a': '(1e-50)', 'eps_r': '(1e-05)', 'eps_d': '(10000.0)', 'verbose': '(True)'}), "(method='cg', precond='gamg', sub_precond='none', i_max=10000, eps_a=\n 1e-50, eps_r=1e-05, eps_d=10000.0, verbose=True)\n", (10226, 10348), False, 'from sfepy.base.base import output, Struct\n'), ((10382, 10441), 'sfepy.solvers.ls.PETScKrylovSolver', 'PETScKrylovSolver', (['conf'], {'comm': 'comm', 'mtx': 'pmtx', 'status': 'status'}), '(conf, comm=comm, mtx=pmtx, status=status)\n', (10399, 10441), False, 'from sfepy.solvers.ls import PETScKrylovSolver\n'), ((10488, 10518), 'sfepy.base.base.output', 'output', (['"""...done in"""', 'timer.dt'], {}), "('...done in', timer.dt)\n", (10494, 10518), False, 'from sfepy.base.base import output, Struct\n'), ((10524, 10544), 'sfepy.base.base.output', 'output', (['"""solving..."""'], {}), "('solving...')\n", (10530, 10544), False, 'from sfepy.base.base import output, Struct\n'), ((10604, 10639), 'sfepy.parallel.parallel.create_local_petsc_vector', 'pl.create_local_petsc_vector', (['pdofs'], {}), '(pdofs)\n', (10632, 10639), True, 'import sfepy.parallel.parallel as pl\n'), ((10662, 10718), 'sfepy.parallel.parallel.create_gather_scatter', 'pl.create_gather_scatter', (['pdofs', 'psol_i', 'psol'], {'comm': 'comm'}), '(pdofs, psol_i, psol, comm=comm)\n', (10686, 10718), True, 'import sfepy.parallel.parallel as pl\n'), ((10871, 10901), 'sfepy.base.base.output', 'output', (['"""...done in"""', 'timer.dt'], {}), "('...done in', timer.dt)\n", (10877, 10901), False, 'from sfepy.base.base import output, Struct\n'), ((10907, 10935), 'sfepy.base.base.output', 'output', (['"""saving solution..."""'], {}), "('saving solution...')\n", (10913, 10935), False, 'from sfepy.base.base import output, Struct\n'), ((11026, 11085), 'os.path.join', 'os.path.join', (['options.output_dir', "('sol_%02d.h5' % comm.rank)"], {}), "(options.output_dir, 
'sol_%02d.h5' % comm.rank)\n", (11038, 11085), False, 'import os\n'), ((11163, 11193), 'sfepy.parallel.parallel.create_gather_to_zero', 'pl.create_gather_to_zero', (['psol'], {}), '(psol)\n', (11187, 11193), True, 'import sfepy.parallel.parallel as pl\n'), ((12052, 12082), 'sfepy.base.base.output', 'output', (['"""...done in"""', 'timer.dt'], {}), "('...done in', timer.dt)\n", (12058, 12082), False, 'from sfepy.base.base import output, Struct\n'), ((17239, 17298), 'os.path.join', 'os.path.join', (['output_dir', "('output_log_%02d.txt' % comm.rank)"], {}), "(output_dir, 'output_log_%02d.txt' % comm.rank)\n", (17251, 17298), False, 'import os\n'), ((17422, 17492), 'sfepy.base.base.output.set_output', 'output.set_output', ([], {'filename': 'filename', 'combined': '(options.silent == False)'}), '(filename=filename, combined=options.silent == False)\n', (17439, 17492), False, 'from sfepy.base.base import output, Struct\n'), ((17498, 17534), 'sfepy.base.base.output', 'output', (['"""petsc options:"""', 'petsc_opts'], {}), "('petsc options:', petsc_opts)\n", (17504, 17534), False, 'from sfepy.base.base import output, Struct\n'), ((17556, 17599), 'os.path.join', 'os.path.join', (['options.output_dir', '"""para.h5"""'], {}), "(options.output_dir, 'para.h5')\n", (17568, 17599), False, 'import os\n'), ((17837, 17864), 'sfepy.base.base.output', 'output', (['"""dimensions:"""', 'dims'], {}), "('dimensions:', dims)\n", (17843, 17864), False, 'from sfepy.base.base import output, Struct\n'), ((17869, 17897), 'sfepy.base.base.output', 'output', (['"""shape: """', 'shape'], {}), "('shape: ', shape)\n", (17875, 17897), False, 'from sfepy.base.base import output, Struct\n'), ((17902, 17931), 'sfepy.base.base.output', 'output', (['"""centre: """', 'centre'], {}), "('centre: ', centre)\n", (17908, 17931), False, 'from sfepy.base.base import output, Struct\n'), ((18651, 18688), 'sfepy.base.base.output', 'output', (['"""field order:"""', 'options.order'], {}), "('field order:', options.order)\n", (18657, 18688), False, 'from sfepy.base.base import output, Struct\n'), ((18750, 18763), 'sfepy.base.base.output', 'output', (['stats'], {}), '(stats)\n', (18756, 18763), False, 'from sfepy.base.base import output, Struct\n'), ((4359, 4384), 'numpy.ones_like', 'nm.ones_like', (['coors[:, 0]'], {}), '(coors[:, 0])\n', (4371, 4384), True, 'import numpy as nm\n'), ((6780, 6848), 'sfepy.parallel.parallel.partition_mesh', 'pl.partition_mesh', (['mesh', 'size'], {'use_metis': 'options.metis', 'verbose': '(True)'}), '(mesh, size, use_metis=options.metis, verbose=True)\n', (6797, 6848), True, 'import sfepy.parallel.parallel as pl\n'), ((8631, 8721), 'sfepy.parallel.plot_parallel_dofs.plot_local_dofs', 'ppd.plot_local_dofs', (['[None, None]', 'field', 'field_i', 'omega_gi', 'options.output_dir', 'rank'], {}), '([None, None], field, field_i, omega_gi, options.\n output_dir, rank)\n', (8650, 8721), True, 'import sfepy.parallel.plot_parallel_dofs as ppd\n'), ((11313, 11385), 'sfepy.discrete.FieldVariable', 'FieldVariable', (['"""u"""', '"""parameter"""', 'field'], {'primary_var_name': '"""(set-to-None)"""'}), "('u', 'parameter', field, primary_var_name='(set-to-None)')\n", (11326, 11385), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem, State\n'), ((11432, 11474), 'os.path.join', 'os.path.join', (['options.output_dir', '"""sol.h5"""'], {}), "(options.output_dir, 'sol.h5')\n", (11444, 11474), False, 'import os\n'), ((12292, 12302), 'matplotlib.pyplot.show', 'plt.show', ([], 
{}), '()\n', (12300, 12302), True, 'import matplotlib.pyplot as plt\n'), ((17330, 17351), 'sfepy.base.ioutils.ensure_path', 'ensure_path', (['filename'], {}), '(filename)\n', (17341, 17351), False, 'from sfepy.base.ioutils import ensure_path, remove_files_patterns, save_options\n'), ((18483, 18550), 'sfepy.mesh.mesh_generators.gen_block_mesh', 'gen_block_mesh', (['dims', 'shape', 'centre'], {'name': '"""block-fem"""', 'verbose': '(True)'}), "(dims, shape, centre, name='block-fem', verbose=True)\n", (18497, 18550), False, 'from sfepy.mesh.mesh_generators import gen_block_mesh\n'), ((18910, 18959), 'sfepy.base.base.Struct', 'Struct', ([], {'dim': 'dim', 'shape': 'shape', 'order': 'options.order'}), '(dim=dim, shape=shape, order=options.order)\n', (18916, 18959), False, 'from sfepy.base.base import output, Struct\n'), ((4433, 4457), 'numpy.sin', 'nm.sin', (['(4 * nm.pi * coor)'], {}), '(4 * nm.pi * coor)\n', (4439, 4457), True, 'import numpy as nm\n'), ((4667, 4697), 'sfepy.discrete.Function', 'Function', (['"""get_load"""', 'get_load'], {}), "('get_load', get_load)\n", (4675, 4697), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem, State\n'), ((5088, 5112), 'sfepy.discrete.conditions.Conditions', 'Conditions', (['[ebc1, ebc2]'], {}), '([ebc1, ebc2])\n', (5098, 5112), False, 'from sfepy.discrete.conditions import Conditions, EssentialBC\n'), ((5707, 5783), 'sfepy.base.base.Struct', 'Struct', ([], {'kind': '"""adaptive"""', 'min_level': '(order - 1)', 'max_level': '(order - 1)', 'eps': '(1e-08)'}), "(kind='adaptive', min_level=order - 1, max_level=order - 1, eps=1e-08)\n", (5713, 5783), False, 'from sfepy.base.base import output, Struct\n'), ((8064, 8158), 'sfepy.parallel.plot_parallel_dofs.plot_partitioning', 'ppd.plot_partitioning', (['[None, None]', 'field', 'cell_tasks', 'gfds[0]', 'options.output_dir', 'size'], {}), '([None, None], field, cell_tasks, gfds[0], options.\n output_dir, size)\n', (8085, 8158), True, 'import sfepy.parallel.plot_parallel_dofs as ppd\n'), ((12829, 12864), 'csv.DictWriter', 'csv.DictWriter', (['fd'], {'fieldnames': 'keys'}), '(fd, fieldnames=keys)\n', (12843, 12864), False, 'import csv\n'), ((13003, 13038), 'csv.DictWriter', 'csv.DictWriter', (['fd'], {'fieldnames': 'keys'}), '(fd, fieldnames=keys)\n', (13017, 13038), False, 'import csv\n'), ((18375, 18414), 'os.path.join', 'os.path.join', (['output_dir', '"""options.txt"""'], {}), "(output_dir, 'options.txt')\n", (18387, 18414), False, 'import os\n'), ((18835, 18870), 'sfepy.base.ioutils.ensure_path', 'ensure_path', (['options.stats_filename'], {}), '(options.stats_filename)\n', (18846, 18870), False, 'from sfepy.base.ioutils import ensure_path, remove_files_patterns, save_options\n'), ((11698, 11762), 'sfepy.base.base.Struct', 'Struct', ([], {'kind': '"""adaptive"""', 'min_level': '(0)', 'max_level': 'order', 'eps': '(0.001)'}), "(kind='adaptive', min_level=0, max_level=order, eps=0.001)\n", (11704, 11762), False, 'from sfepy.base.base import output, Struct\n')]
|
# -*- coding:utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine.functional as F
from megengine.random import uniform
def sample_labels(labels, num_samples, label_value, ignore_label=-1):
"""sample N labels with label value = sample_labels
Args:
labels(Tensor): shape of label is (N,)
num_samples(int):
label_value(int):
Returns:
label(Tensor): label after sampling
"""
assert labels.ndim == 1, "Only tensor of dim 1 is supported."
mask = (labels == label_value)
    num_class = int(mask.sum())
    if num_class <= num_samples:
        return labels
    # give each selected position a random score and keep `num_samples` of
    # them; the (num_class - num_samples) highest-scored ones are ignored
    topk_tensor = F.zeros_like(labels).astype("float32")
    topk_tensor[mask] = uniform(size=num_class)
    _, select_inds = F.topk(topk_tensor, k=num_class - num_samples, descending=True)
    labels[select_inds] = ignore_label
return labels
def sample_mask_from_labels(labels, num_sample, sample_value):
"""generate mask for labels using sampling method.
Args:
labels (Tensor):
num_sample (int):
sample_value (int):
Returns:
sample_mask (Tensor)
"""
assert labels.ndim == 1, "Only tensor of dim 1 is supported."
# TODO: support bool mask
sample_mask = (labels == sample_value).astype("float32")
    num_mask = int(sample_mask.sum())
    if num_mask <= num_sample:
        return sample_mask
    # randomly score the masked positions and zero out the excess ones so
    # that exactly `num_sample` entries remain set
    random_tensor = sample_mask * uniform(size=labels.shape)
    _, sampled_idx = F.topk(random_tensor, k=num_mask - num_sample, descending=True)
sample_mask[sampled_idx] = F.zeros(sampled_idx.shape)
return sample_mask
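# A minimal smoke test under made-up values (illustrative only): keep at most
# two entries equal to the given value; the excess ones are ignored/zeroed.
if __name__ == "__main__":
    import numpy as np
    import megengine as mge

    labels = mge.tensor(np.array([1, 1, 1, 1, 0, 0], dtype="int32"))
    print(sample_labels(labels, num_samples=2, label_value=1))
    labels = mge.tensor(np.array([0, 0, 0, 0, 1, 1], dtype="int32"))
    print(sample_mask_from_labels(labels, num_sample=2, sample_value=0))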
|
[
"megengine.functional.zeros",
"megengine.functional.topk",
"megengine.random.uniform",
"megengine.functional.zeros_like"
] |
[((1015, 1038), 'megengine.random.uniform', 'uniform', ([], {'size': 'num_class'}), '(size=num_class)\n', (1022, 1038), False, 'from megengine.random import uniform\n'), ((1060, 1106), 'megengine.functional.topk', 'F.topk', (['topk_tensor'], {'k': '(num_samples - num_class)'}), '(topk_tensor, k=num_samples - num_class)\n', (1066, 1106), True, 'import megengine.functional as F\n'), ((1773, 1819), 'megengine.functional.topk', 'F.topk', (['random_tensor'], {'k': '(num_sample - num_mask)'}), '(random_tensor, k=num_sample - num_mask)\n', (1779, 1819), True, 'import megengine.functional as F\n'), ((1851, 1877), 'megengine.functional.zeros', 'F.zeros', (['sampled_idx.shape'], {}), '(sampled_idx.shape)\n', (1858, 1877), True, 'import megengine.functional as F\n'), ((1725, 1751), 'megengine.random.uniform', 'uniform', ([], {'size': 'labels.shape'}), '(size=labels.shape)\n', (1732, 1751), False, 'from megengine.random import uniform\n'), ((952, 972), 'megengine.functional.zeros_like', 'F.zeros_like', (['labels'], {}), '(labels)\n', (964, 972), True, 'import megengine.functional as F\n')]
|
"""
Computational domain for isogeometric analysis.
"""
import os.path as op
import numpy as nm
from sfepy.base.base import assert_, Struct
from sfepy.discrete.common.domain import Domain
import sfepy.discrete.iga as iga
import sfepy.discrete.iga.io as io
from sfepy.discrete.iga.extmods.igac import eval_in_tp_coors
class NurbsPatch(Struct):
"""
Single NURBS patch data.
"""
def __init__(self, knots, degrees, cps,
weights, cs, conn):
degrees = nm.asarray(degrees, dtype=nm.int32)
cs = [nm.asarray(cc, dtype=nm.float64) for cc in cs]
if cs[0].ndim == 3:
cs = [nm.ascontiguousarray(cc[:, None, ...]) for cc in cs]
Struct.__init__(self, name='nurbs', knots=knots, degrees=degrees,
cps=cps, weights=weights, cs=cs, conn=conn)
self.n_els = [len(ii) for ii in cs]
self.dim = len(self.n_els)
def _get_ref_coors_1d(self, pars, axis):
uk = nm.unique(self.knots[axis])
indices = nm.searchsorted(uk[1:], pars)
ref_coors = nm.empty_like(pars)
        for ii in range(len(uk) - 1):
ispan = nm.where(indices == ii)[0]
pp = pars[ispan]
ref_coors[ispan] = (pp - uk[ii]) / (uk[ii+1] - uk[ii])
return uk, indices, ref_coors
def __call__(self, u=None, v=None, w=None, field=None):
"""
Igakit-like interface for NURBS evaluation.
"""
pars = [u]
if v is not None: pars += [v]
if w is not None: pars += [w]
indices = []
rcs = []
for ia, par in enumerate(pars):
uk, indx, rc = self._get_ref_coors_1d(par, ia)
indices.append(indx.astype(nm.uint32))
rcs.append(rc)
out = eval_in_tp_coors(field, indices,
rcs, self.cps, self.weights,
self.degrees,
self.cs, self.conn)
return out
def evaluate(self, field, u=None, v=None, w=None):
"""
Igakit-like interface for NURBS evaluation.
"""
return self(u, v, w, field)
def _to_igakit(self):
import igakit.cad as cad
n_efuns = self.degrees + 1
nks = nm.array([len(ii) for ii in self.knots])
shape = tuple(nks - n_efuns)
cps = self.cps.reshape(shape + (-1,))
weights = self.weights.reshape(shape)
return cad.NURBS(self.knots, cps, weights=weights)
def _from_igakit(self, inurbs):
cs = iga.compute_bezier_extraction(inurbs.knots, inurbs.degree)
n_els = [len(ii) for ii in cs]
conn, bconn = iga.create_connectivity(n_els, inurbs.knots,
inurbs.degree)
cps = inurbs.points[..., :self.dim].copy()
cps = cps.reshape((-1, self.dim))
return NurbsPatch(inurbs.knots, inurbs.degree, cps,
inurbs.weights.ravel(), cs, conn)
def elevate(self, times=0):
"""
Elevate the patch degrees several `times` by one.
Returns
-------
nurbs : NurbsPatch instance
Either `self` if `times` is zero, or a new instance.
"""
        if times == 0: return self
aux = self._to_igakit()
for ia in range(self.dim):
aux.elevate(ia, times)
assert_(nm.isfinite(aux.points).all(),
'igakit degree elevation failed for axis %d!' % ia)
return self._from_igakit(aux)
class IGDomain(Domain):
"""
Bezier extraction based NURBS domain for isogeometric analysis.
"""
@staticmethod
def from_file(filename):
"""
filename : str
The name of the IGA domain file.
"""
(knots, degrees, cps, weights, cs, conn,
bcps, bweights, bconn, regions) = io.read_iga_data(filename)
nurbs = NurbsPatch(knots, degrees, cps, weights, cs, conn)
bmesh = Struct(name='bmesh', cps=bcps, weights=bweights, conn=bconn)
name = op.splitext(filename)[0]
domain = IGDomain(name, nurbs=nurbs, bmesh=bmesh, regions=regions)
return domain
def __init__(self, name, nurbs, bmesh, regions=None, **kwargs):
"""
Create an IGA domain.
Parameters
----------
name : str
The domain name.
"""
Domain.__init__(self, name, nurbs=nurbs, bmesh=bmesh, regions=regions,
**kwargs)
from sfepy.discrete.fem.geometry_element import create_geometry_elements
from sfepy.discrete.fem import Mesh
from sfepy.discrete.fem.utils import prepare_remap
tconn = iga.get_bezier_topology(bmesh.conn, nurbs.degrees)
itc = nm.unique(tconn)
remap = prepare_remap(itc, bmesh.conn.max() + 1)
ltcoors = bmesh.cps[itc]
ltconn = remap[tconn]
n_nod, dim = ltcoors.shape
n_el = ltconn.shape[0]
self.shape = Struct(n_nod=n_nod, dim=dim, tdim=0, n_el=n_el)
desc = '%d_%d' % (dim, bmesh.conn.shape[1])
mat_id = nm.zeros(bmesh.conn.shape[0], dtype=nm.int32)
eval_mesh = Mesh.from_data(self.name + '_eval', nurbs.cps, None,
[nurbs.conn], [mat_id], [desc])
self.eval_mesh = eval_mesh
desc = '%d_%d' % (dim, 2**dim)
mat_id = nm.zeros(ltconn.shape[0], dtype=nm.int32)
self.mesh = Mesh.from_data(self.name + '_topo', ltcoors, None, [ltconn],
[mat_id], [desc])
self.cmesh = self.mesh.cmesh
gels = create_geometry_elements()
self.cmesh.set_local_entities(gels)
self.cmesh.setup_entities()
self.shape.tdim = self.cmesh.tdim
self.gel = gels[desc]
if regions is not None:
self.vertex_set_bcs = {}
            for key, val in self.regions.items():
self.vertex_set_bcs[key] = remap[val]
self.reset_regions()
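# Illustrative usage sketch (the file name is hypothetical; .iga domain files
# can be generated e.g. by SfePy's script/gen_iga_patch.py):
#
#   domain = IGDomain.from_file('meshes/iga/patch2d.iga')
#   print(domain.shape)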
|
[
"sfepy.discrete.fem.geometry_element.create_geometry_elements",
"sfepy.discrete.iga.io.read_iga_data",
"sfepy.discrete.iga.get_bezier_topology",
"sfepy.discrete.common.domain.Domain.__init__",
"sfepy.base.base.Struct",
"sfepy.discrete.iga.compute_bezier_extraction",
"sfepy.discrete.iga.extmods.igac.eval_in_tp_coors",
"sfepy.discrete.iga.create_connectivity",
"sfepy.discrete.fem.Mesh.from_data",
"sfepy.base.base.Struct.__init__"
] |
[((491, 526), 'numpy.asarray', 'nm.asarray', (['degrees'], {'dtype': 'nm.int32'}), '(degrees, dtype=nm.int32)\n', (501, 526), True, 'import numpy as nm\n'), ((696, 809), 'sfepy.base.base.Struct.__init__', 'Struct.__init__', (['self'], {'name': '"""nurbs"""', 'knots': 'knots', 'degrees': 'degrees', 'cps': 'cps', 'weights': 'weights', 'cs': 'cs', 'conn': 'conn'}), "(self, name='nurbs', knots=knots, degrees=degrees, cps=cps,\n weights=weights, cs=cs, conn=conn)\n", (711, 809), False, 'from sfepy.base.base import assert_, Struct\n'), ((968, 995), 'numpy.unique', 'nm.unique', (['self.knots[axis]'], {}), '(self.knots[axis])\n', (977, 995), True, 'import numpy as nm\n'), ((1014, 1043), 'numpy.searchsorted', 'nm.searchsorted', (['uk[1:]', 'pars'], {}), '(uk[1:], pars)\n', (1029, 1043), True, 'import numpy as nm\n'), ((1064, 1083), 'numpy.empty_like', 'nm.empty_like', (['pars'], {}), '(pars)\n', (1077, 1083), True, 'import numpy as nm\n'), ((1768, 1867), 'sfepy.discrete.iga.extmods.igac.eval_in_tp_coors', 'eval_in_tp_coors', (['field', 'indices', 'rcs', 'self.cps', 'self.weights', 'self.degrees', 'self.cs', 'self.conn'], {}), '(field, indices, rcs, self.cps, self.weights, self.degrees,\n self.cs, self.conn)\n', (1784, 1867), False, 'from sfepy.discrete.iga.extmods.igac import eval_in_tp_coors\n'), ((2442, 2485), 'igakit.cad.NURBS', 'cad.NURBS', (['self.knots', 'cps'], {'weights': 'weights'}), '(self.knots, cps, weights=weights)\n', (2451, 2485), True, 'import igakit.cad as cad\n'), ((2536, 2594), 'sfepy.discrete.iga.compute_bezier_extraction', 'iga.compute_bezier_extraction', (['inurbs.knots', 'inurbs.degree'], {}), '(inurbs.knots, inurbs.degree)\n', (2565, 2594), True, 'import sfepy.discrete.iga as iga\n'), ((2656, 2715), 'sfepy.discrete.iga.create_connectivity', 'iga.create_connectivity', (['n_els', 'inurbs.knots', 'inurbs.degree'], {}), '(n_els, inurbs.knots, inurbs.degree)\n', (2679, 2715), True, 'import sfepy.discrete.iga as iga\n'), ((3866, 3892), 'sfepy.discrete.iga.io.read_iga_data', 'io.read_iga_data', (['filename'], {}), '(filename)\n', (3882, 3892), True, 'import sfepy.discrete.iga.io as io\n'), ((3977, 4037), 'sfepy.base.base.Struct', 'Struct', ([], {'name': '"""bmesh"""', 'cps': 'bcps', 'weights': 'bweights', 'conn': 'bconn'}), "(name='bmesh', cps=bcps, weights=bweights, conn=bconn)\n", (3983, 4037), False, 'from sfepy.base.base import assert_, Struct\n'), ((4394, 4479), 'sfepy.discrete.common.domain.Domain.__init__', 'Domain.__init__', (['self', 'name'], {'nurbs': 'nurbs', 'bmesh': 'bmesh', 'regions': 'regions'}), '(self, name, nurbs=nurbs, bmesh=bmesh, regions=regions, **kwargs\n )\n', (4409, 4479), False, 'from sfepy.discrete.common.domain import Domain\n'), ((4700, 4750), 'sfepy.discrete.iga.get_bezier_topology', 'iga.get_bezier_topology', (['bmesh.conn', 'nurbs.degrees'], {}), '(bmesh.conn, nurbs.degrees)\n', (4723, 4750), True, 'import sfepy.discrete.iga as iga\n'), ((4765, 4781), 'numpy.unique', 'nm.unique', (['tconn'], {}), '(tconn)\n', (4774, 4781), True, 'import numpy as nm\n'), ((4992, 5039), 'sfepy.base.base.Struct', 'Struct', ([], {'n_nod': 'n_nod', 'dim': 'dim', 'tdim': '(0)', 'n_el': 'n_el'}), '(n_nod=n_nod, dim=dim, tdim=0, n_el=n_el)\n', (4998, 5039), False, 'from sfepy.base.base import assert_, Struct\n'), ((5110, 5155), 'numpy.zeros', 'nm.zeros', (['bmesh.conn.shape[0]'], {'dtype': 'nm.int32'}), '(bmesh.conn.shape[0], dtype=nm.int32)\n', (5118, 5155), True, 'import numpy as nm\n'), ((5176, 5264), 'sfepy.discrete.fem.Mesh.from_data', 'Mesh.from_data', (["(self.name + 
'_eval')", 'nurbs.cps', 'None', '[nurbs.conn]', '[mat_id]', '[desc]'], {}), "(self.name + '_eval', nurbs.cps, None, [nurbs.conn], [mat_id],\n [desc])\n", (5190, 5264), False, 'from sfepy.discrete.fem import Mesh\n'), ((5388, 5429), 'numpy.zeros', 'nm.zeros', (['ltconn.shape[0]'], {'dtype': 'nm.int32'}), '(ltconn.shape[0], dtype=nm.int32)\n', (5396, 5429), True, 'import numpy as nm\n'), ((5450, 5528), 'sfepy.discrete.fem.Mesh.from_data', 'Mesh.from_data', (["(self.name + '_topo')", 'ltcoors', 'None', '[ltconn]', '[mat_id]', '[desc]'], {}), "(self.name + '_topo', ltcoors, None, [ltconn], [mat_id], [desc])\n", (5464, 5528), False, 'from sfepy.discrete.fem import Mesh\n'), ((5617, 5643), 'sfepy.discrete.fem.geometry_element.create_geometry_elements', 'create_geometry_elements', ([], {}), '()\n', (5641, 5643), False, 'from sfepy.discrete.fem.geometry_element import create_geometry_elements\n'), ((541, 573), 'numpy.asarray', 'nm.asarray', (['cc'], {'dtype': 'nm.float64'}), '(cc, dtype=nm.float64)\n', (551, 573), True, 'import numpy as nm\n'), ((4054, 4075), 'os.path.splitext', 'op.splitext', (['filename'], {}), '(filename)\n', (4065, 4075), True, 'import os.path as op\n'), ((634, 672), 'numpy.ascontiguousarray', 'nm.ascontiguousarray', (['cc[:, None, ...]'], {}), '(cc[:, None, ...])\n', (654, 672), True, 'import numpy as nm\n'), ((1143, 1166), 'numpy.where', 'nm.where', (['(indices == ii)'], {}), '(indices == ii)\n', (1151, 1166), True, 'import numpy as nm\n'), ((3383, 3406), 'numpy.isfinite', 'nm.isfinite', (['aux.points'], {}), '(aux.points)\n', (3394, 3406), True, 'import numpy as nm\n')]
|
#!/usr/bin/env python
# 12.01.2007, c
from __future__ import absolute_import
from argparse import ArgumentParser
import sfepy
from sfepy.base.base import output
from sfepy.base.conf import ProblemConf, get_standard_keywords
from sfepy.homogenization.band_gaps_app import AcousticBandGapsApp
from sfepy.base.plotutils import plt
helps = {
'debug':
'automatically start debugger when an exception is raised',
'filename' :
'basename of output file(s) [default: <basename of input file>]',
'detect_band_gaps' :
'detect frequency band gaps',
'analyze_dispersion' :
'analyze dispersion properties (low frequency domain)',
'plot' :
'plot frequency band gaps, assumes -b',
'phase_velocity' :
    'compute phase velocity (frequency-independent mass only)'
}
def main():
parser = ArgumentParser()
parser.add_argument("--version", action="version",
version="%(prog)s " + sfepy.__version__)
parser.add_argument('--debug',
action='store_true', dest='debug',
default=False, help=helps['debug'])
parser.add_argument("-o", metavar='filename',
action="store", dest="output_filename_trunk",
default=None, help=helps['filename'])
parser.add_argument("-b", "--band-gaps",
action="store_true", dest="detect_band_gaps",
default=False, help=helps['detect_band_gaps'])
parser.add_argument("-d", "--dispersion",
action="store_true", dest="analyze_dispersion",
default=False, help=helps['analyze_dispersion'])
parser.add_argument("-p", "--plot",
action="store_true", dest="plot",
default=False, help=helps['plot'])
parser.add_argument("--phase-velocity",
action="store_true", dest="phase_velocity",
default=False, help=helps['phase_velocity'])
parser.add_argument("filename_in")
options = parser.parse_args()
if options.debug:
        from sfepy.base.base import debug_on_error
        debug_on_error()
if options.plot:
if plt is None:
output('matplotlib.pyplot cannot be imported, ignoring option -p!')
options.plot = False
        elif not options.analyze_dispersion:
options.detect_band_gaps = True
required, other = get_standard_keywords()
required.remove('equations')
if not options.analyze_dispersion:
required.remove('solver_[0-9]+|solvers')
if options.phase_velocity:
required = [ii for ii in required if 'ebc' not in ii]
conf = ProblemConf.from_file(options.filename_in, required, other)
app = AcousticBandGapsApp(conf, options, 'phonon:')
opts = conf.options
if hasattr(opts, 'parametric_hook'): # Parametric study.
parametric_hook = conf.get_function(opts.parametric_hook)
app.parametrize(parametric_hook)
app()
if __name__ == '__main__':
main()
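# Example invocation (illustrative; the input file is a phononic problem
# description such as the band gaps examples shipped with SfePy):
#
#   python <this_script>.py -b --plot <problem_file>.py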
|
[
"sfepy.base.conf.get_standard_keywords",
"sfepy.homogenization.band_gaps_app.AcousticBandGapsApp",
"sfepy.base.base.output",
"sfepy.base.conf.ProblemConf.from_file",
"sfepy.base.base.debug_on_error"
] |
[((820, 836), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (834, 836), False, 'from argparse import ArgumentParser\n'), ((2449, 2472), 'sfepy.base.conf.get_standard_keywords', 'get_standard_keywords', ([], {}), '()\n', (2470, 2472), False, 'from sfepy.base.conf import ProblemConf, get_standard_keywords\n'), ((2698, 2757), 'sfepy.base.conf.ProblemConf.from_file', 'ProblemConf.from_file', (['options.filename_in', 'required', 'other'], {}), '(options.filename_in, required, other)\n', (2719, 2757), False, 'from sfepy.base.conf import ProblemConf, get_standard_keywords\n'), ((2769, 2814), 'sfepy.homogenization.band_gaps_app.AcousticBandGapsApp', 'AcousticBandGapsApp', (['conf', 'options', '"""phonon:"""'], {}), "(conf, options, 'phonon:')\n", (2788, 2814), False, 'from sfepy.homogenization.band_gaps_app import AcousticBandGapsApp\n'), ((2156, 2172), 'sfepy.base.base.debug_on_error', 'debug_on_error', ([], {}), '()\n', (2170, 2172), False, 'from sfepy.base.base import debug_on_error\n'), ((2231, 2298), 'sfepy.base.base.output', 'output', (['"""matplotlib.pyplot cannot be imported, ignoring option -p!"""'], {}), "('matplotlib.pyplot cannot be imported, ignoring option -p!')\n", (2237, 2298), False, 'from sfepy.base.base import output\n')]
|
r"""
Thermo-elasticity with a computed temperature demonstrating equation sequence
solver.
Uses `dw_biot` term with an isotropic coefficient for thermo-elastic coupling.
The equation sequence solver (``'ess'`` in ``solvers``) automatically solves
first the temperature distribution and then the elasticity problem with the
already computed temperature.
Find :math:`\ul{u}`, :math:`T` such that:
.. math::
\int_{\Omega} D_{ijkl}\ e_{ij}(\ul{v}) e_{kl}(\ul{u})
- \int_{\Omega} (T - T_0)\ \alpha_{ij} e_{ij}(\ul{v})
= 0
\;, \quad \forall \ul{v} \;,
\int_{\Omega} \nabla s \cdot \nabla T
= 0
\;, \quad \forall s \;.
where
.. math::
D_{ijkl} = \mu (\delta_{ik} \delta_{jl}+\delta_{il} \delta_{jk}) +
\lambda \ \delta_{ij} \delta_{kl}
\;, \\
\alpha_{ij} = (3 \lambda + 2 \mu) \alpha \delta_{ij} \;,
:math:`T_0` is the background temperature and :math:`\alpha` is the thermal
expansion coefficient.
Notes
-----
The gallery image was produced by (plus proper view settings)::
./postproc.py block.vtk -d'u,plot_displacements,rel_scaling=1000,color_kind="scalars",color_name="T"' --wireframe --only-names=u -b
"""
import numpy as np
from sfepy.mechanics.matcoefs import stiffness_from_lame
from sfepy import data_dir
# Material parameters.
lam = 10.0
mu = 5.0
thermal_expandability = 1.25e-5
T0 = 20.0 # Background temperature.
filename_mesh = data_dir + '/meshes/3d/block.mesh'
options = {
'ts' : 'ess',
'nls' : 'newton',
'ls' : 'ls',
}
regions = {
'Omega' : 'all',
'Left' : ('vertices in (x < -4.99)', 'facet'),
'Right' : ('vertices in (x > 4.99)', 'facet'),
'Bottom' : ('vertices in (z < -0.99)', 'facet'),
}
fields = {
'displacement': ('real', 3, 'Omega', 1),
'temperature': ('real', 1, 'Omega', 1),
}
variables = {
'u' : ('unknown field', 'displacement', 0),
'v' : ('test field', 'displacement', 'u'),
'T' : ('unknown field', 'temperature', 1),
's' : ('test field', 'temperature', 'T'),
}
ebcs = {
'u0' : ('Left', {'u.all' : 0.0}),
't0' : ('Left', {'T.0' : 20.0}),
't2' : ('Bottom', {'T.0' : 0.0}),
't1' : ('Right', {'T.0' : 30.0}),
}
eye_sym = np.array([[1], [1], [1], [0], [0], [0]], dtype=np.float64)
materials = {
'solid' : ({
'D' : stiffness_from_lame(3, lam=lam, mu=mu),
'alpha' : (3.0 * lam + 2.0 * mu) * thermal_expandability * eye_sym
},),
}
equations = {
'balance_of_forces' : """
+ dw_lin_elastic.2.Omega(solid.D, v, u)
- dw_biot.2.Omega(solid.alpha, v, T)
= 0
""",
'temperature' : """
+ dw_laplace.1.Omega(s, T)
= 0
"""
}
solvers = {
'ls' : ('ls.scipy_direct', {}),
'newton' : ('nls.newton', {
'i_max' : 1,
'eps_a' : 1e-10,
'problem' : 'nonlinear',
}),
'ess' : ('ts.equation_sequence', {}),
}
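# This is a declarative problem description file: it is not run directly, but
# passed to the SfePy main script, e.g. (illustrative; the entry point name
# depends on the SfePy version):
#
#   python simple.py thermo_elasticity_ess.py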
|
[
"sfepy.mechanics.matcoefs.stiffness_from_lame"
] |
[((2177, 2235), 'numpy.array', 'np.array', (['[[1], [1], [1], [0], [0], [0]]'], {'dtype': 'np.float64'}), '([[1], [1], [1], [0], [0], [0]], dtype=np.float64)\n', (2185, 2235), True, 'import numpy as np\n'), ((2281, 2319), 'sfepy.mechanics.matcoefs.stiffness_from_lame', 'stiffness_from_lame', (['(3)'], {'lam': 'lam', 'mu': 'mu'}), '(3, lam=lam, mu=mu)\n', (2300, 2319), False, 'from sfepy.mechanics.matcoefs import stiffness_from_lame\n')]
|
from datetime import datetime, timedelta
import pendulum
import prefect
from prefect import Flow, task
from prefect.run_configs import DockerRun
from prefect.schedules import CronSchedule
from prefect.storage import GitHub
from scrapy.crawler import CrawlerProcess
from sqlmodel import SQLModel, create_engine
from imdb_rating.dependencies.spiders import IMDBSpider
@task
def scrap_movies_from_imdb():
"""
Scrap movies from IMDB and store them into a PostgreSQL database using SQLModel.
Run a scrapy crawler process to launch a spider.
"""
logger = prefect.context.get("logger")
# engine = create_engine('postgresql://postgres:postgres@localhost:5432/imdb')
engine = create_engine("sqlite:///imdb.db")
SQLModel.metadata.create_all(engine)
start = datetime.today() - timedelta(days=90)
end = datetime.today() + timedelta(days=30)
process = CrawlerProcess()
process.crawl(IMDBSpider, start=start, end=end, engine=engine)
process.start()
schedule = CronSchedule("0 0 * * *", start_date=pendulum.now(tz="Europe/Paris"))
storage = GitHub(repo="PeregHer/imdb-rating-predictions", path="imdb_rating/workflow/flow.py")
run_config = DockerRun(image="imdb-scraping:latest")
with Flow(
"imdb_scraping", schedule=schedule, storage=storage, run_config=run_config
) as flow:
scrap_movies_from_imdb()
# flow.register(project_name="imdb-scraping", tags=["imdb-scraping"])
# flow.run()
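# Local test run sketch (illustrative; bypasses the cron schedule):
#   flow.run(run_on_schedule=False)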
|
[
"sqlmodel.SQLModel.metadata.create_all",
"sqlmodel.create_engine"
] |
[((1089, 1178), 'prefect.storage.GitHub', 'GitHub', ([], {'repo': '"""PeregHer/imdb-rating-predictions"""', 'path': '"""imdb_rating/workflow/flow.py"""'}), "(repo='PeregHer/imdb-rating-predictions', path=\n 'imdb_rating/workflow/flow.py')\n", (1095, 1178), False, 'from prefect.storage import GitHub\n'), ((1188, 1227), 'prefect.run_configs.DockerRun', 'DockerRun', ([], {'image': '"""imdb-scraping:latest"""'}), "(image='imdb-scraping:latest')\n", (1197, 1227), False, 'from prefect.run_configs import DockerRun\n'), ((573, 602), 'prefect.context.get', 'prefect.context.get', (['"""logger"""'], {}), "('logger')\n", (592, 602), False, 'import prefect\n'), ((700, 734), 'sqlmodel.create_engine', 'create_engine', (['"""sqlite:///imdb.db"""'], {}), "('sqlite:///imdb.db')\n", (713, 734), False, 'from sqlmodel import SQLModel, create_engine\n'), ((739, 775), 'sqlmodel.SQLModel.metadata.create_all', 'SQLModel.metadata.create_all', (['engine'], {}), '(engine)\n', (767, 775), False, 'from sqlmodel import SQLModel, create_engine\n'), ((890, 906), 'scrapy.crawler.CrawlerProcess', 'CrawlerProcess', ([], {}), '()\n', (904, 906), False, 'from scrapy.crawler import CrawlerProcess\n'), ((1235, 1320), 'prefect.Flow', 'Flow', (['"""imdb_scraping"""'], {'schedule': 'schedule', 'storage': 'storage', 'run_config': 'run_config'}), "('imdb_scraping', schedule=schedule, storage=storage, run_config=run_config\n )\n", (1239, 1320), False, 'from prefect import Flow, task\n'), ((789, 805), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (803, 805), False, 'from datetime import datetime, timedelta\n'), ((808, 826), 'datetime.timedelta', 'timedelta', ([], {'days': '(90)'}), '(days=90)\n', (817, 826), False, 'from datetime import datetime, timedelta\n'), ((837, 853), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (851, 853), False, 'from datetime import datetime, timedelta\n'), ((856, 874), 'datetime.timedelta', 'timedelta', ([], {'days': '(30)'}), '(days=30)\n', (865, 874), False, 'from datetime import datetime, timedelta\n'), ((1045, 1076), 'pendulum.now', 'pendulum.now', ([], {'tz': '"""Europe/Paris"""'}), "(tz='Europe/Paris')\n", (1057, 1076), False, 'import pendulum\n')]
|
import megengine as mge
import megengine.functional as F
import numpy as np
from megengine import Tensor
def softmax_loss(pred, label, ignore_label=-1):
max_pred = pred.max(axis=1, keepdims=True).detach()
pred -= max_pred
log_prob = pred - F.log(F.exp(pred).sum(axis=1, keepdims=True))
mask = 1 - F.equal(label, ignore_label)
vlabel = label * mask.astype(np.float32)
loss = -(F.nn.indexing_one_hot(log_prob, vlabel.astype(np.int32), 1).flatten() * mask)
loss = loss.sum() / F.maximum(mask.sum(), 1)
return loss
def softmax_loss_opr(pred, label, ignore_label=-1):
max_pred = pred.max(axis=1, keepdims=True).detach()
pred -= max_pred
log_prob = pred - F.log(F.exp(pred).sum(axis=1, keepdims=True))
mask = 1 - F.equal(label, ignore_label)
vlabel = label * mask.astype(np.float32)
loss = -(F.nn.indexing_one_hot(log_prob, vlabel.astype(np.int32), 1).flatten() * mask)
return loss
def _smooth_l1_base(pred, gt, sigma):
sigma2 = sigma ** 2
cond_point = 1 / sigma2
x = pred - gt
abs_x = F.abs(x)
in_mask = abs_x < cond_point
out_mask = 1 - in_mask.astype(np.float32)
in_value = 0.5 * (sigma * x) ** 2
out_value = abs_x - 0.5 / sigma2
value = in_value * in_mask.astype(np.float32) + out_value * out_mask
return value
def _get_mask_of_label(label, background, ignore_label):
mask_fg = 1 - F.equal(label, background).astype(np.float32)
mask_ig = 1 - F.equal(label, ignore_label).astype(np.float32)
mask = mask_fg * mask_ig
return mask, mask_ig
def smooth_l1_loss_rpn(pred, gt, label, sigma=1,
background=0, ignore_label=-1, axis=1):
value = _smooth_l1_base(pred, gt, sigma)
mask, mask_ig = _get_mask_of_label(label, background, ignore_label)
loss = (value.sum(axis = axis) * mask).sum() / F.maximum(mask_ig.sum(), 1)
return loss
def smooth_l1_loss_rcnn_opr(
pred, gt, label, sigma = 1, background=0, ignore_label=-1):
"""
pred : (minibatch, class_num, 4)
gt : (minibatch, 4)
label : (minibatch, )
"""
broadcast_label = F.broadcast_to(label.reshape(-1, 1), (label.shape[0], pred.shape[-1]))
broadcast_mask, broadcast_mask_ig = _get_mask_of_label(
broadcast_label, background, ignore_label)
vlabel = broadcast_label * broadcast_mask
pred_corr = F.nn.indexing_one_hot(pred, vlabel.astype(np.int32), 1)
value = _smooth_l1_base(pred_corr, gt, sigma)
loss = (value * broadcast_mask).sum(axis=1)
return loss
def smooth_l1_loss(pred, target, beta: float):
abs_x = F.abs(pred - target)
in_mask = abs_x < beta
out_mask = 1 - in_mask.astype(np.float32)
in_loss = 0.5 * abs_x ** 2 / beta
out_loss = abs_x - 0.5 * beta
loss = in_loss * in_mask.astype(np.float32) + out_loss * out_mask
return loss.sum(axis=1)
def sigmoid_cross_entropy_retina(
pred, label, ignore_label=-1, background=0, alpha=0.5, gamma=0):
device = pred.device
mask = 1 - F.equal(label, ignore_label).astype(np.float32)
vlabel = label * mask
n, m, c = pred.shape
zero_mat = F.zeros([n, m, c + 1]).to(device)
index = F.expand_dims(vlabel, 2).astype(np.int32)
one_hot = F.scatter(zero_mat, 2, index, F.ones([n, m, 1]))
onehot = one_hot[:, :, 1:]
pos_part = F.pow(1 - pred, gamma) * onehot * F.log(pred)
neg_part = F.pow(pred, gamma) * (1 - onehot) * F.log(1 - pred)
loss = -(alpha * pos_part + (1 - alpha) * neg_part).sum(axis=2) * mask
positive_mask = (label > 0)
return loss.sum() / F.maximum(positive_mask.sum(), 1)
def smooth_l1_loss_retina(
pred, gt, label, sigma=3, background=0, ignore_label=-1, axis=2):
value = _smooth_l1_base(pred, gt, sigma)
mask, mask_ig = _get_mask_of_label(label, background, ignore_label)
loss = (value.sum(axis=axis) * mask).sum() / F.maximum(mask.sum(), 1)
return loss
def iou_l1_loss(pred, max_overlaps, gt, ignore_label=-1, background=0):
pred = pred.reshape(pred.shape[0], -1, max_overlaps.shape[2])
abs_x = F.abs(pred - max_overlaps)
mask_bg = 1 - F.equal(gt, background).astype(np.float32)
mask_ig = 1 - F.equal(gt, ignore_label).astype(np.float32)
mask = mask_bg * mask_ig
mask = mask.reshape(mask.shape[0], -1, pred.shape[2])
loss = (abs_x * mask).sum() / F.maximum(mask.sum(), 1)
return loss
def smooth_l1_loss_rcnn(
pred, gt, label, sigma = 1, background=0, ignore_label=-1):
"""
pred : (minibatch, class_num, 4)
gt : (minibatch, 4)
label : (minibatch, )
"""
loss = smooth_l1_loss_rcnn_opr(pred, gt, label, sigma)
loss = loss.sum()/F.maximum((label > 0).sum(), 1)
return loss
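# A minimal smoke test under made-up values (illustrative only): 8 samples,
# 4 classes, the last label is ignored (-1), so the loss averages over the
# 7 valid entries.
if __name__ == "__main__":
    pred = mge.tensor(np.random.randn(8, 4).astype("float32"))
    label = mge.tensor(np.array([0, 1, 2, 3, 0, 1, 2, -1], dtype="int32"))
    print(softmax_loss(pred, label))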
|
[
"megengine.functional.exp",
"megengine.functional.pow",
"megengine.functional.equal",
"megengine.functional.zeros",
"megengine.functional.log",
"megengine.functional.ones",
"megengine.functional.expand_dims",
"megengine.functional.abs"
] |
[((1072, 1080), 'megengine.functional.abs', 'F.abs', (['x'], {}), '(x)\n', (1077, 1080), True, 'import megengine.functional as F\n'), ((3251, 3271), 'megengine.functional.abs', 'F.abs', (['(pred - target)'], {}), '(pred - target)\n', (3256, 3271), True, 'import megengine.functional as F\n'), ((4722, 4748), 'megengine.functional.abs', 'F.abs', (['(pred - max_overlaps)'], {}), '(pred - max_overlaps)\n', (4727, 4748), True, 'import megengine.functional as F\n'), ((325, 353), 'megengine.functional.equal', 'F.equal', (['label', 'ignore_label'], {}), '(label, ignore_label)\n', (332, 353), True, 'import megengine.functional as F\n'), ((769, 797), 'megengine.functional.equal', 'F.equal', (['label', 'ignore_label'], {}), '(label, ignore_label)\n', (776, 797), True, 'import megengine.functional as F\n'), ((3916, 3933), 'megengine.functional.ones', 'F.ones', (['[n, m, 1]'], {}), '([n, m, 1])\n', (3922, 3933), True, 'import megengine.functional as F\n'), ((4016, 4027), 'megengine.functional.log', 'F.log', (['pred'], {}), '(pred)\n', (4021, 4027), True, 'import megengine.functional as F\n'), ((4079, 4094), 'megengine.functional.log', 'F.log', (['(1 - pred)'], {}), '(1 - pred)\n', (4084, 4094), True, 'import megengine.functional as F\n'), ((3783, 3805), 'megengine.functional.zeros', 'F.zeros', (['[n, m, c + 1]'], {}), '([n, m, c + 1])\n', (3790, 3805), True, 'import megengine.functional as F\n'), ((3829, 3853), 'megengine.functional.expand_dims', 'F.expand_dims', (['vlabel', '(2)'], {}), '(vlabel, 2)\n', (3842, 3853), True, 'import megengine.functional as F\n'), ((3982, 4004), 'megengine.functional.pow', 'F.pow', (['(1 - pred)', 'gamma'], {}), '(1 - pred, gamma)\n', (3987, 4004), True, 'import megengine.functional as F\n'), ((4043, 4061), 'megengine.functional.pow', 'F.pow', (['pred', 'gamma'], {}), '(pred, gamma)\n', (4048, 4061), True, 'import megengine.functional as F\n'), ((1406, 1432), 'megengine.functional.equal', 'F.equal', (['label', 'background'], {}), '(label, background)\n', (1413, 1432), True, 'import megengine.functional as F\n'), ((1470, 1498), 'megengine.functional.equal', 'F.equal', (['label', 'ignore_label'], {}), '(label, ignore_label)\n', (1477, 1498), True, 'import megengine.functional as F\n'), ((3668, 3696), 'megengine.functional.equal', 'F.equal', (['label', 'ignore_label'], {}), '(label, ignore_label)\n', (3675, 3696), True, 'import megengine.functional as F\n'), ((4767, 4790), 'megengine.functional.equal', 'F.equal', (['gt', 'background'], {}), '(gt, background)\n', (4774, 4790), True, 'import megengine.functional as F\n'), ((4828, 4853), 'megengine.functional.equal', 'F.equal', (['gt', 'ignore_label'], {}), '(gt, ignore_label)\n', (4835, 4853), True, 'import megengine.functional as F\n'), ((270, 281), 'megengine.functional.exp', 'F.exp', (['pred'], {}), '(pred)\n', (275, 281), True, 'import megengine.functional as F\n'), ((714, 725), 'megengine.functional.exp', 'F.exp', (['pred'], {}), '(pred)\n', (719, 725), True, 'import megengine.functional as F\n')]
|
#!/usr/bin/env python
def configuration(parent_package='', top_path=None):
import os.path as op
from numpy.distutils.misc_util import Configuration
from sfepy import Config
site_config = Config()
system = site_config.system()
os_flag = {'posix' : 0, 'windows' : 1}[system]
auto_dir = op.dirname(__file__)
auto_name = op.split(auto_dir)[-1]
config = Configuration(auto_name, parent_package, top_path)
sdir = '\\"%s\\"' % auto_dir.replace('\\', '\\\\')
inline = 'inline' if system == 'posix' else '__inline'
defines = [('__SDIR__', sdir),
('SFEPY_PLATFORM', os_flag),
('inline', inline)]
if '-DDEBUG_FMF' in site_config.debug_flags():
defines.append(('DEBUG_FMF', None))
if '-DDEBUG_MESH' in site_config.debug_flags():
defines.append(('DEBUG_MESH', None))
common_src = ['fmfield.c', 'refmaps.c', 'geommech.c', 'common_python.c']
config.add_library('sfepy_common',
sources=common_src,
extra_compiler_args=site_config.compile_flags(),
extra_link_args=site_config.link_flags(),
include_dirs=[auto_dir, site_config.python_include()],
macros=[('SFEPY_PLATFORM', os_flag),
('inline', inline)])
src = ['_fmfield.pyx']
config.add_extension('_fmfield',
sources=src,
libraries=['sfepy_common'],
depends=common_src,
extra_compile_args=site_config.compile_flags(),
extra_link_args=site_config.link_flags(),
include_dirs=[auto_dir],
define_macros=defines)
src = ['mappings.pyx']
config.add_extension('mappings',
sources=src,
libraries=['sfepy_common'],
depends=common_src,
extra_compile_args=site_config.compile_flags(),
extra_link_args=site_config.link_flags(),
include_dirs=[auto_dir],
define_macros=defines)
src = ['assemble.pyx']
config.add_extension('assemble',
sources=src,
extra_compile_args=site_config.compile_flags(),
extra_link_args=site_config.link_flags(),
include_dirs=[auto_dir],
define_macros=defines)
src = ['cmesh.pyx', 'geomtrans.c', 'mesh.c', 'meshutils.c', 'sort.c',
'common_python.c']
config.add_extension('cmesh',
sources=src,
extra_compile_args=site_config.compile_flags(),
extra_link_args=site_config.link_flags(),
include_dirs=[auto_dir],
define_macros=defines)
src = ['crefcoors.pyx', 'refcoors.c', 'geomtrans.c', 'mesh.c']
config.add_extension('crefcoors',
sources=src,
libraries=['sfepy_common'],
depends=common_src,
extra_compile_args=site_config.compile_flags(),
extra_link_args=site_config.link_flags(),
include_dirs=[auto_dir],
define_macros=defines)
src = ['_geommech.pyx']
config.add_extension('_geommech',
sources=src,
libraries=['sfepy_common'],
extra_compile_args=site_config.compile_flags(),
extra_link_args=site_config.link_flags(),
include_dirs=[auto_dir],
define_macros=defines)
# Include *.pxd files in distribution tarball and install them along
# with the extension modules.
pxd_files = ['cmesh.pxd', 'mappings.pxd', 'types.pxd',
'_fmfield.pxd', '_geommech.pxd']
config.add_data_files(('', pxd_files))
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
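# Typical invocation, through the package's top-level setup (illustrative):
#   python setup.py build_ext --inplace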
|
[
"sfepy.Config"
] |
[((206, 214), 'sfepy.Config', 'Config', ([], {}), '()\n', (212, 214), False, 'from sfepy import Config\n'), ((316, 336), 'os.path.dirname', 'op.dirname', (['__file__'], {}), '(__file__)\n', (326, 336), True, 'import os.path as op\n'), ((389, 439), 'numpy.distutils.misc_util.Configuration', 'Configuration', (['auto_name', 'parent_package', 'top_path'], {}), '(auto_name, parent_package, top_path)\n', (402, 439), False, 'from numpy.distutils.misc_util import Configuration\n'), ((353, 371), 'os.path.split', 'op.split', (['auto_dir'], {}), '(auto_dir)\n', (361, 371), True, 'import os.path as op\n')]
|
import datetime
import typing
from sqlmodel import SQLModel, Field, Relationship
import typing as tp
import ipaddress
from sqlalchemy import UniqueConstraint
if tp.TYPE_CHECKING:
from .user import User
class LoginToken(SQLModel, table=True):
__tablename__:str = "login_tokens" # type: ignore
__table_args__ = (UniqueConstraint("token"),)
id: int = Field(primary_key=True)
    token: str  # verification token
    expired_in: typing.Optional[datetime.datetime]  # expiry time
    user_id: int = Field(foreign_key="user.id")  # associated user
    user: "User" = Relationship(back_populates="tokens")
class LoginLog(SQLModel, table=True):
__tablename__:str = "login_logs" # type: ignore
id: int = Field(primary_key=True)
user_id: int = Field(foreign_key="users.id", index=True) # 关联用户
user: User = Relationship(back_populates="login_logs", link_model="User")
login_time: datetime.datetime = Field(
default_factory=lambda: datetime.datetime.utcnow
) # 登录时间
custom_ip: typing.Optional[ipaddress.IPv4Address] # 登录用户IP
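# Illustrative usage sketch (assumes a matching User model that defines the
# `tokens` and `login_logs` back-references; the engine URL is hypothetical):
#
#   from sqlmodel import Session, create_engine
#   engine = create_engine("sqlite:///auth.db")
#   SQLModel.metadata.create_all(engine)
#   with Session(engine) as session:
#       session.add(LoginToken(token="abc123", user_id=1))
#       session.commit()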
|
[
"sqlmodel.Relationship",
"sqlmodel.Field"
] |
[((368, 391), 'sqlmodel.Field', 'Field', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (373, 391), False, 'from sqlmodel import SQLModel, Field, Relationship\n'), ((496, 524), 'sqlmodel.Field', 'Field', ([], {'foreign_key': '"""user.id"""'}), "(foreign_key='user.id')\n", (501, 524), False, 'from sqlmodel import SQLModel, Field, Relationship\n'), ((550, 587), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""tokens"""'}), "(back_populates='tokens')\n", (562, 587), False, 'from sqlmodel import SQLModel, Field, Relationship\n'), ((694, 717), 'sqlmodel.Field', 'Field', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (699, 717), False, 'from sqlmodel import SQLModel, Field, Relationship\n'), ((737, 778), 'sqlmodel.Field', 'Field', ([], {'foreign_key': '"""users.id"""', 'index': '(True)'}), "(foreign_key='users.id', index=True)\n", (742, 778), False, 'from sqlmodel import SQLModel, Field, Relationship\n'), ((804, 864), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""login_logs"""', 'link_model': '"""User"""'}), "(back_populates='login_logs', link_model='User')\n", (816, 864), False, 'from sqlmodel import SQLModel, Field, Relationship\n'), ((901, 957), 'sqlmodel.Field', 'Field', ([], {'default_factory': '(lambda : datetime.datetime.utcnow)'}), '(default_factory=lambda : datetime.datetime.utcnow)\n', (906, 957), False, 'from sqlmodel import SQLModel, Field, Relationship\n'), ((326, 351), 'sqlalchemy.UniqueConstraint', 'UniqueConstraint', (['"""token"""'], {}), "('token')\n", (342, 351), False, 'from sqlalchemy import UniqueConstraint\n')]
|
"""add power
Revision ID: 135aec058ce1
Revises: 4400883a1249
Create Date: 2021-12-28 11:38:37.439383
"""
import sqlalchemy as sa
import sqlmodel
from alembic import op
# revision identifiers, used by Alembic.
revision = "135aec058ce1"
down_revision = "4400883a1249"
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column("preps", sa.Column("power", sqlmodel.sql.sqltypes.AutoString(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column("preps", "power")
# ### end Alembic commands ###
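# Apply with the standard Alembic CLI, e.g. `alembic upgrade head`;
# revert with `alembic downgrade -1`.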
|
[
"sqlmodel.sql.sqltypes.AutoString"
] |
[((613, 645), 'alembic.op.drop_column', 'op.drop_column', (['"""preps"""', '"""power"""'], {}), "('preps', 'power')\n", (627, 645), False, 'from alembic import op\n'), ((437, 471), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (469, 471), False, 'import sqlmodel\n')]
|
from datetime import datetime
from typing import Optional
from fastapi import APIRouter, Depends
from sqlmodel import Field, SQLModel
from ...db import get_session
from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession
router = APIRouter()
class HistoryProblem(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
history_id: int
detail: str
created_at: datetime
updated_at: datetime
created_by: int
updated_by: Optional[int] = None
@router.post("/history_problem", response_model=HistoryProblem)
async def create_history_problem(history_problem: HistoryProblem, session: AsyncSession = Depends(get_session)):
session.add(history_problem)
await session.commit()
await session.refresh(history_problem)
return history_problem
@router.get("/history_problem/{id}", response_model=HistoryProblem)
async def get_history_problem(id: int, session: AsyncSession = Depends(get_session)):
history_problems = await session.execute(select(HistoryProblem).where(HistoryProblem.id == id))
history_problem = history_problems.scalars().first()
return history_problem
@router.put("/history_problem/{id}", response_model=HistoryProblem)
async def update_history_problem(id: int, session: AsyncSession = Depends(get_session)):
return None
@router.delete("/history_problem/{id}")
async def delete_history_problem(id: int, session: AsyncSession = Depends(get_session)):
return None
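# Editorial sketch (not part of the original file): the router above still has
# to be mounted on an application; the "/api/v1" prefix here is an assumption
# for illustration only.
#
#     from fastapi import FastAPI
#     app = FastAPI()
#     app.include_router(router, prefix="/api/v1")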
|
[
"sqlmodel.Field"
] |
[((295, 306), 'fastapi.APIRouter', 'APIRouter', ([], {}), '()\n', (304, 306), False, 'from fastapi import APIRouter, Depends\n'), ((377, 414), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (382, 414), False, 'from sqlmodel import Field, SQLModel\n'), ((714, 734), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (721, 734), False, 'from fastapi import APIRouter, Depends\n'), ((1000, 1020), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (1007, 1020), False, 'from fastapi import APIRouter, Depends\n'), ((1343, 1363), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (1350, 1363), False, 'from fastapi import APIRouter, Depends\n'), ((1481, 1501), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (1488, 1501), False, 'from fastapi import APIRouter, Depends\n'), ((1068, 1090), 'sqlalchemy.select', 'select', (['HistoryProblem'], {}), '(HistoryProblem)\n', (1074, 1090), False, 'from sqlalchemy import select\n')]
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
import cv2
import megengine.functional as F
import numpy as np
__all__ = [
"preprocess",
"postprocess",
]
def preprocess(image, input_size, mean, std, swap=(2, 0, 1)):
if len(image.shape) == 3:
padded_img = np.ones((input_size[0], input_size[1], 3)) * 114.0
else:
padded_img = np.ones(input_size) * 114.0
img = np.array(image)
r = min(input_size[0] / img.shape[0], input_size[1] / img.shape[1])
resized_img = cv2.resize(
img,
(int(img.shape[1] * r), int(img.shape[0] * r)),
interpolation=cv2.INTER_LINEAR,
).astype(np.float32)
padded_img[: int(img.shape[0] * r), : int(img.shape[1] * r)] = resized_img
image = padded_img
image = image.astype(np.float32)
image = image[:, :, ::-1]
image /= 255.0
if mean is not None:
image -= mean
if std is not None:
image /= std
image = image.transpose(swap)
image = np.ascontiguousarray(image, dtype=np.float32)
return image, r
def postprocess(prediction, num_classes, conf_thre=0.7, nms_thre=0.45):
box_corner = F.zeros_like(prediction)
box_corner[:, :, 0] = prediction[:, :, 0] - prediction[:, :, 2] / 2
box_corner[:, :, 1] = prediction[:, :, 1] - prediction[:, :, 3] / 2
box_corner[:, :, 2] = prediction[:, :, 0] + prediction[:, :, 2] / 2
box_corner[:, :, 3] = prediction[:, :, 1] + prediction[:, :, 3] / 2
prediction[:, :, :4] = box_corner[:, :, :4]
output = [None for _ in range(len(prediction))]
for i, image_pred in enumerate(prediction):
# If none are remaining => process next image
if not image_pred.shape[0]:
continue
# Get score and class with highest confidence
class_conf = F.max(image_pred[:, 5 : 5 + num_classes], 1, keepdims=True)
class_pred = F.argmax(image_pred[:, 5 : 5 + num_classes], 1, keepdims=True)
class_conf_squeeze = F.squeeze(class_conf)
conf_mask = image_pred[:, 4] * class_conf_squeeze >= conf_thre
detections = F.concat((image_pred[:, :5], class_conf, class_pred), 1)
detections = detections[conf_mask]
if not detections.shape[0]:
continue
nms_out_index = F.vision.nms(
detections[:, :4], detections[:, 4] * detections[:, 5], nms_thre,
)
detections = detections[nms_out_index]
if output[i] is None:
output[i] = detections
else:
output[i] = F.concat((output[i], detections))
return output
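if __name__ == "__main__":
    # Editorial smoke test (not in the original file): feed a random
    # YOLOX-style prediction tensor of shape (batch, num_boxes, 5 + classes)
    # through postprocess. The shapes and thresholds are arbitrary choices.
    import megengine as mge

    dummy = mge.tensor(np.random.rand(1, 100, 85).astype(np.float32))
    outputs = postprocess(dummy, num_classes=80, conf_thre=0.1, nms_thre=0.45)
    print([None if o is None else o.shape for o in outputs])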
|
[
"megengine.functional.vision.nms",
"megengine.functional.argmax",
"megengine.functional.squeeze",
"megengine.functional.zeros_like",
"megengine.functional.concat",
"megengine.functional.max"
] |
[((475, 490), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (483, 490), True, 'import numpy as np\n'), ((1072, 1117), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['image'], {'dtype': 'np.float32'}), '(image, dtype=np.float32)\n', (1092, 1117), True, 'import numpy as np\n'), ((1234, 1258), 'megengine.functional.zeros_like', 'F.zeros_like', (['prediction'], {}), '(prediction)\n', (1246, 1258), True, 'import megengine.functional as F\n'), ((1897, 1954), 'megengine.functional.max', 'F.max', (['image_pred[:, 5:5 + num_classes]', '(1)'], {'keepdims': '(True)'}), '(image_pred[:, 5:5 + num_classes], 1, keepdims=True)\n', (1902, 1954), True, 'import megengine.functional as F\n'), ((1979, 2039), 'megengine.functional.argmax', 'F.argmax', (['image_pred[:, 5:5 + num_classes]', '(1)'], {'keepdims': '(True)'}), '(image_pred[:, 5:5 + num_classes], 1, keepdims=True)\n', (1987, 2039), True, 'import megengine.functional as F\n'), ((2074, 2095), 'megengine.functional.squeeze', 'F.squeeze', (['class_conf'], {}), '(class_conf)\n', (2083, 2095), True, 'import megengine.functional as F\n'), ((2190, 2246), 'megengine.functional.concat', 'F.concat', (['(image_pred[:, :5], class_conf, class_pred)', '(1)'], {}), '((image_pred[:, :5], class_conf, class_pred), 1)\n', (2198, 2246), True, 'import megengine.functional as F\n'), ((2377, 2455), 'megengine.functional.vision.nms', 'F.vision.nms', (['detections[:, :4]', '(detections[:, 4] * detections[:, 5])', 'nms_thre'], {}), '(detections[:, :4], detections[:, 4] * detections[:, 5], nms_thre)\n', (2389, 2455), True, 'import megengine.functional as F\n'), ((352, 394), 'numpy.ones', 'np.ones', (['(input_size[0], input_size[1], 3)'], {}), '((input_size[0], input_size[1], 3))\n', (359, 394), True, 'import numpy as np\n'), ((436, 455), 'numpy.ones', 'np.ones', (['input_size'], {}), '(input_size)\n', (443, 455), True, 'import numpy as np\n'), ((2636, 2669), 'megengine.functional.concat', 'F.concat', (['(output[i], detections)'], {}), '((output[i], detections))\n', (2644, 2669), True, 'import megengine.functional as F\n')]
|
from typing import List, Optional
from sqlmodel import Field, Relationship, SQLModel
class TeamBase(SQLModel):
name: str
headquarters: str
class Config:
schema_extra = {
"example": {
"name": "wonderful league",
"headquarters": "Fortress of Solitude",
}
}
class Team(TeamBase, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
heroes: List["Hero"] = Relationship(back_populates="team")
class TeamCreate(TeamBase):
pass
class TeamRead(TeamBase):
id: int
class TeamUpdate(TeamBase):
name: Optional[str] = None
headquarters: Optional[str] = None
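# Editorial sketch (not part of the original file): the usual create/update
# flow with this model split validates input as TeamCreate/TeamUpdate and
# persists it as Team, e.g.
#
#     team = Team.from_orm(TeamCreate(name="wonderful league",
#                                     headquarters="Fortress of Solitude"))
#
# TeamUpdate's all-optional fields support PATCH-style partial updates via
# team_update.dict(exclude_unset=True).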
|
[
"sqlmodel.Field",
"sqlmodel.Relationship"
] |
[((410, 447), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (415, 447), False, 'from sqlmodel import Field, Relationship, SQLModel\n'), ((476, 511), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""team"""'}), "(back_populates='team')\n", (488, 511), False, 'from sqlmodel import Field, Relationship, SQLModel\n')]
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
from megengine.core import tensor
from megengine.module import BatchNorm1d, BatchNorm2d
from megengine.test import assertTensorClose
def test_batchnorm():
nr_chan = 8
data_shape = (3, nr_chan, 4)
momentum = 0.9
bn = BatchNorm1d(nr_chan, momentum=momentum)
running_mean = np.zeros((1, nr_chan, 1), dtype=np.float32)
running_var = np.ones((1, nr_chan, 1), dtype=np.float32)
data = tensor()
for i in range(3):
xv = np.random.normal(loc=2.3, size=data_shape).astype(np.float32)
mean = np.mean(np.mean(xv, axis=0, keepdims=True), axis=2, keepdims=True)
xv_transposed = np.transpose(xv, [0, 2, 1]).reshape(
(data_shape[0] * data_shape[2], nr_chan)
)
var_biased = np.var(xv_transposed, axis=0).reshape((1, nr_chan, 1))
sd = np.sqrt(var_biased + bn.eps)
var_unbiased = np.var(xv_transposed, axis=0, ddof=1).reshape((1, nr_chan, 1))
running_mean = running_mean * momentum + mean * (1 - momentum)
running_var = running_var * momentum + var_unbiased * (1 - momentum)
data.set_value(xv)
yv = bn(data)
yv_expect = (xv - mean) / sd
assertTensorClose(yv_expect, yv.numpy(), max_err=5e-6)
assertTensorClose(
running_mean.reshape(-1), bn.running_mean.numpy().reshape(-1), max_err=5e-6
)
assertTensorClose(
running_var.reshape(-1), bn.running_var.numpy().reshape(-1), max_err=5e-6
)
# test set 'training' flag to False
mean_backup = bn.running_mean.numpy()
var_backup = bn.running_var.numpy()
bn.training = False
xv = np.random.normal(loc=2.3, size=data_shape).astype(np.float32)
data.set_value(xv)
yv1 = bn(data)
yv2 = bn(data)
assertTensorClose(yv1.numpy(), yv2.numpy(), max_err=0)
assertTensorClose(mean_backup, bn.running_mean.numpy(), max_err=0)
assertTensorClose(var_backup, bn.running_var.numpy(), max_err=0)
yv_expect = (xv - running_mean) / np.sqrt(running_var + bn.eps)
assertTensorClose(yv_expect, yv1.numpy(), max_err=5e-6)
def test_batchnorm2d():
nr_chan = 8
data_shape = (3, nr_chan, 16, 16)
momentum = 0.9
bn = BatchNorm2d(nr_chan, momentum=momentum)
running_mean = np.zeros((1, nr_chan, 1, 1), dtype=np.float32)
running_var = np.ones((1, nr_chan, 1, 1), dtype=np.float32)
data = tensor()
for i in range(3):
xv = np.random.normal(loc=2.3, size=data_shape).astype(np.float32)
xv_transposed = np.transpose(xv, [0, 2, 3, 1]).reshape(
(data_shape[0] * data_shape[2] * data_shape[3], nr_chan)
)
mean = np.mean(xv_transposed, axis=0).reshape(1, nr_chan, 1, 1)
var_biased = np.var(xv_transposed, axis=0).reshape((1, nr_chan, 1, 1))
sd = np.sqrt(var_biased + bn.eps)
var_unbiased = np.var(xv_transposed, axis=0, ddof=1).reshape((1, nr_chan, 1, 1))
running_mean = running_mean * momentum + mean * (1 - momentum)
running_var = running_var * momentum + var_unbiased * (1 - momentum)
data.set_value(xv)
yv = bn(data)
yv_expect = (xv - mean) / sd
assertTensorClose(yv_expect, yv.numpy(), max_err=5e-6)
assertTensorClose(running_mean, bn.running_mean.numpy(), max_err=5e-6)
assertTensorClose(running_var, bn.running_var.numpy(), max_err=5e-6)
# test set 'training' flag to False
mean_backup = bn.running_mean.numpy()
var_backup = bn.running_var.numpy()
bn.training = False
xv = np.random.normal(loc=2.3, size=data_shape).astype(np.float32)
data.set_value(xv)
yv1 = bn(data)
yv2 = bn(data)
assertTensorClose(yv1.numpy(), yv2.numpy(), max_err=0)
assertTensorClose(mean_backup, bn.running_mean.numpy(), max_err=0)
assertTensorClose(var_backup, bn.running_var.numpy(), max_err=0)
yv_expect = (xv - running_mean) / np.sqrt(running_var + bn.eps)
assertTensorClose(yv_expect, yv1.numpy(), max_err=5e-6)
def test_batchnorm_no_stats():
nr_chan = 8
data_shape = (3, nr_chan, 4)
bn = BatchNorm1d(8, track_running_stats=False)
data = tensor()
for i in range(4):
if i == 2:
bn.training = False
xv = np.random.normal(loc=2.3, size=data_shape).astype(np.float32)
mean = np.mean(np.mean(xv, axis=0, keepdims=True), axis=2, keepdims=True)
var = np.var(
np.transpose(xv, [0, 2, 1]).reshape(
(data_shape[0] * data_shape[2], nr_chan)
),
axis=0,
).reshape((1, nr_chan, 1))
sd = np.sqrt(var + bn.eps)
data.set_value(xv)
yv = bn(data)
yv_expect = (xv - mean) / sd
assertTensorClose(yv_expect, yv.numpy(), max_err=5e-6)
def test_batchnorm2d_no_stats():
nr_chan = 8
data_shape = (3, nr_chan, 16, 16)
bn = BatchNorm2d(8, track_running_stats=False)
data = tensor()
for i in range(4):
if i == 2:
bn.training = False
xv = np.random.normal(loc=2.3, size=data_shape).astype(np.float32)
xv_transposed = np.transpose(xv, [0, 2, 3, 1]).reshape(
(data_shape[0] * data_shape[2] * data_shape[3], nr_chan)
)
mean = np.mean(xv_transposed, axis=0).reshape(1, nr_chan, 1, 1)
var = np.var(xv_transposed, axis=0).reshape((1, nr_chan, 1, 1))
sd = np.sqrt(var + bn.eps)
data.set_value(xv)
yv = bn(data)
yv_expect = (xv - mean) / sd
assertTensorClose(yv_expect, yv.numpy(), max_err=5e-6)
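# Editorial note: the tests above mirror the standard BatchNorm bookkeeping,
#     running_stat <- momentum * running_stat + (1 - momentum) * batch_stat,
# normalizing with the biased batch variance (ddof=0) while accumulating the
# unbiased variance (ddof=1) into running_var, which is the usual convention.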
|
[
"megengine.module.BatchNorm2d",
"megengine.module.BatchNorm1d",
"megengine.core.tensor"
] |
[((669, 708), 'megengine.module.BatchNorm1d', 'BatchNorm1d', (['nr_chan'], {'momentum': 'momentum'}), '(nr_chan, momentum=momentum)\n', (680, 708), False, 'from megengine.module import BatchNorm1d, BatchNorm2d\n'), ((728, 771), 'numpy.zeros', 'np.zeros', (['(1, nr_chan, 1)'], {'dtype': 'np.float32'}), '((1, nr_chan, 1), dtype=np.float32)\n', (736, 771), True, 'import numpy as np\n'), ((790, 832), 'numpy.ones', 'np.ones', (['(1, nr_chan, 1)'], {'dtype': 'np.float32'}), '((1, nr_chan, 1), dtype=np.float32)\n', (797, 832), True, 'import numpy as np\n'), ((844, 852), 'megengine.core.tensor', 'tensor', ([], {}), '()\n', (850, 852), False, 'from megengine.core import tensor\n'), ((2624, 2663), 'megengine.module.BatchNorm2d', 'BatchNorm2d', (['nr_chan'], {'momentum': 'momentum'}), '(nr_chan, momentum=momentum)\n', (2635, 2663), False, 'from megengine.module import BatchNorm1d, BatchNorm2d\n'), ((2683, 2729), 'numpy.zeros', 'np.zeros', (['(1, nr_chan, 1, 1)'], {'dtype': 'np.float32'}), '((1, nr_chan, 1, 1), dtype=np.float32)\n', (2691, 2729), True, 'import numpy as np\n'), ((2748, 2793), 'numpy.ones', 'np.ones', (['(1, nr_chan, 1, 1)'], {'dtype': 'np.float32'}), '((1, nr_chan, 1, 1), dtype=np.float32)\n', (2755, 2793), True, 'import numpy as np\n'), ((2805, 2813), 'megengine.core.tensor', 'tensor', ([], {}), '()\n', (2811, 2813), False, 'from megengine.core import tensor\n'), ((4492, 4533), 'megengine.module.BatchNorm1d', 'BatchNorm1d', (['(8)'], {'track_running_stats': '(False)'}), '(8, track_running_stats=False)\n', (4503, 4533), False, 'from megengine.module import BatchNorm1d, BatchNorm2d\n'), ((4545, 4553), 'megengine.core.tensor', 'tensor', ([], {}), '()\n', (4551, 4553), False, 'from megengine.core import tensor\n'), ((5267, 5308), 'megengine.module.BatchNorm2d', 'BatchNorm2d', (['(8)'], {'track_running_stats': '(False)'}), '(8, track_running_stats=False)\n', (5278, 5308), False, 'from megengine.module import BatchNorm1d, BatchNorm2d\n'), ((5320, 5328), 'megengine.core.tensor', 'tensor', ([], {}), '()\n', (5326, 5328), False, 'from megengine.core import tensor\n'), ((1247, 1275), 'numpy.sqrt', 'np.sqrt', (['(var_biased + bn.eps)'], {}), '(var_biased + bn.eps)\n', (1254, 1275), True, 'import numpy as np\n'), ((2426, 2455), 'numpy.sqrt', 'np.sqrt', (['(running_var + bn.eps)'], {}), '(running_var + bn.eps)\n', (2433, 2455), True, 'import numpy as np\n'), ((3221, 3249), 'numpy.sqrt', 'np.sqrt', (['(var_biased + bn.eps)'], {}), '(var_biased + bn.eps)\n', (3228, 3249), True, 'import numpy as np\n'), ((4311, 4340), 'numpy.sqrt', 'np.sqrt', (['(running_var + bn.eps)'], {}), '(running_var + bn.eps)\n', (4318, 4340), True, 'import numpy as np\n'), ((4996, 5017), 'numpy.sqrt', 'np.sqrt', (['(var + bn.eps)'], {}), '(var + bn.eps)\n', (5003, 5017), True, 'import numpy as np\n'), ((5779, 5800), 'numpy.sqrt', 'np.sqrt', (['(var + bn.eps)'], {}), '(var + bn.eps)\n', (5786, 5800), True, 'import numpy as np\n'), ((974, 1008), 'numpy.mean', 'np.mean', (['xv'], {'axis': '(0)', 'keepdims': '(True)'}), '(xv, axis=0, keepdims=True)\n', (981, 1008), True, 'import numpy as np\n'), ((2066, 2108), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(2.3)', 'size': 'data_shape'}), '(loc=2.3, size=data_shape)\n', (2082, 2108), True, 'import numpy as np\n'), ((3951, 3993), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(2.3)', 'size': 'data_shape'}), '(loc=2.3, size=data_shape)\n', (3967, 3993), True, 'import numpy as np\n'), ((4726, 4760), 'numpy.mean', 'np.mean', (['xv'], {'axis': '(0)', 'keepdims': '(True)'}), '(xv, axis=0, keepdims=True)\n', (4733, 4760), True, 'import numpy as np\n'), ((889, 931), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(2.3)', 'size': 'data_shape'}), '(loc=2.3, size=data_shape)\n', (905, 931), True, 'import numpy as np\n'), ((1057, 1084), 'numpy.transpose', 'np.transpose', (['xv', '[0, 2, 1]'], {}), '(xv, [0, 2, 1])\n', (1069, 1084), True, 'import numpy as np\n'), ((1179, 1208), 'numpy.var', 'np.var', (['xv_transposed'], {'axis': '(0)'}), '(xv_transposed, axis=0)\n', (1185, 1208), True, 'import numpy as np\n'), ((1300, 1337), 'numpy.var', 'np.var', (['xv_transposed'], {'axis': '(0)', 'ddof': '(1)'}), '(xv_transposed, axis=0, ddof=1)\n', (1306, 1337), True, 'import numpy as np\n'), ((2850, 2892), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(2.3)', 'size': 'data_shape'}), '(loc=2.3, size=data_shape)\n', (2866, 2892), True, 'import numpy as np\n'), ((2936, 2966), 'numpy.transpose', 'np.transpose', (['xv', '[0, 2, 3, 1]'], {}), '(xv, [0, 2, 3, 1])\n', (2948, 2966), True, 'import numpy as np\n'), ((3071, 3101), 'numpy.mean', 'np.mean', (['xv_transposed'], {'axis': '(0)'}), '(xv_transposed, axis=0)\n', (3078, 3101), True, 'import numpy as np\n'), ((3150, 3179), 'numpy.var', 'np.var', (['xv_transposed'], {'axis': '(0)'}), '(xv_transposed, axis=0)\n', (3156, 3179), True, 'import numpy as np\n'), ((3274, 3311), 'numpy.var', 'np.var', (['xv_transposed'], {'axis': '(0)', 'ddof': '(1)'}), '(xv_transposed, axis=0, ddof=1)\n', (3280, 3311), True, 'import numpy as np\n'), ((4641, 4683), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(2.3)', 'size': 'data_shape'}), '(loc=2.3, size=data_shape)\n', (4657, 4683), True, 'import numpy as np\n'), ((5416, 5458), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(2.3)', 'size': 'data_shape'}), '(loc=2.3, size=data_shape)\n', (5432, 5458), True, 'import numpy as np\n'), ((5502, 5532), 'numpy.transpose', 'np.transpose', (['xv', '[0, 2, 3, 1]'], {}), '(xv, [0, 2, 3, 1])\n', (5514, 5532), True, 'import numpy as np\n'), ((5637, 5667), 'numpy.mean', 'np.mean', (['xv_transposed'], {'axis': '(0)'}), '(xv_transposed, axis=0)\n', (5644, 5667), True, 'import numpy as np\n'), ((5708, 5737), 'numpy.var', 'np.var', (['xv_transposed'], {'axis': '(0)'}), '(xv_transposed, axis=0)\n', (5714, 5737), True, 'import numpy as np\n'), ((4819, 4846), 'numpy.transpose', 'np.transpose', (['xv', '[0, 2, 1]'], {}), '(xv, [0, 2, 1])\n', (4831, 4846), True, 'import numpy as np\n')]
|
"""user latest record
Revision ID: 7c2a518ed636
Revises: fe2df95ee61a
Create Date: 2021-11-27 15:37:54.561822
"""
import sqlalchemy as sa
import sqlmodel
import sqlmodel.sql.sqltypes
from alembic import op
# revision identifiers, used by Alembic.
revision = "7c2a518ed636"
down_revision = "fe2df95ee61a"
branch_labels = None
depends_on = None
def upgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"user_latest_records",
sa.Column(
"created_at",
sa.DateTime(timezone=True),
server_default=sa.text("TIMEZONE('utc', CURRENT_TIMESTAMP)"),
nullable=False,
),
sa.Column(
"updated_at",
sa.DateTime(timezone=True),
server_default=sa.text("TIMEZONE('utc', CURRENT_TIMESTAMP)"),
nullable=False,
),
sa.Column("user_id", sqlmodel.sql.sqltypes.GUID(), nullable=False),
sa.Column("problem_id", sqlmodel.sql.sqltypes.GUID(), nullable=False),
sa.Column("problem_set_id", sqlmodel.sql.sqltypes.GUID(), nullable=True),
sa.Column("record_id", sqlmodel.sql.sqltypes.GUID(), nullable=False),
sa.Column("id", sqlmodel.sql.sqltypes.GUID(), nullable=False),
sa.ForeignKeyConstraint(["problem_id"], ["problems.id"], ondelete="CASCADE"),
sa.ForeignKeyConstraint(
["problem_set_id"], ["problem_sets.id"], ondelete="CASCADE"
),
sa.ForeignKeyConstraint(["record_id"], ["records.id"], ondelete="CASCADE"),
sa.ForeignKeyConstraint(["user_id"], ["users.id"], ondelete="CASCADE"),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("user_id", "problem_id", "problem_set_id", "record_id"),
)
op.create_index(
op.f("ix_user_latest_records_created_at"),
"user_latest_records",
["created_at"],
unique=False,
)
op.create_index(
op.f("ix_user_latest_records_id"), "user_latest_records", ["id"], unique=False
)
op.create_index(
op.f("ix_user_latest_records_updated_at"),
"user_latest_records",
["updated_at"],
unique=False,
)
op.add_column(
"problem_configs",
sa.Column(
"commit_message",
sqlmodel.sql.sqltypes.AutoString(),
server_default="",
nullable=False,
),
)
# ### end Alembic commands ###
def downgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column("problem_configs", "commit_message")
op.drop_index(
op.f("ix_user_latest_records_updated_at"), table_name="user_latest_records"
)
op.drop_index(op.f("ix_user_latest_records_id"), table_name="user_latest_records")
op.drop_index(
op.f("ix_user_latest_records_created_at"), table_name="user_latest_records"
)
op.drop_table("user_latest_records")
# ### end Alembic commands ###
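# Editorial note: sqlmodel.sql.sqltypes.GUID and AutoString are SQLModel's
# portable column types; autogenerated migrations such as this one therefore
# keep the plain `import sqlmodel` at the top so these references resolve when
# the script runs.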
|
[
"sqlmodel.sql.sqltypes.AutoString",
"sqlmodel.sql.sqltypes.GUID"
] |
[((2542, 2593), 'alembic.op.drop_column', 'op.drop_column', (['"""problem_configs"""', '"""commit_message"""'], {}), "('problem_configs', 'commit_message')\n", (2556, 2593), False, 'from alembic import op\n'), ((2903, 2939), 'alembic.op.drop_table', 'op.drop_table', (['"""user_latest_records"""'], {}), "('user_latest_records')\n", (2916, 2939), False, 'from alembic import op\n'), ((1279, 1355), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['problem_id']", "['problems.id']"], {'ondelete': '"""CASCADE"""'}), "(['problem_id'], ['problems.id'], ondelete='CASCADE')\n", (1302, 1355), True, 'import sqlalchemy as sa\n'), ((1365, 1454), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['problem_set_id']", "['problem_sets.id']"], {'ondelete': '"""CASCADE"""'}), "(['problem_set_id'], ['problem_sets.id'], ondelete=\n 'CASCADE')\n", (1388, 1454), True, 'import sqlalchemy as sa\n'), ((1481, 1555), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['record_id']", "['records.id']"], {'ondelete': '"""CASCADE"""'}), "(['record_id'], ['records.id'], ondelete='CASCADE')\n", (1504, 1555), True, 'import sqlalchemy as sa\n'), ((1565, 1635), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['user_id']", "['users.id']"], {'ondelete': '"""CASCADE"""'}), "(['user_id'], ['users.id'], ondelete='CASCADE')\n", (1588, 1635), True, 'import sqlalchemy as sa\n'), ((1645, 1674), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""'], {}), "('id')\n", (1668, 1674), True, 'import sqlalchemy as sa\n'), ((1684, 1759), 'sqlalchemy.UniqueConstraint', 'sa.UniqueConstraint', (['"""user_id"""', '"""problem_id"""', '"""problem_set_id"""', '"""record_id"""'], {}), "('user_id', 'problem_id', 'problem_set_id', 'record_id')\n", (1703, 1759), True, 'import sqlalchemy as sa\n'), ((1796, 1837), 'alembic.op.f', 'op.f', (['"""ix_user_latest_records_created_at"""'], {}), "('ix_user_latest_records_created_at')\n", (1800, 1837), False, 'from alembic import op\n'), ((1951, 1984), 'alembic.op.f', 'op.f', (['"""ix_user_latest_records_id"""'], {}), "('ix_user_latest_records_id')\n", (1955, 1984), False, 'from alembic import op\n'), ((2065, 2106), 'alembic.op.f', 'op.f', (['"""ix_user_latest_records_updated_at"""'], {}), "('ix_user_latest_records_updated_at')\n", (2069, 2106), False, 'from alembic import op\n'), ((2621, 2662), 'alembic.op.f', 'op.f', (['"""ix_user_latest_records_updated_at"""'], {}), "('ix_user_latest_records_updated_at')\n", (2625, 2662), False, 'from alembic import op\n'), ((2721, 2754), 'alembic.op.f', 'op.f', (['"""ix_user_latest_records_id"""'], {}), "('ix_user_latest_records_id')\n", (2725, 2754), False, 'from alembic import op\n'), ((2817, 2858), 'alembic.op.f', 'op.f', (['"""ix_user_latest_records_created_at"""'], {}), "('ix_user_latest_records_created_at')\n", (2821, 2858), False, 'from alembic import op\n'), ((546, 572), 'sqlalchemy.DateTime', 'sa.DateTime', ([], {'timezone': '(True)'}), '(timezone=True)\n', (557, 572), True, 'import sqlalchemy as sa\n'), ((744, 770), 'sqlalchemy.DateTime', 'sa.DateTime', ([], {'timezone': '(True)'}), '(timezone=True)\n', (755, 770), True, 'import sqlalchemy as sa\n'), ((914, 942), 'sqlmodel.sql.sqltypes.GUID', 'sqlmodel.sql.sqltypes.GUID', ([], {}), '()\n', (940, 942), False, 'import sqlmodel\n'), ((993, 1021), 'sqlmodel.sql.sqltypes.GUID', 'sqlmodel.sql.sqltypes.GUID', ([], {}), '()\n', (1019, 1021), False, 'import sqlmodel\n'), ((1076, 1104), 'sqlmodel.sql.sqltypes.GUID', 'sqlmodel.sql.sqltypes.GUID', ([], {}), '()\n', (1102, 1104), False, 'import sqlmodel\n'), ((1153, 1181), 'sqlmodel.sql.sqltypes.GUID', 'sqlmodel.sql.sqltypes.GUID', ([], {}), '()\n', (1179, 1181), False, 'import sqlmodel\n'), ((1224, 1252), 'sqlmodel.sql.sqltypes.GUID', 'sqlmodel.sql.sqltypes.GUID', ([], {}), '()\n', (1250, 1252), False, 'import sqlmodel\n'), ((2298, 2332), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (2330, 2332), False, 'import sqlmodel\n'), ((601, 646), 'sqlalchemy.text', 'sa.text', (['"""TIMEZONE(\'utc\', CURRENT_TIMESTAMP)"""'], {}), '("TIMEZONE(\'utc\', CURRENT_TIMESTAMP)")\n', (608, 646), True, 'import sqlalchemy as sa\n'), ((799, 844), 'sqlalchemy.text', 'sa.text', (['"""TIMEZONE(\'utc\', CURRENT_TIMESTAMP)"""'], {}), '("TIMEZONE(\'utc\', CURRENT_TIMESTAMP)")\n', (806, 844), True, 'import sqlalchemy as sa\n')]
|
import numpy as nm
from sfepy.linalg import dot_sequences
from sfepy.terms.terms import Term, terms
class DivGradTerm(Term):
r"""
Diffusion term.
:Definition:
.. math::
\int_{\Omega} \nu\ \nabla \ul{v} : \nabla \ul{u} \mbox{ , }
\int_{\Omega} \nu\ \nabla \ul{u} : \nabla \ul{w} \\
\int_{\Omega} \nabla \ul{v} : \nabla \ul{u} \mbox{ , }
\int_{\Omega} \nabla \ul{u} : \nabla \ul{w}
:Arguments 1:
- material : :math:`\nu` (viscosity, optional)
- virtual : :math:`\ul{v}`
- state : :math:`\ul{u}`
:Arguments 2:
- material : :math:`\nu` (viscosity, optional)
- parameter_1 : :math:`\ul{u}`
- parameter_2 : :math:`\ul{w}`
"""
name = 'dw_div_grad'
arg_types = (('opt_material', 'virtual', 'state'),
('opt_material', 'parameter_1', 'parameter_2'))
arg_shapes = {'opt_material' : '1, 1', 'virtual' : ('D', 'state'),
'state' : 'D', 'parameter_1' : 'D', 'parameter_2' : 'D'}
modes = ('weak', 'eval')
function = staticmethod(terms.term_ns_asm_div_grad)
def d_div_grad(self, out, grad1, grad2, mat, vg, fmode):
sh = grad1.shape
g1 = grad1.reshape((sh[0], sh[1], sh[2] * sh[3]))
g2 = grad2.reshape((sh[0], sh[1], sh[2] * sh[3]))
aux = mat * dot_sequences(g1[..., None], g2, 'ATB')[..., None]
if fmode == 2:
out[:] = aux
status = 0
else:
status = vg.integrate(out, aux, fmode)
return status
def get_fargs(self, mat, virtual, state,
mode=None, term_mode=None, diff_var=None, **kwargs):
vg, _ = self.get_mapping(state)
if mat is None:
n_el, n_qp, dim, n_en, n_c = self.get_data_shape(state)
mat = nm.ones((1, n_qp, 1, 1), dtype=nm.float64)
if mode == 'weak':
if diff_var is None:
grad = self.get(state, 'grad').transpose((0, 1, 3, 2))
sh = grad.shape
grad = grad.reshape((sh[0], sh[1], sh[2] * sh[3], 1))
fmode = 0
else:
grad = nm.array([0], ndmin=4, dtype=nm.float64)
fmode = 1
return grad, mat, vg, fmode
elif mode == 'eval':
grad1 = self.get(virtual, 'grad')
grad2 = self.get(state, 'grad')
fmode = {'eval' : 0, 'el_avg' : 1, 'qp' : 2}.get(mode, 1)
return grad1, grad2, mat, vg, fmode
else:
raise ValueError('unsupported evaluation mode in %s! (%s)'
% (self.name, mode))
def get_eval_shape(self, mat, virtual, state,
mode=None, term_mode=None, diff_var=None, **kwargs):
n_el, n_qp, dim, n_en, n_c = self.get_data_shape(state)
return (n_el, 1, 1, 1), state.dtype
def set_arg_types(self):
if self.mode == 'weak':
self.function = terms.term_ns_asm_div_grad
else:
self.function = self.d_div_grad
class ConvectTerm(Term):
r"""
Nonlinear convective term.
:Definition:
.. math::
\int_{\Omega} ((\ul{u} \cdot \nabla) \ul{u}) \cdot \ul{v}
:Arguments:
- virtual : :math:`\ul{v}`
- state : :math:`\ul{u}`
"""
name = 'dw_convect'
arg_types = ('virtual', 'state')
arg_shapes = {'virtual' : ('D', 'state'), 'state' : 'D'}
function = staticmethod(terms.term_ns_asm_convect)
def get_fargs(self, virtual, state,
mode=None, term_mode=None, diff_var=None, **kwargs):
vg, _ = self.get_mapping(state)
grad = self.get(state, 'grad').transpose((0, 1, 3, 2)).copy()
val_qp = self.get(state, 'val')
fmode = diff_var is not None
return grad, val_qp, vg, fmode
class LinearConvectTerm(Term):
r"""
Linearized convective term.
:Definition:
.. math::
\int_{\Omega} ((\ul{b} \cdot \nabla) \ul{u}) \cdot \ul{v}
.. math::
((\ul{b} \cdot \nabla) \ul{u})|_{qp}
:Arguments:
- virtual : :math:`\ul{v}`
- parameter : :math:`\ul{b}`
- state : :math:`\ul{u}`
"""
name = 'dw_lin_convect'
arg_types = ('virtual', 'parameter', 'state')
arg_shapes = {'virtual' : ('D', 'state'), 'parameter' : 'D', 'state' : 'D'}
function = staticmethod(terms.dw_lin_convect)
def get_fargs(self, virtual, parameter, state,
mode=None, term_mode=None, diff_var=None, **kwargs):
vg, _ = self.get_mapping(state)
val_qp = self.get(parameter, 'val')
if mode == 'weak':
if diff_var is None:
grad = self.get(state, 'grad').transpose((0, 1, 3, 2)).copy()
fmode = 0
else:
grad = nm.array([0], ndmin=4, dtype=nm.float64)
fmode = 1
return grad, val_qp, vg, fmode
elif mode == 'qp':
grad = self.get(state, 'grad').transpose((0, 1, 3, 2)).copy()
fmode = 2
return grad, val_qp, vg, fmode
else:
raise ValueError('unsupported evaluation mode in %s! (%s)'
% (self.name, mode))
class StokesTerm(Term):
r"""
Stokes problem coupling term. Corresponds to weak forms of gradient and
divergence terms. Can be evaluated.
:Definition:
.. math::
\int_{\Omega} p\ \nabla \cdot \ul{v} \mbox{ , }
\int_{\Omega} q\ \nabla \cdot \ul{u}
\mbox{ or }
\int_{\Omega} c\ p\ \nabla \cdot \ul{v} \mbox{ , }
\int_{\Omega} c\ q\ \nabla \cdot \ul{u}
:Arguments 1:
- material : :math:`c` (optional)
- virtual : :math:`\ul{v}`
- state : :math:`p`
:Arguments 2:
- material : :math:`c` (optional)
- state : :math:`\ul{u}`
- virtual : :math:`q`
:Arguments 3:
- material : :math:`c` (optional)
- parameter_v : :math:`\ul{u}`
- parameter_s : :math:`p`
"""
name = 'dw_stokes'
arg_types = (('opt_material', 'virtual', 'state'),
('opt_material', 'state', 'virtual'),
('opt_material', 'parameter_v', 'parameter_s'))
arg_shapes = [{'opt_material' : '1, 1',
'virtual/grad' : ('D', None), 'state/grad' : 1,
'virtual/div' : (1, None), 'state/div' : 'D',
'parameter_v' : 'D', 'parameter_s' : 1},
{'opt_material' : None}]
modes = ('grad', 'div', 'eval')
@staticmethod
def d_eval(out, coef, vec_qp, div, vvg):
out_qp = coef * vec_qp * div
status = vvg.integrate(out, out_qp)
return status
def get_fargs(self, coef, vvar, svar,
mode=None, term_mode=None, diff_var=None, **kwargs):
if self.mode == 'grad':
qp_var, qp_name = svar, 'val'
else:
qp_var, qp_name = vvar, 'div'
n_el, n_qp, dim, n_en, n_c = self.get_data_shape(vvar)
if coef is None:
coef = nm.ones((1, n_qp, 1, 1), dtype=nm.float64)
if mode == 'weak':
vvg, _ = self.get_mapping(vvar)
svg, _ = self.get_mapping(svar)
if diff_var is None:
val_qp = self.get(qp_var, qp_name)
fmode = 0
else:
val_qp = nm.array([0], ndmin=4, dtype=nm.float64)
fmode = 1
return coef, val_qp, svg, vvg, fmode
elif mode == 'eval':
vvg, _ = self.get_mapping(vvar)
div = self.get(vvar, 'div')
vec_qp = self.get(svar, 'val')
return coef, vec_qp, div, vvg
else:
raise ValueError('unsupported evaluation mode in %s! (%s)'
% (self.name, mode))
def get_eval_shape(self, coef, vvar, svar,
mode=None, term_mode=None, diff_var=None, **kwargs):
n_el, n_qp, dim, n_en, n_c = self.get_data_shape(vvar)
return (n_el, 1, 1, 1), vvar.dtype
def set_arg_types(self):
self.function = {
'grad' : terms.dw_grad,
'div' : terms.dw_div,
'eval' : self.d_eval,
}[self.mode]
class GradTerm(Term):
r"""
Evaluate gradient of a scalar or vector field.
Supports 'eval', 'el_avg' and 'qp' evaluation modes.
:Definition:
.. math::
\int_{\Omega} \nabla p \mbox{ or } \int_{\Omega} \nabla \ul{w}
.. math::
\mbox{vector for } K \from \Ical_h: \int_{T_K} \nabla p /
\int_{T_K} 1 \mbox{ or } \int_{T_K} \nabla \ul{w} /
\int_{T_K} 1
.. math::
(\nabla p)|_{qp} \mbox{ or } \nabla \ul{w}|_{qp}
:Arguments:
- parameter : :math:`p` or :math:`\ul{w}`
"""
name = 'ev_grad'
arg_types = ('parameter',)
arg_shapes = [{'parameter' : 1}, {'parameter' : 'D'}]
@staticmethod
def function(out, grad, vg, fmode):
if fmode == 2:
out[:] = grad
status = 0
else:
status = vg.integrate(out, grad, fmode)
return status
def get_fargs(self, parameter,
mode=None, term_mode=None, diff_var=None, **kwargs):
vg, _ = self.get_mapping(parameter)
grad = self.get(parameter, 'grad')
fmode = {'eval' : 0, 'el_avg' : 1, 'qp' : 2}.get(mode, 1)
return grad, vg, fmode
def get_eval_shape(self, parameter,
mode=None, term_mode=None, diff_var=None, **kwargs):
n_el, n_qp, dim, n_en, n_c = self.get_data_shape(parameter)
if mode != 'qp':
n_qp = 1
return (n_el, n_qp, dim, n_c), parameter.dtype
class DivTerm(Term):
r"""
Evaluate divergence of a vector field.
Supports 'eval', 'el_avg' and 'qp' evaluation modes.
:Definition:
.. math::
\int_{\Omega} \nabla \cdot \ul{u}
.. math::
\mbox{vector for } K \from \Ical_h:
\int_{T_K} \nabla \cdot \ul{u} / \int_{T_K} 1
.. math::
(\nabla \cdot \ul{u})|_{qp}
:Arguments:
- parameter : :math:`\ul{u}`
"""
name = 'ev_div'
arg_types = ('parameter',)
arg_shapes = {'parameter' : 'D'}
@staticmethod
def function(out, div, vg, fmode):
if fmode == 2:
out[:] = div
status = 0
else:
status = vg.integrate(out, div, fmode)
return status
def get_fargs(self, parameter,
mode=None, term_mode=None, diff_var=None, **kwargs):
vg, _ = self.get_mapping(parameter)
div = self.get(parameter, 'div')
fmode = {'eval' : 0, 'el_avg' : 1, 'qp' : 2}.get(mode, 1)
return div, vg, fmode
def get_eval_shape(self, parameter,
mode=None, term_mode=None, diff_var=None, **kwargs):
n_el, n_qp, dim, n_en, n_c = self.get_data_shape(parameter)
if mode != 'qp':
n_qp = 1
return (n_el, n_qp, 1, 1), parameter.dtype
class DivOperatorTerm(Term):
r"""
Weighted divergence term of a test function.
:Definition:
.. math::
\int_{\Omega} \nabla \cdot \ul{v} \mbox { or } \int_{\Omega} c \nabla
\cdot \ul{v}
:Arguments:
- material : :math:`c` (optional)
- virtual : :math:`\ul{v}`
"""
name = 'dw_div'
arg_types = ('opt_material', 'virtual')
arg_shapes = [{'opt_material' : '1, 1', 'virtual' : ('D', None)},
{'opt_material' : None}]
@staticmethod
def function(out, mat, vg):
div_bf = vg.bfg
n_el, n_qp, dim, n_ep = div_bf.shape
div_bf = div_bf.reshape((n_el, n_qp, dim * n_ep, 1))
div_bf = nm.ascontiguousarray(div_bf)
if mat is not None:
status = vg.integrate(out, mat * div_bf)
else:
status = vg.integrate(out, div_bf)
return status
def get_fargs(self, mat, virtual,
mode=None, term_mode=None, diff_var=None, **kwargs):
vg, _ = self.get_mapping(virtual)
return mat, vg
class GradDivStabilizationTerm(Term):
r"""
Grad-div stabilization term ( :math:`\gamma` is a global stabilization
parameter).
:Definition:
.. math::
\gamma \int_{\Omega} (\nabla\cdot\ul{u}) \cdot (\nabla\cdot\ul{v})
:Arguments:
- material : :math:`\gamma`
- virtual : :math:`\ul{v}`
- state : :math:`\ul{u}`
"""
name = 'dw_st_grad_div'
arg_types = ('material', 'virtual', 'state')
arg_shapes = {'material' : '1, 1', 'virtual' : ('D', 'state'),
'state' : 'D'}
function = staticmethod(terms.dw_st_grad_div)
def get_fargs(self, gamma, virtual, state,
mode=None, term_mode=None, diff_var=None, **kwargs):
vg, _ = self.get_mapping(state)
if diff_var is None:
div = self.get(state, 'div')
fmode = 0
else:
div = nm.array([0], ndmin=4, dtype=nm.float64)
fmode = 1
return div, gamma, vg, fmode
from sfepy.terms.terms_diffusion import LaplaceTerm
class PSPGPStabilizationTerm(LaplaceTerm):
r"""
PSPG stabilization term, pressure part ( :math:`\tau` is a local
stabilization parameter), alias to Laplace term dw_laplace.
:Definition:
.. math::
\sum_{K \in \Ical_h}\int_{T_K} \tau_K\ \nabla p \cdot \nabla q
:Arguments:
- material : :math:`\tau_K`
- virtual : :math:`q`
- state : :math:`p`
"""
name = 'dw_st_pspg_p'
class PSPGCStabilizationTerm(Term):
r"""
PSPG stabilization term, convective part ( :math:`\tau` is a local
stabilization parameter).
:Definition:
.. math::
\sum_{K \in \Ical_h}\int_{T_K} \tau_K\ ((\ul{b} \cdot \nabla) \ul{u})
\cdot \nabla q
:Arguments:
- material : :math:`\tau_K`
- virtual : :math:`q`
- parameter : :math:`\ul{b}`
- state : :math:`\ul{u}`
"""
name = 'dw_st_pspg_c'
arg_types = ('material', 'virtual', 'parameter', 'state')
arg_shapes = {'material' : '1, 1', 'virtual' : (1, None),
'parameter' : 'D', 'state' : 'D'}
function = staticmethod(terms.dw_st_pspg_c)
def get_fargs(self, tau, virtual, parameter, state,
mode=None, term_mode=None, diff_var=None, **kwargs):
sap, svg = self.get_approximation(virtual)
vap, vvg = self.get_approximation(state)
val_qp = self.get(parameter, 'val')
conn = vap.get_connectivity(self.region, self.integration)
if diff_var is None:
fmode = 0
else:
fmode = 1
return val_qp, state(), tau, svg, vvg, conn, fmode
class SUPGPStabilizationTerm(Term):
r"""
SUPG stabilization term, pressure part ( :math:`\delta` is a local
stabilization parameter).
:Definition:
.. math::
\sum_{K \in \Ical_h}\int_{T_K} \delta_K\ \nabla p\cdot ((\ul{b} \cdot
\nabla) \ul{v})
:Arguments:
- material : :math:`\delta_K`
- virtual : :math:`\ul{v}`
- parameter : :math:`\ul{b}`
- state : :math:`p`
"""
name = 'dw_st_supg_p'
arg_types = ('material', 'virtual', 'parameter', 'state')
arg_shapes = {'material' : '1, 1', 'virtual' : ('D', None),
'parameter' : 'D', 'state' : 1}
function = staticmethod(terms.dw_st_supg_p)
def get_fargs(self, delta, virtual, parameter, state,
mode=None, term_mode=None, diff_var=None, **kwargs):
vvg, _ = self.get_mapping(virtual)
svg, _ = self.get_mapping(state)
val_qp = self.get(parameter, 'val')
if diff_var is None:
grad = self.get(state, 'grad')
fmode = 0
else:
grad = nm.array([0], ndmin=4, dtype=nm.float64)
fmode = 1
return val_qp, grad, delta, vvg, svg, fmode
class SUPGCStabilizationTerm(Term):
r"""
SUPG stabilization term, convective part ( :math:`\delta` is a local
stabilization parameter).
:Definition:
.. math::
\sum_{K \in \Ical_h}\int_{T_K} \delta_K\ ((\ul{b} \cdot \nabla)
\ul{u})\cdot ((\ul{b} \cdot \nabla) \ul{v})
:Arguments:
- material : :math:`\delta_K`
- virtual : :math:`\ul{v}`
- parameter : :math:`\ul{b}`
- state : :math:`\ul{u}`
"""
name = 'dw_st_supg_c'
arg_types = ('material', 'virtual', 'parameter', 'state')
arg_shapes = {'material' : '1, 1', 'virtual' : ('D', 'state'),
'parameter' : 'D', 'state' : 'D'}
function = staticmethod(terms.dw_st_supg_c)
def get_fargs(self, delta, virtual, parameter, state,
mode=None, term_mode=None, diff_var=None, **kwargs):
ap, vg = self.get_approximation(virtual)
val_qp = self.get(parameter, 'val')
conn = ap.get_connectivity(self.region, self.integration)
if diff_var is None:
fmode = 0
else:
fmode = 1
return val_qp, state(), delta, vg, conn, fmode
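# Editorial sketch (not part of the original file): in a problem description
# these terms are combined into equations, e.g. a steady Navier-Stokes
# balance; the names i, Omega, nu, v, u, p, q below are illustrative only.
#
#     equations = {
#         'balance': """dw_div_grad.i.Omega(nu.val, v, u)
#                     + dw_convect.i.Omega(v, u)
#                     - dw_stokes.i.Omega(v, p) = 0""",
#         'incompressibility': """dw_stokes.i.Omega(u, q) = 0""",
#     }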
|
[
"sfepy.linalg.dot_sequences"
] |
[((11757, 11785), 'numpy.ascontiguousarray', 'nm.ascontiguousarray', (['div_bf'], {}), '(div_bf)\n', (11777, 11785), True, 'import numpy as nm\n'), ((1821, 1863), 'numpy.ones', 'nm.ones', (['(1, n_qp, 1, 1)'], {'dtype': 'nm.float64'}), '((1, n_qp, 1, 1), dtype=nm.float64)\n', (1828, 1863), True, 'import numpy as nm\n'), ((7094, 7136), 'numpy.ones', 'nm.ones', (['(1, n_qp, 1, 1)'], {'dtype': 'nm.float64'}), '((1, n_qp, 1, 1), dtype=nm.float64)\n', (7101, 7136), True, 'import numpy as nm\n'), ((13021, 13061), 'numpy.array', 'nm.array', (['[0]'], {'ndmin': '(4)', 'dtype': 'nm.float64'}), '([0], ndmin=4, dtype=nm.float64)\n', (13029, 13061), True, 'import numpy as nm\n'), ((15893, 15933), 'numpy.array', 'nm.array', (['[0]'], {'ndmin': '(4)', 'dtype': 'nm.float64'}), '([0], ndmin=4, dtype=nm.float64)\n', (15901, 15933), True, 'import numpy as nm\n'), ((1341, 1380), 'sfepy.linalg.dot_sequences', 'dot_sequences', (['g1[..., None]', 'g2', '"""ATB"""'], {}), "(g1[..., None], g2, 'ATB')\n", (1354, 1380), False, 'from sfepy.linalg import dot_sequences\n'), ((2166, 2206), 'numpy.array', 'nm.array', (['[0]'], {'ndmin': '(4)', 'dtype': 'nm.float64'}), '([0], ndmin=4, dtype=nm.float64)\n', (2174, 2206), True, 'import numpy as nm\n'), ((4832, 4872), 'numpy.array', 'nm.array', (['[0]'], {'ndmin': '(4)', 'dtype': 'nm.float64'}), '([0], ndmin=4, dtype=nm.float64)\n', (4840, 4872), True, 'import numpy as nm\n'), ((7408, 7448), 'numpy.array', 'nm.array', (['[0]'], {'ndmin': '(4)', 'dtype': 'nm.float64'}), '([0], ndmin=4, dtype=nm.float64)\n', (7416, 7448), True, 'import numpy as nm\n')]
|
"""initial
Revision ID: a57c89b47e7b
Revises:
Create Date: 2021-11-01 04:27:56.134285
"""
from alembic import op
import sqlalchemy as sa
import sqlmodel
# revision identifiers, used by Alembic.
revision = 'a57c89b47e7b'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('increment',
sa.Column('id', sa.Integer(), nullable=False),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_increment_id'), 'increment', ['id'], unique=False)
op.create_table('listings',
sa.Column('id', sa.Integer(), nullable=False),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_listings_id'), 'listings', ['id'], unique=False)
op.create_table('song',
sa.Column('name', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('artist', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('year', sa.Integer(), nullable=True),
sa.Column('id', sa.Integer(), nullable=False),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_song_artist'), 'song', ['artist'], unique=False)
op.create_index(op.f('ix_song_id'), 'song', ['id'], unique=False)
op.create_index(op.f('ix_song_name'), 'song', ['name'], unique=False)
op.create_index(op.f('ix_song_year'), 'song', ['year'], unique=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_song_year'), table_name='song')
op.drop_index(op.f('ix_song_name'), table_name='song')
op.drop_index(op.f('ix_song_id'), table_name='song')
op.drop_index(op.f('ix_song_artist'), table_name='song')
op.drop_table('song')
op.drop_index(op.f('ix_listings_id'), table_name='listings')
op.drop_table('listings')
op.drop_index(op.f('ix_increment_id'), table_name='increment')
op.drop_table('increment')
# ### end Alembic commands ###
|
[
"sqlmodel.sql.sqltypes.AutoString"
] |
[((1752, 1773), 'alembic.op.drop_table', 'op.drop_table', (['"""song"""'], {}), "('song')\n", (1765, 1773), False, 'from alembic import op\n'), ((1843, 1868), 'alembic.op.drop_table', 'op.drop_table', (['"""listings"""'], {}), "('listings')\n", (1856, 1868), False, 'from alembic import op\n'), ((1940, 1966), 'alembic.op.drop_table', 'op.drop_table', (['"""increment"""'], {}), "('increment')\n", (1953, 1966), False, 'from alembic import op\n'), ((456, 485), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""'], {}), "('id')\n", (479, 485), True, 'import sqlalchemy as sa\n'), ((512, 535), 'alembic.op.f', 'op.f', (['"""ix_increment_id"""'], {}), "('ix_increment_id')\n", (516, 535), False, 'from alembic import op\n'), ((659, 688), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""'], {}), "('id')\n", (682, 688), True, 'import sqlalchemy as sa\n'), ((715, 737), 'alembic.op.f', 'op.f', (['"""ix_listings_id"""'], {}), "('ix_listings_id')\n", (719, 737), False, 'from alembic import op\n'), ((1060, 1089), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""'], {}), "('id')\n", (1083, 1089), True, 'import sqlalchemy as sa\n'), ((1116, 1138), 'alembic.op.f', 'op.f', (['"""ix_song_artist"""'], {}), "('ix_song_artist')\n", (1120, 1138), False, 'from alembic import op\n'), ((1194, 1212), 'alembic.op.f', 'op.f', (['"""ix_song_id"""'], {}), "('ix_song_id')\n", (1198, 1212), False, 'from alembic import op\n'), ((1264, 1284), 'alembic.op.f', 'op.f', (['"""ix_song_name"""'], {}), "('ix_song_name')\n", (1268, 1284), False, 'from alembic import op\n'), ((1338, 1358), 'alembic.op.f', 'op.f', (['"""ix_song_year"""'], {}), "('ix_song_year')\n", (1342, 1358), False, 'from alembic import op\n'), ((1530, 1550), 'alembic.op.f', 'op.f', (['"""ix_song_year"""'], {}), "('ix_song_year')\n", (1534, 1550), False, 'from alembic import op\n'), ((1589, 1609), 'alembic.op.f', 'op.f', (['"""ix_song_name"""'], {}), "('ix_song_name')\n", (1593, 1609), False, 'from alembic import op\n'), ((1648, 1666), 'alembic.op.f', 'op.f', (['"""ix_song_id"""'], {}), "('ix_song_id')\n", (1652, 1666), False, 'from alembic import op\n'), ((1705, 1727), 'alembic.op.f', 'op.f', (['"""ix_song_artist"""'], {}), "('ix_song_artist')\n", (1709, 1727), False, 'from alembic import op\n'), ((1792, 1814), 'alembic.op.f', 'op.f', (['"""ix_listings_id"""'], {}), "('ix_listings_id')\n", (1796, 1814), False, 'from alembic import op\n'), ((1887, 1910), 'alembic.op.f', 'op.f', (['"""ix_increment_id"""'], {}), "('ix_increment_id')\n", (1891, 1910), False, 'from alembic import op\n'), ((421, 433), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (431, 433), True, 'import sqlalchemy as sa\n'), ((624, 636), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (634, 636), True, 'import sqlalchemy as sa\n'), ((823, 857), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (855, 857), False, 'import sqlmodel\n'), ((900, 934), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (932, 934), False, 'import sqlmodel\n'), ((975, 987), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (985, 987), True, 'import sqlalchemy as sa\n'), ((1025, 1037), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (1035, 1037), True, 'import sqlalchemy as sa\n')]
|
import numpy as np
import megengine.functional as F
from common import se3, so3
def compute_losses(endpoints, params):
loss = {}
# compute losses
if params.loss_type == "finet":
num_iter = len(endpoints["all_pose_pair"])
triplet_loss = {}
for i in range(num_iter):
# reg loss
pose_pair = endpoints["all_pose_pair"][i]
loss["quat_{}".format(i)] = F.nn.l1_loss(pose_pair[0][:, :4], pose_pair[1][:, :4]) * params.loss_alpha1
loss["translate_{}".format(i)] = F.nn.square_loss(pose_pair[0][:, 4:], pose_pair[1][:, 4:]) * params.loss_alpha2
# transformation sensitivity loss (TSL)
if i < 2:
all_R_feats = endpoints["all_R_feats"][i]
all_t_feats = endpoints["all_t_feats"][i]
# R feats triplet loss
R_feats_pos = F.nn.square_loss(all_t_feats[0], all_t_feats[1])
R_feats_neg = F.nn.square_loss(all_R_feats[0], all_R_feats[1])
triplet_loss["R_feats_triplet_pos_{}".format(i)] = R_feats_pos
triplet_loss["R_feats_triplet_neg_{}".format(i)] = R_feats_neg
loss["R_feats_triplet_{}".format(i)] = (F.clip(-R_feats_neg + params.margin[i], lower=0.0) +
R_feats_pos) * params.loss_alpha3
# t feats triplet loss
t_feats_pos = F.nn.square_loss(all_R_feats[0], all_R_feats[2])
t_feats_neg = F.nn.square_loss(all_t_feats[0], all_t_feats[2])
triplet_loss["t_feats_triplet_pos_{}".format(i)] = t_feats_pos
triplet_loss["t_feats_triplet_neg_{}".format(i)] = t_feats_neg
loss["t_feats_triplet_{}".format(i)] = (F.clip(-t_feats_neg + params.margin[i], lower=0.0) +
t_feats_pos) * params.loss_alpha3
# point-wise feature dropout loss (PFDL)
all_dropout_R_feats = endpoints["all_dropout_R_feats"][i]
all_dropout_t_feats = endpoints["all_dropout_t_feats"][i]
loss["src_R_feats_dropout_{}".format(i)] = F.nn.square_loss(all_dropout_R_feats[0], all_dropout_R_feats[1]) * params.loss_alpha4
loss["ref_R_feats_dropout_{}".format(i)] = F.nn.square_loss(all_dropout_R_feats[2], all_dropout_R_feats[3]) * params.loss_alpha4
loss["src_t_feats_dropout_{}".format(i)] = F.nn.square_loss(all_dropout_t_feats[0], all_dropout_t_feats[1]) * params.loss_alpha4
loss["ref_t_feats_dropout_{}".format(i)] = F.nn.square_loss(all_dropout_t_feats[2], all_dropout_t_feats[3]) * params.loss_alpha4
# total loss
total_losses = []
for k in loss:
total_losses.append(loss[k])
loss["total"] = F.sum(F.concat(total_losses))
else:
raise NotImplementedError
return loss
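# Editorial note: each triplet term above is a margin hinge,
#     max(0, margin - d_neg) + d_pos,
# with F.clip(..., lower=0.0) playing the role of the ReLU; minimizing it
# pushes d_neg above the margin while shrinking d_pos, so rotation features
# learn to ignore translation and vice versa.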
def compute_metrics(endpoints, params):
metrics = {}
gt_transforms = endpoints["transform_pair"][0]
pred_transforms = endpoints["transform_pair"][1]
# Euler angles, Individual translation errors (Deep Closest Point convention)
if "prnet" in params.transform_type:
r_gt_euler_deg = so3.mge_dcm2euler(gt_transforms[:, :3, :3], seq="zyx")
r_pred_euler_deg = so3.mge_dcm2euler(pred_transforms[:, :3, :3], seq="zyx")
else:
r_gt_euler_deg = so3.mge_dcm2euler(gt_transforms[:, :3, :3], seq="xyz")
r_pred_euler_deg = so3.mge_dcm2euler(pred_transforms[:, :3, :3], seq="xyz")
t_gt = gt_transforms[:, :3, 3]
t_pred = pred_transforms[:, :3, 3]
r_mse = F.mean((r_gt_euler_deg - r_pred_euler_deg)**2, axis=1)
r_mae = F.mean(F.abs(r_gt_euler_deg - r_pred_euler_deg), axis=1)
t_mse = F.mean((t_gt - t_pred)**2, axis=1)
t_mae = F.mean(F.abs(t_gt - t_pred), axis=1)
r_mse = F.mean(r_mse)
t_mse = F.mean(t_mse)
r_mae = F.mean(r_mae)
t_mae = F.mean(t_mae)
    # Rotation, translation errors (isotropic, i.e. doesn't depend on error
# direction, which is more representative of the actual error)
concatenated = se3.mge_concatenate(se3.mge_inverse(gt_transforms), pred_transforms)
rot_trace = concatenated[:, 0, 0] + concatenated[:, 1, 1] + concatenated[:, 2, 2]
residual_rotdeg = F.acos(F.clip(0.5 * (rot_trace - 1), -1.0, 1.0)) * 180.0 / np.pi
residual_transmag = F.norm(concatenated[:, :, 3], axis=-1)
err_r = F.mean(residual_rotdeg)
err_t = F.mean(residual_transmag)
# weighted score of isotropic errors
score = err_r * 0.01 + err_t
metrics = {"R_MSE": r_mse, "R_MAE": r_mae, "t_MSE": t_mse, "t_MAE": t_mae, "Err_R": err_r, "Err_t": err_t, "score": score}
return metrics
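# Editorial note: residual_rotdeg uses the axis-angle identity
#     theta = arccos((trace(R_gt^-1 R_pred) - 1) / 2),
# converted to degrees; clipping the trace term to [-1, 1] protects acos from
# floating-point values just outside its domain.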
|
[
"megengine.functional.nn.l1_loss",
"megengine.functional.clip",
"megengine.functional.nn.square_loss",
"megengine.functional.mean",
"megengine.functional.norm",
"megengine.functional.abs",
"megengine.functional.concat"
] |
[((3616, 3672), 'megengine.functional.mean', 'F.mean', (['((r_gt_euler_deg - r_pred_euler_deg) ** 2)'], {'axis': '(1)'}), '((r_gt_euler_deg - r_pred_euler_deg) ** 2, axis=1)\n', (3622, 3672), True, 'import megengine.functional as F\n'), ((3752, 3788), 'megengine.functional.mean', 'F.mean', (['((t_gt - t_pred) ** 2)'], {'axis': '(1)'}), '((t_gt - t_pred) ** 2, axis=1)\n', (3758, 3788), True, 'import megengine.functional as F\n'), ((3849, 3862), 'megengine.functional.mean', 'F.mean', (['r_mse'], {}), '(r_mse)\n', (3855, 3862), True, 'import megengine.functional as F\n'), ((3875, 3888), 'megengine.functional.mean', 'F.mean', (['t_mse'], {}), '(t_mse)\n', (3881, 3888), True, 'import megengine.functional as F\n'), ((3901, 3914), 'megengine.functional.mean', 'F.mean', (['r_mae'], {}), '(r_mae)\n', (3907, 3914), True, 'import megengine.functional as F\n'), ((3927, 3940), 'megengine.functional.mean', 'F.mean', (['t_mae'], {}), '(t_mae)\n', (3933, 3940), True, 'import megengine.functional as F\n'), ((4370, 4408), 'megengine.functional.norm', 'F.norm', (['concatenated[:, :, 3]'], {'axis': '(-1)'}), '(concatenated[:, :, 3], axis=-1)\n', (4376, 4408), True, 'import megengine.functional as F\n'), ((4421, 4444), 'megengine.functional.mean', 'F.mean', (['residual_rotdeg'], {}), '(residual_rotdeg)\n', (4427, 4444), True, 'import megengine.functional as F\n'), ((4457, 4482), 'megengine.functional.mean', 'F.mean', (['residual_transmag'], {}), '(residual_transmag)\n', (4463, 4482), True, 'import megengine.functional as F\n'), ((3216, 3270), 'common.so3.mge_dcm2euler', 'so3.mge_dcm2euler', (['gt_transforms[:, :3, :3]'], {'seq': '"""zyx"""'}), "(gt_transforms[:, :3, :3], seq='zyx')\n", (3233, 3270), False, 'from common import se3, so3\n'), ((3298, 3354), 'common.so3.mge_dcm2euler', 'so3.mge_dcm2euler', (['pred_transforms[:, :3, :3]'], {'seq': '"""zyx"""'}), "(pred_transforms[:, :3, :3], seq='zyx')\n", (3315, 3354), False, 'from common import se3, so3\n'), ((3390, 3444), 'common.so3.mge_dcm2euler', 'so3.mge_dcm2euler', (['gt_transforms[:, :3, :3]'], {'seq': '"""xyz"""'}), "(gt_transforms[:, :3, :3], seq='xyz')\n", (3407, 3444), False, 'from common import se3, so3\n'), ((3472, 3528), 'common.so3.mge_dcm2euler', 'so3.mge_dcm2euler', (['pred_transforms[:, :3, :3]'], {'seq': '"""xyz"""'}), "(pred_transforms[:, :3, :3], seq='xyz')\n", (3489, 3528), False, 'from common import se3, so3\n'), ((3690, 3730), 'megengine.functional.abs', 'F.abs', (['(r_gt_euler_deg - r_pred_euler_deg)'], {}), '(r_gt_euler_deg - r_pred_euler_deg)\n', (3695, 3730), True, 'import megengine.functional as F\n'), ((3806, 3826), 'megengine.functional.abs', 'F.abs', (['(t_gt - t_pred)'], {}), '(t_gt - t_pred)\n', (3811, 3826), True, 'import megengine.functional as F\n'), ((4124, 4154), 'common.se3.mge_inverse', 'se3.mge_inverse', (['gt_transforms'], {}), '(gt_transforms)\n', (4139, 4154), False, 'from common import se3, so3\n'), ((2819, 2841), 'megengine.functional.concat', 'F.concat', (['total_losses'], {}), '(total_losses)\n', (2827, 2841), True, 'import megengine.functional as F\n'), ((420, 474), 'megengine.functional.nn.l1_loss', 'F.nn.l1_loss', (['pose_pair[0][:, :4]', 'pose_pair[1][:, :4]'], {}), '(pose_pair[0][:, :4], pose_pair[1][:, :4])\n', (432, 474), True, 'import megengine.functional as F\n'), ((541, 599), 'megengine.functional.nn.square_loss', 'F.nn.square_loss', (['pose_pair[0][:, 4:]', 'pose_pair[1][:, 4:]'], {}), '(pose_pair[0][:, 4:], pose_pair[1][:, 4:])\n', (557, 599), True, 'import megengine.functional as F\n'), ((881, 929), 'megengine.functional.nn.square_loss', 'F.nn.square_loss', (['all_t_feats[0]', 'all_t_feats[1]'], {}), '(all_t_feats[0], all_t_feats[1])\n', (897, 929), True, 'import megengine.functional as F\n'), ((960, 1008), 'megengine.functional.nn.square_loss', 'F.nn.square_loss', (['all_R_feats[0]', 'all_R_feats[1]'], {}), '(all_R_feats[0], all_R_feats[1])\n', (976, 1008), True, 'import megengine.functional as F\n'), ((1435, 1483), 'megengine.functional.nn.square_loss', 'F.nn.square_loss', (['all_R_feats[0]', 'all_R_feats[2]'], {}), '(all_R_feats[0], all_R_feats[2])\n', (1451, 1483), True, 'import megengine.functional as F\n'), ((1514, 1562), 'megengine.functional.nn.square_loss', 'F.nn.square_loss', (['all_t_feats[0]', 'all_t_feats[2]'], {}), '(all_t_feats[0], all_t_feats[2])\n', (1530, 1562), True, 'import megengine.functional as F\n'), ((2169, 2233), 'megengine.functional.nn.square_loss', 'F.nn.square_loss', (['all_dropout_R_feats[0]', 'all_dropout_R_feats[1]'], {}), '(all_dropout_R_feats[0], all_dropout_R_feats[1])\n', (2185, 2233), True, 'import megengine.functional as F\n'), ((2310, 2374), 'megengine.functional.nn.square_loss', 'F.nn.square_loss', (['all_dropout_R_feats[2]', 'all_dropout_R_feats[3]'], {}), '(all_dropout_R_feats[2], all_dropout_R_feats[3])\n', (2326, 2374), True, 'import megengine.functional as F\n'), ((2451, 2515), 'megengine.functional.nn.square_loss', 'F.nn.square_loss', (['all_dropout_t_feats[0]', 'all_dropout_t_feats[1]'], {}), '(all_dropout_t_feats[0], all_dropout_t_feats[1])\n', (2467, 2515), True, 'import megengine.functional as F\n'), ((2592, 2656), 'megengine.functional.nn.square_loss', 'F.nn.square_loss', (['all_dropout_t_feats[2]', 'all_dropout_t_feats[3]'], {}), '(all_dropout_t_feats[2], all_dropout_t_feats[3])\n', (2608, 2656), True, 'import megengine.functional as F\n'), ((4288, 4328), 'megengine.functional.clip', 'F.clip', (['(0.5 * (rot_trace - 1))', '(-1.0)', '(1.0)'], {}), '(0.5 * (rot_trace - 1), -1.0, 1.0)\n', (4294, 4328), True, 'import megengine.functional as F\n'), ((1223, 1273), 'megengine.functional.clip', 'F.clip', (['(-R_feats_neg + params.margin[i])'], {'lower': '(0.0)'}), '(-R_feats_neg + params.margin[i], lower=0.0)\n', (1229, 1273), True, 'import megengine.functional as F\n'), ((1777, 1827), 'megengine.functional.clip', 'F.clip', (['(-t_feats_neg + params.margin[i])'], {'lower': '(0.0)'}), '(-t_feats_neg + params.margin[i], lower=0.0)\n', (1783, 1827), True, 'import megengine.functional as F\n')]
|
from datetime import datetime
from typing import Optional
from fastapi import APIRouter, Depends
from sqlmodel import Field, SQLModel
from ..db import get_session
from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession
router = APIRouter()
class Procedure(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
procedure_group_id: int
parent_procedure_id: int
name: str
detail: str
icd_9: str
icd_10: str
class ProcedureGroup(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str
class ProcedureDiseaseMap(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
procedure_id: int
    disease_id: int
require: bool
age_min: float
age_max: float
class HistoryProcedure(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
history_id: int
procedure_id: int
detail: str
created_at: datetime
updated_at: datetime
created_by: int
updated_by: Optional[int] = None
class HistoryProcedureDoctorMap(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
history_procedure_id: int
doctor_id: int
created_at: datetime
updated_at: datetime
created_by: int
updated_by: Optional[int] = None
#
#
# @router.post("/history_procedure", response_model=HistoryProcedure)
# async def create_history_procedure(history_procedure: HistoryProcedure, session: AsyncSession = Depends(get_session)):
# session.add(history_procedure)
# await session.commit()
# await session.refresh(history_procedure)
# return history_procedure
#
#
# @router.get("/history_procedure/{procedure_id}", response_model=HistoryProcedure)
# async def get_history_procedure(procedure_id: int, session: AsyncSession = Depends(get_session)):
# history_procedures = await session.execute(select(HistoryProcedure).where(HistoryProcedure.id == procedure_id))
# history_procedure = history_procedures.scalars().first()
# return history_procedure
#
#
# @router.put("/history_procedure/{procedure_id}", response_model=HistoryProcedure)
# async def update_history_procedure(id: int, session: AsyncSession = Depends(get_session)):
# return None
#
#
# @router.delete("/history_procedure/{procedure_id}")
# async def delete_history_procedure(session: AsyncSession = Depends(get_session)):
# return None
|
[
"sqlmodel.Field"
] |
[((254, 265), 'fastapi.APIRouter', 'APIRouter', ([], {}), '()\n', (263, 265), False, 'from fastapi import APIRouter, Depends\n'), ((331, 368), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (336, 368), False, 'from sqlmodel import Field, SQLModel\n'), ((557, 594), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (562, 594), False, 'from sqlmodel import Field, SQLModel\n'), ((684, 721), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (689, 721), False, 'from sqlmodel import Field, SQLModel\n'), ((893, 930), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (898, 930), False, 'from sqlmodel import Field, SQLModel\n'), ((1177, 1214), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (1182, 1214), False, 'from sqlmodel import Field, SQLModel\n')]
|
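The commented-out handlers in the module above sketch its intended CRUD surface. A minimal working version of the read endpoint, reusing that module's router, get_session dependency, select import, and HistoryProcedure model (a sketch under those assumptions, not the application's shipped code):

@router.get("/history_procedure/{procedure_id}", response_model=HistoryProcedure)
async def get_history_procedure(procedure_id: int,
                                session: AsyncSession = Depends(get_session)):
    # Fetch a single row by primary key; a production version would typically
    # raise HTTPException(status_code=404) when no row is found.
    result = await session.execute(
        select(HistoryProcedure).where(HistoryProcedure.id == procedure_id))
    return result.scalars().first()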
from __future__ import absolute_import
from sfepy.base.testing import TestCommon
def get_ortho_d(phi1, phi2):
import numpy as nm
import sfepy.mechanics.tensors as tn
v1 = nm.array([nm.cos(phi1), nm.sin(phi1), 0])
v2 = nm.array([nm.cos(phi2), nm.sin(phi2), 0])
om1 = nm.outer(v1, v1)
om2 = nm.outer(v2, v2)
ii = tn.get_sym_indices(3)
o1 = om1.flat[ii]
o2 = om2.flat[ii]
dr = nm.outer(o1, o1) + nm.outer(o2, o2)
return dr, v1, v2, om1, om2
class Test(TestCommon):
@staticmethod
def from_conf(conf, options):
return Test(conf=conf, options=options)
def test_tensors(self):
import numpy as nm
import sfepy.mechanics.tensors as tn
ok = True
a_full = 2.0 * nm.ones((5,3,3), dtype=nm.float64)
a_sym = 2.0 * nm.ones((5,6), dtype=nm.float64)
_tr = nm.array([6.0] * 5, dtype=nm.float64)
_vt_full = 2.0 * nm.tile(nm.eye(3, dtype=nm.float64), (5,1,1))
_vt_sym = nm.tile(nm.array([2, 2, 2, 0, 0, 0], dtype=nm.float64),
(5,1,1))
_dev_full = a_full - _vt_full
_dev_sym = a_sym - _vt_sym
_vms = 6.0 * nm.ones((5,1), dtype=nm.float64)
tr = tn.get_trace(a_full, sym_storage=False)
_ok = nm.allclose(tr, _tr, rtol=0.0, atol=1e-14)
self.report('trace full: %s' % _ok)
ok = ok and _ok
tr = tn.get_trace(a_sym, sym_storage=True)
        _ok = nm.allclose(tr, _tr, rtol=0.0, atol=1e-14)
self.report('trace sym: %s' % _ok)
ok = ok and _ok
vt = tn.get_volumetric_tensor(a_full, sym_storage=False)
_ok = nm.allclose(vt, _vt_full, rtol=0.0, atol=1e-14)
self.report('volumetric tensor full: %s' % _ok)
ok = ok and _ok
vt = tn.get_volumetric_tensor(a_sym, sym_storage=True)
_ok = nm.allclose(vt, _vt_sym, rtol=0.0, atol=1e-14)
self.report('volumetric tensor sym: %s' % _ok)
ok = ok and _ok
dev = tn.get_deviator(a_full, sym_storage=False)
_ok = nm.allclose(dev, _dev_full, rtol=0.0, atol=1e-14)
self.report('deviator full: %s' % _ok)
ok = ok and _ok
aux = (dev * nm.transpose(dev, (0, 2, 1))).sum(axis=1).sum(axis=1)
vms2 = nm.sqrt((3.0/2.0) * aux)[:,None]
dev = tn.get_deviator(a_sym, sym_storage=True)
_ok = nm.allclose(dev, _dev_sym, rtol=0.0, atol=1e-14)
self.report('deviator sym: %s' % _ok)
ok = ok and _ok
vms = tn.get_von_mises_stress(a_full, sym_storage=False)
_ok = nm.allclose(vms, _vms, rtol=0.0, atol=1e-14)
self.report('von Mises stress full: %s' % _ok)
ok = ok and _ok
vms = tn.get_von_mises_stress(a_sym, sym_storage=True)
_ok = nm.allclose(vms, _vms, rtol=0.0, atol=1e-14)
self.report('von Mises stress sym: %s' % _ok)
ok = ok and _ok
_ok = nm.allclose(vms2, _vms, rtol=0.0, atol=1e-14)
self.report('von Mises stress via deviator: %s' % _ok)
ok = ok and _ok
t2s = nm.arange(9).reshape(3, 3)
t2s = (t2s + t2s.T) / 2
t4 = tn.get_t4_from_t2s(t2s)
expected = nm.array([[[[0, 4], [4, 2]],
[[4, 8], [8, 6]]],
[[[4, 8], [8, 6]],
[[2, 6], [6, 4]]]])
_ok = nm.allclose(t4, expected, rtol=0.0, atol=1e-14)
self.report('full 4D tensor from 2D matrix, 2D space: %s' % _ok)
ok = ok and _ok
return ok
def test_transform_data(self):
import numpy as nm
from sfepy.mechanics.tensors import transform_data
ok = True
coors = nm.eye(3)
data = nm.eye(3)
expected = nm.zeros((3, 3))
expected[[0, 1, 2], [0, 0, 2]] = 1.0
out = transform_data(data, coors)
_ok = nm.allclose(out, expected, rtol=0.0, atol=1e-14)
self.report('vectors in cylindrical coordinates: %s' % _ok)
ok = ok and _ok
data = nm.zeros((3, 6))
data[:, :3] = [[1, 2, 3]]
expected = data.copy()
expected[1, [0, 1]] = expected[1, [1, 0]]
out = transform_data(data, coors)
_ok = nm.allclose(out, expected, rtol=0.0, atol=1e-14)
self.report('sym. tensors in cylindrical coordinates: %s' % _ok)
ok = ok and _ok
return ok
def test_transform_data4(self):
import numpy as nm
import sfepy.mechanics.tensors as tn
ok = True
if not hasattr(nm, 'einsum'):
self.report('no numpy.einsum(), skipping!')
return ok
expected = nm.zeros((6, 6), dtype=nm.float64)
expected[0, 0] = expected[1, 1] = 1.0
phi = nm.deg2rad(30.)
dr, v1, v2, om1, om2 = get_ortho_d(phi, phi + nm.deg2rad(90.))
# Rotate coordinate system by phi.
mtx = tn.make_axis_rotation_matrix([0., 0., 1.], phi)
do = tn.transform_data(dr[None, ...], mtx=mtx[None, ...])
_ok = nm.allclose(do, expected, rtol=0.0, atol=1e-14)
self.report('sym. 4th-th order tensor rotation: %s' % _ok)
ok = ok and _ok
dt, vt1, vt2, omt1, omt2 = get_ortho_d(0, nm.deg2rad(90.))
expected1 = nm.zeros((3, 3), dtype=nm.float64)
expected1[0, 0] = 1.0
expected2 = nm.zeros((3, 3), dtype=nm.float64)
expected2[1, 1] = 1.0
omr1 = nm.einsum('pq,ip,jq->ij', om1, mtx, mtx)
omr2 = nm.einsum('pq,ip,jq->ij', om2, mtx, mtx)
ii = tn.get_sym_indices(3)
jj = tn.get_full_indices(3)
o1 = om1.flat[ii]
o2 = om2.flat[ii]
omr12 = tn.transform_data(o1[None,...], mtx=mtx[None, ...])[0, jj]
omr22 = tn.transform_data(o2[None,...], mtx=mtx[None, ...])[0, jj]
_ok1 = nm.allclose(omr1, expected1, rtol=0.0, atol=1e-14)
_ok2 = nm.allclose(omr12, expected1, rtol=0.0, atol=1e-14)
self.report('einsum-transform_data compatibility 1: %s %s'
% (_ok1, _ok2))
ok = ok and _ok1 and _ok2
_ok1 = nm.allclose(omr2, expected2, rtol=0.0, atol=1e-14)
_ok2 = nm.allclose(omr22, expected2, rtol=0.0, atol=1e-14)
self.report('einsum-transform_data compatibility 2: %s %s'
% (_ok1, _ok2))
ok = ok and _ok1 and _ok2
return ok
def test_stress_transform(self):
import numpy as nm
from sfepy.mechanics.tensors import StressTransform
stress_2pk = nm.arange(6) + 1
def_grad = nm.array([[0.5047051 , 0.71142596, 0.10180901],
[0.13427707, 0.87156371, 0.42612244],
[0.27509466, 0.6262605 , 0.87659051]])
det = nm.linalg.det(def_grad)
aux = stress_2pk[[0, 3, 4, 3, 1, 5, 4, 5, 2]].reshape(3, 3)
expected = nm.dot(nm.dot(def_grad, aux), def_grad.T) / det
expected = expected.ravel()[[0, 4, 8, 1, 2, 5]][:, None]
expected = nm.tile(expected, (5, 1, 1, 1))
transform = StressTransform(nm.tile(def_grad, (5, 1, 1, 1)))
stress_2pk.shape = (6, 1)
ts = nm.tile(stress_2pk.reshape((6, 1)), (5, 1, 1, 1))
stress_cauchy = transform.get_cauchy_from_2pk(ts)
ok = nm.allclose(stress_cauchy, expected, rtol=0.0, atol=1e-12)
self.report('stress: Cauchy from second Piola-Kirchhoff: %s' % ok)
return ok
|
[
"sfepy.mechanics.tensors.get_trace",
"sfepy.mechanics.tensors.get_full_indices",
"sfepy.mechanics.tensors.make_axis_rotation_matrix",
"sfepy.mechanics.tensors.get_sym_indices",
"sfepy.mechanics.tensors.transform_data",
"sfepy.mechanics.tensors.get_t4_from_t2s",
"sfepy.mechanics.tensors.get_deviator",
"sfepy.mechanics.tensors.get_volumetric_tensor",
"sfepy.mechanics.tensors.get_von_mises_stress"
] |
[((288, 304), 'numpy.outer', 'nm.outer', (['v1', 'v1'], {}), '(v1, v1)\n', (296, 304), True, 'import numpy as nm\n'), ((315, 331), 'numpy.outer', 'nm.outer', (['v2', 'v2'], {}), '(v2, v2)\n', (323, 331), True, 'import numpy as nm\n'), ((342, 363), 'sfepy.mechanics.tensors.get_sym_indices', 'tn.get_sym_indices', (['(3)'], {}), '(3)\n', (360, 363), True, 'import sfepy.mechanics.tensors as tn\n'), ((419, 435), 'numpy.outer', 'nm.outer', (['o1', 'o1'], {}), '(o1, o1)\n', (427, 435), True, 'import numpy as nm\n'), ((438, 454), 'numpy.outer', 'nm.outer', (['o2', 'o2'], {}), '(o2, o2)\n', (446, 454), True, 'import numpy as nm\n'), ((862, 899), 'numpy.array', 'nm.array', (['([6.0] * 5)'], {'dtype': 'nm.float64'}), '([6.0] * 5, dtype=nm.float64)\n', (870, 899), True, 'import numpy as nm\n'), ((1221, 1260), 'sfepy.mechanics.tensors.get_trace', 'tn.get_trace', (['a_full'], {'sym_storage': '(False)'}), '(a_full, sym_storage=False)\n', (1233, 1260), True, 'import sfepy.mechanics.tensors as tn\n'), ((1275, 1317), 'numpy.allclose', 'nm.allclose', (['tr', '_tr'], {'rtol': '(0.0)', 'atol': '(1e-14)'}), '(tr, _tr, rtol=0.0, atol=1e-14)\n', (1286, 1317), True, 'import numpy as nm\n'), ((1400, 1437), 'sfepy.mechanics.tensors.get_trace', 'tn.get_trace', (['a_sym'], {'sym_storage': '(True)'}), '(a_sym, sym_storage=True)\n', (1412, 1437), True, 'import sfepy.mechanics.tensors as tn\n'), ((1582, 1633), 'sfepy.mechanics.tensors.get_volumetric_tensor', 'tn.get_volumetric_tensor', (['a_full'], {'sym_storage': '(False)'}), '(a_full, sym_storage=False)\n', (1606, 1633), True, 'import sfepy.mechanics.tensors as tn\n'), ((1648, 1695), 'numpy.allclose', 'nm.allclose', (['vt', '_vt_full'], {'rtol': '(0.0)', 'atol': '(1e-14)'}), '(vt, _vt_full, rtol=0.0, atol=1e-14)\n', (1659, 1695), True, 'import numpy as nm\n'), ((1790, 1839), 'sfepy.mechanics.tensors.get_volumetric_tensor', 'tn.get_volumetric_tensor', (['a_sym'], {'sym_storage': '(True)'}), '(a_sym, sym_storage=True)\n', (1814, 1839), True, 'import sfepy.mechanics.tensors as tn\n'), ((1854, 1900), 'numpy.allclose', 'nm.allclose', (['vt', '_vt_sym'], {'rtol': '(0.0)', 'atol': '(1e-14)'}), '(vt, _vt_sym, rtol=0.0, atol=1e-14)\n', (1865, 1900), True, 'import numpy as nm\n'), ((1995, 2037), 'sfepy.mechanics.tensors.get_deviator', 'tn.get_deviator', (['a_full'], {'sym_storage': '(False)'}), '(a_full, sym_storage=False)\n', (2010, 2037), True, 'import sfepy.mechanics.tensors as tn\n'), ((2052, 2101), 'numpy.allclose', 'nm.allclose', (['dev', '_dev_full'], {'rtol': '(0.0)', 'atol': '(1e-14)'}), '(dev, _dev_full, rtol=0.0, atol=1e-14)\n', (2063, 2101), True, 'import numpy as nm\n'), ((2312, 2352), 'sfepy.mechanics.tensors.get_deviator', 'tn.get_deviator', (['a_sym'], {'sym_storage': '(True)'}), '(a_sym, sym_storage=True)\n', (2327, 2352), True, 'import sfepy.mechanics.tensors as tn\n'), ((2367, 2415), 'numpy.allclose', 'nm.allclose', (['dev', '_dev_sym'], {'rtol': '(0.0)', 'atol': '(1e-14)'}), '(dev, _dev_sym, rtol=0.0, atol=1e-14)\n', (2378, 2415), True, 'import numpy as nm\n'), ((2501, 2551), 'sfepy.mechanics.tensors.get_von_mises_stress', 'tn.get_von_mises_stress', (['a_full'], {'sym_storage': '(False)'}), '(a_full, sym_storage=False)\n', (2524, 2551), True, 'import sfepy.mechanics.tensors as tn\n'), ((2566, 2610), 'numpy.allclose', 'nm.allclose', (['vms', '_vms'], {'rtol': '(0.0)', 'atol': '(1e-14)'}), '(vms, _vms, rtol=0.0, atol=1e-14)\n', (2577, 2610), True, 'import numpy as nm\n'), ((2705, 2753), 'sfepy.mechanics.tensors.get_von_mises_stress', 'tn.get_von_mises_stress', 
(['a_sym'], {'sym_storage': '(True)'}), '(a_sym, sym_storage=True)\n', (2728, 2753), True, 'import sfepy.mechanics.tensors as tn\n'), ((2768, 2812), 'numpy.allclose', 'nm.allclose', (['vms', '_vms'], {'rtol': '(0.0)', 'atol': '(1e-14)'}), '(vms, _vms, rtol=0.0, atol=1e-14)\n', (2779, 2812), True, 'import numpy as nm\n'), ((2906, 2951), 'numpy.allclose', 'nm.allclose', (['vms2', '_vms'], {'rtol': '(0.0)', 'atol': '(1e-14)'}), '(vms2, _vms, rtol=0.0, atol=1e-14)\n', (2917, 2951), True, 'import numpy as nm\n'), ((3126, 3149), 'sfepy.mechanics.tensors.get_t4_from_t2s', 'tn.get_t4_from_t2s', (['t2s'], {}), '(t2s)\n', (3144, 3149), True, 'import sfepy.mechanics.tensors as tn\n'), ((3169, 3259), 'numpy.array', 'nm.array', (['[[[[0, 4], [4, 2]], [[4, 8], [8, 6]]], [[[4, 8], [8, 6]], [[2, 6], [6, 4]]]]'], {}), '([[[[0, 4], [4, 2]], [[4, 8], [8, 6]]], [[[4, 8], [8, 6]], [[2, 6],\n [6, 4]]]])\n', (3177, 3259), True, 'import numpy as nm\n'), ((3360, 3407), 'numpy.allclose', 'nm.allclose', (['t4', 'expected'], {'rtol': '(0.0)', 'atol': '(1e-14)'}), '(t4, expected, rtol=0.0, atol=1e-14)\n', (3371, 3407), True, 'import numpy as nm\n'), ((3682, 3691), 'numpy.eye', 'nm.eye', (['(3)'], {}), '(3)\n', (3688, 3691), True, 'import numpy as nm\n'), ((3708, 3717), 'numpy.eye', 'nm.eye', (['(3)'], {}), '(3)\n', (3714, 3717), True, 'import numpy as nm\n'), ((3737, 3753), 'numpy.zeros', 'nm.zeros', (['(3, 3)'], {}), '((3, 3))\n', (3745, 3753), True, 'import numpy as nm\n'), ((3814, 3841), 'sfepy.mechanics.tensors.transform_data', 'transform_data', (['data', 'coors'], {}), '(data, coors)\n', (3828, 3841), False, 'from sfepy.mechanics.tensors import transform_data\n'), ((3857, 3905), 'numpy.allclose', 'nm.allclose', (['out', 'expected'], {'rtol': '(0.0)', 'atol': '(1e-14)'}), '(out, expected, rtol=0.0, atol=1e-14)\n', (3868, 3905), True, 'import numpy as nm\n'), ((4014, 4030), 'numpy.zeros', 'nm.zeros', (['(3, 6)'], {}), '((3, 6))\n', (4022, 4030), True, 'import numpy as nm\n'), ((4161, 4188), 'sfepy.mechanics.tensors.transform_data', 'transform_data', (['data', 'coors'], {}), '(data, coors)\n', (4175, 4188), False, 'from sfepy.mechanics.tensors import transform_data\n'), ((4204, 4252), 'numpy.allclose', 'nm.allclose', (['out', 'expected'], {'rtol': '(0.0)', 'atol': '(1e-14)'}), '(out, expected, rtol=0.0, atol=1e-14)\n', (4215, 4252), True, 'import numpy as nm\n'), ((4634, 4668), 'numpy.zeros', 'nm.zeros', (['(6, 6)'], {'dtype': 'nm.float64'}), '((6, 6), dtype=nm.float64)\n', (4642, 4668), True, 'import numpy as nm\n'), ((4730, 4746), 'numpy.deg2rad', 'nm.deg2rad', (['(30.0)'], {}), '(30.0)\n', (4740, 4746), True, 'import numpy as nm\n'), ((4875, 4925), 'sfepy.mechanics.tensors.make_axis_rotation_matrix', 'tn.make_axis_rotation_matrix', (['[0.0, 0.0, 1.0]', 'phi'], {}), '([0.0, 0.0, 1.0], phi)\n', (4903, 4925), True, 'import sfepy.mechanics.tensors as tn\n'), ((4936, 4988), 'sfepy.mechanics.tensors.transform_data', 'tn.transform_data', (['dr[None, ...]'], {'mtx': 'mtx[None, ...]'}), '(dr[None, ...], mtx=mtx[None, ...])\n', (4953, 4988), True, 'import sfepy.mechanics.tensors as tn\n'), ((5004, 5051), 'numpy.allclose', 'nm.allclose', (['do', 'expected'], {'rtol': '(0.0)', 'atol': '(1e-14)'}), '(do, expected, rtol=0.0, atol=1e-14)\n', (5015, 5051), True, 'import numpy as nm\n'), ((5232, 5266), 'numpy.zeros', 'nm.zeros', (['(3, 3)'], {'dtype': 'nm.float64'}), '((3, 3), dtype=nm.float64)\n', (5240, 5266), True, 'import numpy as nm\n'), ((5318, 5352), 'numpy.zeros', 'nm.zeros', (['(3, 3)'], {'dtype': 'nm.float64'}), '((3, 
3), dtype=nm.float64)\n', (5326, 5352), True, 'import numpy as nm\n'), ((5399, 5439), 'numpy.einsum', 'nm.einsum', (['"""pq,ip,jq->ij"""', 'om1', 'mtx', 'mtx'], {}), "('pq,ip,jq->ij', om1, mtx, mtx)\n", (5408, 5439), True, 'import numpy as nm\n'), ((5455, 5495), 'numpy.einsum', 'nm.einsum', (['"""pq,ip,jq->ij"""', 'om2', 'mtx', 'mtx'], {}), "('pq,ip,jq->ij', om2, mtx, mtx)\n", (5464, 5495), True, 'import numpy as nm\n'), ((5510, 5531), 'sfepy.mechanics.tensors.get_sym_indices', 'tn.get_sym_indices', (['(3)'], {}), '(3)\n', (5528, 5531), True, 'import sfepy.mechanics.tensors as tn\n'), ((5545, 5567), 'sfepy.mechanics.tensors.get_full_indices', 'tn.get_full_indices', (['(3)'], {}), '(3)\n', (5564, 5567), True, 'import sfepy.mechanics.tensors as tn\n'), ((5788, 5838), 'numpy.allclose', 'nm.allclose', (['omr1', 'expected1'], {'rtol': '(0.0)', 'atol': '(1e-14)'}), '(omr1, expected1, rtol=0.0, atol=1e-14)\n', (5799, 5838), True, 'import numpy as nm\n'), ((5854, 5905), 'numpy.allclose', 'nm.allclose', (['omr12', 'expected1'], {'rtol': '(0.0)', 'atol': '(1e-14)'}), '(omr12, expected1, rtol=0.0, atol=1e-14)\n', (5865, 5905), True, 'import numpy as nm\n'), ((6059, 6109), 'numpy.allclose', 'nm.allclose', (['omr2', 'expected2'], {'rtol': '(0.0)', 'atol': '(1e-14)'}), '(omr2, expected2, rtol=0.0, atol=1e-14)\n', (6070, 6109), True, 'import numpy as nm\n'), ((6125, 6176), 'numpy.allclose', 'nm.allclose', (['omr22', 'expected2'], {'rtol': '(0.0)', 'atol': '(1e-14)'}), '(omr22, expected2, rtol=0.0, atol=1e-14)\n', (6136, 6176), True, 'import numpy as nm\n'), ((6517, 6644), 'numpy.array', 'nm.array', (['[[0.5047051, 0.71142596, 0.10180901], [0.13427707, 0.87156371, 0.42612244],\n [0.27509466, 0.6262605, 0.87659051]]'], {}), '([[0.5047051, 0.71142596, 0.10180901], [0.13427707, 0.87156371, \n 0.42612244], [0.27509466, 0.6262605, 0.87659051]])\n', (6525, 6644), True, 'import numpy as nm\n'), ((6714, 6737), 'numpy.linalg.det', 'nm.linalg.det', (['def_grad'], {}), '(def_grad)\n', (6727, 6737), True, 'import numpy as nm\n'), ((6958, 6989), 'numpy.tile', 'nm.tile', (['expected', '(5, 1, 1, 1)'], {}), '(expected, (5, 1, 1, 1))\n', (6965, 6989), True, 'import numpy as nm\n'), ((7230, 7288), 'numpy.allclose', 'nm.allclose', (['stress_cauchy', 'expected'], {'rtol': '(0.0)', 'atol': '(1e-12)'}), '(stress_cauchy, expected, rtol=0.0, atol=1e-12)\n', (7241, 7288), True, 'import numpy as nm\n'), ((195, 207), 'numpy.cos', 'nm.cos', (['phi1'], {}), '(phi1)\n', (201, 207), True, 'import numpy as nm\n'), ((209, 221), 'numpy.sin', 'nm.sin', (['phi1'], {}), '(phi1)\n', (215, 221), True, 'import numpy as nm\n'), ((246, 258), 'numpy.cos', 'nm.cos', (['phi2'], {}), '(phi2)\n', (252, 258), True, 'import numpy as nm\n'), ((260, 272), 'numpy.sin', 'nm.sin', (['phi2'], {}), '(phi2)\n', (266, 272), True, 'import numpy as nm\n'), ((757, 793), 'numpy.ones', 'nm.ones', (['(5, 3, 3)'], {'dtype': 'nm.float64'}), '((5, 3, 3), dtype=nm.float64)\n', (764, 793), True, 'import numpy as nm\n'), ((814, 847), 'numpy.ones', 'nm.ones', (['(5, 6)'], {'dtype': 'nm.float64'}), '((5, 6), dtype=nm.float64)\n', (821, 847), True, 'import numpy as nm\n'), ((997, 1043), 'numpy.array', 'nm.array', (['[2, 2, 2, 0, 0, 0]'], {'dtype': 'nm.float64'}), '([2, 2, 2, 0, 0, 0], dtype=nm.float64)\n', (1005, 1043), True, 'import numpy as nm\n'), ((1174, 1207), 'numpy.ones', 'nm.ones', (['(5, 1)'], {'dtype': 'nm.float64'}), '((5, 1), dtype=nm.float64)\n', (1181, 1207), True, 'import numpy as nm\n'), ((1458, 1500), 'numpy.allclose', 'nm.allclose', (['tr', '_tr'], {'rtol': 
'(0.0)', 'atol': '(1e-14)'}), '(tr, _tr, rtol=0.0, atol=1e-14)\n', (1469, 1500), True, 'import numpy as nm\n'), ((2264, 2288), 'numpy.sqrt', 'nm.sqrt', (['(3.0 / 2.0 * aux)'], {}), '(3.0 / 2.0 * aux)\n', (2271, 2288), True, 'import numpy as nm\n'), ((5194, 5210), 'numpy.deg2rad', 'nm.deg2rad', (['(90.0)'], {}), '(90.0)\n', (5204, 5210), True, 'import numpy as nm\n'), ((5638, 5690), 'sfepy.mechanics.tensors.transform_data', 'tn.transform_data', (['o1[None, ...]'], {'mtx': 'mtx[None, ...]'}), '(o1[None, ...], mtx=mtx[None, ...])\n', (5655, 5690), True, 'import sfepy.mechanics.tensors as tn\n'), ((5713, 5765), 'sfepy.mechanics.tensors.transform_data', 'tn.transform_data', (['o2[None, ...]'], {'mtx': 'mtx[None, ...]'}), '(o2[None, ...], mtx=mtx[None, ...])\n', (5730, 5765), True, 'import sfepy.mechanics.tensors as tn\n'), ((6480, 6492), 'numpy.arange', 'nm.arange', (['(6)'], {}), '(6)\n', (6489, 6492), True, 'import numpy as nm\n'), ((7027, 7058), 'numpy.tile', 'nm.tile', (['def_grad', '(5, 1, 1, 1)'], {}), '(def_grad, (5, 1, 1, 1))\n', (7034, 7058), True, 'import numpy as nm\n'), ((933, 960), 'numpy.eye', 'nm.eye', (['(3)'], {'dtype': 'nm.float64'}), '(3, dtype=nm.float64)\n', (939, 960), True, 'import numpy as nm\n'), ((3054, 3066), 'numpy.arange', 'nm.arange', (['(9)'], {}), '(9)\n', (3063, 3066), True, 'import numpy as nm\n'), ((4800, 4816), 'numpy.deg2rad', 'nm.deg2rad', (['(90.0)'], {}), '(90.0)\n', (4810, 4816), True, 'import numpy as nm\n'), ((6833, 6854), 'numpy.dot', 'nm.dot', (['def_grad', 'aux'], {}), '(def_grad, aux)\n', (6839, 6854), True, 'import numpy as nm\n'), ((2195, 2223), 'numpy.transpose', 'nm.transpose', (['dev', '(0, 2, 1)'], {}), '(dev, (0, 2, 1))\n', (2207, 2223), True, 'import numpy as nm\n')]
|
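A standalone sketch of the sym/full storage round-trip that the einsum-compatibility checks above depend on, assuming sfepy's index conventions as exercised in the test:

import numpy as nm
import sfepy.mechanics.tensors as tn

a = nm.arange(9, dtype=nm.float64).reshape(3, 3)
a = 0.5 * (a + a.T)                       # symmetrize the matrix
a_sym = a.flat[tn.get_sym_indices(3)]    # full 3x3 -> 6-component symmetric storage
a_full = a_sym[tn.get_full_indices(3)]   # 6 components -> full 3x3 again
assert nm.allclose(a, a_full)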
from datetime import datetime
from sqlmodel import Field, SQLModel, Relationship
from typing import Optional
from sqlalchemy import Column
from sqlalchemy.dialects.postgresql import JSON
class TextInferenceBase(SQLModel):
text: str = Field(nullable=False, index=True)
class TextInference(TextInferenceBase, table=True):
id: Optional[int] = Field(default=None, nullable=False, primary_key=True)
result: dict[str, float] = Field(nullable=False, sa_column=Column(JSON))
created_at: Optional[datetime]
updated_at: Optional[datetime]
created_by_id: Optional[int] = Field(default=None, foreign_key="user.id")
created_by: "User" = Relationship(
sa_relationship_kwargs={
"lazy": "selectin",
"primaryjoin": "TextInference.created_by_id == User.id",
}
)
|
[
"sqlmodel.Field",
"sqlmodel.Relationship"
] |
[((240, 273), 'sqlmodel.Field', 'Field', ([], {'nullable': '(False)', 'index': '(True)'}), '(nullable=False, index=True)\n', (245, 273), False, 'from sqlmodel import Field, SQLModel, Relationship\n'), ((352, 405), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'nullable': '(False)', 'primary_key': '(True)'}), '(default=None, nullable=False, primary_key=True)\n', (357, 405), False, 'from sqlmodel import Field, SQLModel, Relationship\n'), ((588, 630), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'foreign_key': '"""user.id"""'}), "(default=None, foreign_key='user.id')\n", (593, 630), False, 'from sqlmodel import Field, SQLModel, Relationship\n'), ((656, 774), 'sqlmodel.Relationship', 'Relationship', ([], {'sa_relationship_kwargs': "{'lazy': 'selectin', 'primaryjoin': 'TextInference.created_by_id == User.id'}"}), "(sa_relationship_kwargs={'lazy': 'selectin', 'primaryjoin':\n 'TextInference.created_by_id == User.id'})\n", (668, 774), False, 'from sqlmodel import Field, SQLModel, Relationship\n'), ((469, 481), 'sqlalchemy.Column', 'Column', (['JSON'], {}), '(JSON)\n', (475, 481), False, 'from sqlalchemy import Column\n')]
|
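Two things worth noting about the model above: SQLModel cannot map a plain dict column on its own, hence the explicit Postgres JSON sa_column, and the created_by relationship presumes a User model defined elsewhere in the application. The shared base class carries no table machinery, so it can be sanity-checked without a database (made-up value):

demo = TextInferenceBase(text="great product, would buy again")
assert demo.text.startswith("great")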
import pickle
from typing import Optional, Dict, Any, List, cast
from enum import Enum
import orjson
from fastapi import APIRouter, Depends, Query
from pydantic import validator
from sqlmodel import Session, Field, select # type: ignore[import]
from sqlalchemy import distinct # type: ignore[import]
from app.db import get_db, FeedModel, FeedBase
router = APIRouter()
class FeedRead(FeedBase):
    # parsed from the backend's storage formats into Python data structures
    tags: List[str] = Field(default_factory=list)
    data: Dict[str, Any] = Field(default_factory=dict)
@validator("tags", pre=True)
def parse_tags(cls, v: Any) -> List[str]:
return cast(List[str], orjson.loads(v))
@validator("data", pre=True)
def parse_data(cls, v: Any) -> Dict[str, Any]:
if v is None:
return {}
else:
return cast(Dict[str, Any], pickle.loads(v))
class OrderBy(Enum):
score = "score"
when = "when"
class Sort(Enum):
asc = "asc"
ascending = "ascending"
desc = "desc"
descending = "descending"
# items which shouldn't be shown when sorted by 'score'
# since it'd make the feed too busy
INDIVIDUAL_FEED_TYPES = [
"anime_episode",
"manga_chapter",
"listen",
"trakt_history_episode",
"trakt_history_movie",
]
@router.get("/types", response_model=List[str])
async def data_types(
session: Session = Depends(get_db),
) -> List[str]:
stmt = select(distinct(FeedModel.ftype))
with session:
items: List[str] = list(session.exec(stmt))
return items
@router.get("/", response_model=List[FeedRead])
async def data(
offset: int = 0,
limit: int = Query(default=100, lte=100),
order_by: OrderBy = Query(default=OrderBy.when),
sort: Sort = Query(default=Sort.desc),
ftype: Optional[str] = Query(default=None, min_length=2),
query: Optional[str] = Query(default=None, min_length=2),
title: Optional[str] = Query(default=None, min_length=2),
creator: Optional[str] = Query(default=None, min_length=2),
subtitle: Optional[str] = Query(default=None, min_length=2),
session: Session = Depends(get_db),
) -> List[FeedRead]:
stmt = select(FeedModel)
if ftype is not None and ftype.strip():
if parts := ftype.strip().split(","):
stmt = stmt.filter(FeedModel.ftype.in_(parts)) # type: ignore
if query is None:
if title:
stmt = stmt.filter(FeedModel.title.ilike(f"%{title}%")) # type: ignore
if creator:
stmt = stmt.filter(FeedModel.creator.ilike(f"%{creator}%")) # type: ignore
if subtitle:
stmt = stmt.filter(FeedModel.subtitle.ilike(f"%{subtitle}%")) # type: ignore
else:
stmt = stmt.filter(
(FeedModel.title.ilike(f"%{query}%")) # type: ignore
| (FeedModel.creator.ilike(f"%{query}%")) # type: ignore
| (FeedModel.subtitle.ilike(f"%{query}%")) # type: ignore
| (FeedModel.model_id.ilike(f"%{query}%")) # type: ignore
)
if order_by == OrderBy.score:
stmt = stmt.filter(FeedModel.score != None)
stmt = stmt.filter(FeedModel.ftype.notin_(INDIVIDUAL_FEED_TYPES)) # type: ignore
# ORDER BY Score [CHOSEN], When DESC to show things I completed recently higher when sorting by score
stmt = stmt.order_by(FeedModel.score.asc() if sort == Sort.asc else FeedModel.score.desc(), FeedModel.when.desc()) # type: ignore
else:
stmt = stmt.order_by(FeedModel.when.asc() if sort == Sort.asc else FeedModel.when.desc()) # type: ignore
stmt = stmt.limit(limit).offset(offset)
with session:
items: List[FeedModel] = list(session.exec(stmt))
return items
|
[
"sqlmodel.select",
"sqlmodel.Field"
] |
[((363, 374), 'fastapi.APIRouter', 'APIRouter', ([], {}), '()\n', (372, 374), False, 'from fastapi import APIRouter, Depends, Query\n'), ((473, 500), 'sqlmodel.Field', 'Field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (478, 500), False, 'from sqlmodel import Session, Field, select\n'), ((528, 553), 'sqlmodel.Field', 'Field', ([], {'default_factory': '{}'}), '(default_factory={})\n', (533, 553), False, 'from sqlmodel import Session, Field, select\n'), ((560, 587), 'pydantic.validator', 'validator', (['"""tags"""'], {'pre': '(True)'}), "('tags', pre=True)\n", (569, 587), False, 'from pydantic import validator\n'), ((688, 715), 'pydantic.validator', 'validator', (['"""data"""'], {'pre': '(True)'}), "('data', pre=True)\n", (697, 715), False, 'from pydantic import validator\n'), ((1384, 1399), 'fastapi.Depends', 'Depends', (['get_db'], {}), '(get_db)\n', (1391, 1399), False, 'from fastapi import APIRouter, Depends, Query\n'), ((1653, 1680), 'fastapi.Query', 'Query', ([], {'default': '(100)', 'lte': '(100)'}), '(default=100, lte=100)\n', (1658, 1680), False, 'from fastapi import APIRouter, Depends, Query\n'), ((1706, 1733), 'fastapi.Query', 'Query', ([], {'default': 'OrderBy.when'}), '(default=OrderBy.when)\n', (1711, 1733), False, 'from fastapi import APIRouter, Depends, Query\n'), ((1752, 1776), 'fastapi.Query', 'Query', ([], {'default': 'Sort.desc'}), '(default=Sort.desc)\n', (1757, 1776), False, 'from fastapi import APIRouter, Depends, Query\n'), ((1805, 1838), 'fastapi.Query', 'Query', ([], {'default': 'None', 'min_length': '(2)'}), '(default=None, min_length=2)\n', (1810, 1838), False, 'from fastapi import APIRouter, Depends, Query\n'), ((1867, 1900), 'fastapi.Query', 'Query', ([], {'default': 'None', 'min_length': '(2)'}), '(default=None, min_length=2)\n', (1872, 1900), False, 'from fastapi import APIRouter, Depends, Query\n'), ((1929, 1962), 'fastapi.Query', 'Query', ([], {'default': 'None', 'min_length': '(2)'}), '(default=None, min_length=2)\n', (1934, 1962), False, 'from fastapi import APIRouter, Depends, Query\n'), ((1993, 2026), 'fastapi.Query', 'Query', ([], {'default': 'None', 'min_length': '(2)'}), '(default=None, min_length=2)\n', (1998, 2026), False, 'from fastapi import APIRouter, Depends, Query\n'), ((2058, 2091), 'fastapi.Query', 'Query', ([], {'default': 'None', 'min_length': '(2)'}), '(default=None, min_length=2)\n', (2063, 2091), False, 'from fastapi import APIRouter, Depends, Query\n'), ((2116, 2131), 'fastapi.Depends', 'Depends', (['get_db'], {}), '(get_db)\n', (2123, 2131), False, 'from fastapi import APIRouter, Depends, Query\n'), ((2165, 2182), 'sqlmodel.select', 'select', (['FeedModel'], {}), '(FeedModel)\n', (2171, 2182), False, 'from sqlmodel import Session, Field, select\n'), ((1435, 1460), 'sqlalchemy.distinct', 'distinct', (['FeedModel.ftype'], {}), '(FeedModel.ftype)\n', (1443, 1460), False, 'from sqlalchemy import distinct\n'), ((665, 680), 'orjson.loads', 'orjson.loads', (['v'], {}), '(v)\n', (677, 680), False, 'import orjson\n'), ((3131, 3176), 'app.db.FeedModel.ftype.notin_', 'FeedModel.ftype.notin_', (['INDIVIDUAL_FEED_TYPES'], {}), '(INDIVIDUAL_FEED_TYPES)\n', (3153, 3176), False, 'from app.db import get_db, FeedModel, FeedBase\n'), ((3404, 3425), 'app.db.FeedModel.when.desc', 'FeedModel.when.desc', ([], {}), '()\n', (3423, 3425), False, 'from app.db import get_db, FeedModel, FeedBase\n'), ((865, 880), 'pickle.loads', 'pickle.loads', (['v'], {}), '(v)\n', (877, 880), False, 'import pickle\n'), ((2304, 2330), 
'app.db.FeedModel.ftype.in_', 'FeedModel.ftype.in_', (['parts'], {}), '(parts)\n', (2323, 2330), False, 'from app.db import get_db, FeedModel, FeedBase\n'), ((2420, 2455), 'app.db.FeedModel.title.ilike', 'FeedModel.title.ilike', (['f"""%{title}%"""'], {}), "(f'%{title}%')\n", (2441, 2455), False, 'from app.db import get_db, FeedModel, FeedBase\n'), ((2524, 2563), 'app.db.FeedModel.creator.ilike', 'FeedModel.creator.ilike', (['f"""%{creator}%"""'], {}), "(f'%{creator}%')\n", (2547, 2563), False, 'from app.db import get_db, FeedModel, FeedBase\n'), ((2633, 2674), 'app.db.FeedModel.subtitle.ilike', 'FeedModel.subtitle.ilike', (['f"""%{subtitle}%"""'], {}), "(f'%{subtitle}%')\n", (2657, 2674), False, 'from app.db import get_db, FeedModel, FeedBase\n'), ((2952, 2990), 'app.db.FeedModel.model_id.ilike', 'FeedModel.model_id.ilike', (['f"""%{query}%"""'], {}), "(f'%{query}%')\n", (2976, 2990), False, 'from app.db import get_db, FeedModel, FeedBase\n'), ((3333, 3354), 'app.db.FeedModel.score.asc', 'FeedModel.score.asc', ([], {}), '()\n', (3352, 3354), False, 'from app.db import get_db, FeedModel, FeedBase\n'), ((3380, 3402), 'app.db.FeedModel.score.desc', 'FeedModel.score.desc', ([], {}), '()\n', (3400, 3402), False, 'from app.db import get_db, FeedModel, FeedBase\n'), ((3482, 3502), 'app.db.FeedModel.when.asc', 'FeedModel.when.asc', ([], {}), '()\n', (3500, 3502), False, 'from app.db import get_db, FeedModel, FeedBase\n'), ((3528, 3549), 'app.db.FeedModel.when.desc', 'FeedModel.when.desc', ([], {}), '()\n', (3547, 3549), False, 'from app.db import get_db, FeedModel, FeedBase\n'), ((2881, 2919), 'app.db.FeedModel.subtitle.ilike', 'FeedModel.subtitle.ilike', (['f"""%{query}%"""'], {}), "(f'%{query}%')\n", (2905, 2919), False, 'from app.db import get_db, FeedModel, FeedBase\n'), ((2743, 2778), 'app.db.FeedModel.title.ilike', 'FeedModel.title.ilike', (['f"""%{query}%"""'], {}), "(f'%{query}%')\n", (2764, 2778), False, 'from app.db import get_db, FeedModel, FeedBase\n'), ((2811, 2848), 'app.db.FeedModel.creator.ilike', 'FeedModel.creator.ilike', (['f"""%{query}%"""'], {}), "(f'%{query}%')\n", (2834, 2848), False, 'from app.db import get_db, FeedModel, FeedBase\n')]
|
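The two validators on FeedRead above undo the feed's storage encodings: tags are persisted as a JSON array string and data as a pickle blob. A self-contained illustration of exactly those decodings, with made-up values:

import pickle

import orjson

assert orjson.loads(b'["music", "jazz"]') == ["music", "jazz"]
assert pickle.loads(pickle.dumps({"score": 7.5})) == {"score": 7.5}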
from datetime import timedelta
from enum import Enum
from tkinter import *
from tkinter import ttk
import typer
from sqlmodel import Session
class Status(str, Enum):
"""Status"""
to_do = 'to do'
doing = 'doing'
done = 'done'
def round_timedelta(delta: timedelta):
    """Format a timedelta object as hh:mm, truncating leftover seconds."""
    seconds = round(delta.total_seconds())
    hours, seconds = divmod(seconds, 3600)
    minutes = seconds // 60
    return f'{hours:02d}:{minutes:02d}'
def list_query(engine, query):
"""Calculate duration of a task"""
with Session(engine) as session:
query_list = session.exec(query).all()
try:
for task in query_list:
duration = timedelta()
for dur in task.timers:
duration += dur.duration
yield task, duration
except TypeError:
            typer.secho('\nTask is running. Stop timer first.\n',
                        fg=typer.colors.RED)
raise typer.Exit(code=1)
def make_table_view(engine, tasks):
table = [['id', 'Task', 'Project', 'Status', 'Tag', 'hh:mm',
'Due in']]
try:
for i in list_query(engine, tasks):
task = i[0]
duration = i[1]
table.append(
[task.id, task.task, task.project, task.status, task.tag,
round_timedelta(duration), task.due_date])
except UnboundLocalError:
pass
return table
def pop_up_msg():
"""Pop up finish msg"""
root = Tk()
frm = ttk.Frame(root, padding=10)
frm.grid()
ttk.Label(frm, text="Your Time is Over! Well done!").grid(column=0, row=0)
ttk.Button(frm, text="Quit", command=root.destroy).grid(column=1, row=0)
root.mainloop()
def make_table_projects(engine, tasks):
table = [['id', 'Task', 'Status', 'Tag', 'hh:mm', 'Due in']]
try:
project_duration = timedelta()
for i in list_query(engine, tasks):
task = i[0]
duration = i[1]
project_duration += duration
table.append(
[task.id, task.task, task.status, task.tag,
round_timedelta(duration), task.due_date])
except UnboundLocalError:
pass
return table, round_timedelta(project_duration)
|
[
"sqlmodel.Session"
] |
[((1814, 1841), 'tkinter.ttk.Frame', 'ttk.Frame', (['root'], {'padding': '(10)'}), '(root, padding=10)\n', (1823, 1841), False, 'from tkinter import ttk\n'), ((827, 842), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (834, 842), False, 'from sqlmodel import Session\n'), ((2176, 2187), 'datetime.timedelta', 'timedelta', ([], {}), '()\n', (2185, 2187), False, 'from datetime import timedelta\n'), ((1861, 1913), 'tkinter.ttk.Label', 'ttk.Label', (['frm'], {'text': '"""Your Time is Over! Well done!"""'}), "(frm, text='Your Time is Over! Well done!')\n", (1870, 1913), False, 'from tkinter import ttk\n'), ((1940, 1990), 'tkinter.ttk.Button', 'ttk.Button', (['frm'], {'text': '"""Quit"""', 'command': 'root.destroy'}), "(frm, text='Quit', command=root.destroy)\n", (1950, 1990), False, 'from tkinter import ttk\n'), ((978, 989), 'datetime.timedelta', 'timedelta', ([], {}), '()\n', (987, 989), False, 'from datetime import timedelta\n'), ((1150, 1227), 'typer.secho', 'typer.secho', (['f"""\nTask is running. Stop timer first.\n"""'], {'fg': 'typer.colors.RED'}), '(f"""\nTask is running. Stop timer first.\n""", fg=typer.colors.RED)\n', (1161, 1227), False, 'import typer\n'), ((1268, 1286), 'typer.Exit', 'typer.Exit', ([], {'code': '(1)'}), '(code=1)\n', (1278, 1286), False, 'import typer\n')]
|
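A quick check of the hh:mm formatting helper above, with durations chosen for illustration:

from datetime import timedelta

print(round_timedelta(timedelta(hours=1, minutes=30)))  # -> 01:30
print(round_timedelta(timedelta(seconds=125)))          # -> 00:02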
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import argparse
import datetime
import multiprocessing as mp
import time
import megengine as mge
import megengine.amp as amp
import megengine.autodiff as autodiff
import megengine.distributed as dist
import megengine.functional as F
import megengine.jit as jit
import megengine.optimizer as optim
from basecore.utils import log_every_n_seconds
from loguru import logger
from basecls.data.fake_data import FakeDataLoader
from basecls.layers import Preprocess
from basecls.utils import registers, set_nccl_env, set_num_threads
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-m", "--model", default="resnet50", type=str)
parser.add_argument("--mode", default="eval", type=str)
parser.add_argument("-d", "--device", default="gpu", type=str)
parser.add_argument("--amp", default=0, type=int)
parser.add_argument("--fastrun", action="store_true")
parser.add_argument("--trace", action="store_true")
parser.add_argument("--dtr", action="store_true")
parser.add_argument("-b", "--batch-size", default=32, type=int)
parser.add_argument("--channel", default=3, type=int)
parser.add_argument("--height", default=224, type=int)
parser.add_argument("--width", default=224, type=int)
parser.add_argument("-n", "--world-size", default=8, type=int)
parser.add_argument("--warm-iters", default=50, type=int)
parser.add_argument("-t", "--total-iters", default=200, type=int)
parser.add_argument("--log-seconds", default=2, type=int)
args = parser.parse_args()
mp.set_start_method("spawn")
set_nccl_env()
set_num_threads()
if args.world_size == 1:
worker(args)
else:
dist.launcher(worker, n_gpus=args.world_size)(args)
@logger.catch
def worker(args: argparse.Namespace):
if dist.get_rank() != 0:
logger.remove()
logger.info(f"args: {args}")
if args.fastrun:
logger.info("Using fastrun mode...")
mge.functional.debug_param.set_execution_strategy("PROFILE")
if args.dtr:
logger.info("Enabling DTR...")
mge.dtr.enable()
mge.set_default_device(f"{args.device}{dist.get_rank()}")
model = registers.models.get(args.model)(head=dict(w_out=1000))
dataloader = FakeDataLoader(
args.batch_size,
(args.height, args.width),
args.channel,
length=args.warm_iters + args.total_iters,
num_classes=1000,
)
if args.mode == "train":
BenchCls = TrainBench
elif args.mode == "eval":
BenchCls = EvalBench
else:
raise NotImplementedError(f"Benchmark mode '{args.mode}' not supported")
bench = BenchCls(model, dataloader, args.trace, args.amp)
bench.benchmark(args.warm_iters, args.log_seconds)
class ClsBench:
def __init__(self, model, dataloader, trace: bool = False):
self.model = model
self.dataloader = dataloader
self.preprocess = Preprocess(mean=127, std=128)
if trace:
self.model_step = jit.trace(self.model_step, symbolic=True)
def benchmark(self, warm_iters=50, log_seconds=2):
total_iters = len(self.dataloader) - warm_iters
total_time = 0
for i, data in enumerate(self.dataloader, 1):
if i == warm_iters + 1:
total_time = 0
samples, targets = self.preprocess(data)
mge._full_sync()
t = time.perf_counter()
self.model_step(samples, targets)
mge._full_sync()
total_time += time.perf_counter() - t
if log_seconds > 0:
cnt = i - warm_iters if i > warm_iters else i
tot = total_iters if i > warm_iters else warm_iters
cycle = total_time / cnt
eta = (tot - cnt) * cycle
log_every_n_seconds(
"{} process {}/{}, average speed:{:0.3f}ms/iters. ETA:{}".format(
"Benchmark" if i > warm_iters else "Warmup",
cnt,
tot,
cycle * 1000,
datetime.timedelta(seconds=int(eta)),
),
n=log_seconds,
)
avg_speed_ms = total_time / total_iters * 1000
logger.info(
"Benchmark total time:{}, average speed:{:0.3f}ms/iters.".format(
datetime.timedelta(seconds=int(total_time)), avg_speed_ms
)
)
return avg_speed_ms
def model_step(self, samples, targets):
raise NotImplementedError
class TrainBench(ClsBench):
def __init__(self, model, dataloader, trace: bool = False, amp_version: int = 0):
model.train()
super().__init__(model, dataloader, trace)
self.opt = optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=0.0001)
self.gm = autodiff.GradManager()
callbacks = (
[dist.make_allreduce_cb("mean", dist.WORLD)] if dist.get_world_size() > 1 else None
)
self.gm.attach(model.parameters(), callbacks=callbacks)
self.amp_version = amp_version
self.scaler = (
amp.GradScaler(init_scale=65536.0, growth_interval=2000)
if amp_version == 2
else amp.GradScaler(init_scale=128.0, growth_interval=0)
)
def model_step(self, samples, targets):
with self.gm:
with amp.autocast(enabled=self.amp_version > 0):
pred = self.model(samples)
loss = F.loss.cross_entropy(pred, targets)
if self.amp_version > 0:
self.scaler.backward(self.gm, loss, update_scale=False)
self.scaler.update()
else:
self.gm.backward(loss)
self.opt.step().clear_grad()
class EvalBench(ClsBench):
def __init__(self, model, dataloader, trace: bool = False, amp_version: int = 0):
model.eval()
super().__init__(model, dataloader, trace)
self.amp_version = amp_version
def model_step(self, samples, targets):
with amp.autocast(enabled=self.amp_version > 0):
self.model(samples)
if __name__ == "__main__":
main()
|
[
"megengine.functional.loss.cross_entropy",
"megengine._full_sync",
"megengine.autodiff.GradManager",
"megengine.functional.debug_param.set_execution_strategy",
"megengine.jit.trace",
"megengine.amp.autocast",
"megengine.distributed.make_allreduce_cb",
"megengine.distributed.launcher",
"megengine.dtr.enable",
"megengine.amp.GradScaler",
"megengine.distributed.get_rank",
"megengine.distributed.get_world_size"
] |
[((941, 966), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (964, 966), False, 'import argparse\n'), ((1927, 1955), 'multiprocessing.set_start_method', 'mp.set_start_method', (['"""spawn"""'], {}), "('spawn')\n", (1946, 1955), True, 'import multiprocessing as mp\n'), ((1961, 1975), 'basecls.utils.set_nccl_env', 'set_nccl_env', ([], {}), '()\n', (1973, 1975), False, 'from basecls.utils import registers, set_nccl_env, set_num_threads\n'), ((1980, 1997), 'basecls.utils.set_num_threads', 'set_num_threads', ([], {}), '()\n', (1995, 1997), False, 'from basecls.utils import registers, set_nccl_env, set_num_threads\n'), ((2230, 2258), 'loguru.logger.info', 'logger.info', (['f"""args: {args}"""'], {}), "(f'args: {args}')\n", (2241, 2258), False, 'from loguru import logger\n'), ((2627, 2764), 'basecls.data.fake_data.FakeDataLoader', 'FakeDataLoader', (['args.batch_size', '(args.height, args.width)', 'args.channel'], {'length': '(args.warm_iters + args.total_iters)', 'num_classes': '(1000)'}), '(args.batch_size, (args.height, args.width), args.channel,\n length=args.warm_iters + args.total_iters, num_classes=1000)\n', (2641, 2764), False, 'from basecls.data.fake_data import FakeDataLoader\n'), ((2180, 2195), 'megengine.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (2193, 2195), True, 'import megengine.distributed as dist\n'), ((2210, 2225), 'loguru.logger.remove', 'logger.remove', ([], {}), '()\n', (2223, 2225), False, 'from loguru import logger\n'), ((2289, 2325), 'loguru.logger.info', 'logger.info', (['"""Using fastrun mode..."""'], {}), "('Using fastrun mode...')\n", (2300, 2325), False, 'from loguru import logger\n'), ((2334, 2394), 'megengine.functional.debug_param.set_execution_strategy', 'mge.functional.debug_param.set_execution_strategy', (['"""PROFILE"""'], {}), "('PROFILE')\n", (2383, 2394), True, 'import megengine as mge\n'), ((2421, 2451), 'loguru.logger.info', 'logger.info', (['"""Enabling DTR..."""'], {}), "('Enabling DTR...')\n", (2432, 2451), False, 'from loguru import logger\n'), ((2460, 2476), 'megengine.dtr.enable', 'mge.dtr.enable', ([], {}), '()\n', (2474, 2476), True, 'import megengine as mge\n'), ((2553, 2585), 'basecls.utils.registers.models.get', 'registers.models.get', (['args.model'], {}), '(args.model)\n', (2573, 2585), False, 'from basecls.utils import registers, set_nccl_env, set_num_threads\n'), ((3308, 3337), 'basecls.layers.Preprocess', 'Preprocess', ([], {'mean': '(127)', 'std': '(128)'}), '(mean=127, std=128)\n', (3318, 3337), False, 'from basecls.layers import Preprocess\n'), ((5266, 5288), 'megengine.autodiff.GradManager', 'autodiff.GradManager', ([], {}), '()\n', (5286, 5288), True, 'import megengine.autodiff as autodiff\n'), ((2067, 2112), 'megengine.distributed.launcher', 'dist.launcher', (['worker'], {'n_gpus': 'args.world_size'}), '(worker, n_gpus=args.world_size)\n', (2080, 2112), True, 'import megengine.distributed as dist\n'), ((3387, 3428), 'megengine.jit.trace', 'jit.trace', (['self.model_step'], {'symbolic': '(True)'}), '(self.model_step, symbolic=True)\n', (3396, 3428), True, 'import megengine.jit as jit\n'), ((3753, 3769), 'megengine._full_sync', 'mge._full_sync', ([], {}), '()\n', (3767, 3769), True, 'import megengine as mge\n'), ((3786, 3805), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (3803, 3805), False, 'import time\n'), ((3866, 3882), 'megengine._full_sync', 'mge._full_sync', ([], {}), '()\n', (3880, 3882), True, 'import megengine as mge\n'), ((5557, 5613), 'megengine.amp.GradScaler', 
'amp.GradScaler', ([], {'init_scale': '(65536.0)', 'growth_interval': '(2000)'}), '(init_scale=65536.0, growth_interval=2000)\n', (5571, 5613), True, 'import megengine.amp as amp\n'), ((5663, 5714), 'megengine.amp.GradScaler', 'amp.GradScaler', ([], {'init_scale': '(128.0)', 'growth_interval': '(0)'}), '(init_scale=128.0, growth_interval=0)\n', (5677, 5714), True, 'import megengine.amp as amp\n'), ((6480, 6522), 'megengine.amp.autocast', 'amp.autocast', ([], {'enabled': '(self.amp_version > 0)'}), '(enabled=self.amp_version > 0)\n', (6492, 6522), True, 'import megengine.amp as amp\n'), ((2521, 2536), 'megengine.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (2534, 2536), True, 'import megengine.distributed as dist\n'), ((3909, 3928), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (3926, 3928), False, 'import time\n'), ((5371, 5392), 'megengine.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (5390, 5392), True, 'import megengine.distributed as dist\n'), ((5324, 5366), 'megengine.distributed.make_allreduce_cb', 'dist.make_allreduce_cb', (['"""mean"""', 'dist.WORLD'], {}), "('mean', dist.WORLD)\n", (5346, 5366), True, 'import megengine.distributed as dist\n'), ((5809, 5851), 'megengine.amp.autocast', 'amp.autocast', ([], {'enabled': '(self.amp_version > 0)'}), '(enabled=self.amp_version > 0)\n', (5821, 5851), True, 'import megengine.amp as amp\n'), ((5919, 5954), 'megengine.functional.loss.cross_entropy', 'F.loss.cross_entropy', (['pred', 'targets'], {}), '(pred, targets)\n', (5939, 5954), True, 'import megengine.functional as F\n')]
|
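The benchmark loop above brackets each step with mge._full_sync() so that asynchronous GPU kernels are fully flushed before the clock starts and stops. That pattern, distilled into a reusable helper (a sketch assuming MegEngine is installed; step is any callable):

import time

import megengine as mge

def timed_step(step, *args):
    """Return wall-clock seconds for one fully synchronized step."""
    mge._full_sync()               # flush pending kernels before starting the clock
    t = time.perf_counter()
    step(*args)
    mge._full_sync()               # wait for this step's kernels to finish
    return time.perf_counter() - t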
r"""
Elastic contact sphere simulating an indentation test.
Find :math:`\ul{u}` such that:
.. math::
\int_{\Omega} D_{ijkl}\ e_{ij}(\ul{v}) e_{kl}(\ul{u})
+ \int_{\Gamma} \ul{v} \cdot f(d(\ul{u})) \ul{n}(\ul{u})
= 0 \;,
where
.. math::
D_{ijkl} = \mu (\delta_{ik} \delta_{jl} + \delta_{il} \delta_{jk}) +
\lambda \ \delta_{ij} \delta_{kl}
\;.
Notes
-----
Even though the material is linear elastic and small deformations are used, the
problem is highly nonlinear due to contacts with the sphere. See also
elastic_contact_planes.py example.
"""
from sfepy import data_dir
filename_mesh = data_dir + '/meshes/3d/cube_medium_hexa.mesh'
k = 1e5 # Elastic sphere stiffness for positive penetration.
f0 = 1e-2 # Force at zero penetration.
options = {
'nls' : 'newton',
'ls' : 'ls',
'output_format': 'vtk',
}
fields = {
'displacement': ('real', 3, 'Omega', 1),
}
materials = {
'solid' : ({
'lam' : 5.769,
'mu' : 3.846,
},),
'cs' : ({
'f' : [k, f0],
'.c' : [0.0, 0.0, 1.2],
'.r' : 0.8,
},),
}
variables = {
'u' : ('unknown field', 'displacement', 0),
'v' : ('test field', 'displacement', 'u'),
}
regions = {
'Omega' : 'all',
'Bottom' : ('vertices in (z < -0.499)', 'facet'),
'Top' : ('vertices in (z > 0.499)', 'facet'),
}
ebcs = {
'fixed' : ('Bottom', {'u.all' : 0.0}),
}
equations = {
'elasticity' :
"""dw_lin_elastic_iso.2.Omega(solid.lam, solid.mu, v, u)
+ dw_contact_sphere.2.Top(cs.f, cs.c, cs.r, v, u)
= 0""",
}
solvers = {
'ls' : ('ls.scipy_direct', {}),
'newton' : ('nls.newton', {
'i_max' : 20,
'eps_a' : 1e-1,
'ls_on' : 2.0,
'problem' : 'nonlinear',
'check' : 0,
'delta' : 1e-6,
}),
}
def main():
import os
import numpy as nm
import matplotlib.pyplot as plt
from sfepy.discrete.fem import MeshIO
import sfepy.linalg as la
from sfepy.mechanics.contact_bodies import ContactSphere, plot_points
conf_dir = os.path.dirname(__file__)
io = MeshIO.any_from_filename(filename_mesh, prefix_dir=conf_dir)
bb = io.read_bounding_box()
outline = [vv for vv in la.combine(zip(*bb))]
ax = plot_points(None, nm.array(outline), 'r*')
csc = materials['cs'][0]
cs = ContactSphere(csc['.c'], csc['.r'])
pps = (bb[1] - bb[0]) * nm.random.rand(5000, 3) + bb[0]
mask = cs.mask_points(pps, 0.0)
ax = plot_points(ax, cs.centre[None, :], 'b*', ms=30)
ax = plot_points(ax, pps[mask], 'kv')
ax = plot_points(ax, pps[~mask], 'r.')
plt.show()
if __name__ == '__main__':
main()
|
[
"sfepy.discrete.fem.MeshIO.any_from_filename",
"sfepy.mechanics.contact_bodies.plot_points",
"sfepy.mechanics.contact_bodies.ContactSphere"
] |
[((2053, 2078), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (2068, 2078), False, 'import os\n'), ((2088, 2148), 'sfepy.discrete.fem.MeshIO.any_from_filename', 'MeshIO.any_from_filename', (['filename_mesh'], {'prefix_dir': 'conf_dir'}), '(filename_mesh, prefix_dir=conf_dir)\n', (2112, 2148), False, 'from sfepy.discrete.fem import MeshIO\n'), ((2322, 2357), 'sfepy.mechanics.contact_bodies.ContactSphere', 'ContactSphere', (["csc['.c']", "csc['.r']"], {}), "(csc['.c'], csc['.r'])\n", (2335, 2357), False, 'from sfepy.mechanics.contact_bodies import ContactSphere, plot_points\n'), ((2465, 2513), 'sfepy.mechanics.contact_bodies.plot_points', 'plot_points', (['ax', 'cs.centre[None, :]', '"""b*"""'], {'ms': '(30)'}), "(ax, cs.centre[None, :], 'b*', ms=30)\n", (2476, 2513), False, 'from sfepy.mechanics.contact_bodies import ContactSphere, plot_points\n'), ((2523, 2555), 'sfepy.mechanics.contact_bodies.plot_points', 'plot_points', (['ax', 'pps[mask]', '"""kv"""'], {}), "(ax, pps[mask], 'kv')\n", (2534, 2555), False, 'from sfepy.mechanics.contact_bodies import ContactSphere, plot_points\n'), ((2565, 2598), 'sfepy.mechanics.contact_bodies.plot_points', 'plot_points', (['ax', 'pps[~mask]', '"""r."""'], {}), "(ax, pps[~mask], 'r.')\n", (2576, 2598), False, 'from sfepy.mechanics.contact_bodies import ContactSphere, plot_points\n'), ((2604, 2614), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2612, 2614), True, 'import matplotlib.pyplot as plt\n'), ((2259, 2276), 'numpy.array', 'nm.array', (['outline'], {}), '(outline)\n', (2267, 2276), True, 'import numpy as nm\n'), ((2387, 2410), 'numpy.random.rand', 'nm.random.rand', (['(5000)', '(3)'], {}), '(5000, 3)\n', (2401, 2410), True, 'import numpy as nm\n')]
|
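One way to read the 'cs' material above: cs.f = [k, f0] parameterizes the magnitude of the contact force as a function of penetration depth d, with value f0 at zero penetration and slope k once the sphere is penetrated. A toy linear-penalty reading, for intuition only (an assumption; sfepy's actual contact term smooths this law, so do not treat it as the library's formula):

def contact_force(d, k=1e5, f0=1e-2):
    # Hypothetical linear-penalty reading of cs.f = [k, f0].
    return f0 + k * d if d > 0.0 else 0.0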
import numpy as nm
from sfepy.base.base import OneTypeList, Container, Struct
class Functions(Container):
"""Container to hold all user-defined functions."""
def from_conf(conf):
objs = OneTypeList(Function)
        for key, fc in conf.items():
fun = Function(name = fc.name,
function = fc.function,
is_constant = False,
extra_args = {})
objs.append(fun)
obj = Functions(objs)
return obj
from_conf = staticmethod(from_conf)
class Function(Struct):
"""Base class for user-defined functions."""
def __init__(self, name, function, is_constant=False, extra_args=None):
Struct.__init__(self, name = name, function = function,
is_constant = is_constant)
if extra_args is None:
extra_args = {}
self.extra_args = extra_args
def __call__(self, *args, **kwargs):
_kwargs = dict(kwargs)
_kwargs.update(self.extra_args)
return self.function(*args, **_kwargs)
def set_function(self, function, is_constant=False):
self.function = function
self.is_constant = is_constant
def set_extra_args(self, **extra_args):
self.extra_args = extra_args
class ConstantFunction(Function):
"""Function with constant values."""
def __init__(self, values):
"""Make a function out of a dictionary of constant values. When
called with coors argument, the values are repeated for each
coordinate."""
        name = '_'.join(['get_constants'] + list(values.keys()))
def get_constants(ts=None, coors=None, mode=None, **kwargs):
out = {}
if mode == 'special':
                for key, val in values.items():
if '.' in key:
vkey = key.split('.')[1]
out[vkey] = val
elif (mode == 'qp'):
                for key, val in values.items():
if '.' in key: continue
val = nm.array(val, dtype=nm.float64, ndmin=3)
out[key] = nm.tile(val, (coors.shape[0], 1, 1))
elif (mode == 'special_constant') or (mode is None):
                for key, val in values.items():
if '.' in key: continue
out[key] = val
else:
raise ValueError('unknown function mode! (%s)' % mode)
return out
Function.__init__(self, name = name, function = get_constants,
is_constant = True)
class ConstantFunctionByRegion(Function):
"""
Function with constant values in regions.
"""
def __init__(self, values):
"""
Make a function out of a dictionary of constant values per region. When
called with coors argument, the values are repeated for each
coordinate in each of the given regions.
"""
        name = '_'.join(['get_constants_by_region'] + list(values.keys()))
def get_constants(ts=None, coors=None, mode=None,
term=None, problem=None, **kwargs):
out = {}
if mode == 'qp':
qps = term.get_physical_qps()
                for key, val in values.items():
if '.' in key: continue
                    rval = nm.array(val[next(iter(val))], dtype=nm.float64,
                                    ndmin=3)
matdata = nm.zeros((coors.shape[0], ) + rval.shape[1:],
dtype=nm.float64)
                    for rkey, rval in val.items():
region = problem.domain.regions[rkey]
rval = nm.array(rval, dtype=nm.float64, ndmin=3)
for ig in region.igs:
if not (ig in qps.igs):
continue
matdata[qps.rindx[ig]] = rval
out[key] = matdata
return out
Function.__init__(self, name=name, function=get_constants,
is_constant=True)
|
[
"sfepy.base.base.OneTypeList",
"sfepy.base.base.Struct.__init__"
] |
[((205, 226), 'sfepy.base.base.OneTypeList', 'OneTypeList', (['Function'], {}), '(Function)\n', (216, 226), False, 'from sfepy.base.base import OneTypeList, Container, Struct\n'), ((732, 808), 'sfepy.base.base.Struct.__init__', 'Struct.__init__', (['self'], {'name': 'name', 'function': 'function', 'is_constant': 'is_constant'}), '(self, name=name, function=function, is_constant=is_constant)\n', (747, 808), False, 'from sfepy.base.base import OneTypeList, Container, Struct\n'), ((3529, 3591), 'numpy.zeros', 'nm.zeros', (['((coors.shape[0],) + rval.shape[1:])'], {'dtype': 'nm.float64'}), '((coors.shape[0],) + rval.shape[1:], dtype=nm.float64)\n', (3537, 3591), True, 'import numpy as nm\n'), ((2098, 2138), 'numpy.array', 'nm.array', (['val'], {'dtype': 'nm.float64', 'ndmin': '(3)'}), '(val, dtype=nm.float64, ndmin=3)\n', (2106, 2138), True, 'import numpy as nm\n'), ((2170, 2206), 'numpy.tile', 'nm.tile', (['val', '(coors.shape[0], 1, 1)'], {}), '(val, (coors.shape[0], 1, 1))\n', (2177, 2206), True, 'import numpy as nm\n'), ((3781, 3822), 'numpy.array', 'nm.array', (['rval'], {'dtype': 'nm.float64', 'ndmin': '(3)'}), '(rval, dtype=nm.float64, ndmin=3)\n', (3789, 3822), True, 'import numpy as nm\n')]
|
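The 'qp' branch of ConstantFunction above repeats each stored constant once per evaluation coordinate; the tiling it performs, shown standalone with numpy:

import numpy as nm

val = nm.array(2.0, dtype=nm.float64, ndmin=3)   # stored constant, shape (1, 1, 1)
coors = nm.zeros((4, 3))                    # four point coordinates
out = nm.tile(val, (coors.shape[0], 1, 1))    # one copy per coordinate: shape (4, 1, 1)
assert out.shape == (4, 1, 1)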
import random
from megengine.data.transform import RandomResizedCrop as mge_RRC
from megengine.data.transform import Resize as mge_resize
from ..registry import PIPELINES
from edit.utils import interp_codes
@PIPELINES.register_module()
class Resize(object):
"""
Args:
size (int|list|tuple): Desired output size. If size is a sequence like
(h, w), output size will be matched to this. If size is an int,
smaller edge of the image will be matched to this number.
i.e, if height > width, then image will be rescaled to
(size * height / width, size)
interpolation (int): Interpolation mode of resize. Default: cv2.INTER_LINEAR.
"""
def __init__(self, keys, size, interpolation='bilinear'):
assert interpolation in interp_codes
self.keys = keys
self.size = size
self.interpolation_str = interpolation
self.resize = mge_resize(output_size=self.size, interpolation=interp_codes[interpolation])
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and data for augmentation.
Returns:
dict: A dict containing the processed data and information.
"""
for key in self.keys:
if isinstance(results[key], list):
results[key] = [
self.resize.apply(v) for v in results[key]
]
else:
results[key] = self.resize.apply(results[key])
return results
def __repr__(self):
interpolate_str = self.interpolation_str
format_string = self.__class__.__name__ + '(size={0}'.format(self.size)
format_string += ', interpolation={0})'.format(interpolate_str)
return format_string
@PIPELINES.register_module()
class RandomResizedCrop(object):
"""
Crop the input data to random size and aspect ratio.
    A crop of random size (default: 0.08 to 1.0 of the original size) and of a
    random aspect ratio (default: 3/4 to 4/3 of the original aspect ratio) is made.
    After applying the crop transform, the input data will be resized to the given size.
Args:
output_size (int|list|tuple): Target size of output image, with (height, width) shape.
        scale (list|tuple): Range of the cropped area relative to the original size. Default: (0.08, 1.0)
        ratio (list|tuple): Range of the cropped aspect ratio relative to the original aspect ratio. Default: (0.75, 1.33)
interpolation:
'nearest': cv2.INTER_NEAREST,
'bilinear': cv2.INTER_LINEAR,
'bicubic': cv2.INTER_CUBIC,
'area': cv2.INTER_AREA,
'lanczos': cv2.INTER_LANCZOS4
"""
    def __init__(self, keys, output_size, scale=(0.08, 1.0), ratio=(3. / 4., 4. / 3.), interpolation='bilinear', do_prob=0.5):
assert interpolation in interp_codes
self.keys = keys
self.size = output_size
self.interpolation_str = interpolation
self.scale = scale
self.ratio = ratio
self.rrc = mge_RRC(output_size=output_size, scale_range=scale, ratio_range=ratio, interpolation=interp_codes[interpolation])
self.do_prob = do_prob
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and data for augmentation.
Returns:
dict: A dict containing the processed data and information.
"""
if random.random() < self.do_prob:
for key in self.keys:
if isinstance(results[key], list):
results[key] = [
self.rrc.apply(v) for v in results[key]
]
else:
results[key] = self.rrc.apply(results[key])
return results
else:
return results
def __repr__(self):
interpolate_str = self.interpolation_str
format_string = self.__class__.__name__ + '(size={0}'.format(self.size)
format_string += ', scale={0}'.format(tuple(round(s, 4) for s in self.scale))
format_string += ', ratio={0}'.format(tuple(round(r, 4) for r in self.ratio))
format_string += ', interpolation={0})'.format(interpolate_str)
return format_string
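# Editor's usage sketch (hypothetical 'lq'/'gt' keys): the registered
# transforms are chained by calling them on a results dict of images,
#   pipeline = [RandomResizedCrop(keys=['lq', 'gt'], output_size=(64, 64)),
#               Resize(keys=['lq', 'gt'], size=(32, 32))]
#   for t in pipeline:
#       results = t(results)  # each value under the keys is cropped, then resized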
|
[
"megengine.data.transform.Resize",
"megengine.data.transform.RandomResizedCrop"
] |
[((935, 1011), 'megengine.data.transform.Resize', 'mge_resize', ([], {'output_size': 'self.size', 'interpolation': 'interp_codes[interpolation]'}), '(output_size=self.size, interpolation=interp_codes[interpolation])\n', (945, 1011), True, 'from megengine.data.transform import Resize as mge_resize\n'), ((3160, 3277), 'megengine.data.transform.RandomResizedCrop', 'mge_RRC', ([], {'output_size': 'output_size', 'scale_range': 'scale', 'ratio_range': 'ratio', 'interpolation': 'interp_codes[interpolation]'}), '(output_size=output_size, scale_range=scale, ratio_range=ratio,\n interpolation=interp_codes[interpolation])\n', (3167, 3277), True, 'from megengine.data.transform import RandomResizedCrop as mge_RRC\n'), ((3592, 3607), 'random.random', 'random.random', ([], {}), '()\n', (3605, 3607), False, 'import random\n')]
|
from worker import writer, scanTemplates
from utils.manager import TrainManager
import time
from db import engine
from sqlmodel import Session
from models import Logrun,Usefuel, Template
import cachetool
import inspect
from disclog import postLog,postGeneric
def filler(posrr, posm) -> str:
    start = time.time()
    manager = TrainManager(posrr=posrr, posm=posm)
run = True
postGeneric([("info","API init success! Logger started.")],"Startup")
while run:
try:
manager.fetch()
if len(manager.out) > 0:
writer.delay(manager.out,"action")
manager.out = []
except Exception as e:
postLog(e,"error",f"{inspect.stack()[0][3]}:{inspect.stack()[0][2]}")
time.sleep(30)
return f"{(time.time()-start)} total time"
if __name__ == "__main__":
startup = True
while startup:
try:
with Session(engine) as session:
toptemp = session.query(Template).order_by(Template.template_id.desc()).first()
posrrr = session.query(Logrun).order_by(Logrun.action_seq.desc()).first()
posmr = session.query(Usefuel).order_by(Usefuel.action_seq.desc()).first()
if toptemp:
print("skipping init")
else:
                cachetool.set_cache("last_templates", 1622316652000)
                cachetool.set_cache("last_assets", 1622316652000)
scanTemplates()
time.sleep(1200)
if posrrr: posrr = posrrr.action_seq
else: posrr = 1642127
if posmr: posm = posmr.action_seq
else: posm = 981927
filler(posrr,posm)
except Exception as e:
postLog(e,"warn",f"{inspect.stack()[0][3]}:{inspect.stack()[0][2]}")
time.sleep(30)
|
[
"sqlmodel.Session"
] |
[((301, 312), 'time.time', 'time.time', ([], {}), '()\n', (310, 312), False, 'import time\n'), ((332, 368), 'utils.manager.TrainManager', 'TrainManager', ([], {'posrr': 'posrr', 'posm': 'posm'}), '(posrr=posrr, posm=posm)\n', (344, 368), False, 'from utils.manager import TrainManager\n'), ((416, 487), 'disclog.postGeneric', 'postGeneric', (["[('info', 'API init success! Logger started.')]", '"""Startup"""'], {}), "([('info', 'API init success! Logger started.')], 'Startup')\n", (427, 487), False, 'from disclog import postLog, postGeneric\n'), ((599, 634), 'worker.writer.delay', 'writer.delay', (['manager.out', '"""action"""'], {}), "(manager.out, 'action')\n", (611, 634), False, 'from worker import writer, scanTemplates\n'), ((793, 807), 'time.sleep', 'time.sleep', (['(30)'], {}), '(30)\n', (803, 807), False, 'import time\n'), ((824, 835), 'time.time', 'time.time', ([], {}), '()\n', (833, 835), False, 'import time\n'), ((954, 969), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (961, 969), False, 'from sqlmodel import Session\n'), ((1386, 1439), 'cachetool.set_cache', 'cachetool.set_cache', (['f"""last_templates"""', '(1622316652000)'], {}), "(f'last_templates', 1622316652000)\n", (1405, 1439), False, 'import cachetool\n'), ((1455, 1505), 'cachetool.set_cache', 'cachetool.set_cache', (['f"""last_assets"""', '(1622316652000)'], {}), "(f'last_assets', 1622316652000)\n", (1474, 1505), False, 'import cachetool\n'), ((1521, 1536), 'worker.scanTemplates', 'scanTemplates', ([], {}), '()\n', (1534, 1536), False, 'from worker import writer, scanTemplates\n'), ((1553, 1569), 'time.sleep', 'time.sleep', (['(1200)'], {}), '(1200)\n', (1563, 1569), False, 'import time\n'), ((1932, 1946), 'time.sleep', 'time.sleep', (['(30)'], {}), '(30)\n', (1942, 1946), False, 'import time\n'), ((1058, 1085), 'models.Template.template_id.desc', 'Template.template_id.desc', ([], {}), '()\n', (1083, 1085), False, 'from models import Logrun, Usefuel, Template\n'), ((1151, 1175), 'models.Logrun.action_seq.desc', 'Logrun.action_seq.desc', ([], {}), '()\n', (1173, 1175), False, 'from models import Logrun, Usefuel, Template\n'), ((1241, 1266), 'models.Usefuel.action_seq.desc', 'Usefuel.action_seq.desc', ([], {}), '()\n', (1264, 1266), False, 'from models import Logrun, Usefuel, Template\n'), ((732, 747), 'inspect.stack', 'inspect.stack', ([], {}), '()\n', (745, 747), False, 'import inspect\n'), ((756, 771), 'inspect.stack', 'inspect.stack', ([], {}), '()\n', (769, 771), False, 'import inspect\n'), ((1867, 1882), 'inspect.stack', 'inspect.stack', ([], {}), '()\n', (1880, 1882), False, 'import inspect\n'), ((1891, 1906), 'inspect.stack', 'inspect.stack', ([], {}), '()\n', (1904, 1906), False, 'import inspect\n')]
|
from __future__ import absolute_import
import numpy as nm
import numpy.linalg as nla
from sfepy.base.base import output, get_default, pause, Struct
from sfepy.base.log import Log, get_logging_conf
from sfepy.base.timing import Timer
from sfepy.solvers.solvers import OptimizationSolver
import scipy.optimize as sopt
import scipy.optimize.linesearch as linesearch
import six
from six.moves import range
def conv_test(conf, it, of, of0, ofg_norm=None):
"""
Returns
-------
flag : int
* -1 ... continue
* 0 ... small OF -> stop
* 1 ... i_max reached -> stop
* 2 ... small OFG -> stop
        * 3 ... small relative decrease of OF
"""
status = -1
output('opt: iter: %d, of: %e (||ofg||: %e)' % (it, of, ofg_norm))
if (abs(of) < conf.eps_of):
status = 0
elif ofg_norm and (ofg_norm < conf.eps_ofg):
status = 2
elif (it > 0) and (abs(of0 - of) < (conf.eps_rd * abs(of0))):
status = 3
if (status == -1) and (it >= conf.i_max):
status = 1
return status
def wrap_function(function, args):
ncalls = [0]
times = []
timer = Timer()
def function_wrapper(x):
ncalls[0] += 1
timer.start()
out = function(x, *args)
times.append(timer.stop())
return out
return ncalls, times, function_wrapper
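# Editor's sketch: the closures record call counts and timings, e.g.
#   ncalls, times, f = wrap_function(nm.sin, ())
#   f(0.0); f(1.0)  # now ncalls[0] == 2 and len(times) == 2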
def check_gradient(xit, aofg, fn_of, delta, check):
dofg = nm.zeros_like(aofg)
xd = xit.copy()
for ii in range(xit.shape[0]):
xd[ii] = xit[ii] + delta
ofp = fn_of(xd)
xd[ii] = xit[ii] - delta
ofm = fn_of(xd)
xd[ii] = xit[ii]
dofg[ii] = 0.5 * (ofp - ofm) / delta
output('**********', ii, aofg[ii], dofg[ii])
diff = abs(aofg - dofg)
aux = nm.concatenate((aofg[:,nm.newaxis], dofg[:,nm.newaxis],
diff[:,nm.newaxis]), 1)
output(aux)
output(nla.norm(diff, nm.Inf))
aofg.tofile('aofg.txt', ' ')
dofg.tofile('dofg.txt', ' ')
diff.tofile('diff.txt', ' ')
if check == 2:
import pylab
pylab.plot(aofg)
pylab.plot(dofg)
pylab.legend(('analytical', 'finite difference'))
pylab.show()
pause('gradient checking done')
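# Editor's note (sketch): check_gradient() compares the analytical gradient
# aofg against the central difference approximation
#   dofg[i] = (f(x + delta * e_i) - f(x - delta * e_i)) / (2 * delta),
# built one coordinate at a time in the loop above.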
class FMinSteepestDescent(OptimizationSolver):
"""
Steepest descent optimization solver.
"""
name = 'opt.fmin_sd'
_parameters = [
('i_max', 'int', 10, False,
'The maximum number of iterations.'),
('eps_rd', 'float', 1e-5, False,
'The relative delta of the objective function.'),
('eps_of', 'float', 1e-4, False,
'The tolerance for the objective function.'),
('eps_ofg', 'float', 1e-8, False,
'The tolerance for the objective function gradient.'),
('norm', 'numpy norm', nm.Inf, False,
'The norm to be used.'),
('ls', 'bool', True, False,
'If True, use a line-search.'),
('ls_method', "{'backtracking', 'full'}", 'backtracking', False,
'The line-search method.'),
('ls_on', 'float', 0.99999, False,
"""Start the backtracking line-search by reducing the step, if
:math:`||f(x^i)|| / ||f(x^{i-1})||` is larger than `ls_on`."""),
('ls0', '0.0 < float < 1.0', 1.0, False,
'The initial step.'),
('ls_red', '0.0 < float < 1.0', 0.5, False,
'The step reduction factor in case of correct residual assembling.'),
('ls_red_warp', '0.0 < float < 1.0', 0.1, False,
"""The step reduction factor in case of failed residual assembling
(e.g. the "warp violation" error caused by a negative volume
element resulting from too large deformations)."""),
('ls_min', '0.0 < float < 1.0', 1e-5, False,
'The minimum step reduction factor.'),
('check', '0, 1 or 2', 0, False,
"""If >= 1, check the tangent matrix using finite differences. If 2,
plot the resulting sparsity patterns."""),
('delta', 'float', 1e-6, False,
r"""If `check >= 1`, the finite difference matrix is taken as
:math:`A_{ij} = \frac{f_i(x_j + \delta) - f_i(x_j - \delta)}{2
\delta}`."""),
('output', 'function', None, False,
"""If given, use it instead of :func:`output()
<sfepy.base.base.output()>` function."""),
('yscales', 'list of str', ['linear', 'log', 'log', 'linear'], False,
'The list of four convergence log subplot scales.'),
('log', 'dict or None', None, False,
"""If not None, log the convergence according to the configuration in
the following form: ``{'text' : 'log.txt', 'plot' : 'log.pdf'}``.
Each of the dict items can be None."""),
]
def __init__(self, conf, **kwargs):
OptimizationSolver.__init__(self, conf, **kwargs)
conf = self.conf
log = get_logging_conf(conf)
conf.log = log = Struct(name='log_conf', **log)
conf.is_any_log = (log.text is not None) or (log.plot is not None)
if conf.is_any_log:
self.log = Log([[r'$||\Psi||$'], [r'$||\nabla \Psi||$'],
[r'$\alpha$'], ['iteration']],
xlabels=['', '', 'all iterations', 'all iterations'],
yscales=conf.yscales,
is_plot=conf.log.plot is not None,
log_filename=conf.log.text,
formats=[['%.8e'], ['%.3e'], ['%.3e'], ['%d']])
else:
self.log = None
def __call__(self, x0, conf=None, obj_fun=None, obj_fun_grad=None,
status=None, obj_args=None):
conf = get_default(conf, self.conf)
obj_fun = get_default(obj_fun, self.obj_fun)
obj_fun_grad = get_default(obj_fun_grad, self.obj_fun_grad)
status = get_default(status, self.status)
obj_args = get_default(obj_args, self.obj_args)
if conf.output:
globals()['output'] = conf.output
output('entering optimization loop...')
nc_of, tt_of, fn_of = wrap_function(obj_fun, obj_args)
nc_ofg, tt_ofg, fn_ofg = wrap_function(obj_fun_grad, obj_args)
timer = Timer()
time_stats = {'of' : tt_of, 'ofg': tt_ofg, 'check' : []}
ofg = None
it = 0
xit = x0.copy()
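        # Editor's note: the loop below implements plain steepest descent,
        #   x_{k+1} = x_k - alpha_k * grad f(x_k),
        # where alpha_k is shrunk by ls_red (or ls_red_warp on residual
        # assembly failure) until f(x_{k+1}) < ls_on * f(x_k), unless the
        # 'full' Wolfe line search is selected instead.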
while 1:
of = fn_of(xit)
if it == 0:
of0 = ofit0 = of_prev = of
of_prev_prev = of + 5000.0
if ofg is None:
ofg = fn_ofg(xit)
if conf.check:
timer.start()
check_gradient(xit, ofg, fn_of, conf.delta, conf.check)
time_stats['check'].append(timer.stop())
ofg_norm = nla.norm(ofg, conf.norm)
ret = conv_test(conf, it, of, ofit0, ofg_norm)
if ret >= 0:
break
ofit0 = of
##
# Backtrack (on errors).
alpha = conf.ls0
can_ls = True
while 1:
xit2 = xit - alpha * ofg
aux = fn_of(xit2)
if self.log is not None:
self.log(of, ofg_norm, alpha, it)
if aux is None:
alpha *= conf.ls_red_warp
can_ls = False
output('warp: reducing step (%f)' % alpha)
elif conf.ls and conf.ls_method == 'backtracking':
if aux < of * conf.ls_on: break
alpha *= conf.ls_red
output('backtracking: reducing step (%f)' % alpha)
else:
of_prev_prev = of_prev
of_prev = aux
break
if alpha < conf.ls_min:
if aux is None:
raise RuntimeError('giving up...')
output('linesearch failed, continuing anyway')
break
# These values are modified by the line search, even if it fails
of_prev_bak = of_prev
of_prev_prev_bak = of_prev_prev
if conf.ls and can_ls and conf.ls_method == 'full':
output('full linesearch...')
alpha, fc, gc, of_prev, of_prev_prev, ofg1 = \
linesearch.line_search(fn_of,fn_ofg,xit,
-ofg,ofg,of_prev,of_prev_prev,
c2=0.4)
if alpha is None: # line search failed -- use different one.
alpha, fc, gc, of_prev, of_prev_prev, ofg1 = \
sopt.line_search(fn_of,fn_ofg,xit,
-ofg,ofg,of_prev_bak,
of_prev_prev_bak)
if alpha is None or alpha == 0:
# This line search also failed to find a better
# solution.
ret = 3
break
output(' -> alpha: %.8e' % alpha)
else:
if conf.ls_method == 'full':
output('full linesearch off (%s and %s)'
% (conf.ls, can_ls))
ofg1 = None
if self.log is not None:
self.log.plot_vlines(color='g', linewidth=0.5)
xit = xit - alpha * ofg
if ofg1 is None:
ofg = None
else:
ofg = ofg1.copy()
for key, val in six.iteritems(time_stats):
if len(val):
output('%10s: %7.2f [s]' % (key, val[-1]))
it = it + 1
output('status: %d' % ret)
output('initial value: %.8e' % of0)
output('current value: %.8e' % of)
output('iterations: %d' % it)
output('function evaluations: %d in %.2f [s]'
% (nc_of[0], nm.sum(time_stats['of'])))
output('gradient evaluations: %d in %.2f [s]'
% (nc_ofg[0], nm.sum(time_stats['ofg'])))
if self.log is not None:
self.log(of, ofg_norm, alpha, it)
if conf.log.plot is not None:
self.log(save_figure=conf.log.plot, finished=True)
else:
self.log(finished=True)
if status is not None:
status['log'] = self.log
            status['status'] = ret
status['of0'] = of0
status['of'] = of
status['it'] = it
status['nc_of'] = nc_of[0]
status['nc_ofg'] = nc_ofg[0]
status['time_stats'] = time_stats
return xit
class ScipyFMinSolver(OptimizationSolver):
"""
Interface to SciPy optimization solvers scipy.optimize.fmin_*.
"""
name = 'nls.scipy_fmin_like'
_i_max_name = {
'fmin' : 'maxiter',
'fmin_bfgs' : 'maxiter',
'fmin_cg' : 'maxiter',
'fmin_cobyla' : 'maxfun',
'fmin_l_bfgs_b' : 'maxfun',
'fmin_ncg' : 'maxiter',
'fmin_powell' : 'maxiter',
'fmin_slsqp' : 'iter',
'fmin_tnc' : 'maxfun',
}
_has_grad = ('fmin_bfgs', 'fmin_cg', 'fmin_l_bfgs_b', 'fmin_ncg',
'fmin_slsqp', 'fmin_tnc')
_parameters = [
('method',
'{%s}' % ', '.join(sorted(repr(ii) for ii in _i_max_name.keys())),
'fmin', False,
'The actual optimization method to use.'),
('i_max', 'int', 10, False,
'The maximum number of iterations.'),
('*', '*', None, False,
'Additional parameters supported by the method.'),
]
def __init__(self, conf, **kwargs):
OptimizationSolver.__init__(self, conf, **kwargs)
self.set_method(self.conf)
def set_method(self, conf):
import scipy.optimize as so
try:
solver = getattr(so, conf.method)
except AttributeError:
raise ValueError('scipy solver %s does not exist!' % conf.method)
self.solver = solver
def __call__(self, x0, conf=None, obj_fun=None, obj_fun_grad=None,
status=None, obj_args=None):
import inspect
if conf is not None:
self.set_method(conf)
else:
conf = self.conf
obj_fun = get_default(obj_fun, self.obj_fun)
obj_fun_grad = get_default(obj_fun_grad, self.obj_fun_grad)
status = get_default(status, self.status)
obj_args = get_default(obj_args, self.obj_args)
timer = Timer(start=True)
kwargs = {self._i_max_name[conf.method] : conf.i_max,
'args' : obj_args}
if conf.method in self._has_grad:
kwargs['fprime'] = obj_fun_grad
if 'disp' in inspect.getargspec(self.solver)[0]:
kwargs['disp'] = conf.verbose
kwargs.update(self.build_solver_kwargs(conf))
out = self.solver(obj_fun, x0, **kwargs)
if status is not None:
status['time_stats'] = timer.stop()
return out
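# Editor's usage sketch (untested; conf fields follow _parameters above):
#   conf = Struct(name='conf', kind='nls.scipy_fmin_like', method='fmin',
#                 i_max=100, verbose=False)
#   solver = ScipyFMinSolver(conf, obj_fun=lambda x: ((x - 3.0)**2).sum(),
#                            obj_fun_grad=None, status=None, obj_args=())
#   x_min = solver(nm.zeros(2))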
|
[
"sfepy.base.timing.Timer",
"sfepy.base.base.Struct",
"sfepy.base.base.get_default",
"sfepy.base.log.Log",
"sfepy.base.base.output",
"sfepy.solvers.solvers.OptimizationSolver.__init__",
"sfepy.base.log.get_logging_conf",
"sfepy.base.base.pause"
] |
[((712, 778), 'sfepy.base.base.output', 'output', (["('opt: iter: %d, of: %e (||ofg||: %e)' % (it, of, ofg_norm))"], {}), "('opt: iter: %d, of: %e (||ofg||: %e)' % (it, of, ofg_norm))\n", (718, 778), False, 'from sfepy.base.base import output, get_default, pause, Struct\n'), ((1149, 1156), 'sfepy.base.timing.Timer', 'Timer', ([], {}), '()\n', (1154, 1156), False, 'from sfepy.base.timing import Timer\n'), ((1426, 1445), 'numpy.zeros_like', 'nm.zeros_like', (['aofg'], {}), '(aofg)\n', (1439, 1445), True, 'import numpy as nm\n'), ((1480, 1499), 'six.moves.range', 'range', (['xit.shape[0]'], {}), '(xit.shape[0])\n', (1485, 1499), False, 'from six.moves import range\n'), ((1781, 1868), 'numpy.concatenate', 'nm.concatenate', (['(aofg[:, nm.newaxis], dofg[:, nm.newaxis], diff[:, nm.newaxis])', '(1)'], {}), '((aofg[:, nm.newaxis], dofg[:, nm.newaxis], diff[:, nm.\n newaxis]), 1)\n', (1795, 1868), True, 'import numpy as nm\n'), ((1891, 1902), 'sfepy.base.base.output', 'output', (['aux'], {}), '(aux)\n', (1897, 1902), False, 'from sfepy.base.base import output, get_default, pause, Struct\n'), ((2210, 2241), 'sfepy.base.base.pause', 'pause', (['"""gradient checking done"""'], {}), "('gradient checking done')\n", (2215, 2241), False, 'from sfepy.base.base import output, get_default, pause, Struct\n'), ((1697, 1741), 'sfepy.base.base.output', 'output', (['"""**********"""', 'ii', 'aofg[ii]', 'dofg[ii]'], {}), "('**********', ii, aofg[ii], dofg[ii])\n", (1703, 1741), False, 'from sfepy.base.base import output, get_default, pause, Struct\n'), ((1914, 1936), 'numpy.linalg.norm', 'nla.norm', (['diff', 'nm.Inf'], {}), '(diff, nm.Inf)\n', (1922, 1936), True, 'import numpy.linalg as nla\n'), ((2085, 2101), 'pylab.plot', 'pylab.plot', (['aofg'], {}), '(aofg)\n', (2095, 2101), False, 'import pylab\n'), ((2110, 2126), 'pylab.plot', 'pylab.plot', (['dofg'], {}), '(dofg)\n', (2120, 2126), False, 'import pylab\n'), ((2135, 2184), 'pylab.legend', 'pylab.legend', (["('analytical', 'finite difference')"], {}), "(('analytical', 'finite difference'))\n", (2147, 2184), False, 'import pylab\n'), ((2193, 2205), 'pylab.show', 'pylab.show', ([], {}), '()\n', (2203, 2205), False, 'import pylab\n'), ((4814, 4863), 'sfepy.solvers.solvers.OptimizationSolver.__init__', 'OptimizationSolver.__init__', (['self', 'conf'], {}), '(self, conf, **kwargs)\n', (4841, 4863), False, 'from sfepy.solvers.solvers import OptimizationSolver\n'), ((4905, 4927), 'sfepy.base.log.get_logging_conf', 'get_logging_conf', (['conf'], {}), '(conf)\n', (4921, 4927), False, 'from sfepy.base.log import Log, get_logging_conf\n'), ((4953, 4983), 'sfepy.base.base.Struct', 'Struct', ([], {'name': '"""log_conf"""'}), "(name='log_conf', **log)\n", (4959, 4983), False, 'from sfepy.base.base import output, get_default, pause, Struct\n'), ((5713, 5741), 'sfepy.base.base.get_default', 'get_default', (['conf', 'self.conf'], {}), '(conf, self.conf)\n', (5724, 5741), False, 'from sfepy.base.base import output, get_default, pause, Struct\n'), ((5760, 5794), 'sfepy.base.base.get_default', 'get_default', (['obj_fun', 'self.obj_fun'], {}), '(obj_fun, self.obj_fun)\n', (5771, 5794), False, 'from sfepy.base.base import output, get_default, pause, Struct\n'), ((5818, 5862), 'sfepy.base.base.get_default', 'get_default', (['obj_fun_grad', 'self.obj_fun_grad'], {}), '(obj_fun_grad, self.obj_fun_grad)\n', (5829, 5862), False, 'from sfepy.base.base import output, get_default, pause, Struct\n'), ((5880, 5912), 'sfepy.base.base.get_default', 'get_default', (['status', 'self.status'], 
{}), '(status, self.status)\n', (5891, 5912), False, 'from sfepy.base.base import output, get_default, pause, Struct\n'), ((5932, 5968), 'sfepy.base.base.get_default', 'get_default', (['obj_args', 'self.obj_args'], {}), '(obj_args, self.obj_args)\n', (5943, 5968), False, 'from sfepy.base.base import output, get_default, pause, Struct\n'), ((6049, 6088), 'sfepy.base.base.output', 'output', (['"""entering optimization loop..."""'], {}), "('entering optimization loop...')\n", (6055, 6088), False, 'from sfepy.base.base import output, get_default, pause, Struct\n'), ((6241, 6248), 'sfepy.base.timing.Timer', 'Timer', ([], {}), '()\n', (6246, 6248), False, 'from sfepy.base.timing import Timer\n'), ((9763, 9803), 'sfepy.base.base.output', 'output', (["('status: %d' % ret)"], {}), "('status: %d' % ret)\n", (9769, 9803), False, 'from sfepy.base.base import output, get_default, pause, Struct\n'), ((9812, 9854), 'sfepy.base.base.output', 'output', (["('initial value: %.8e' % of0)"], {}), "('initial value: %.8e' % of0)\n", (9818, 9854), False, 'from sfepy.base.base import output, get_default, pause, Struct\n'), ((9863, 9904), 'sfepy.base.base.output', 'output', (["('current value: %.8e' % of)"], {}), "('current value: %.8e' % of)\n", (9869, 9904), False, 'from sfepy.base.base import output, get_default, pause, Struct\n'), ((9913, 9952), 'sfepy.base.base.output', 'output', (["('iterations: %d' % it)"], {}), "('iterations: %d' % it)\n", (9919, 9952), False, 'from sfepy.base.base import output, get_default, pause, Struct\n'), ((11781, 11830), 'sfepy.solvers.solvers.OptimizationSolver.__init__', 'OptimizationSolver.__init__', (['self', 'conf'], {}), '(self, conf, **kwargs)\n', (11808, 11830), False, 'from sfepy.solvers.solvers import OptimizationSolver\n'), ((12402, 12436), 'sfepy.base.base.get_default', 'get_default', (['obj_fun', 'self.obj_fun'], {}), '(obj_fun, self.obj_fun)\n', (12413, 12436), False, 'from sfepy.base.base import output, get_default, pause, Struct\n'), ((12460, 12504), 'sfepy.base.base.get_default', 'get_default', (['obj_fun_grad', 'self.obj_fun_grad'], {}), '(obj_fun_grad, self.obj_fun_grad)\n', (12471, 12504), False, 'from sfepy.base.base import output, get_default, pause, Struct\n'), ((12522, 12554), 'sfepy.base.base.get_default', 'get_default', (['status', 'self.status'], {}), '(status, self.status)\n', (12533, 12554), False, 'from sfepy.base.base import output, get_default, pause, Struct\n'), ((12574, 12610), 'sfepy.base.base.get_default', 'get_default', (['obj_args', 'self.obj_args'], {}), '(obj_args, self.obj_args)\n', (12585, 12610), False, 'from sfepy.base.base import output, get_default, pause, Struct\n'), ((12628, 12645), 'sfepy.base.timing.Timer', 'Timer', ([], {'start': '(True)'}), '(start=True)\n', (12633, 12645), False, 'from sfepy.base.timing import Timer\n'), ((5111, 5389), 'sfepy.base.log.Log', 'Log', (["[['$||\\\\Psi||$'], ['$||\\\\nabla \\\\Psi||$'], ['$\\\\alpha$'], ['iteration']]"], {'xlabels': "['', '', 'all iterations', 'all iterations']", 'yscales': 'conf.yscales', 'is_plot': '(conf.log.plot is not None)', 'log_filename': 'conf.log.text', 'formats': "[['%.8e'], ['%.3e'], ['%.3e'], ['%d']]"}), "([['$||\\\\Psi||$'], ['$||\\\\nabla \\\\Psi||$'], ['$\\\\alpha$'], ['iteration']\n ], xlabels=['', '', 'all iterations', 'all iterations'], yscales=conf.\n yscales, is_plot=conf.log.plot is not None, log_filename=conf.log.text,\n formats=[['%.8e'], ['%.3e'], ['%.3e'], ['%d']])\n", (5114, 5389), False, 'from sfepy.base.log import Log, get_logging_conf\n'), ((6805, 6829), 
'numpy.linalg.norm', 'nla.norm', (['ofg', 'conf.norm'], {}), '(ofg, conf.norm)\n', (6813, 6829), True, 'import numpy.linalg as nla\n'), ((9610, 9635), 'six.iteritems', 'six.iteritems', (['time_stats'], {}), '(time_stats)\n', (9623, 9635), False, 'import six\n'), ((8259, 8287), 'sfepy.base.base.output', 'output', (['"""full linesearch..."""'], {}), "('full linesearch...')\n", (8265, 8287), False, 'from sfepy.base.base import output, get_default, pause, Struct\n'), ((8371, 8459), 'scipy.optimize.linesearch.line_search', 'linesearch.line_search', (['fn_of', 'fn_ofg', 'xit', '(-ofg)', 'ofg', 'of_prev', 'of_prev_prev'], {'c2': '(0.4)'}), '(fn_of, fn_ofg, xit, -ofg, ofg, of_prev, of_prev_prev,\n c2=0.4)\n', (8393, 8459), True, 'import scipy.optimize.linesearch as linesearch\n'), ((9101, 9134), 'sfepy.base.base.output', 'output', (["(' -> alpha: %.8e' % alpha)"], {}), "(' -> alpha: %.8e' % alpha)\n", (9107, 9134), False, 'from sfepy.base.base import output, get_default, pause, Struct\n'), ((12855, 12886), 'inspect.getargspec', 'inspect.getargspec', (['self.solver'], {}), '(self.solver)\n', (12873, 12886), False, 'import inspect\n'), ((7394, 7436), 'sfepy.base.base.output', 'output', (["('warp: reducing step (%f)' % alpha)"], {}), "('warp: reducing step (%f)' % alpha)\n", (7400, 7436), False, 'from sfepy.base.base import output, get_default, pause, Struct\n'), ((7949, 7995), 'sfepy.base.base.output', 'output', (['"""linesearch failed, continuing anyway"""'], {}), "('linesearch failed, continuing anyway')\n", (7955, 7995), False, 'from sfepy.base.base import output, get_default, pause, Struct\n'), ((8706, 8784), 'scipy.optimize.line_search', 'sopt.line_search', (['fn_of', 'fn_ofg', 'xit', '(-ofg)', 'ofg', 'of_prev_bak', 'of_prev_prev_bak'], {}), '(fn_of, fn_ofg, xit, -ofg, ofg, of_prev_bak, of_prev_prev_bak)\n', (8722, 8784), True, 'import scipy.optimize as sopt\n'), ((9218, 9279), 'sfepy.base.base.output', 'output', (["('full linesearch off (%s and %s)' % (conf.ls, can_ls))"], {}), "('full linesearch off (%s and %s)' % (conf.ls, can_ls))\n", (9224, 9279), False, 'from sfepy.base.base import output, get_default, pause, Struct\n'), ((9686, 9728), 'sfepy.base.base.output', 'output', (["('%10s: %7.2f [s]' % (key, val[-1]))"], {}), "('%10s: %7.2f [s]' % (key, val[-1]))\n", (9692, 9728), False, 'from sfepy.base.base import output, get_default, pause, Struct\n'), ((10035, 10059), 'numpy.sum', 'nm.sum', (["time_stats['of']"], {}), "(time_stats['of'])\n", (10041, 10059), True, 'import numpy as nm\n'), ((10145, 10170), 'numpy.sum', 'nm.sum', (["time_stats['ofg']"], {}), "(time_stats['ofg'])\n", (10151, 10170), True, 'import numpy as nm\n'), ((7617, 7667), 'sfepy.base.base.output', 'output', (["('backtracking: reducing step (%f)' % alpha)"], {}), "('backtracking: reducing step (%f)' % alpha)\n", (7623, 7667), False, 'from sfepy.base.base import output, get_default, pause, Struct\n')]
|
# This example implements homogenization of porous structures undergoing finite strains.
#
# largedef_porous_mac.py - problem at (global) macroscopic level
# largedef_porous_mic.py - local subproblems, homogenized coefficients
#
# The mathematical model and numerical results are described in:
#
# <NAME>., <NAME>.
# Homogenization of large deforming fluid-saturated porous structures
# https://arxiv.org/abs/2012.03730
#
# Run simulation:
#
# ./simple.py example_largedef_porous-1/largedef_porous_mac.py
#
# The results are stored in `example_largedef_porous-1/results` directory.
#
import numpy as nm
import six
import os.path as osp
from sfepy import data_dir
from sfepy.base.base import Struct, output, debug
from sfepy.terms.terms_hyperelastic_ul import HyperElasticULFamilyData
from sfepy.homogenization.micmac import get_homog_coefs_nonlinear
import sfepy.linalg as la
from sfepy.solvers.ts import TimeStepper
from sfepy.discrete.state import State
wdir = osp.dirname(__file__)
hyperelastic_data = {
'update_materials': True,
'state': {'u': None, 'du': None,
'p': None, 'dp': None},
'mapping0': None,
'coors0': None,
'macro_data': None,
}
def post_process(out, pb, state, extend=False):
ts = hyperelastic_data['ts']
if isinstance(state, dict):
pass
else:
stress = pb.evaluate('ev_volume_integrate_mat.i.Omega(solid.S, u)',
mode='el_avg')
out['cauchy_stress'] = Struct(name='output_data',
mode='cell',
data=stress)
ret_stress = pb.evaluate('ev_volume_integrate_mat.i.Omega(solid.Q, u)',
mode='el_avg')
out['retardation_stress'] = Struct(name='output_data',
mode='cell',
data=ret_stress)
strain = pb.evaluate('ev_volume_integrate_mat.i.Omega(solid.E, u)',
mode='el_avg')
out['green_strain'] = Struct(name='output_data',
mode='cell',
data=strain)
he_state = hyperelastic_data['state']
for ch in pb.conf.chs:
plab = 'p%d' % ch
out['p0_%d' % ch] = Struct(name='output_data',
mode='vertex',
data=he_state[plab][:, nm.newaxis])
dvel = pb.evaluate('ev_diffusion_velocity.i.Omega(solid.C%d, %s)' % (ch, plab),
mode='el_avg')
out['w%d' % ch] = Struct(name='output_data',
mode='cell',
data=dvel)
out['u0'] = Struct(name='output_data',
mode='vertex',
data=he_state['u'])
return out
def homog_macro_map(ccoors, macro, nel):
nqpe = ccoors.shape[0] // nel
macro_ = {k: nm.sum(v.reshape((nel, nqpe) + v.shape[1:]), axis=1) / nqpe
for k, v in macro.items()}
macro_['recovery_idxs'] = []
ccoors_ = nm.sum(ccoors.reshape((nel, nqpe) + ccoors.shape[1:]), axis=1) / nqpe
return ccoors_, macro_
def homog_macro_remap(homcf, ncoor):
nqpe = ncoor // homcf['Volume_total'].shape[0]
homcf_ = {k: nm.repeat(v, nqpe, axis=0) for k, v in homcf.items()
if not k == 'Volume_total'}
return homcf_
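# Editor's sketch of the two mappings above: with nel = 2 elements and
# nqpe = 3 quadrature points per element, homog_macro_map() averages the
# 6 QP values down to 2 per-element values before homogenization, and
# homog_macro_remap() repeats each homogenized coefficient 3 times to get
# back to the 6 QP positions.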
def get_homog_mat(ts, coors, mode, term=None, problem=None, **kwargs):
hyperela = hyperelastic_data
ts = hyperela['ts']
output('get_homog_mat: mode=%s, update=%s'\
% (mode, hyperela['update_materials']))
if not mode == 'qp':
return
if not hyperela['update_materials']:
out = hyperela['homog_mat']
return {k: nm.array(v) for k, v in six.iteritems(out)}
dim = problem.domain.mesh.dim
nqp = coors.shape[0]
state_u = problem.equations.variables['u']
if len(state_u.field.mappings0) == 0:
state_u.field.get_mapping(term.region, term.integral,
term.integration)
state_u.field.save_mappings()
state_u.field.clear_mappings()
state_u.set_data(hyperela['state']['u'].ravel()) # + state_u.data[-1][state_u.indx]
mtx_f = problem.evaluate('ev_def_grad.i.Omega(u)',
mode='qp').reshape(-1, dim, dim)
# relative deformation gradient
if hasattr(problem, 'mtx_f_prev'):
rel_mtx_f = la.dot_sequences(mtx_f, nm.linalg.inv(problem.mtx_f_prev),
'AB')
else:
rel_mtx_f = mtx_f
problem.mtx_f_prev = mtx_f.copy()
macro_data = {
'mtx_e_rel': rel_mtx_f - nm.eye(dim), # relative macro strain
}
for ch in problem.conf.chs:
plab = 'p%d' % ch
state_p = problem.equations.variables[plab]
state_p.set_data(hyperela['state'][plab])
macro_data['p%d_0' % ch] = \
problem.evaluate('ev_volume_integrate.i.Omega(p%d)' % ch,
mode='qp').reshape(-1, 1, 1)
macro_data['gp%d_0' % ch] = \
problem.evaluate('ev_grad.i.Omega(p%d)' % ch,
mode='qp').reshape(-1, dim, 1)
state_p.set_data(hyperela['state']['d' + plab])
macro_data['dp%d_0' % ch] = \
problem.evaluate('ev_volume_integrate.i.Omega(p%d)' % ch,
mode='qp').reshape(-1, 1, 1)
macro_data['gdp%d_0' % ch] = \
problem.evaluate('ev_grad.i.Omega(p%d)' % ch,
mode='qp').reshape(-1, dim, 1)
nel = term.region.entities[-1].shape[0]
ccoors0, macro_data0 = homog_macro_map(coors, macro_data, nel)
macro_data0['macro_ccoor'] = ccoors0
out0 = get_homog_coefs_nonlinear(ts, ccoors0, mode, macro_data0,
term=term, problem=problem,
iteration=ts.step, **kwargs)
out0['C1'] += nm.eye(2) * 1e-12 # ! auxiliary permeability
out0['C2'] += nm.eye(2) * 1e-12 # ! auxiliary permeability
out = homog_macro_remap(out0, nqp)
# Green strain
out['E'] = 0.5 * (la.dot_sequences(mtx_f, mtx_f, 'ATB') - nm.eye(dim))
for ch in problem.conf.chs:
out['B%d' % ch] = out['B%d' % ch].reshape((nqp, dim, dim))
out['Q'] = out['Q'].reshape((nqp, dim, dim))
hyperela['time'] = ts.step
hyperela['homog_mat'] = \
{k: nm.array(v) for k, v in six.iteritems(out)}
hyperela['update_materials'] = False
hyperela['macro_data'] = macro_data
return out
def incremental_algorithm(pb):
hyperela = hyperelastic_data
chs = pb.conf.chs
ts = pb.conf.ts
hyperela['ts'] = ts
hyperela['ofn_trunk'] = pb.ofn_trunk + '_%03d'
pb.domain.mesh.coors_act = pb.domain.mesh.coors.copy()
pbvars = pb.get_variables()
he_state = hyperela['state']
out = []
    out_data = {}
coors0 = pbvars['u'].field.get_coor()
he_state['coors0'] = coors0.copy()
he_state['u'] = nm.zeros_like(coors0)
he_state['du'] = nm.zeros_like(coors0)
for ch in chs:
plab = 'p%d' % ch
press0 = pbvars[plab].field.get_coor()[:, 0].squeeze()
he_state[plab] = nm.zeros_like(press0)
he_state['d' + plab] = nm.zeros_like(press0)
for step, time in ts:
print('>>> step %d (%e):' % (step, time))
hyperela['update_materials'] = True
pb.ofn_trunk = hyperela['ofn_trunk'] % step
yield pb, out
state = out[-1][1]
result = state.get_parts()
du = result['u']
he_state['u'] += du.reshape(he_state['du'].shape)
he_state['du'][:] = du.reshape(he_state['du'].shape)
pb.set_mesh_coors(he_state['u'] + he_state['coors0'],
update_fields=True, actual=True, clear_all=False)
for ch in chs:
plab = 'p%d' % ch
dp = result[plab]
he_state[plab] += dp
he_state['d' + plab][:] = dp
out_data = post_process(out_data, pb, state, extend=False)
filename = pb.get_output_name()
pb.save_state(filename, out=out_data)
yield None
print('<<< step %d finished' % step)
def move(ts, coor, problem=None, ramp=0.4, **kwargs):
ts = problem.conf.ts
nrs = round(ts.n_step * ramp)
switch = 1 if (ts.step <= nrs) and (ts.step > 0) else 0
displ = nm.ones((coor.shape[0],)) * problem.conf.move_val / nrs * switch
return displ
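# Editor's note: move() ramps the prescribed displacement linearly over the
# first round(n_step * ramp) time steps (each such step prescribes an
# increment of move_val / nrs) and returns zero afterwards.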
def define():
chs = [1, 2]
ts = TimeStepper(0, 0.15, n_step=30)
options = {
'output_dir': osp.join(wdir, 'results'),
'micro_filename': osp.join(wdir, 'largedef_porous_mic.py'),
'parametric_hook': 'incremental_algorithm',
}
materials = {
'solid': 'get_homog',
}
fields = {
'displacement': ('real', 'vector', 'Omega', 1),
'pressure': ('real', 'scalar', 'Omega', 1),
}
variables = {
'u': ('unknown field', 'displacement', 0),
'v': ('test field', 'displacement', 'u'),
'p1': ('unknown field', 'pressure', 1),
'q1': ('test field', 'pressure', 'p1'),
'p2': ('unknown field', 'pressure', 2),
'q2': ('test field', 'pressure', 'p2'),
}
filename_mesh = osp.join(wdir, 'macro_mesh_3x2.vtk')
mesh_d, move_val = 0.24, -0.04
regions = {
'Omega': 'all',
'Left': ('vertices in (x < 0.0001)', 'facet'),
'Right': ('vertices in (x > %e)' % (mesh_d * 0.999), 'facet'),
'Recovery': ('cell 1', 'cell'),
}
ebcs = {
'left_fix_all': ('Left', {'u.all': 0.0}),
'right_fix_x': ('Right', {'u.0': 0.0}),
'right_move_x': ('Right', {'u.1': 'move'}),
}
micro_args = {
'eps0': mesh_d / 3,
'dt': ts.dt,
}
functions = {
'move': (move,),
'get_homog': (lambda ts, coors, mode, **kwargs:
get_homog_mat(ts, coors, mode, define_args=micro_args, **kwargs),),
}
integrals = {
'i': 3,
}
equations = {
# eq. (60)
'balance_of_forces': """
dw_nonsym_elastic.i.Omega(solid.A, v, u)
- dw_biot.i.Omega(solid.B1, v, p1)
- dw_biot.i.Omega(solid.B2, v, p2)
=
- dw_lin_prestress.i.Omega(solid.S, v)
- dw_lin_prestress.i.Omega(solid.Q, v)""",
# eq. (61), alpha = 1
'mass_conservation_1': """
- %e * dw_biot.i.Omega(solid.B1, u, q1)
- dw_volume_dot.i.Omega(solid.G11, q1, p1)
- dw_volume_dot.i.Omega(solid.G12, q1, p2)
- dw_diffusion.i.Omega(solid.C1, q1, p1)
=
dw_volume_lvf.i.Omega(solid.Z1, q1)
""" % (1 / ts.dt),
# eq. (61), alpha = 2
'mass_conservation_2': """
- %e * dw_biot.i.Omega(solid.B2, u, q2)
- dw_volume_dot.i.Omega(solid.G21, q2, p1)
- dw_volume_dot.i.Omega(solid.G22, q2, p2)
- dw_diffusion.i.Omega(solid.C2, q2, p2)
=
dw_volume_lvf.i.Omega(solid.Z2, q2)
""" % (1. / ts.dt),
}
solvers = {
'ls': ('ls.scipy_direct', {}),
'newton': ('nls.newton', {
'eps_a': 1e-3,
'eps_r': 1e-3,
'i_max': 1,
}),
}
return locals()
|
[
"sfepy.base.base.Struct",
"sfepy.homogenization.micmac.get_homog_coefs_nonlinear",
"sfepy.linalg.dot_sequences",
"sfepy.base.base.output",
"sfepy.solvers.ts.TimeStepper"
] |
[((968, 989), 'os.path.dirname', 'osp.dirname', (['__file__'], {}), '(__file__)\n', (979, 989), True, 'import os.path as osp\n'), ((3622, 3709), 'sfepy.base.base.output', 'output', (["('get_homog_mat: mode=%s, update=%s' % (mode, hyperela['update_materials']))"], {}), "('get_homog_mat: mode=%s, update=%s' % (mode, hyperela[\n 'update_materials']))\n", (3628, 3709), False, 'from sfepy.base.base import Struct, output, debug\n'), ((5836, 5954), 'sfepy.homogenization.micmac.get_homog_coefs_nonlinear', 'get_homog_coefs_nonlinear', (['ts', 'ccoors0', 'mode', 'macro_data0'], {'term': 'term', 'problem': 'problem', 'iteration': 'ts.step'}), '(ts, ccoors0, mode, macro_data0, term=term,\n problem=problem, iteration=ts.step, **kwargs)\n', (5861, 5954), False, 'from sfepy.homogenization.micmac import get_homog_coefs_nonlinear\n'), ((7095, 7116), 'numpy.zeros_like', 'nm.zeros_like', (['coors0'], {}), '(coors0)\n', (7108, 7116), True, 'import numpy as nm\n'), ((7138, 7159), 'numpy.zeros_like', 'nm.zeros_like', (['coors0'], {}), '(coors0)\n', (7151, 7159), True, 'import numpy as nm\n'), ((8611, 8642), 'sfepy.solvers.ts.TimeStepper', 'TimeStepper', (['(0)', '(0.15)'], {'n_step': '(30)'}), '(0, 0.15, n_step=30)\n', (8622, 8642), False, 'from sfepy.solvers.ts import TimeStepper\n'), ((9359, 9395), 'os.path.join', 'osp.join', (['wdir', '"""macro_mesh_3x2.vtk"""'], {}), "(wdir, 'macro_mesh_3x2.vtk')\n", (9367, 9395), True, 'import os.path as osp\n'), ((1476, 1528), 'sfepy.base.base.Struct', 'Struct', ([], {'name': '"""output_data"""', 'mode': '"""cell"""', 'data': 'stress'}), "(name='output_data', mode='cell', data=stress)\n", (1482, 1528), False, 'from sfepy.base.base import Struct, output, debug\n'), ((1771, 1827), 'sfepy.base.base.Struct', 'Struct', ([], {'name': '"""output_data"""', 'mode': '"""cell"""', 'data': 'ret_stress'}), "(name='output_data', mode='cell', data=ret_stress)\n", (1777, 1827), False, 'from sfepy.base.base import Struct, output, debug\n'), ((2066, 2118), 'sfepy.base.base.Struct', 'Struct', ([], {'name': '"""output_data"""', 'mode': '"""cell"""', 'data': 'strain'}), "(name='output_data', mode='cell', data=strain)\n", (2072, 2118), False, 'from sfepy.base.base import Struct, output, debug\n'), ((2804, 2865), 'sfepy.base.base.Struct', 'Struct', ([], {'name': '"""output_data"""', 'mode': '"""vertex"""', 'data': "he_state['u']"}), "(name='output_data', mode='vertex', data=he_state['u'])\n", (2810, 2865), False, 'from sfepy.base.base import Struct, output, debug\n'), ((3377, 3403), 'numpy.repeat', 'nm.repeat', (['v', 'nqpe'], {'axis': '(0)'}), '(v, nqpe, axis=0)\n', (3386, 3403), True, 'import numpy as nm\n'), ((6043, 6052), 'numpy.eye', 'nm.eye', (['(2)'], {}), '(2)\n', (6049, 6052), True, 'import numpy as nm\n'), ((6107, 6116), 'numpy.eye', 'nm.eye', (['(2)'], {}), '(2)\n', (6113, 6116), True, 'import numpy as nm\n'), ((6511, 6522), 'numpy.array', 'nm.array', (['v'], {}), '(v)\n', (6519, 6522), True, 'import numpy as nm\n'), ((7294, 7315), 'numpy.zeros_like', 'nm.zeros_like', (['press0'], {}), '(press0)\n', (7307, 7315), True, 'import numpy as nm\n'), ((7347, 7368), 'numpy.zeros_like', 'nm.zeros_like', (['press0'], {}), '(press0)\n', (7360, 7368), True, 'import numpy as nm\n'), ((8682, 8707), 'os.path.join', 'osp.join', (['wdir', '"""results"""'], {}), "(wdir, 'results')\n", (8690, 8707), True, 'import os.path as osp\n'), ((8735, 8775), 'os.path.join', 'osp.join', (['wdir', '"""largedef_porous_mic.py"""'], {}), "(wdir, 'largedef_porous_mic.py')\n", (8743, 8775), True, 'import os.path as 
osp\n'), ((2333, 2410), 'sfepy.base.base.Struct', 'Struct', ([], {'name': '"""output_data"""', 'mode': '"""vertex"""', 'data': 'he_state[plab][:, nm.newaxis]'}), "(name='output_data', mode='vertex', data=he_state[plab][:, nm.newaxis])\n", (2339, 2410), False, 'from sfepy.base.base import Struct, output, debug\n'), ((2658, 2708), 'sfepy.base.base.Struct', 'Struct', ([], {'name': '"""output_data"""', 'mode': '"""cell"""', 'data': 'dvel'}), "(name='output_data', mode='cell', data=dvel)\n", (2664, 2708), False, 'from sfepy.base.base import Struct, output, debug\n'), ((3852, 3863), 'numpy.array', 'nm.array', (['v'], {}), '(v)\n', (3860, 3863), True, 'import numpy as nm\n'), ((4560, 4593), 'numpy.linalg.inv', 'nm.linalg.inv', (['problem.mtx_f_prev'], {}), '(problem.mtx_f_prev)\n', (4573, 4593), True, 'import numpy as nm\n'), ((4766, 4777), 'numpy.eye', 'nm.eye', (['dim'], {}), '(dim)\n', (4772, 4777), True, 'import numpy as nm\n'), ((6234, 6271), 'sfepy.linalg.dot_sequences', 'la.dot_sequences', (['mtx_f', 'mtx_f', '"""ATB"""'], {}), "(mtx_f, mtx_f, 'ATB')\n", (6250, 6271), True, 'import sfepy.linalg as la\n'), ((6274, 6285), 'numpy.eye', 'nm.eye', (['dim'], {}), '(dim)\n', (6280, 6285), True, 'import numpy as nm\n'), ((6535, 6553), 'six.iteritems', 'six.iteritems', (['out'], {}), '(out)\n', (6548, 6553), False, 'import six\n'), ((3876, 3894), 'six.iteritems', 'six.iteritems', (['out'], {}), '(out)\n', (3889, 3894), False, 'import six\n'), ((8485, 8510), 'numpy.ones', 'nm.ones', (['(coor.shape[0],)'], {}), '((coor.shape[0],))\n', (8492, 8510), True, 'import numpy as nm\n')]
|
"""
IGA domain generators.
"""
import numpy as nm
from sfepy.base.base import output, Struct
import sfepy.discrete.iga as iga
from sfepy.discrete.iga.domain import NurbsPatch
def gen_patch_block_domain(dims, shape, centre, degrees, continuity=None,
name='block', verbose=True):
"""
Generate a single IGA patch block in 2D or 3D of given degrees and
continuity using igakit.
Parameters
----------
dims : array of D floats
Dimensions of the block.
shape : array of D ints
Numbers of unique knot values along each axis.
centre : array of D floats
Centre of the block.
    degrees : array of D ints
NURBS degrees along each axis.
continuity : array of D ints, optional
NURBS continuity along each axis. If None, `degrees-1` is used.
name : string
Domain name.
verbose : bool
If True, report progress of the domain generation.
Returns
-------
nurbs : NurbsPatch instance
The NURBS data. The igakit NURBS object is stored as `nurbs` attribute.
bmesh : Struct instance
The Bezier mesh data.
regions : dict
The patch surface regions.
"""
import igakit.cad as cad
dims = nm.asarray(dims, dtype=nm.float64)
shape = nm.asarray(shape, dtype=nm.int32)
centre = nm.asarray(centre, dtype=nm.float64)
degrees = nm.asarray(degrees, dtype=nm.int32)
if continuity is None:
continuity = degrees - 1
else:
continuity = nm.asarray(continuity, dtype=nm.int32)
dim = len(shape)
output('generating NURBS...', verbose=verbose)
dd = centre - 0.5 * dims
block = cad.grid(shape - 1, degree=degrees, continuity=continuity)
    for ia in range(dim):
        block.scale(dims[ia], ia)
    for ia in range(dim):
        block.translate(dd[ia], ia)
# Force uniform control points. This prevents coarser resolution inside the
# block.
shape = nm.asarray(block.points.shape[:-1])
n_nod = nm.prod(shape)
x0 = centre - 0.5 * dims
dd = dims / (shape - 1)
ngrid = nm.mgrid[[slice(ii) for ii in shape]]
ngrid.shape = (dim, n_nod)
coors = x0 + ngrid.T * dd
coors.shape = shape.tolist() + [dim]
block.array[..., :dim] = coors
output('...done', verbose=verbose)
# Compute Bezier extraction data.
output('computing Bezier mesh...', verbose=verbose)
cs = iga.compute_bezier_extraction(block.knots, block.degree)
n_els = [len(ii) for ii in cs]
conn, bconn = iga.create_connectivity(n_els, block.knots, block.degree)
ccs = iga.combine_bezier_extraction(cs)
cps = block.points[..., :dim].copy()
cps = cps.reshape((-1, dim))
bcps, bweights = iga.compute_bezier_control(cps, block.weights.ravel(), ccs,
conn, bconn)
nurbs = NurbsPatch(block.knots, degrees, cps, block.weights.ravel(), cs,
conn)
nurbs.nurbs = block
bmesh = Struct(name='bmesh', cps=bcps, weights=bweights, conn=bconn)
output('...done', verbose=verbose)
output('defining regions...', verbose=verbose)
regions = iga.get_patch_box_regions(n_els, block.degree)
output('...done', verbose=verbose)
return nurbs, bmesh, regions
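if __name__ == '__main__':
    # Editor's usage sketch: a biquadratic 2D patch block; requires igakit.
    nurbs, bmesh, regions = gen_patch_block_domain(
        dims=[1.0, 1.0], shape=[5, 5], centre=[0.0, 0.0], degrees=[2, 2])
    print(nurbs, sorted(regions.keys()))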
|
[
"sfepy.discrete.iga.get_patch_box_regions",
"sfepy.base.base.output",
"sfepy.discrete.iga.combine_bezier_extraction",
"sfepy.base.base.Struct",
"sfepy.discrete.iga.compute_bezier_extraction",
"sfepy.discrete.iga.create_connectivity"
] |
[((1252, 1286), 'numpy.asarray', 'nm.asarray', (['dims'], {'dtype': 'nm.float64'}), '(dims, dtype=nm.float64)\n', (1262, 1286), True, 'import numpy as nm\n'), ((1299, 1332), 'numpy.asarray', 'nm.asarray', (['shape'], {'dtype': 'nm.int32'}), '(shape, dtype=nm.int32)\n', (1309, 1332), True, 'import numpy as nm\n'), ((1346, 1382), 'numpy.asarray', 'nm.asarray', (['centre'], {'dtype': 'nm.float64'}), '(centre, dtype=nm.float64)\n', (1356, 1382), True, 'import numpy as nm\n'), ((1397, 1432), 'numpy.asarray', 'nm.asarray', (['degrees'], {'dtype': 'nm.int32'}), '(degrees, dtype=nm.int32)\n', (1407, 1432), True, 'import numpy as nm\n'), ((1592, 1638), 'sfepy.base.base.output', 'output', (['"""generating NURBS..."""'], {'verbose': 'verbose'}), "('generating NURBS...', verbose=verbose)\n", (1598, 1638), False, 'from sfepy.base.base import output, Struct\n'), ((1681, 1739), 'igakit.cad.grid', 'cad.grid', (['(shape - 1)'], {'degree': 'degrees', 'continuity': 'continuity'}), '(shape - 1, degree=degrees, continuity=continuity)\n', (1689, 1739), True, 'import igakit.cad as cad\n'), ((1972, 2007), 'numpy.asarray', 'nm.asarray', (['block.points.shape[:-1]'], {}), '(block.points.shape[:-1])\n', (1982, 2007), True, 'import numpy as nm\n'), ((2020, 2034), 'numpy.prod', 'nm.prod', (['shape'], {}), '(shape)\n', (2027, 2034), True, 'import numpy as nm\n'), ((2287, 2321), 'sfepy.base.base.output', 'output', (['"""...done"""'], {'verbose': 'verbose'}), "('...done', verbose=verbose)\n", (2293, 2321), False, 'from sfepy.base.base import output, Struct\n'), ((2365, 2416), 'sfepy.base.base.output', 'output', (['"""computing Bezier mesh..."""'], {'verbose': 'verbose'}), "('computing Bezier mesh...', verbose=verbose)\n", (2371, 2416), False, 'from sfepy.base.base import output, Struct\n'), ((2426, 2482), 'sfepy.discrete.iga.compute_bezier_extraction', 'iga.compute_bezier_extraction', (['block.knots', 'block.degree'], {}), '(block.knots, block.degree)\n', (2455, 2482), True, 'import sfepy.discrete.iga as iga\n'), ((2536, 2593), 'sfepy.discrete.iga.create_connectivity', 'iga.create_connectivity', (['n_els', 'block.knots', 'block.degree'], {}), '(n_els, block.knots, block.degree)\n', (2559, 2593), True, 'import sfepy.discrete.iga as iga\n'), ((2605, 2638), 'sfepy.discrete.iga.combine_bezier_extraction', 'iga.combine_bezier_extraction', (['cs'], {}), '(cs)\n', (2634, 2638), True, 'import sfepy.discrete.iga as iga\n'), ((2999, 3059), 'sfepy.base.base.Struct', 'Struct', ([], {'name': '"""bmesh"""', 'cps': 'bcps', 'weights': 'bweights', 'conn': 'bconn'}), "(name='bmesh', cps=bcps, weights=bweights, conn=bconn)\n", (3005, 3059), False, 'from sfepy.base.base import output, Struct\n'), ((3064, 3098), 'sfepy.base.base.output', 'output', (['"""...done"""'], {'verbose': 'verbose'}), "('...done', verbose=verbose)\n", (3070, 3098), False, 'from sfepy.base.base import output, Struct\n'), ((3104, 3150), 'sfepy.base.base.output', 'output', (['"""defining regions..."""'], {'verbose': 'verbose'}), "('defining regions...', verbose=verbose)\n", (3110, 3150), False, 'from sfepy.base.base import output, Struct\n'), ((3165, 3211), 'sfepy.discrete.iga.get_patch_box_regions', 'iga.get_patch_box_regions', (['n_els', 'block.degree'], {}), '(n_els, block.degree)\n', (3190, 3211), True, 'import sfepy.discrete.iga as iga\n'), ((3216, 3250), 'sfepy.base.base.output', 'output', (['"""...done"""'], {'verbose': 'verbose'}), "('...done', verbose=verbose)\n", (3222, 3250), False, 'from sfepy.base.base import output, Struct\n'), ((1526, 1564), 
'numpy.asarray', 'nm.asarray', (['continuity'], {'dtype': 'nm.int32'}), '(continuity, dtype=nm.int32)\n', (1536, 1564), True, 'import numpy as nm\n')]
|
from typing import TYPE_CHECKING, List, Optional
from sqlmodel import Field, Relationship, SQLModel
if TYPE_CHECKING:
from .hero_model import Hero
class Team(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str
headquarters: str
heroes: List["Hero"] = Relationship(back_populates="team")
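# Editor's usage sketch: assumes the Hero model from .hero_model is also
# imported so SQLAlchemy can resolve the "Hero" relationship string, e.g.
#   from sqlmodel import Session, create_engine
#   engine = create_engine("sqlite://")
#   SQLModel.metadata.create_all(engine)
#   with Session(engine) as session:
#       session.add(Team(name="Z-Force", headquarters="HQ"))
#       session.commit()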
|
[
"sqlmodel.Relationship",
"sqlmodel.Field"
] |
[((213, 250), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (218, 250), False, 'from sqlmodel import Field, Relationship, SQLModel\n'), ((315, 350), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""team"""'}), "(back_populates='team')\n", (327, 350), False, 'from sqlmodel import Field, Relationship, SQLModel\n')]
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from typing import List, Tuple
import numpy as np
import megengine._internal as mgb
import megengine.functional as F
from megengine import Graph, jit
from megengine.module import Linear, Module
from megengine.test import assertTensorClose
from .env import modified_environ
class MLP(Module):
def __init__(self):
super().__init__()
self.dense0 = Linear(28, 50)
self.dense1 = Linear(50, 20)
def forward(self, x):
x = self.dense0(x)
x = F.relu(x)
x = self.dense1(x)
return x
def has_gpu(num=1):
try:
mgb.comp_node("gpu{}".format(num - 1))
except mgb.MegBrainError:
return False
return True
def randomNp(*args):
for arg in args:
assert isinstance(arg, int)
return np.random.random(args)
def randomTorch(*args):
import torch # pylint: disable=import-outside-toplevel
for arg in args:
assert isinstance(arg, int)
return torch.tensor(randomNp(*args), dtype=torch.float32)
def graph_mode(*modes):
if not set(modes).issubset({"eager", "static"}):
raise ValueError("graph mode must be in (eager, static)")
def decorator(func):
def wrapper(*args, **kwargs):
if "eager" in set(modes):
func(*args, **kwargs)
if "static" in set(modes):
with Graph() as cg:
cg.set_option("eager_evaluation", False)
func(*args, **kwargs)
return wrapper
return decorator
def _default_compare_fn(x, y):
assertTensorClose(x.numpy(), y)
def opr_test(
cases,
func,
mode=("eager", "static", "dynamic_shape"),
compare_fn=_default_compare_fn,
ref_fn=None,
**kwargs
):
"""
    mode: the list of test modes to run, chosen from eager, static and
        dynamic_shape; all modes are tested if None.
    func: the function that runs the opr.
    compare_fn: the function comparing the result with the expected value;
        assertTensorClose is used if None.
    ref_fn: the function generating the expected data; the case should
        provide "output" if None.
    cases: the list of dict test cases; its length should be 2 for the
        dynamic shape test. Each dict should have "input", and also
        "output" if ref_fn is None; use lists for multiple inputs and
        outputs per case.
    kwargs: the additional kwargs passed to the opr func.
simple examples:
dtype = np.float32
cases = [{"input": [10, 20]}, {"input": [20, 30]}]
opr_test(cases,
F.eye,
ref_fn=lambda n, m: np.eye(n, m).astype(dtype),
dtype=dtype)
"""
def check_results(results, expected):
if not isinstance(results, Tuple):
results = (results,)
for r, e in zip(results, expected):
compare_fn(r, e)
def get_trace_fn(func, enabled, symbolic):
jit.trace.enabled = enabled
return jit.trace(func, symbolic=symbolic)
def get_param(cases, idx):
case = cases[idx]
inp = case.get("input", None)
outp = case.get("output", None)
if inp is None:
raise ValueError("the test case should have input")
if not isinstance(inp, List):
inp = (inp,)
else:
inp = tuple(inp)
if ref_fn is not None and callable(ref_fn):
outp = ref_fn(*inp)
if outp is None:
raise ValueError("the test case should have output or reference function")
if not isinstance(outp, List):
outp = (outp,)
else:
outp = tuple(outp)
return inp, outp
if not set(mode).issubset({"eager", "static", "dynamic_shape"}):
raise ValueError("opr test mode must be in (eager, static, dynamic_shape)")
if len(cases) == 0:
raise ValueError("should give one case at least")
if "dynamic_shape" in set(mode):
if len(cases) != 2:
raise ValueError("should give 2 cases for dynamic shape test")
if not callable(func):
raise ValueError("the input func should be callable")
inp, outp = get_param(cases, 0)
def run(*args, **kwargs):
return func(*args, **kwargs)
if "eager" in set(mode):
f = get_trace_fn(run, False, False)
results = f(*inp, **kwargs)
check_results(results, outp)
if "static" in set(mode) or "dynamic_shape" in set(mode):
f = get_trace_fn(run, True, True)
results = f(*inp, **kwargs)
check_results(results, outp)
if "dynamic_shape" in set(mode):
inp, outp = get_param(cases, 1)
results = f(*inp, **kwargs)
check_results(results, outp)
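# Editor's usage sketch (hypothetical shapes; eager mode only, since the
# default mode tuple also requires two cases for the dynamic shape test):
#   cases = [{"input": np.random.randn(4, 8).astype(np.float32)}]
#   opr_test(cases, F.relu, mode=("eager",), ref_fn=lambda x: np.maximum(x, 0))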
|
[
"megengine.Graph",
"megengine.functional.relu",
"megengine.jit.trace",
"megengine.module.Linear"
] |
[((1156, 1178), 'numpy.random.random', 'np.random.random', (['args'], {}), '(args)\n', (1172, 1178), True, 'import numpy as np\n'), ((747, 761), 'megengine.module.Linear', 'Linear', (['(28)', '(50)'], {}), '(28, 50)\n', (753, 761), False, 'from megengine.module import Linear, Module\n'), ((784, 798), 'megengine.module.Linear', 'Linear', (['(50)', '(20)'], {}), '(50, 20)\n', (790, 798), False, 'from megengine.module import Linear, Module\n'), ((865, 874), 'megengine.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (871, 874), True, 'import megengine.functional as F\n'), ((3321, 3355), 'megengine.jit.trace', 'jit.trace', (['func'], {'symbolic': 'symbolic'}), '(func, symbolic=symbolic)\n', (3330, 3355), False, 'from megengine import Graph, jit\n'), ((1730, 1737), 'megengine.Graph', 'Graph', ([], {}), '()\n', (1735, 1737), False, 'from megengine import Graph, jit\n')]
|
from datetime import date, datetime
from typing import Optional
from pydantic import BaseModel, validator
from sqlmodel import Field, SQLModel
# Simple classes for access control tokens
class Token(BaseModel):
access_token: str
token_type: str
expiry: datetime
class TokenData(BaseModel):
username: Optional[str] = None
# Default user class, this is the one to interact with.
class User(SQLModel):
id: Optional[int] = Field(default=None, primary_key=True)
full_name: str
username: str
email: str
disabled: Optional[bool] = Field(default=False)
roles: Optional[str] = Field(default="appuser")
    created: Optional[datetime] = Field(default_factory=datetime.utcnow)
# Don't ever return FullUser instances - ALWAYS return 'User' at maximum, since FullUser includes hashedpassword.
# FullUser is only needed during creation or resetting of passwords.
class FullUser(User, table=True):
__tablename__ = "Users"
hashedpassword: str
# The Observation class is used for both storage and retrieval operations.
class Observation(SQLModel, table=True):
__tablename__ = "Observations"
id: Optional[int] = Field(default=None, primary_key=True)
indoortempf: float
tempf: float
dewptf: float
windchillf: float
indoorhumidity: float
humidity: float
windspeedmph: float
windgustmph: float
winddir: int
absbaromin: float
baromin: float
rainin: float
dailyrainin: float
weeklyrainin: float
monthlyrainin: float
solarradiation: float
UV: int
dateutc: datetime
realtime: int
rtfreq: int
# Repurpose @validator decorators to perform conversions from imperial to metric on each datapoint as instances are created for output.
# This saves logic/resources in the endpoint and/or in the client when looping over datasets or during mapping.
class Metric_Observation(Observation):
@validator('indoortempf', 'tempf', 'dewptf', 'windchillf', allow_reuse=True)
def convertf(cls, v: float):
        # convert to Celsius
v = (v - 32) * 5.0/9.0
return round(v, 2)
@validator('windspeedmph', 'windgustmph', allow_reuse=True)
def convertmph(cls, v: float):
# convert to m/s
v = v*0.44704
return round(v, 2)
@validator('absbaromin', 'baromin', allow_reuse=True)
def converthpa(cls, v: float):
# convert to hPa
v = v * 33.86
return round(v, 2)
@validator('rainin', 'dailyrainin', 'weeklyrainin', 'monthlyrainin', allow_reuse=True)
def convertin(cls, v: float):
        # convert to mm
v = v * 25.4
return round(v, 2)
# Dependency function to map an ugly pile of params to a cleaner Observation object
def create_observation(ID: str, PASSWORD: str, indoortempf: float, tempf: float, dewptf: float,
windchillf: float, indoorhumidity: float, humidity: float, windspeedmph: float,
windgustmph: float, winddir: int, absbaromin: float, baromin: float, rainin: float,
dailyrainin: float, weeklyrainin: float, monthlyrainin: float, solarradiation: float,
UV: int, dateutc: str, softwaretype: str, action: str, realtime: int, rtfreq: int):
return Observation(**locals())
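# A minimal, self-contained sketch of the same validator-override pattern used
# by Metric_Observation above (hypothetical class names, not part of this API):
# subclassing re-runs the inherited fields through the new validators, so
# imperial inputs come out metric on construction.
if __name__ == "__main__":
    class ImperialReading(BaseModel):
        tempf: float

    class MetricReading(ImperialReading):
        @validator("tempf", allow_reuse=True)
        def to_celsius(cls, v: float) -> float:
            # same Fahrenheit-to-Celsius conversion as convertf() above
            return round((v - 32) * 5.0 / 9.0, 2)

    print(MetricReading(tempf=212.0).tempf)  # 100.0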
|
[
"sqlmodel.Field"
] |
[((445, 482), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (450, 482), False, 'from sqlmodel import Field, SQLModel\n'), ((566, 586), 'sqlmodel.Field', 'Field', ([], {'default': '(False)'}), '(default=False)\n', (571, 586), False, 'from sqlmodel import Field, SQLModel\n'), ((614, 638), 'sqlmodel.Field', 'Field', ([], {'default': '"""appuser"""'}), "(default='appuser')\n", (619, 638), False, 'from sqlmodel import Field, SQLModel\n'), ((1146, 1183), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (1151, 1183), False, 'from sqlmodel import Field, SQLModel\n'), ((1883, 1958), 'pydantic.validator', 'validator', (['"""indoortempf"""', '"""tempf"""', '"""dewptf"""', '"""windchillf"""'], {'allow_reuse': '(True)'}), "('indoortempf', 'tempf', 'dewptf', 'windchillf', allow_reuse=True)\n", (1892, 1958), False, 'from pydantic import BaseModel, validator\n'), ((2085, 2143), 'pydantic.validator', 'validator', (['"""windspeedmph"""', '"""windgustmph"""'], {'allow_reuse': '(True)'}), "('windspeedmph', 'windgustmph', allow_reuse=True)\n", (2094, 2143), False, 'from pydantic import BaseModel, validator\n'), ((2259, 2311), 'pydantic.validator', 'validator', (['"""absbaromin"""', '"""baromin"""'], {'allow_reuse': '(True)'}), "('absbaromin', 'baromin', allow_reuse=True)\n", (2268, 2311), False, 'from pydantic import BaseModel, validator\n'), ((2427, 2516), 'pydantic.validator', 'validator', (['"""rainin"""', '"""dailyrainin"""', '"""weeklyrainin"""', '"""monthlyrainin"""'], {'allow_reuse': '(True)'}), "('rainin', 'dailyrainin', 'weeklyrainin', 'monthlyrainin',\n allow_reuse=True)\n", (2436, 2516), False, 'from pydantic import BaseModel, validator\n'), ((687, 704), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (702, 704), False, 'from datetime import date, datetime\n')]
|
from datetime import datetime, date
from decimal import Decimal
from typing import Optional, List
from fastapi import APIRouter, Depends
from sqlmodel import Field, SQLModel
from ...db import get_session
from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession
router = APIRouter()
class HistoryTravelReimburse(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
history_id: int
history_procedure_id: int
group: str
guardian_id: Optional[int] = None
procedure_id: int
amount: float
detail: str
pdf_path: str
signature_path: str
document_path: str
created_at: datetime
updated_at: datetime
created_by: int
updated_by: Optional[int] = None
@router.post("/history_travel_reimburse", response_model=HistoryTravelReimburse)
async def create_history_travel_reimburse(history_travel_reimburse: HistoryTravelReimburse, session: AsyncSession = Depends(get_session)):
session.add(history_travel_reimburse)
await session.commit()
await session.refresh(history_travel_reimburse)
return history_travel_reimburse
@router.get("/history_travel_reimburse/{id}", response_model=HistoryTravelReimburse)
async def get_history_travel_reimburse(id: int, session: AsyncSession = Depends(get_session)):
history_travel_reimburses = await session.execute(select(HistoryTravelReimburse).where(HistoryTravelReimburse.id == id))
history_travel_reimburse = history_travel_reimburses.scalars().first()
return history_travel_reimburse
@router.put("/history_travel_reimburse/{id}", response_model=HistoryTravelReimburse)
async def update_history_travel_reimburse(id: int, session: AsyncSession = Depends(get_session)):
return None
@router.delete("/history_travel_reimburse/{id}")
async def delete_history_travel_reimburse(session: AsyncSession = Depends(get_session)):
return None
@router.get("/history_travel_reimburse/patient/{patient_id}", response_model=HistoryTravelReimburse)
async def get_history_travel_reimburse_patient(patient_id: int, session: AsyncSession = Depends(get_session)):
    history_id = (await session.execute(select(HistoryTravelReimburse.id).where(HistoryTravelReimburse.patient_id == patient_id))).scalar()  # NOTE: assumes a patient_id column not declared on the model above
history_travel_reimburses = await session.execute(select(HistoryTravelReimburse).where(HistoryTravelReimburse.history_id == history_id))
history_travel_reimburse = history_travel_reimburses.scalars().first()
return history_travel_reimburse
@router.get("/history_travel_reimburse", response_model=HistoryTravelReimburse)
async def get_history_travel_reimburse_daily(session: AsyncSession = Depends(get_session)):
return None
@router.get("/history_travel_reimburse/{id}", response_model=HistoryTravelReimburse)
async def get_history_travel_reimburse_pdf(id: int, session: AsyncSession = Depends(get_session)):
history_travel_reimburses = await session.execute(select(HistoryTravelReimburse.pdf_path).where(HistoryTravelReimburse.id == id))
history_travel_reimburse = history_travel_reimburses.scalars().first()
return history_travel_reimburse
@router.post("/history_travel_reimburse/{id}/document", response_model=HistoryTravelReimburse)
async def upload_document(session: AsyncSession = Depends(get_session)):
return None
@router.post("/history_travel_reimburse/{id}/signature")
async def upload_signature(session: AsyncSession = Depends(get_session)):
return None
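# For reference, a typical shape of the `get_session` dependency imported from
# ...db above (a sketch under assumptions; the real implementation may differ):
#
#     engine = create_async_engine("postgresql+asyncpg://user:pass@host/db")
#     async_session = sessionmaker(engine, class_=AsyncSession, expire_on_commit=False)
#
#     async def get_session() -> AsyncSession:
#         async with async_session() as session:
#             yield session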
|
[
"sqlmodel.Field"
] |
[((295, 306), 'fastapi.APIRouter', 'APIRouter', ([], {}), '()\n', (304, 306), False, 'from fastapi import APIRouter, Depends\n'), ((385, 422), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (390, 422), False, 'from sqlmodel import Field, SQLModel\n'), ((953, 973), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (960, 973), False, 'from fastapi import APIRouter, Depends\n'), ((1292, 1312), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (1299, 1312), False, 'from fastapi import APIRouter, Depends\n'), ((1713, 1733), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (1720, 1733), False, 'from fastapi import APIRouter, Depends\n'), ((1869, 1889), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (1876, 1889), False, 'from fastapi import APIRouter, Depends\n'), ((2099, 2119), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (2106, 2119), False, 'from fastapi import APIRouter, Depends\n'), ((2654, 2674), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (2661, 2674), False, 'from fastapi import APIRouter, Depends\n'), ((2856, 2876), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (2863, 2876), False, 'from fastapi import APIRouter, Depends\n'), ((3271, 3291), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (3278, 3291), False, 'from fastapi import APIRouter, Depends\n'), ((3420, 3440), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (3427, 3440), False, 'from fastapi import APIRouter, Depends\n'), ((1369, 1399), 'sqlalchemy.select', 'select', (['HistoryTravelReimburse'], {}), '(HistoryTravelReimburse)\n', (1375, 1399), False, 'from sqlalchemy import select\n'), ((2161, 2194), 'sqlalchemy.select', 'select', (['HistoryTravelReimburse.id'], {}), '(HistoryTravelReimburse.id)\n', (2167, 2194), False, 'from sqlalchemy import select\n'), ((2305, 2335), 'sqlalchemy.select', 'select', (['HistoryTravelReimburse'], {}), '(HistoryTravelReimburse)\n', (2311, 2335), False, 'from sqlalchemy import select\n'), ((2933, 2972), 'sqlalchemy.select', 'select', (['HistoryTravelReimburse.pdf_path'], {}), '(HistoryTravelReimburse.pdf_path)\n', (2939, 2972), False, 'from sqlalchemy import select\n')]
|
import time
import os
from typing import Optional
from sqlalchemy.exc import OperationalError
from sqlalchemy.engine import URL
from sqlmodel import Field, Session, SQLModel, create_engine, select
from loguru import logger
class Hero(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str
secret_name: str
age: Optional[int] = None
def main():
hero_1 = Hero(name="Deadpond", secret_name="<NAME>")
hero_2 = Hero(name="Spider-Boy", secret_name="<NAME>")
hero_3 = Hero(name="Rusty-Man", secret_name="<NAME>", age=48)
host_name = "postgres" if os.environ.get("IS_INSIDE_DOCKER") else "localhost"
url = URL.create(drivername="postgresql", username="postgres", password="<PASSWORD>", host=host_name, port=5432)
engine = create_engine(url)
for _ in range(5):
try:
SQLModel.metadata.create_all(engine)
break
except OperationalError:
logger.error("Is postgres database running?")
time.sleep(2)
with Session(engine) as session:
session.add_all([hero_1, hero_2])
session.add(hero_3)
session.commit()
with Session(engine) as session:
statement = select(Hero).where(Hero.name == "Spider-Boy")
hero = session.exec(statement).first()
logger.info(hero)
if __name__ == '__main__':
main()
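# Note on URL.create above: no database= is passed, so the URL renders as
# postgresql://postgres:...@<host>:5432 and libpq falls back to its default of
# using the user name ("postgres") as the database name.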
|
[
"sqlmodel.create_engine",
"sqlmodel.select",
"sqlmodel.Session",
"sqlmodel.SQLModel.metadata.create_all",
"sqlmodel.Field"
] |
[((285, 322), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (290, 322), False, 'from sqlmodel import Field, Session, SQLModel, create_engine, select\n'), ((677, 788), 'sqlalchemy.engine.URL.create', 'URL.create', ([], {'drivername': '"""postgresql"""', 'username': '"""postgres"""', 'password': '"""<PASSWORD>"""', 'host': 'host_name', 'port': '(5432)'}), "(drivername='postgresql', username='postgres', password=\n '<PASSWORD>', host=host_name, port=5432)\n", (687, 788), False, 'from sqlalchemy.engine import URL\n'), ((797, 815), 'sqlmodel.create_engine', 'create_engine', (['url'], {}), '(url)\n', (810, 815), False, 'from sqlmodel import Field, Session, SQLModel, create_engine, select\n'), ((615, 649), 'os.environ.get', 'os.environ.get', (['"""IS_INSIDE_DOCKER"""'], {}), "('IS_INSIDE_DOCKER')\n", (629, 649), False, 'import os\n'), ((1046, 1061), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (1053, 1061), False, 'from sqlmodel import Field, Session, SQLModel, create_engine, select\n'), ((1179, 1194), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (1186, 1194), False, 'from sqlmodel import Field, Session, SQLModel, create_engine, select\n'), ((1328, 1345), 'loguru.logger.info', 'logger.info', (['hero'], {}), '(hero)\n', (1339, 1345), False, 'from loguru import logger\n'), ((864, 900), 'sqlmodel.SQLModel.metadata.create_all', 'SQLModel.metadata.create_all', (['engine'], {}), '(engine)\n', (892, 900), False, 'from sqlmodel import Field, Session, SQLModel, create_engine, select\n'), ((964, 1009), 'loguru.logger.error', 'logger.error', (['"""Is postgres database running?"""'], {}), "('Is postgres database running?')\n", (976, 1009), False, 'from loguru import logger\n'), ((1022, 1035), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (1032, 1035), False, 'import time\n'), ((1227, 1239), 'sqlmodel.select', 'select', (['Hero'], {}), '(Hero)\n', (1233, 1239), False, 'from sqlmodel import Field, Session, SQLModel, create_engine, select\n')]
|
from sqlmodel import Field, Relationship, SQLModel
from typing import Optional
from app.models.base_uuid_model import BaseUUIDModel
from uuid import UUID
class HeroBase(SQLModel):
name: str = Field(index=True)
secret_name: str
age: Optional[int] = Field(default=None, index=True)
team_id: Optional[UUID] = Field(default=None, foreign_key="team.id")
class Hero(BaseUUIDModel, HeroBase, table=True):
team: Optional["Team"] = Relationship(back_populates="heroes", sa_relationship_kwargs={"lazy": "selectin"})
created_by_id: Optional[UUID] = Field(default=None, foreign_key="user.id")
created_by: "User" = Relationship(sa_relationship_kwargs={"lazy":"selectin", "primaryjoin":"Hero.created_by_id==User.id"})
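# The forward references above ("Team", "User") assume counterpart models
# defined elsewhere in the app; a hedged sketch of the Team side implied by
# back_populates="heroes" (fields other than "heroes" are assumptions):
#
#     class Team(BaseUUIDModel, table=True):
#         name: str = Field(index=True)
#         heroes: List["Hero"] = Relationship(
#             back_populates="team", sa_relationship_kwargs={"lazy": "selectin"}
#         )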
|
[
"sqlmodel.Field",
"sqlmodel.Relationship"
] |
[((197, 214), 'sqlmodel.Field', 'Field', ([], {'index': '(True)'}), '(index=True)\n', (202, 214), False, 'from sqlmodel import Field, Relationship, SQLModel\n'), ((261, 292), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'index': '(True)'}), '(default=None, index=True)\n', (266, 292), False, 'from sqlmodel import Field, Relationship, SQLModel\n'), ((323, 365), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'foreign_key': '"""team.id"""'}), "(default=None, foreign_key='team.id')\n", (328, 365), False, 'from sqlmodel import Field, Relationship, SQLModel\n'), ((449, 535), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""heroes"""', 'sa_relationship_kwargs': "{'lazy': 'selectin'}"}), "(back_populates='heroes', sa_relationship_kwargs={'lazy':\n 'selectin'})\n", (461, 535), False, 'from sqlmodel import Field, Relationship, SQLModel\n'), ((568, 610), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'foreign_key': '"""user.id"""'}), "(default=None, foreign_key='user.id')\n", (573, 610), False, 'from sqlmodel import Field, Relationship, SQLModel\n'), ((636, 743), 'sqlmodel.Relationship', 'Relationship', ([], {'sa_relationship_kwargs': "{'lazy': 'selectin', 'primaryjoin': 'Hero.created_by_id==User.id'}"}), "(sa_relationship_kwargs={'lazy': 'selectin', 'primaryjoin':\n 'Hero.created_by_id==User.id'})\n", (648, 743), False, 'from sqlmodel import Field, Relationship, SQLModel\n')]
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine.functional as F
from megengine.core import Tensor
from official.vision.detection import layers
def get_focal_loss(
logits: Tensor,
labels: Tensor,
ignore_label: int = -1,
background: int = 0,
alpha: float = 0.5,
gamma: float = 0,
norm_type: str = "fg",
) -> Tensor:
r"""Focal Loss for Dense Object Detection:
<https://arxiv.org/pdf/1708.02002.pdf>
.. math::
FL(p_t) = -\alpha_t(1-p_t)^\gamma \log(p_t)
Args:
logits (Tensor):
the predicted logits with the shape of :math:`(B, A, C)`
labels (Tensor):
the assigned labels of boxes with shape of :math:`(B, A)`
ignore_label (int):
the value of ignore class. Default: -1
background (int):
the value of background class. Default: 0
alpha (float):
parameter to mitigate class imbalance. Default: 0.5
gamma (float):
parameter to mitigate easy/hard loss imbalance. Default: 0
    norm_type (str): currently supports "fg", "none":
        "fg": loss will be normalized by the number of foreground samples
        "none": no normalization
Returns:
the calculated focal loss.
"""
class_range = F.arange(1, logits.shape[2] + 1)
labels = F.add_axis(labels, axis=2)
scores = F.sigmoid(logits)
pos_part = (1 - scores) ** gamma * layers.logsigmoid(logits)
neg_part = scores ** gamma * layers.logsigmoid(-logits)
pos_loss = -(labels == class_range) * pos_part * alpha
neg_loss = (
-(labels != class_range) * (labels != ignore_label) * neg_part * (1 - alpha)
)
loss = (pos_loss + neg_loss).sum()
if norm_type == "fg":
fg_mask = (labels != background) * (labels != ignore_label)
return loss / F.maximum(fg_mask.sum(), 1)
elif norm_type == "none":
return loss
else:
raise NotImplementedError
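# Worked check of the focal-loss formula above (assumption: binary case with
# alpha_t = alpha): for a well-classified sample with p_t = 0.9, gamma = 2
# scales the loss by (1 - 0.9)**2 = 0.01, giving -0.5 * 0.01 * log(0.9) ~ 5.3e-4
# versus -0.5 * log(0.9) ~ 5.3e-2 at gamma = 0, i.e. a 100x down-weighting of
# easy samples.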
def get_smooth_l1_loss(
pred_bbox: Tensor,
gt_bbox: Tensor,
labels: Tensor,
    beta: float = 1,
background: int = 0,
ignore_label: int = -1,
norm_type: str = "fg",
) -> Tensor:
r"""Smooth l1 loss used in RetinaNet.
Args:
pred_bbox (Tensor):
the predicted bbox with the shape of :math:`(B, A, 4)`
gt_bbox (Tensor):
the ground-truth bbox with the shape of :math:`(B, A, 4)`
labels (Tensor):
the assigned labels of boxes with shape of :math:`(B, A)`
    beta (float):
the parameter of smooth l1 loss. Default: 1
background (int):
the value of background class. Default: 0
ignore_label (int):
the value of ignore class. Default: -1
    norm_type (str): currently supports "fg", "all", "none":
        "fg": loss will be normalized by the number of foreground samples
        "all": loss will be normalized by the number of all samples
        "none": no normalization
Returns:
the calculated smooth l1 loss.
"""
pred_bbox = pred_bbox.reshape(-1, 4)
gt_bbox = gt_bbox.reshape(-1, 4)
labels = labels.reshape(-1)
fg_mask = (labels != background) * (labels != ignore_label)
loss = get_smooth_l1_base(pred_bbox, gt_bbox, beta)
loss = (loss.sum(axis=1) * fg_mask).sum()
if norm_type == "fg":
loss = loss / F.maximum(fg_mask.sum(), 1)
elif norm_type == "all":
all_mask = labels != ignore_label
loss = loss / F.maximum(all_mask.sum(), 1)
elif norm_type == "none":
return loss
else:
raise NotImplementedError
return loss
def get_smooth_l1_base(pred_bbox: Tensor, gt_bbox: Tensor, beta: float) -> Tensor:
r"""
Args:
pred_bbox (Tensor):
the predicted bbox with the shape of :math:`(N, 4)`
gt_bbox (Tensor):
the ground-truth bbox with the shape of :math:`(N, 4)`
    beta (float):
the parameter of smooth l1 loss.
Returns:
the calculated smooth l1 loss.
"""
x = pred_bbox - gt_bbox
abs_x = F.abs(x)
if beta < 1e-5:
loss = abs_x
else:
in_loss = 0.5 * x ** 2 / beta
out_loss = abs_x - 0.5 * beta
# FIXME: F.where cannot handle 0-shape tensor yet
# loss = F.where(abs_x < beta, in_loss, out_loss)
in_mask = abs_x < beta
loss = in_loss * in_mask + out_loss * (1 - in_mask)
return loss
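# Continuity check of the two branches above: at |x| = beta the quadratic
# branch gives 0.5 * beta**2 / beta = 0.5 * beta and the linear branch gives
# beta - 0.5 * beta = 0.5 * beta; the first derivatives (x / beta and 1) also
# agree there, so the loss switches branches smoothly.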
def softmax_loss(scores: Tensor, labels: Tensor, ignore_label: int = -1) -> Tensor:
max_scores = F.zero_grad(scores.max(axis=1, keepdims=True))
scores -= max_scores
log_prob = scores - F.log(F.exp(scores).sum(axis=1, keepdims=True))
mask = labels != ignore_label
vlabels = labels * mask
loss = -(F.indexing_one_hot(log_prob, vlabels.astype("int32"), 1) * mask).sum()
loss = loss / F.maximum(mask.sum(), 1)
return loss
|
[
"megengine.functional.add_axis",
"megengine.functional.arange",
"megengine.functional.sigmoid",
"megengine.functional.abs",
"megengine.functional.exp"
] |
[((1623, 1655), 'megengine.functional.arange', 'F.arange', (['(1)', '(logits.shape[2] + 1)'], {}), '(1, logits.shape[2] + 1)\n', (1631, 1655), True, 'import megengine.functional as F\n'), ((1670, 1696), 'megengine.functional.add_axis', 'F.add_axis', (['labels'], {'axis': '(2)'}), '(labels, axis=2)\n', (1680, 1696), True, 'import megengine.functional as F\n'), ((1710, 1727), 'megengine.functional.sigmoid', 'F.sigmoid', (['logits'], {}), '(logits)\n', (1719, 1727), True, 'import megengine.functional as F\n'), ((4412, 4420), 'megengine.functional.abs', 'F.abs', (['x'], {}), '(x)\n', (4417, 4420), True, 'import megengine.functional as F\n'), ((1767, 1792), 'official.vision.detection.layers.logsigmoid', 'layers.logsigmoid', (['logits'], {}), '(logits)\n', (1784, 1792), False, 'from official.vision.detection import layers\n'), ((1826, 1852), 'official.vision.detection.layers.logsigmoid', 'layers.logsigmoid', (['(-logits)'], {}), '(-logits)\n', (1843, 1852), False, 'from official.vision.detection import layers\n'), ((4977, 4990), 'megengine.functional.exp', 'F.exp', (['scores'], {}), '(scores)\n', (4982, 4990), True, 'import megengine.functional as F\n')]
|
from datetime import datetime
from typing import Optional
from fastapi import APIRouter, Depends
from sqlmodel import Field, SQLModel
from ...db import get_session
from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession
router = APIRouter()
class HistorySummaryTreatmsummaryConference(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
history_id_order: int
history_id_conference: int
summary_treatmsummary_conference_id: int
state: str
created_at: datetime
updated_at: datetime
created_by: int
updated_by: Optional[int] = None
class SummaryTreatmsummaryConference(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
problem: str
question: str
summary_plan: str
surgeon_summary: str
pre_operation_abg: bool
post_operation_abg: bool
pre_operation_redo_abg: bool
pre_operation_jaw_surgery: bool
pre_operation_computing_design: bool
pre_operation_3d_print: bool
created_at: datetime
updated_at: datetime
created_by: int
updated_by: Optional[int] = None
class SummaryTreatmsummaryConferenceDoctorMap(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
summary_treatmsummary_conference_id: int
doctor_id: int
created_at: datetime
updated_at: datetime
created_by: int
updated_by: Optional[int] = None
@router.post("/history_summary_conference", response_model=HistorySummaryTreatmsummaryConference)
async def create_history_summary_conference(history_summary_conference: HistorySummaryTreatmsummaryConference, session: AsyncSession = Depends(get_session)):
session.add(history_summary_conference)
await session.commit()
await session.refresh(history_summary_conference)
return history_summary_conference
@router.post("/summary_conference", response_model=SummaryTreatmsummaryConference)
async def create_summary_conference(summary_conference: SummaryTreatmsummaryConference, session: AsyncSession = Depends(get_session)):
session.add(summary_conference)
await session.commit()
await session.refresh(summary_conference)
return summary_conference
@router.get("/history_summary_conference/{id}", response_model=HistorySummaryTreatmsummaryConference)
async def get_history_summary_conference(id: int, session: AsyncSession = Depends(get_session)):
history_summary_conferences = await session.execute(select(HistorySummaryTreatmsummaryConference).where(HistorySummaryTreatmsummaryConference.id == id))
history_summary_conference = history_summary_conferences.scalars().first()
return history_summary_conference
@router.put("/history_summary_conference/{id}", response_model=HistorySummaryTreatmsummaryConference)
async def update_history_summary_conference(id: int, session: AsyncSession = Depends(get_session)):
return None
@router.delete("/history_summary_conference/{id}")
async def delete_history_summary_conference(session: AsyncSession = Depends(get_session)):
return None
@router.delete("/history_summary_conference/{id}")
async def delete_summary_conference(session: AsyncSession = Depends(get_session)):
return None
|
[
"sqlmodel.Field"
] |
[((256, 267), 'fastapi.APIRouter', 'APIRouter', ([], {}), '()\n', (265, 267), False, 'from fastapi import APIRouter, Depends\n'), ((361, 398), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (366, 398), False, 'from sqlmodel import Field, SQLModel\n'), ((709, 746), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (714, 746), False, 'from sqlmodel import Field, SQLModel\n'), ((1231, 1268), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (1236, 1268), False, 'from sqlmodel import Field, SQLModel\n'), ((1675, 1695), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (1682, 1695), False, 'from fastapi import APIRouter, Depends\n'), ((2058, 2078), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (2065, 2078), False, 'from fastapi import APIRouter, Depends\n'), ((2398, 2418), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (2405, 2418), False, 'from fastapi import APIRouter, Depends\n'), ((2877, 2897), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (2884, 2897), False, 'from fastapi import APIRouter, Depends\n'), ((3037, 3057), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (3044, 3057), False, 'from fastapi import APIRouter, Depends\n'), ((3189, 3209), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (3196, 3209), False, 'from fastapi import APIRouter, Depends\n'), ((2477, 2522), 'sqlalchemy.select', 'select', (['HistorySummaryTreatmsummaryConference'], {}), '(HistorySummaryTreatmsummaryConference)\n', (2483, 2522), False, 'from sqlalchemy import select\n')]
|
#!/usr/bin/env python
# 12.01.2007, c
import os.path as op
import shutil
from optparse import OptionParser
import sfepy
from sfepy.base.base import *
from sfepy.base.conf import ProblemConf, get_standard_keywords
from sfepy.fem import ProblemDefinition
from sfepy.fem.evaluate import assemble_by_blocks
from sfepy.homogenization.phono import transform_plot_data, plot_logs, \
plot_gaps, detect_band_gaps, compute_cat, compute_polarization_angles
from sfepy.homogenization.engine import HomogenizationEngine
from sfepy.applications import SimpleApp
from sfepy.solvers import Solver, eig
from sfepy.base.plotutils import plt
def make_save_hook( base_name, post_process_hook = None, file_per_var = None ):
def save_phono_correctors( state, problem, ir, ic ):
problem.save_state( (base_name % (ir, ic)) + '.vtk', state,
post_process_hook = post_process_hook,
file_per_var = file_per_var )
return save_phono_correctors
def try_set_defaults( obj, attr, defaults ):
try:
values = getattr( obj, attr )
set_defaults( values, defaults )
    except Exception:
values = defaults
return values
def report_iw_cat( iw_dir, christoffel ):
output( 'incident wave direction:' )
output( iw_dir )
output( 'Christoffel acoustic tensor:' )
output( christoffel )
class AcousticBandGapsApp( SimpleApp ):
def process_options( options ):
"""Application options setup. Sets default values for missing
non-compulsory options."""
get = options.get_default_attr
clear_cache = get( 'clear_cache', {} )
eigensolver = get( 'eigensolver', 'eig.sgscipy' )
eig_problem = get( 'eig_problem', 'simple' )
schur = get( 'schur', None )
elasticity_contrast = get( 'elasticity_contrast', 1.0 )
scale_epsilon = get( 'scale_epsilon', 1.0 )
incident_wave_dir = get( 'incident_wave_dir', None )
dispersion = get( 'dispersion', 'simple' )
dispersion_conf = get( 'dispersion_conf', None )
homogeneous = get( 'homogeneous', False )
save = get( 'save_eig_vectors', (0, 0) )
eig_range = get( 'eig_range', None )
freq_margins = get( 'freq_margins', (5, 5) )
# Given in per cent.
freq_margins = 0.01 * nm.array( freq_margins, dtype = nm.float64 )
fixed_eig_range = get( 'fixed_eig_range', None )
# Given in per cent.
freq_step = 0.01 * get( 'freq_step', 5 )
feps = get( 'feps', 1e-8 )
zeps = get( 'zeps', 1e-8 )
teps = get( 'teps', 1e-4 )
teps_rel = get( 'teps_rel', True )
eig_vector_transform = get( 'eig_vector_transform', None )
plot_transform = get( 'plot_transform', None )
plot_transform_wave = get( 'plot_transform_wave', None )
plot_transform_angle = get( 'plot_transform_angle', None )
plot_options = get( 'plot_options', {'show' : True,'legend' : False,} )
fig_name = get( 'fig_name', None )
fig_name_wave = get( 'fig_name_wave', None )
fig_name_angle = get( 'fig_name_angle', None )
aux = {
'resonance' : 'eigenfrequencies',
'masked' : 'masked eigenfrequencies',
'eig_min' : 'min eig($M^*$)',
'eig_mid' : 'mid eig($M^*$)',
'eig_max' : 'max eig($M^*$)',
'y_axis' : 'eigenvalues of mass matrix $M^*$',
}
plot_labels = try_set_defaults( options, 'plot_labels', aux )
aux = {
'resonance' : 'eigenfrequencies',
'masked' : 'masked eigenfrequencies',
'eig_min' : r'$\kappa$(min)',
'eig_mid' : r'$\kappa$(mid)',
'eig_max' : r'$\kappa$(max)',
'y_axis' : 'polarization angles',
}
plot_labels_angle = try_set_defaults( options, 'plot_labels_angle', aux )
aux = {
'resonance' : 'eigenfrequencies',
'masked' : 'masked eigenfrequencies',
'eig_min' : r'wave number (min)',
'eig_mid' : r'wave number (mid)',
'eig_max' : r'wave number (max)',
'y_axis' : 'wave numbers',
}
plot_labels_wave = try_set_defaults( options, 'plot_labels_wave', aux )
plot_rsc = {
'resonance' : {'linewidth' : 0.5, 'color' : 'r', 'linestyle' : '-' },
'masked' : {'linewidth' : 0.5, 'color' : 'r', 'linestyle' : ':' },
'x_axis' : {'linewidth' : 0.5, 'color' : 'k', 'linestyle' : '--' },
'eig_min' : {'linewidth' : 0.5, 'color' : 'b', 'linestyle' : '--' },
'eig_mid' : {'linewidth' : 0.5, 'color' : 'b', 'linestyle' : '-.' },
'eig_max' : {'linewidth' : 0.5, 'color' : 'b', 'linestyle' : '-' },
'strong_gap' : {'linewidth' : 0, 'facecolor' : (1, 1, 0.5) },
'weak_gap' : {'linewidth' : 0, 'facecolor' : (1, 1, 1) },
'propagation' : {'linewidth' : 0, 'facecolor' : (0.5, 1, 0.5) },
'params' : {'axes.labelsize': 'large',
'text.fontsize': 'large',
'legend.fontsize': 'large',
'xtick.labelsize': 'large',
'ytick.labelsize': 'large',
'text.usetex': False},
}
plot_rsc = try_set_defaults( options, 'plot_rsc', plot_rsc )
eigenmomentum = get( 'eigenmomentum', None,
'missing "eigenmomentum" in options!' )
region_to_material = get( 'region_to_material', None,
'missing "region_to_material" in options!' )
tensor_names = get( 'tensor_names', None,
'missing "tensor_names" in options!' )
volume = get( 'volume', None, 'missing "volume" in options!' )
if eig_problem == 'simple_liquid':
liquid_region = get('liquid_region', None,
'missing "liquid_region" in options!')
else:
liquid_region = None
return Struct( **locals() )
process_options = staticmethod( process_options )
def process_options_pv( options ):
"""Application options setup for phase velocity computation. Sets
default values for missing non-compulsory options."""
get = options.get_default_attr
clear_cache = get( 'clear_cache', {} )
eigensolver = get( 'eigensolver', 'eig.sgscipy' )
incident_wave_dir = get( 'incident_wave_dir', None )
dispersion = get( 'dispersion', 'simple' )
dispersion_conf = get( 'dispersion_conf', None )
homogeneous = get( 'homogeneous', False )
fig_suffix = get( 'fig_suffix', '.pdf' )
region_to_material = get( 'region_to_material', None,
'missing "region_to_material" in options!' )
tensor_names = get( 'tensor_names', None,
'missing "tensor_names" in options!' )
volume = get( 'volume', None, 'missing "volume" in options!' )
return Struct( **locals() )
process_options_pv = staticmethod( process_options_pv )
def __init__( self, conf, options, output_prefix, **kwargs ):
SimpleApp.__init__( self, conf, options, output_prefix,
init_equations = False )
self.setup_options()
self.cached_coefs = None
self.cached_iw_dir = None
self.cached_christoffel = None
self.cached_evp = None
output_dir = self.problem.output_dir
shutil.copyfile( conf._filename,
op.join( output_dir, op.basename( conf._filename ) ) )
def setup_options( self ):
SimpleApp.setup_options( self )
if self.options.phase_velocity:
process_options = AcousticBandGapsApp.process_options_pv
else:
process_options = AcousticBandGapsApp.process_options
self.app_options += process_options( self.conf.options )
def call( self ):
"""In parametric runs, cached data (homogenized coefficients,
Christoffel acoustic tensor and eigenvalue problem solution) are
        cleared according to 'clear_cache' application options.
Example:
clear_cache = {'cached_christoffel' : True, 'cached_evp' : True}
"""
options = self.options
for key, val in self.app_options.clear_cache.iteritems():
if val and key.startswith('cached_'):
setattr(self, key, None)
if options.phase_velocity:
# No band gaps in this case.
return self.compute_phase_velocity()
evp = self.solve_eigen_problem()
self.fix_eig_range( evp.eigs.shape[0] )
if options.detect_band_gaps:
bg = detect_band_gaps( self.problem, evp.kind,
evp.eigs_rescaled, evp.eig_vectors,
self.app_options, self.conf.funmod )
if options.plot:
plot_range, teigs = transform_plot_data( bg.logs.eigs,
bg.opts.plot_transform,
self.conf.funmod )
plot_rsc = bg.opts.plot_rsc
plot_opts = bg.opts.plot_options
plot_labels = bg.opts.plot_labels
plt.rcParams.update( plot_rsc['params'] )
fig = plot_gaps( 1, plot_rsc, bg.gaps, bg.kinds,
bg.freq_range_margins, plot_range,
clear = True )
fig = plot_logs( 1, plot_rsc, plot_labels, bg.logs.freqs, teigs,
bg.valid[bg.eig_range],
bg.freq_range_initial,
plot_range, False,
show_legend = plot_opts['legend'],
new_axes = True )
fig_name = bg.opts.fig_name
if fig_name is not None:
fig.savefig( fig_name )
if plot_opts['show']:
plt.show()
elif options.analyze_dispersion:
christoffel, iw_dir = self.compute_cat(ret_iw_dir=True)
bg = detect_band_gaps( self.problem, evp.kind,
evp.eigs_rescaled, evp.eig_vectors,
self.app_options, self.conf.funmod,
christoffel = christoffel )
output( 'computing polarization angles...' )
pas = compute_polarization_angles( iw_dir, bg.logs.eig_vectors )
output( '...done' )
bg.polarization_angles = pas
output( 'computing phase velocity...' )
bg.phase_velocity = self.compute_phase_velocity()
output( '...done' )
if options.plot:
plot_rsc = bg.opts.plot_rsc
plot_opts = bg.opts.plot_options
plt.rcParams.update( plot_rsc['params'] )
aux = transform_plot_data( pas,
bg.opts.plot_transform_angle,
self.conf.funmod )
plot_range, pas = aux
plot_labels = bg.opts.plot_labels_angle
fig = plot_gaps( 1, plot_rsc, bg.gaps, bg.kinds,
bg.freq_range_margins, plot_range,
clear = True )
fig = plot_logs( 1, plot_rsc, plot_labels, bg.logs.freqs, pas,
bg.valid[bg.eig_range],
bg.freq_range_initial,
plot_range, False,
show_legend = plot_opts['legend'],
new_axes = True )
fig_name = bg.opts.fig_name_angle
if fig_name is not None:
fig.savefig( fig_name )
aux = transform_plot_data( bg.logs.eigs,
bg.opts.plot_transform_wave,
self.conf.funmod )
plot_range, teigs = aux
plot_labels = bg.opts.plot_labels_wave
fig = plot_gaps( 2, plot_rsc, bg.gaps, bg.kinds,
bg.freq_range_margins, plot_range,
clear = True )
fig = plot_logs( 2, plot_rsc, plot_labels, bg.logs.freqs, teigs,
bg.valid[bg.eig_range],
bg.freq_range_initial,
plot_range, False,
show_legend = plot_opts['legend'],
new_axes = True )
fig_name = bg.opts.fig_name_wave
if fig_name is not None:
fig.savefig( fig_name )
if plot_opts['show']:
plt.show()
else:
bg = None
return evp, bg
def fix_eig_range( self, n_eigs ):
eig_range = get_default( self.app_options.eig_range, (0, n_eigs) )
if eig_range[-1] < 0:
eig_range[-1] += n_eigs + 1
assert_( eig_range[0] < (eig_range[1] - 1) )
assert_( eig_range[1] <= n_eigs )
self.app_options.eig_range = eig_range
def solve_eigen_problem( self, ofn_trunk = None, post_process_hook = None ):
if self.cached_evp is not None:
return self.cached_evp
problem = self.problem
ofn_trunk = get_default( ofn_trunk, problem.ofn_trunk,
'output file name trunk missing!' )
post_process_hook = get_default( post_process_hook,
self.post_process_hook )
conf = self.conf
eig_problem = self.app_options.eig_problem
if eig_problem in ['simple', 'simple_liquid']:
problem.set_equations( conf.equations )
problem.time_update()
mtx_a = problem.evaluate(conf.equations['lhs'], mode='weak',
auto_init=True, dw_mode='matrix')
mtx_m = problem.evaluate(conf.equations['rhs'], mode='weak',
dw_mode='matrix')
elif eig_problem == 'schur':
# A = K + B^T D^{-1} B.
mtx = assemble_by_blocks( conf.equations, self.problem,
ebcs = conf.ebcs,
epbcs = conf.epbcs )
problem.set_equations( conf.equations )
problem.time_update()
ls = Solver.any_from_conf( problem.ls_conf,
presolve = True, mtx = mtx['D'] )
mtx_b, mtx_m = mtx['B'], mtx['M']
mtx_dib = nm.empty( mtx_b.shape, dtype = mtx_b.dtype )
for ic in xrange( mtx_b.shape[1] ):
mtx_dib[:,ic] = ls( mtx_b[:,ic].toarray().squeeze() )
mtx_a = mtx['K'] + mtx_b.T * mtx_dib
else:
raise NotImplementedError
## from sfepy.base.plotutils import spy, plt
## spy( mtx_b, eps = 1e-12 )
## plt.show()
## mtx_a.save( 'a.txt', format='%d %d %.12f\n' )
## mtx_b.save( 'b.txt', format='%d %d %.12f\n' )
## pause()
output( 'computing resonance frequencies...' )
tt = [0]
if isinstance( mtx_a, sc.sparse.spmatrix ):
mtx_a = mtx_a.toarray()
if isinstance( mtx_m, sc.sparse.spmatrix ):
mtx_m = mtx_m.toarray()
eigs, mtx_s_phi = eig(mtx_a, mtx_m, return_time=tt,
method=self.app_options.eigensolver)
eigs[eigs<0.0] = 0.0
output( '...done in %.2f s' % tt[0] )
output( 'original eigenfrequencies:' )
output( eigs )
opts = self.app_options
epsilon2 = opts.scale_epsilon * opts.scale_epsilon
eigs_rescaled = (opts.elasticity_contrast / epsilon2) * eigs
output( 'rescaled eigenfrequencies:' )
output( eigs_rescaled )
output( 'number of eigenfrequencies: %d' % eigs.shape[0] )
try:
assert_( nm.isfinite( eigs ).all() )
except ValueError:
debug()
# B-orthogonality check.
## print nm.dot( mtx_s_phi[:,5], nm.dot( mtx_m, mtx_s_phi[:,5] ) )
## print nm.dot( mtx_s_phi[:,5], nm.dot( mtx_m, mtx_s_phi[:,0] ) )
## debug()
n_eigs = eigs.shape[0]
variables = problem.get_variables()
mtx_phi = nm.empty( (variables.di.ptr[-1], mtx_s_phi.shape[1]),
dtype = nm.float64 )
make_full = variables.make_full_vec
if eig_problem in ['simple', 'simple_liquid']:
for ii in xrange( n_eigs ):
mtx_phi[:,ii] = make_full( mtx_s_phi[:,ii] )
eig_vectors = mtx_phi
elif eig_problem == 'schur':
# Update also eliminated variables.
schur = self.app_options.schur
primary_var = schur['primary_var']
eliminated_var = schur['eliminated_var']
mtx_s_phi_schur = - sc.dot( mtx_dib, mtx_s_phi )
aux = nm.empty( (variables.adi.ptr[-1],),
dtype = nm.float64 )
set = variables.set_state_part
for ii in xrange( n_eigs ):
set( aux, mtx_s_phi[:,ii], primary_var, stripped = True )
set( aux, mtx_s_phi_schur[:,ii], eliminated_var,
stripped = True )
mtx_phi[:,ii] = make_full( aux )
indx = variables.get_indx( primary_var )
eig_vectors = mtx_phi[indx,:]
save = self.app_options.save
out = {}
for ii in xrange( n_eigs ):
if (ii >= save[0]) and (ii < (n_eigs - save[1])): continue
aux = problem.state_to_output( mtx_phi[:,ii] )
for name, val in aux.iteritems():
out[name+'%03d' % ii] = val
if post_process_hook is not None:
out = post_process_hook( out, problem, mtx_phi )
problem.domain.mesh.write( ofn_trunk + '.vtk', io = 'auto', out = out )
fd = open( ofn_trunk + '_eigs.txt', 'w' )
eigs.tofile( fd, ' ' )
fd.close()
evp = Struct( kind = eig_problem,
eigs = eigs, eigs_rescaled = eigs_rescaled,
eig_vectors = eig_vectors )
self.cached_evp = evp
return evp
def eval_homogenized_coefs( self ):
if self.cached_coefs is not None:
return self.cached_coefs
opts = self.app_options
if opts.homogeneous:
rtm = opts.region_to_material
mat_region = rtm.keys()[0]
mat_name = rtm[mat_region]
self.problem.update_materials()
mat = self.problem.materials[mat_name]
coefs = mat.get_data( mat_region, 0, opts.tensor_names )
else:
dc = opts.dispersion_conf
dconf = ProblemConf.from_dict( dc['input'], dc['module'] )
dconf.materials = self.conf.materials
dconf.fe = self.conf.fe
dconf.regions.update( self.conf.regions )
dconf.options['output_dir'] = self.problem.output_dir
volume = opts.volume(self.problem, 'Y')
problem = ProblemDefinition.from_conf(dconf, init_equations=False)
he = HomogenizationEngine( problem, self.options, volume = volume )
coefs = he()
## print coefs
## pause()
output.prefix = self.output_prefix
self.cached_coefs = coefs
return coefs
def compute_cat( self, ret_iw_dir=False ):
"""Compute the Christoffel acoustic tensor, given the incident wave
direction."""
opts = self.app_options
iw_dir = nm.array( opts.incident_wave_dir, dtype = nm.float64 )
dim = self.problem.get_dim()
assert_( dim == iw_dir.shape[0] )
iw_dir = iw_dir / nla.norm( iw_dir )
if self.cached_christoffel is not None:
christoffel = self.cached_christoffel
else:
coefs = self.eval_homogenized_coefs()
christoffel = compute_cat( coefs, iw_dir,
self.app_options.dispersion )
report_iw_cat( iw_dir, christoffel )
self.cached_christoffel = christoffel
if ret_iw_dir:
return christoffel, iw_dir
else:
return christoffel
def compute_phase_velocity( self ):
from sfepy.homogenization.phono import compute_density_volume_info
opts = self.app_options
dim = self.problem.domain.mesh.dim
christoffel = self.compute_cat()
self.problem.update_materials()
dv_info = compute_density_volume_info( self.problem, opts.volume,
opts.region_to_material )
output( 'average density:', dv_info.average_density )
eye = nm.eye( dim, dim, dtype = nm.float64 )
mtx_mass = eye * dv_info.average_density
meigs, mvecs = eig( mtx_mass, mtx_b = christoffel,
eigenvectors = True, method = opts.eigensolver )
phase_velocity = 1.0 / nm.sqrt( meigs )
return phase_velocity
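# Derivation note for compute_phase_velocity() above (assuming eig() solves the
# generalized problem M x = lambda * Gamma x, with M = rho * I): each eigenvalue
# is lambda_i = rho / gamma_i, where gamma_i is an eigenvalue of the Christoffel
# tensor Gamma, so 1 / sqrt(lambda_i) = sqrt(gamma_i / rho), the standard phase
# velocity of a plane wave in the homogenized medium.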
usage = """%prog [options] filename_in"""
help = {
'filename' :
'basename of output file(s) [default: <basename of input file>]',
'detect_band_gaps' :
'detect frequency band gaps',
'analyze_dispersion' :
'analyze dispersion properties (low frequency domain)',
'plot' :
'plot frequency band gaps, assumes -b',
'phase_velocity' :
    'compute phase velocity (frequency-independent mass only)'
}
def main():
parser = OptionParser(usage = usage, version = "%prog " + sfepy.__version__)
parser.add_option( "-o", "", metavar = 'filename',
action = "store", dest = "output_filename_trunk",
default = None, help = help['filename'] )
parser.add_option( "-b", "--band-gaps",
action = "store_true", dest = "detect_band_gaps",
default = False, help = help['detect_band_gaps'] )
parser.add_option( "-d", "--dispersion",
action = "store_true", dest = "analyze_dispersion",
default = False, help = help['analyze_dispersion'] )
parser.add_option( "-p", "--plot",
action = "store_true", dest = "plot",
default = False, help = help['plot'] )
parser.add_option( "--phase-velocity",
action = "store_true", dest = "phase_velocity",
default = False, help = help['phase_velocity'] )
options, args = parser.parse_args()
if options.plot:
if plt is None:
output( 'matplotlib.pyplot cannot be imported, ignoring option -p!' )
options.plot = False
elif options.analyze_dispersion == False:
options.detect_band_gaps = True
if (len( args ) == 1):
        filename_in = args[0]
else:
        parser.print_help()
return
required, other = get_standard_keywords()
required.remove( 'solver_[0-9]+|solvers' )
if options.phase_velocity:
required.remove( 'ebc_[0-9]+|ebcs' )
required.remove( 'equations' )
conf = ProblemConf.from_file( filename_in, required, other )
app = AcousticBandGapsApp( conf, options, 'eigen:' )
opts = conf.options
if hasattr( opts, 'parametric_hook' ): # Parametric study.
parametric_hook = getattr( conf, opts.parametric_hook )
app.parametrize( parametric_hook )
app()
if __name__ == '__main__':
## mtx_k = io.read_sparse_matrix_hdf5( '1todo/K.h5', output_format = 'csr' )
## print mtx_k.__repr__()
## mtx_m = io.read_sparse_matrix_hdf5( '1todo/M.h5', output_format = 'csr' )
## print mtx_m.__repr__()
## mtx_k.save( 'k.txt', format='%d %d %.12f\n' )
## mtx_m.save( 'm.txt', format='%d %d %.12f\n' )
## eigs, mtx_s_phi = eig( mtx_k.toarray(), mtx_m.toarray(),
## print_time = True )
## print eigs
## eigs, aux = eig( mtx_m.toarray(),
## print_time = True )
## print eigs
## pause()
main()
|
[
"sfepy.homogenization.phono.detect_band_gaps",
"sfepy.fem.evaluate.assemble_by_blocks",
"sfepy.base.conf.ProblemConf.from_file",
"sfepy.homogenization.engine.HomogenizationEngine",
"sfepy.homogenization.phono.plot_logs",
"sfepy.solvers.eig",
"sfepy.fem.ProblemDefinition.from_conf",
"sfepy.base.plotutils.plt.rcParams.update",
"sfepy.base.conf.get_standard_keywords",
"sfepy.applications.SimpleApp.setup_options",
"sfepy.applications.SimpleApp.__init__",
"sfepy.base.conf.ProblemConf.from_dict",
"sfepy.homogenization.phono.compute_density_volume_info",
"sfepy.homogenization.phono.plot_gaps",
"sfepy.homogenization.phono.transform_plot_data",
"sfepy.homogenization.phono.compute_polarization_angles",
"sfepy.base.plotutils.plt.show",
"sfepy.homogenization.phono.compute_cat",
"sfepy.solvers.Solver.any_from_conf"
] |
[((22108, 22171), 'optparse.OptionParser', 'OptionParser', ([], {'usage': 'usage', 'version': "('%prog ' + sfepy.__version__)"}), "(usage=usage, version='%prog ' + sfepy.__version__)\n", (22120, 22171), False, 'from optparse import OptionParser\n'), ((23535, 23558), 'sfepy.base.conf.get_standard_keywords', 'get_standard_keywords', ([], {}), '()\n', (23556, 23558), False, 'from sfepy.base.conf import ProblemConf, get_standard_keywords\n'), ((23732, 23783), 'sfepy.base.conf.ProblemConf.from_file', 'ProblemConf.from_file', (['filename_in', 'required', 'other'], {}), '(filename_in, required, other)\n', (23753, 23783), False, 'from sfepy.base.conf import ProblemConf, get_standard_keywords\n'), ((7263, 7339), 'sfepy.applications.SimpleApp.__init__', 'SimpleApp.__init__', (['self', 'conf', 'options', 'output_prefix'], {'init_equations': '(False)'}), '(self, conf, options, output_prefix, init_equations=False)\n', (7281, 7339), False, 'from sfepy.applications import SimpleApp\n'), ((7746, 7775), 'sfepy.applications.SimpleApp.setup_options', 'SimpleApp.setup_options', (['self'], {}), '(self)\n', (7769, 7775), False, 'from sfepy.applications import SimpleApp\n'), ((15828, 15898), 'sfepy.solvers.eig', 'eig', (['mtx_a', 'mtx_m'], {'return_time': 'tt', 'method': 'self.app_options.eigensolver'}), '(mtx_a, mtx_m, return_time=tt, method=self.app_options.eigensolver)\n', (15831, 15898), False, 'from sfepy.solvers import Solver, eig\n'), ((21138, 21217), 'sfepy.homogenization.phono.compute_density_volume_info', 'compute_density_volume_info', (['self.problem', 'opts.volume', 'opts.region_to_material'], {}), '(self.problem, opts.volume, opts.region_to_material)\n', (21165, 21217), False, 'from sfepy.homogenization.phono import compute_density_volume_info\n'), ((21456, 21532), 'sfepy.solvers.eig', 'eig', (['mtx_mass'], {'mtx_b': 'christoffel', 'eigenvectors': '(True)', 'method': 'opts.eigensolver'}), '(mtx_mass, mtx_b=christoffel, eigenvectors=True, method=opts.eigensolver)\n', (21459, 21532), False, 'from sfepy.solvers import Solver, eig\n'), ((8839, 8955), 'sfepy.homogenization.phono.detect_band_gaps', 'detect_band_gaps', (['self.problem', 'evp.kind', 'evp.eigs_rescaled', 'evp.eig_vectors', 'self.app_options', 'self.conf.funmod'], {}), '(self.problem, evp.kind, evp.eigs_rescaled, evp.eig_vectors,\n self.app_options, self.conf.funmod)\n', (8855, 8955), False, 'from sfepy.homogenization.phono import transform_plot_data, plot_logs, plot_gaps, detect_band_gaps, compute_cat, compute_polarization_angles\n'), ((19328, 19376), 'sfepy.base.conf.ProblemConf.from_dict', 'ProblemConf.from_dict', (["dc['input']", "dc['module']"], {}), "(dc['input'], dc['module'])\n", (19349, 19376), False, 'from sfepy.base.conf import ProblemConf, get_standard_keywords\n'), ((19661, 19717), 'sfepy.fem.ProblemDefinition.from_conf', 'ProblemDefinition.from_conf', (['dconf'], {'init_equations': '(False)'}), '(dconf, init_equations=False)\n', (19688, 19717), False, 'from sfepy.fem import ProblemDefinition\n'), ((19735, 19793), 'sfepy.homogenization.engine.HomogenizationEngine', 'HomogenizationEngine', (['problem', 'self.options'], {'volume': 'volume'}), '(problem, self.options, volume=volume)\n', (19755, 19793), False, 'from sfepy.homogenization.engine import HomogenizationEngine\n'), ((20540, 20595), 'sfepy.homogenization.phono.compute_cat', 'compute_cat', (['coefs', 'iw_dir', 'self.app_options.dispersion'], {}), '(coefs, iw_dir, self.app_options.dispersion)\n', (20551, 20595), False, 'from sfepy.homogenization.phono import transform_plot_data, plot_logs, plot_gaps, detect_band_gaps, compute_cat, compute_polarization_angles\n'), ((7672, 7699), 'os.path.basename', 'op.basename', (['conf._filename'], {}), '(conf._filename)\n', (7683, 7699), True, 'import os.path as op\n'), ((9090, 9165), 'sfepy.homogenization.phono.transform_plot_data', 'transform_plot_data', (['bg.logs.eigs', 'bg.opts.plot_transform', 'self.conf.funmod'], {}), '(bg.logs.eigs, bg.opts.plot_transform, self.conf.funmod)\n', (9109, 9165), False, 'from sfepy.homogenization.phono import transform_plot_data, plot_logs, plot_gaps, detect_band_gaps, compute_cat, compute_polarization_angles\n'), ((9461, 9500), 'sfepy.base.plotutils.plt.rcParams.update', 'plt.rcParams.update', (["plot_rsc['params']"], {}), "(plot_rsc['params'])\n", (9480, 9500), False, 'from sfepy.base.plotutils import plt\n'), ((9526, 9618), 'sfepy.homogenization.phono.plot_gaps', 'plot_gaps', (['(1)', 'plot_rsc', 'bg.gaps', 'bg.kinds', 'bg.freq_range_margins', 'plot_range'], {'clear': '(True)'}), '(1, plot_rsc, bg.gaps, bg.kinds, bg.freq_range_margins, plot_range,\n clear=True)\n', (9535, 9618), False, 'from sfepy.homogenization.phono import transform_plot_data, plot_logs, plot_gaps, detect_band_gaps, compute_cat, compute_polarization_angles\n'), ((9707, 9888), 'sfepy.homogenization.phono.plot_logs', 'plot_logs', (['(1)', 'plot_rsc', 'plot_labels', 'bg.logs.freqs', 'teigs', 'bg.valid[bg.eig_range]', 'bg.freq_range_initial', 'plot_range', '(False)'], {'show_legend': "plot_opts['legend']", 'new_axes': '(True)'}), "(1, plot_rsc, plot_labels, bg.logs.freqs, teigs, bg.valid[bg.\n eig_range], bg.freq_range_initial, plot_range, False, show_legend=\n plot_opts['legend'], new_axes=True)\n", (9716, 9888), False, 'from sfepy.homogenization.phono import transform_plot_data, plot_logs, plot_gaps, detect_band_gaps, compute_cat, compute_polarization_angles\n'), ((10377, 10518), 'sfepy.homogenization.phono.detect_band_gaps', 'detect_band_gaps', (['self.problem', 'evp.kind', 'evp.eigs_rescaled', 'evp.eig_vectors', 'self.app_options', 'self.conf.funmod'], {'christoffel': 'christoffel'}), '(self.problem, evp.kind, evp.eigs_rescaled, evp.eig_vectors,\n self.app_options, self.conf.funmod, christoffel=christoffel)\n', (10393, 10518), False, 'from sfepy.homogenization.phono import transform_plot_data, plot_logs, plot_gaps, detect_band_gaps, compute_cat, compute_polarization_angles\n'), ((10700, 10756), 'sfepy.homogenization.phono.compute_polarization_angles', 'compute_polarization_angles', (['iw_dir', 'bg.logs.eig_vectors'], {}), '(iw_dir, bg.logs.eig_vectors)\n', (10727, 10756), False, 'from sfepy.homogenization.phono import transform_plot_data, plot_logs, plot_gaps, detect_band_gaps, compute_cat, compute_polarization_angles\n'), ((14614, 14701), 'sfepy.fem.evaluate.assemble_by_blocks', 'assemble_by_blocks', (['conf.equations', 'self.problem'], {'ebcs': 'conf.ebcs', 'epbcs': 'conf.epbcs'}), '(conf.equations, self.problem, ebcs=conf.ebcs, epbcs=conf\n .epbcs)\n', (14632, 14701), False, 'from sfepy.fem.evaluate import assemble_by_blocks\n'), ((14883, 14949), 'sfepy.solvers.Solver.any_from_conf', 'Solver.any_from_conf', (['problem.ls_conf'], {'presolve': '(True)', 'mtx': "mtx['D']"}), "(problem.ls_conf, presolve=True, mtx=mtx['D'])\n", (14903, 14949), False, 'from sfepy.solvers import Solver, eig\n'), ((10238, 10248), 'sfepy.base.plotutils.plt.show', 'plt.show', ([], {}), '()\n', (10246, 10248), False, 'from sfepy.base.plotutils import plt\n'), ((11120, 11159), 'sfepy.base.plotutils.plt.rcParams.update', 'plt.rcParams.update', (["plot_rsc['params']"], {}), "(plot_rsc['params'])\n", (11139, 11159), False, 'from sfepy.base.plotutils import plt\n'), ((11185, 11257), 'sfepy.homogenization.phono.transform_plot_data', 'transform_plot_data', (['pas', 'bg.opts.plot_transform_angle', 'self.conf.funmod'], {}), '(pas, bg.opts.plot_transform_angle, self.conf.funmod)\n', (11204, 11257), False, 'from sfepy.homogenization.phono import transform_plot_data, plot_logs, plot_gaps, detect_band_gaps, compute_cat, compute_polarization_angles\n'), ((11481, 11573), 'sfepy.homogenization.phono.plot_gaps', 'plot_gaps', (['(1)', 'plot_rsc', 'bg.gaps', 'bg.kinds', 'bg.freq_range_margins', 'plot_range'], {'clear': '(True)'}), '(1, plot_rsc, bg.gaps, bg.kinds, bg.freq_range_margins, plot_range,\n clear=True)\n', (11490, 11573), False, 'from sfepy.homogenization.phono import transform_plot_data, plot_logs, plot_gaps, detect_band_gaps, compute_cat, compute_polarization_angles\n'), ((11662, 11841), 'sfepy.homogenization.phono.plot_logs', 'plot_logs', (['(1)', 'plot_rsc', 'plot_labels', 'bg.logs.freqs', 'pas', 'bg.valid[bg.eig_range]', 'bg.freq_range_initial', 'plot_range', '(False)'], {'show_legend': "plot_opts['legend']", 'new_axes': '(True)'}), "(1, plot_rsc, plot_labels, bg.logs.freqs, pas, bg.valid[bg.\n eig_range], bg.freq_range_initial, plot_range, False, show_legend=\n plot_opts['legend'], new_axes=True)\n", (11671, 11841), False, 'from sfepy.homogenization.phono import transform_plot_data, plot_logs, plot_gaps, detect_band_gaps, compute_cat, compute_polarization_angles\n'), ((12162, 12247), 'sfepy.homogenization.phono.transform_plot_data', 'transform_plot_data', (['bg.logs.eigs', 'bg.opts.plot_transform_wave', 'self.conf.funmod'], {}), '(bg.logs.eigs, bg.opts.plot_transform_wave, self.conf.funmod\n )\n', (12181, 12247), False, 'from sfepy.homogenization.phono import transform_plot_data, plot_logs, plot_gaps, detect_band_gaps, compute_cat, compute_polarization_angles\n'), ((12451, 12543), 'sfepy.homogenization.phono.plot_gaps', 'plot_gaps', (['(2)', 'plot_rsc', 'bg.gaps', 'bg.kinds', 'bg.freq_range_margins', 'plot_range'], {'clear': '(True)'}), '(2, plot_rsc, bg.gaps, bg.kinds, bg.freq_range_margins, plot_range,\n clear=True)\n', (12460, 12543), False, 'from sfepy.homogenization.phono import transform_plot_data, plot_logs, plot_gaps, detect_band_gaps, compute_cat, compute_polarization_angles\n'), ((12632, 12813), 'sfepy.homogenization.phono.plot_logs', 'plot_logs', (['(2)', 'plot_rsc', 'plot_labels', 'bg.logs.freqs', 'teigs', 'bg.valid[bg.eig_range]', 'bg.freq_range_initial', 'plot_range', '(False)'], {'show_legend': "plot_opts['legend']", 'new_axes': '(True)'}), "(2, plot_rsc, plot_labels, bg.logs.freqs, teigs, bg.valid[bg.\n eig_range], bg.freq_range_initial, plot_range, False, show_legend=\n plot_opts['legend'], new_axes=True)\n", (12641, 12813), False, 'from sfepy.homogenization.phono import transform_plot_data, plot_logs, plot_gaps, detect_band_gaps, compute_cat, compute_polarization_angles\n'), ((13168, 13178), 'sfepy.base.plotutils.plt.show', 'plt.show', ([], {}), '()\n', (13176, 13178), False, 'from sfepy.base.plotutils import plt\n')]
|
import numpy as np
import megengine
import megengine.module as M
import megengine.functional as F
from edit.models.common import ShuffleV2Block, CoordAtt
import math
from . import default_init_weights
class MobileNeXt(M.Module):
def __init__(self, in_channels, out_channels, kernel_size=3):
"""
        By default, coordinate attention is applied after the first depthwise conv.
https://github.com/Andrew-Qibin/CoordAttention/blob/main/coordatt.py
"""
super(MobileNeXt, self).__init__()
self.dconv1 = M.ConvRelu2d(in_channels, out_channels, kernel_size=kernel_size, stride=1, padding=(kernel_size//2), groups=in_channels)
self.CA = CoordAtt(inp = out_channels, oup=out_channels)
self.conv1 = M.Conv2d(out_channels, out_channels, kernel_size=1, stride=1, padding=0)
self.conv2 = M.ConvRelu2d(out_channels, out_channels, kernel_size=1, stride=1, padding=0)
self.dconv2 = M.Conv2d(out_channels, out_channels, kernel_size=kernel_size, stride=1, padding=(kernel_size//2), groups=out_channels)
self.init_weights()
def init_weights(self):
for m in [self.conv1, self.conv2, self.dconv1, self.dconv2]:
default_init_weights(m, scale=0.1)
def forward(self, x):
identity = x
out = self.dconv2(self.conv2(self.conv1(self.CA(self.dconv1(x)))))
return identity + out
class ResBlock(M.Module):
def __init__(self, in_channels, out_channels, kernel_size=3):
super(ResBlock, self).__init__()
self.conv1 = M.ConvRelu2d(in_channels, out_channels, kernel_size=kernel_size, stride=1, padding=(kernel_size//2))
self.conv2 = M.Conv2d(out_channels, out_channels, kernel_size=kernel_size, stride=1, padding=(kernel_size//2))
self.init_weights()
def init_weights(self):
for m in [self.conv1, self.conv2]:
default_init_weights(m, scale=0.1)
def forward(self, x):
identity = x
out = self.conv2(self.conv1(x))
return identity + out
class ResBlocks(M.Module):
def __init__(self, channel_num, resblock_num, kernel_size=3, blocktype="resblock"):
super(ResBlocks, self).__init__()
assert blocktype in ("resblock", "shuffleblock", "MobileNeXt")
if blocktype == "resblock":
self.model = M.Sequential(
self.make_resblock_layer(channel_num, resblock_num, kernel_size),
)
elif blocktype == "shuffleblock":
self.model = M.Sequential(
self.make_shuffleblock_layer(channel_num, resblock_num, kernel_size),
)
elif blocktype == "MobileNeXt":
self.model = M.Sequential(
self.make_MobileNeXt_layer(channel_num, resblock_num, kernel_size)
)
else:
raise NotImplementedError("")
def make_MobileNeXt_layer(self, ch_out, num_blocks, kernel_size):
layers = []
for _ in range(num_blocks):
layers.append(MobileNeXt(ch_out, ch_out, kernel_size))
return M.Sequential(*layers)
def make_resblock_layer(self, ch_out, num_blocks, kernel_size):
layers = []
for _ in range(num_blocks):
layers.append(ResBlock(ch_out, ch_out, kernel_size))
return M.Sequential(*layers)
def make_shuffleblock_layer(self, ch_out, num_blocks, kernel_size):
layers = []
for _ in range(num_blocks):
layers.append(ShuffleV2Block(inp = ch_out//2, oup=ch_out, mid_channels=ch_out//2, ksize=kernel_size, stride=1))
return M.Sequential(*layers)
def forward(self, x):
return self.model(x)
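# A minimal shape sanity check (assumption: megengine and the
# edit.models.common dependencies above are importable; the channel count 16
# is arbitrary). Both residual block types must keep (N, C, H, W) unchanged,
# otherwise the `identity + out` additions above would not be valid.
if __name__ == "__main__":
    x = megengine.tensor(np.random.randn(2, 16, 8, 8).astype("float32"))
    print(ResBlock(16, 16)(x).shape)    # (2, 16, 8, 8)
    print(MobileNeXt(16, 16)(x).shape)  # (2, 16, 8, 8)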
|
[
"megengine.module.ConvRelu2d",
"megengine.module.Sequential",
"megengine.module.Conv2d"
] |
[((515, 639), 'megengine.module.ConvRelu2d', 'M.ConvRelu2d', (['in_channels', 'out_channels'], {'kernel_size': 'kernel_size', 'stride': '(1)', 'padding': '(kernel_size // 2)', 'groups': 'in_channels'}), '(in_channels, out_channels, kernel_size=kernel_size, stride=1,\n padding=kernel_size // 2, groups=in_channels)\n', (527, 639), True, 'import megengine.module as M\n'), ((654, 698), 'edit.models.common.CoordAtt', 'CoordAtt', ([], {'inp': 'out_channels', 'oup': 'out_channels'}), '(inp=out_channels, oup=out_channels)\n', (662, 698), False, 'from edit.models.common import ShuffleV2Block, CoordAtt\n'), ((722, 794), 'megengine.module.Conv2d', 'M.Conv2d', (['out_channels', 'out_channels'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)'}), '(out_channels, out_channels, kernel_size=1, stride=1, padding=0)\n', (730, 794), True, 'import megengine.module as M\n'), ((816, 892), 'megengine.module.ConvRelu2d', 'M.ConvRelu2d', (['out_channels', 'out_channels'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)'}), '(out_channels, out_channels, kernel_size=1, stride=1, padding=0)\n', (828, 892), True, 'import megengine.module as M\n'), ((915, 1037), 'megengine.module.Conv2d', 'M.Conv2d', (['out_channels', 'out_channels'], {'kernel_size': 'kernel_size', 'stride': '(1)', 'padding': '(kernel_size // 2)', 'groups': 'out_channels'}), '(out_channels, out_channels, kernel_size=kernel_size, stride=1,\n padding=kernel_size // 2, groups=out_channels)\n', (923, 1037), True, 'import megengine.module as M\n'), ((1515, 1619), 'megengine.module.ConvRelu2d', 'M.ConvRelu2d', (['in_channels', 'out_channels'], {'kernel_size': 'kernel_size', 'stride': '(1)', 'padding': '(kernel_size // 2)'}), '(in_channels, out_channels, kernel_size=kernel_size, stride=1,\n padding=kernel_size // 2)\n', (1527, 1619), True, 'import megengine.module as M\n'), ((1637, 1738), 'megengine.module.Conv2d', 'M.Conv2d', (['out_channels', 'out_channels'], {'kernel_size': 'kernel_size', 'stride': '(1)', 'padding': '(kernel_size // 2)'}), '(out_channels, out_channels, kernel_size=kernel_size, stride=1,\n padding=kernel_size // 2)\n', (1645, 1738), True, 'import megengine.module as M\n'), ((3022, 3043), 'megengine.module.Sequential', 'M.Sequential', (['*layers'], {}), '(*layers)\n', (3034, 3043), True, 'import megengine.module as M\n'), ((3249, 3270), 'megengine.module.Sequential', 'M.Sequential', (['*layers'], {}), '(*layers)\n', (3261, 3270), True, 'import megengine.module as M\n'), ((3539, 3560), 'megengine.module.Sequential', 'M.Sequential', (['*layers'], {}), '(*layers)\n', (3551, 3560), True, 'import megengine.module as M\n'), ((3426, 3529), 'edit.models.common.ShuffleV2Block', 'ShuffleV2Block', ([], {'inp': '(ch_out // 2)', 'oup': 'ch_out', 'mid_channels': '(ch_out // 2)', 'ksize': 'kernel_size', 'stride': '(1)'}), '(inp=ch_out // 2, oup=ch_out, mid_channels=ch_out // 2, ksize\n =kernel_size, stride=1)\n', (3440, 3529), False, 'from edit.models.common import ShuffleV2Block, CoordAtt\n')]
|
import typer
import uvicorn
from sqlmodel import Session, select
from .app import app
from .config import settings
from .db import create_db_and_tables, engine
from .models.content import Content
from .security import User
cli = typer.Typer(name="pythontasks API")
@cli.command()
def run(
port: int = settings.server.port,
host: str = settings.server.host,
log_level: str = settings.server.log_level,
reload: bool = settings.server.reload,
): # pragma: no cover
"""Run the API server."""
uvicorn.run(
"pythontasks.app:app",
host=host,
port=port,
log_level=log_level,
reload=reload,
)
@cli.command()
def create_user(username: str, password: str, superuser: bool = False):
"""Create user"""
create_db_and_tables(engine)
with Session(engine) as session:
user = User(username=username, password=password, superuser=superuser)
session.add(user)
session.commit()
session.refresh(user)
typer.echo(f"created {username} user")
return user
@cli.command()
def shell(): # pragma: no cover
"""Opens an interactive shell with objects auto imported"""
_vars = {
"app": app,
"settings": settings,
"User": User,
"engine": engine,
"cli": cli,
"create_user": create_user,
"select": select,
"session": Session(engine),
"Content": Content,
}
typer.echo(f"Auto imports: {list(_vars.keys())}")
try:
from IPython import start_ipython
start_ipython(argv=[], user_ns=_vars)
except ImportError:
import code
code.InteractiveConsole(_vars).interact()
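# A usage sketch (assuming the package exposes ``cli`` as a console-script
# entry point named ``pythontasks``; Typer renders ``create_user`` as the
# ``create-user`` subcommand):
#
#     pythontasks run --port 8000 --host 0.0.0.0
#     pythontasks create-user admin s3cret --superuser
#     pythontasks shell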
|
[
"sqlmodel.Session"
] |
[((231, 266), 'typer.Typer', 'typer.Typer', ([], {'name': '"""pythontasks API"""'}), "(name='pythontasks API')\n", (242, 266), False, 'import typer\n'), ((517, 614), 'uvicorn.run', 'uvicorn.run', (['"""pythontasks.app:app"""'], {'host': 'host', 'port': 'port', 'log_level': 'log_level', 'reload': 'reload'}), "('pythontasks.app:app', host=host, port=port, log_level=\n log_level, reload=reload)\n", (528, 614), False, 'import uvicorn\n'), ((810, 825), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (817, 825), False, 'from sqlmodel import Session, select\n'), ((1006, 1044), 'typer.echo', 'typer.echo', (['f"""created {username} user"""'], {}), "(f'created {username} user')\n", (1016, 1044), False, 'import typer\n'), ((1392, 1407), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (1399, 1407), False, 'from sqlmodel import Session, select\n'), ((1557, 1594), 'IPython.start_ipython', 'start_ipython', ([], {'argv': '[]', 'user_ns': '_vars'}), '(argv=[], user_ns=_vars)\n', (1570, 1594), False, 'from IPython import start_ipython\n'), ((1648, 1678), 'code.InteractiveConsole', 'code.InteractiveConsole', (['_vars'], {}), '(_vars)\n', (1671, 1678), False, 'import code\n')]
|
"""
Friction-slip model formulated as the implicit complementarity problem.
To integrate over a (dual) mesh, one needs:
* coordinates of element vertices
* element connectivity
* local base for each element
* constant in each sub-triangle of the dual mesh
Data for each dual element:
* connectivity of its sub-triangles
* base directions t_1, t_2
Normal stresses:
* Assemble the residual and apply the LCBC operator described below.
Solution in \hat{V}_h^c:
* construct a restriction operator via LCBC just like in the no-penetration case
* use the substitution:
u_1 = n_1 * w
u_2 = n_2 * w
u_3 = n_3 * w
The new DOF is `w`.
* for the record, no-penetration does:
w_1 = - (1 / n_1) * (u_2 * n_2 + u_3 * n_3)
w_2 = u_2
w_3 = u_3
"""
from sfepy.base.base import *
from sfepy.base.compat import unique
import sfepy.linalg as la
from sfepy.fem import Mesh, Domain, Field, Variables
from sfepy.fem.mappings import VolumeMapping, SurfaceMapping
from sfepy.fem.fe_surface import FESurface
from sfepy.fem.utils import compute_nodal_normals
def edge_data_to_output(coors, conn, e_sort, data):
out = nm.zeros_like(coors)
out[conn[e_sort,0]] = data
return Struct(name='output_data',
mode='vertex', data=out,
dofs=None)
class DualMesh(Struct):
"""Dual mesh corresponding to a (surface) region."""
def __init__(self, region):
"""
Assume a single GeometryElement type in all groups, linear
approximation.
Works for one group only for the moment.
"""
domain = region.domain
self.dim = domain.shape.dim
self.region = copy(region)
self.region.setup_face_indices()
self.mesh_coors = domain.mesh.coors
# add_to_regions=True due to Field implementation shortcomings.
omega = domain.create_region('Omega', 'all', add_to_regions=True)
self.field = Field('displacements', nm.float64, (3,), omega, 1)
        self.gel = list(domain.geom_els.values())[0]  # py3: dict views are not indexable
self.sgel = self.gel.surface_facet
face_key = 's%d' % self.sgel.n_vertex
# Coordinate interpolation to face centres.
self.ps = self.gel.interp.poly_spaces[face_key]
centre = self.ps.node_coors.sum(axis=0) / self.ps.n_nod
self.bf = self.ps.eval_base(centre[None,:])
self.surfaces = surfaces = {}
self.dual_surfaces = dual_surfaces = {}
for ig, conn in enumerate(domain.mesh.conns):
surface = FESurface(None, self.region, self.gel.faces, conn, ig)
surfaces[ig] = surface
dual_surface = self.describe_dual_surface(surface)
dual_surfaces[ig] = dual_surface
def describe_dual_surface(self, surface):
n_fa, n_edge = surface.n_fa, self.sgel.n_edge
mesh_coors = self.mesh_coors
# Face centres.
fcoors = mesh_coors[surface.econn]
centre_coors = nm.dot(self.bf.squeeze(), fcoors)
surface_coors = mesh_coors[surface.nodes]
dual_coors = nm.r_[surface_coors, centre_coors]
coor_offset = surface.nodes.shape[0]
# Normals in primary mesh nodes.
nodal_normals = compute_nodal_normals(surface.nodes, self.region,
self.field)
ee = surface.leconn[:,self.sgel.edges].copy()
edges_per_face = ee.copy()
sh = edges_per_face.shape
ee.shape = edges_per_face.shape = (sh[0] * sh[1], sh[2])
edges_per_face.sort(axis=1)
eo = nm.empty((sh[0] * sh[1],), dtype=nm.object)
eo[:] = [tuple(ii) for ii in edges_per_face]
ueo, e_sort, e_id = unique(eo, return_index=True, return_inverse=True)
ueo = edges_per_face[e_sort]
# edge centre, edge point 1, face centre, edge point 2
conn = nm.empty((n_edge * n_fa, 4), dtype=nm.int32)
conn[:,0] = e_id
conn[:,1] = ee[:,0]
conn[:,2] = nm.repeat(nm.arange(n_fa, dtype=nm.int32), n_edge) \
+ coor_offset
conn[:,3] = ee[:,1]
# face centre, edge point 2, edge point 1
tri_conn = nm.ascontiguousarray(conn[:,[2,1,3]])
# Ensure orientation - outward normal.
cc = dual_coors[tri_conn]
v1 = cc[:,1] - cc[:,0]
v2 = cc[:,2] - cc[:,0]
normals = nm.cross(v1, v2)
nn = nodal_normals[surface.leconn].sum(axis=1).repeat(n_edge, 0)
centre_normals = (1.0 / surface.n_fp) * nn
centre_normals /= la.norm_l2_along_axis(centre_normals)[:,None]
dot = nm.sum(normals * centre_normals, axis=1)
assert_((dot > 0.0).all())
# Prepare mapping from reference triangle e_R to a
# triangle within reference face e_D.
gel = self.gel.surface_facet
ref_coors = gel.coors
ref_centre = nm.dot(self.bf.squeeze(), ref_coors)
cc = nm.r_[ref_coors, ref_centre[None,:]]
rconn = nm.empty((n_edge, 3), dtype=nm.int32)
rconn[:,0] = gel.n_vertex
rconn[:,1] = gel.edges[:,0]
rconn[:,2] = gel.edges[:,1]
map_er_ed = VolumeMapping(cc, rconn, gel=gel)
# Prepare mapping from reference triangle e_R to a
# physical triangle e.
map_er_e = SurfaceMapping(dual_coors, tri_conn, gel=gel)
# Compute triangle basis (edge) vectors.
nn = surface.nodes[ueo]
edge_coors = mesh_coors[nn]
edge_centre_coors = 0.5 * edge_coors.sum(axis=1)
edge_normals = 0.5 * nodal_normals[ueo].sum(axis=1)
edge_normals /= la.norm_l2_along_axis(edge_normals)[:,None]
nn = surface.nodes[ueo]
edge_dirs = edge_coors[:,1] - edge_coors[:,0]
edge_dirs /= la.norm_l2_along_axis(edge_dirs)[:,None]
edge_ortho = nm.cross(edge_normals, edge_dirs)
edge_ortho /= la.norm_l2_along_axis(edge_ortho)[:,None]
# Primary face - dual sub-faces map.
# i-th row: indices to conn corresponding to sub-faces of i-th face.
face_map = nm.arange(n_fa * n_edge, dtype=nm.int32)
face_map.shape = (n_fa, n_edge)
# The actual connectivity for assembling (unique nodes per master
# faces).
asm_conn = e_id[face_map]
n_nod = ueo.shape[0] # One node per unique edge.
n_components = self.dim - 1
dual_surface = Struct(name = 'dual_surface_description',
dim = self.dim,
n_dual_fa = conn.shape[0],
n_dual_fp = self.dim,
n_fa = n_fa,
n_edge = n_edge,
n_nod = n_nod,
n_components = n_components,
n_dof = n_nod * n_components,
dual_coors = dual_coors,
coor_offset = coor_offset,
e_sort = e_sort,
conn = conn,
tri_conn = tri_conn,
map_er_e = map_er_e,
map_er_ed = map_er_ed,
face_map = face_map,
asm_conn = asm_conn,
nodal_normals = nodal_normals,
edge_centre_coors = edge_centre_coors,
edge_normals = edge_normals,
edge_dirs = edge_dirs,
edge_ortho = edge_ortho)
return dual_surface
def save(self, filename):
coors = []
conns = []
mat_ids = []
offset = 0
        for ig, dual_surface in self.dual_surfaces.items():
cc = dual_surface.dual_coors
coors.append(cc)
conn = dual_surface.conn[:,1:].copy() + offset
conns.append(conn)
mat_id = nm.empty((conn.shape[0],), dtype=nm.int32)
mat_id[:] = ig
mat_ids.append(mat_id)
offset += cc.shape[0]
coors = nm.concatenate(coors, axis=0)
dual_mesh = Mesh.from_data('dual_mesh', coors, None, conns,
mat_ids, ['2_3'] * len(conns))
dual_mesh.write(filename, io='auto')
def save_axes(self, filename):
coors = []
conns = []
mat_ids = []
offset = 0
        for ig, dual_surface in self.dual_surfaces.items():
cc = nm.r_[dual_surface.edge_centre_coors,
dual_surface.dual_coors]
coors.append(cc)
conn = dual_surface.conn.copy() + offset
conn[:,1:] += dual_surface.edge_centre_coors.shape[0]
conns.append(conn)
mat_id = nm.empty((conn.shape[0],), dtype=nm.int32)
mat_id[:] = ig
mat_ids.append(mat_id)
offset += cc.shape[0]
coors = nm.concatenate(coors, axis=0)
out = {}
        for ig, dual_surface in self.dual_surfaces.items():
eto = edge_data_to_output
out['en_%d' % ig] = eto(coors, conns[ig], dual_surface.e_sort,
dual_surface.edge_normals)
out['ed_%d' % ig] = eto(coors, conns[ig], dual_surface.e_sort,
dual_surface.edge_dirs)
out['eo_%d' % ig] = eto(coors, conns[ig], dual_surface.e_sort,
dual_surface.edge_ortho)
dual_mesh = Mesh.from_data('dual_mesh_vectors', coors, None, conns,
mat_ids, ['2_4'] * len(conns))
dual_mesh.write(filename, io='auto', out=out)
|
[
"sfepy.fem.fe_surface.FESurface",
"sfepy.linalg.norm_l2_along_axis",
"sfepy.fem.utils.compute_nodal_normals",
"sfepy.base.compat.unique",
"sfepy.fem.Field",
"sfepy.fem.mappings.SurfaceMapping",
"sfepy.fem.mappings.VolumeMapping"
] |
[((1918, 1968), 'sfepy.fem.Field', 'Field', (['"""displacements"""', 'nm.float64', '(3,)', 'omega', '(1)'], {}), "('displacements', nm.float64, (3,), omega, 1)\n", (1923, 1968), False, 'from sfepy.fem import Mesh, Domain, Field, Variables\n'), ((3178, 3239), 'sfepy.fem.utils.compute_nodal_normals', 'compute_nodal_normals', (['surface.nodes', 'self.region', 'self.field'], {}), '(surface.nodes, self.region, self.field)\n', (3199, 3239), False, 'from sfepy.fem.utils import compute_nodal_normals\n'), ((3651, 3701), 'sfepy.base.compat.unique', 'unique', (['eo'], {'return_index': '(True)', 'return_inverse': '(True)'}), '(eo, return_index=True, return_inverse=True)\n', (3657, 3701), False, 'from sfepy.base.compat import unique\n'), ((5088, 5121), 'sfepy.fem.mappings.VolumeMapping', 'VolumeMapping', (['cc', 'rconn'], {'gel': 'gel'}), '(cc, rconn, gel=gel)\n', (5101, 5121), False, 'from sfepy.fem.mappings import VolumeMapping, SurfaceMapping\n'), ((5232, 5277), 'sfepy.fem.mappings.SurfaceMapping', 'SurfaceMapping', (['dual_coors', 'tri_conn'], {'gel': 'gel'}), '(dual_coors, tri_conn, gel=gel)\n', (5246, 5277), False, 'from sfepy.fem.mappings import VolumeMapping, SurfaceMapping\n'), ((2496, 2550), 'sfepy.fem.fe_surface.FESurface', 'FESurface', (['None', 'self.region', 'self.gel.faces', 'conn', 'ig'], {}), '(None, self.region, self.gel.faces, conn, ig)\n', (2505, 2550), False, 'from sfepy.fem.fe_surface import FESurface\n'), ((4489, 4526), 'sfepy.linalg.norm_l2_along_axis', 'la.norm_l2_along_axis', (['centre_normals'], {}), '(centre_normals)\n', (4510, 4526), True, 'import sfepy.linalg as la\n'), ((5540, 5575), 'sfepy.linalg.norm_l2_along_axis', 'la.norm_l2_along_axis', (['edge_normals'], {}), '(edge_normals)\n', (5561, 5575), True, 'import sfepy.linalg as la\n'), ((5692, 5724), 'sfepy.linalg.norm_l2_along_axis', 'la.norm_l2_along_axis', (['edge_dirs'], {}), '(edge_dirs)\n', (5713, 5724), True, 'import sfepy.linalg as la\n'), ((5811, 5844), 'sfepy.linalg.norm_l2_along_axis', 'la.norm_l2_along_axis', (['edge_ortho'], {}), '(edge_ortho)\n', (5832, 5844), True, 'import sfepy.linalg as la\n')]
|
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
from collections import namedtuple
from typing import Iterable, Union
import megengine as mge
import megengine.distributed as dist
import megengine.module as M
import megengine.optimizer as optim
from basecore.config import ConfigDict
from megengine import Parameter
from megengine.amp import GradScaler
from megengine.autodiff import GradManager
from pkg_resources import packaging
from basecls.utils import registers
from .optimizer import LAMB, LARS, SGD
from .weight_decay import get_param_groups
__all__ = ["Solver", "BaseSolver", "DefaultSolver"]
Solver = namedtuple("Solver", ["optimizer", "grad_manager", "grad_scaler"])
class BaseSolver:
"""Base class for solver factory.
A solver factory should return a :py:class:`~Solver` object, which combines
an :py:class:`~megengine.optimizer.Optimizer` and
a :py:class:`~megengine.autodiff.GradManager`.
"""
@classmethod
def build(cls, cfg: ConfigDict, model: M.Module) -> Solver:
"""Abstract build function
Args:
cfg: config for training.
model: model for training.
Returns:
A solver.
"""
raise NotImplementedError
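# A training-step sketch (hypothetical ``cfg``, ``model`` and ``batch``
# objects, not from this file) showing how the Solver fields cooperate;
# ``compute_loss`` is a stand-in for the real loss computation:
#
#     solver = DefaultSolver.build(cfg, model)
#     with solver.grad_manager:
#         loss = compute_loss(model, batch)
#         solver.grad_manager.backward(loss)
#     solver.optimizer.step()
#     solver.optimizer.clear_grad()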
@registers.solvers.register()
class DefaultSolver(BaseSolver):
"""The default solver factory.
According to ``cfg.reduce_mode``, learning rate and weight decay will be scaled automatically
following the linear scaling rule, see
`"Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour"
<https://arxiv.org/abs/1706.02677>`_ for more details.
    It supports ``"sgd"``, ``"adam"``, ``"adamw"``, ``"lamb"`` and ``"lars"``.
Note:
This linear scaling rule can only work well with SGD. We are still looking for
the applicable scaling rule for Adam and AdamW. Thus we recommend keeping default
training settings (like learning rate and world size) when using Adam and AdamW.
"""
@classmethod
def build(cls, cfg: ConfigDict, model: M.Module) -> Solver:
"""Build function with the linear scaling strategy.
Args:
cfg: config for training.
model: model for training.
Returns:
A solver.
"""
amp_cfg = cfg.amp
cfg = cfg.solver
world_size = dist.get_world_size()
# build optimizer
lr = cfg.basic_lr * world_size # linear scaling rule
optim_params = get_param_groups(model, cfg.weight_decay)
optimizer = cls.build_optimizer(cfg, optim_params, lr, 0)
# build grad_manager
gm = GradManager()
callbacks = [dist.make_allreduce_cb("mean", dist.WORLD)] if world_size > 1 else None
gm.attach(model.parameters(), callbacks=callbacks)
# build grad_scaler
scaler = (
GradScaler(init_scale=65536.0, growth_interval=2000)
if amp_cfg.dynamic_scale
else GradScaler(init_scale=128.0, growth_interval=0)
)
return Solver(optimizer, gm, scaler)
@classmethod
def build_optimizer(
cls, cfg: ConfigDict, params: Union[Iterable[Parameter], dict], lr: float, wd: float
) -> optim.Optimizer:
"""Build optimizer according to training config.
Args:
cfg: config for training.
params: iterable of parameters to optimize or dicts defining parameter groups.
lr: learning rate.
            wd: weight decay (L2 penalty).
Returns:
An optimizer.
"""
if cfg.optimizer == "adam":
return optim.Adam(params, lr=lr, weight_decay=wd, betas=cfg.betas)
elif cfg.optimizer == "adamw":
return optim.AdamW(params, lr=lr, weight_decay=wd, betas=cfg.betas)
elif cfg.optimizer == "lamb":
return LAMB(
params, lr=lr, weight_decay=wd, betas=cfg.betas, always_adapt=cfg.always_adapt
)
elif cfg.optimizer == "lars":
return LARS(
params,
lr=lr,
weight_decay=wd,
momentum=cfg.momentum,
nesterov=cfg.nesterov,
always_adapt=cfg.always_adapt,
)
elif cfg.optimizer == "sgd":
if packaging.version.parse(mge.__version__) < packaging.version.parse("1.7.0"):
return SGD(
params, lr=lr, weight_decay=wd, momentum=cfg.momentum, nesterov=cfg.nesterov
)
return optim.SGD(
params, lr=lr, weight_decay=wd, momentum=cfg.momentum, nesterov=cfg.nesterov
)
else:
raise NotImplementedError(f"Optimizer '{cfg.optimizer}' not supported")
|
[
"megengine.optimizer.SGD",
"megengine.optimizer.Adam",
"megengine.amp.GradScaler",
"megengine.optimizer.AdamW",
"megengine.autodiff.GradManager",
"megengine.distributed.make_allreduce_cb",
"megengine.distributed.get_world_size"
] |
[((649, 715), 'collections.namedtuple', 'namedtuple', (['"""Solver"""', "['optimizer', 'grad_manager', 'grad_scaler']"], {}), "('Solver', ['optimizer', 'grad_manager', 'grad_scaler'])\n", (659, 715), False, 'from collections import namedtuple\n'), ((1267, 1295), 'basecls.utils.registers.solvers.register', 'registers.solvers.register', ([], {}), '()\n', (1293, 1295), False, 'from basecls.utils import registers\n'), ((2331, 2352), 'megengine.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (2350, 2352), True, 'import megengine.distributed as dist\n'), ((2618, 2631), 'megengine.autodiff.GradManager', 'GradManager', ([], {}), '()\n', (2629, 2631), False, 'from megengine.autodiff import GradManager\n'), ((2844, 2896), 'megengine.amp.GradScaler', 'GradScaler', ([], {'init_scale': '(65536.0)', 'growth_interval': '(2000)'}), '(init_scale=65536.0, growth_interval=2000)\n', (2854, 2896), False, 'from megengine.amp import GradScaler\n'), ((2951, 2998), 'megengine.amp.GradScaler', 'GradScaler', ([], {'init_scale': '(128.0)', 'growth_interval': '(0)'}), '(init_scale=128.0, growth_interval=0)\n', (2961, 2998), False, 'from megengine.amp import GradScaler\n'), ((3614, 3673), 'megengine.optimizer.Adam', 'optim.Adam', (['params'], {'lr': 'lr', 'weight_decay': 'wd', 'betas': 'cfg.betas'}), '(params, lr=lr, weight_decay=wd, betas=cfg.betas)\n', (3624, 3673), True, 'import megengine.optimizer as optim\n'), ((2653, 2695), 'megengine.distributed.make_allreduce_cb', 'dist.make_allreduce_cb', (['"""mean"""', 'dist.WORLD'], {}), "('mean', dist.WORLD)\n", (2675, 2695), True, 'import megengine.distributed as dist\n'), ((3732, 3792), 'megengine.optimizer.AdamW', 'optim.AdamW', (['params'], {'lr': 'lr', 'weight_decay': 'wd', 'betas': 'cfg.betas'}), '(params, lr=lr, weight_decay=wd, betas=cfg.betas)\n', (3743, 3792), True, 'import megengine.optimizer as optim\n'), ((4538, 4630), 'megengine.optimizer.SGD', 'optim.SGD', (['params'], {'lr': 'lr', 'weight_decay': 'wd', 'momentum': 'cfg.momentum', 'nesterov': 'cfg.nesterov'}), '(params, lr=lr, weight_decay=wd, momentum=cfg.momentum, nesterov=\n cfg.nesterov)\n', (4547, 4630), True, 'import megengine.optimizer as optim\n'), ((4299, 4339), 'pkg_resources.packaging.version.parse', 'packaging.version.parse', (['mge.__version__'], {}), '(mge.__version__)\n', (4322, 4339), False, 'from pkg_resources import packaging\n'), ((4342, 4374), 'pkg_resources.packaging.version.parse', 'packaging.version.parse', (['"""1.7.0"""'], {}), "('1.7.0')\n", (4365, 4374), False, 'from pkg_resources import packaging\n')]
|
from typing import Any, Dict, List, Optional, Union
from pydantic.networks import EmailStr
from app.crud.base_sqlmodel import CRUDBase
from sqlmodel.ext.asyncio.session import AsyncSession
from sqlmodel import select
from app.schemas.user import IUserCreate, IUserUpdate
from app.models.user import User
from app.core.security import verify_password, get_password_hash
from datetime import datetime
class CRUDUser(CRUDBase[User, IUserCreate, IUserUpdate]):
async def get_by_email(
self, db_session: AsyncSession, *, email: str
) -> Optional[User]:
users = await db_session.exec(select(User).where(User.email == email))
return users.first()
async def get_user_by_id(self, db_session: AsyncSession, id: int) -> Optional[User]:
return await super().get(db_session, id=id)
async def create(self, db_session: AsyncSession, *, obj_in: IUserCreate) -> User:
db_obj = User(
first_name=obj_in.first_name,
last_name=obj_in.last_name,
email=obj_in.email,
is_superuser=obj_in.is_superuser,
hashed_password=get_password_hash(obj_in.password),
created_at=datetime.utcnow(),
updated_at=datetime.utcnow(),
role_id=obj_in.role_id,
)
db_session.add(db_obj)
await db_session.commit()
await db_session.refresh(db_obj)
return db_obj
    async def update(
self,
db_session: AsyncSession,
*,
db_obj: User,
obj_in: Union[IUserUpdate, Dict[str, Any]]
) -> User:
if isinstance(obj_in, dict):
update_data = obj_in
else:
update_data = obj_in.dict(exclude_unset=True)
update_data["updated_at"] = datetime.utcnow()
update_data["first_name"] = obj_in.first_name
update_data["last_name"] = obj_in.last_name
response = super().update(db_session, db_obj=db_obj, obj_in=update_data)
return response
async def update_is_active(
self,
db_session: AsyncSession,
*,
db_obj: List[User],
obj_in: Union[int, str, Dict[str, Any]]
    ) -> List[User]:
        # accumulate the updated users; the original `response = None` would
        # fail on `response.append(x)` below
        response: List[User] = []
for x in db_obj:
setattr(x, "is_active", obj_in.is_active)
setattr(x, "updated_at", datetime.utcnow())
db_session.add(x)
await db_session.commit()
await db_session.refresh(x)
response.append(x)
return response
async def authenticate(
self, db_session: AsyncSession, *, email: EmailStr, password: str
) -> Optional[User]:
user = await self.get_by_email(db_session, email=email)
if not user:
return None
if not verify_password(password, user.hashed_password):
return None
return user
user = CRUDUser(User)
|
[
"sqlmodel.select"
] |
[((1751, 1768), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (1766, 1768), False, 'from datetime import datetime\n'), ((2751, 2798), 'app.core.security.verify_password', 'verify_password', (['password', 'user.hashed_password'], {}), '(password, user.hashed_password)\n', (2766, 2798), False, 'from app.core.security import verify_password, get_password_hash\n'), ((1114, 1148), 'app.core.security.get_password_hash', 'get_password_hash', (['obj_in.password'], {}), '(obj_in.password)\n', (1131, 1148), False, 'from app.core.security import verify_password, get_password_hash\n'), ((1173, 1190), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (1188, 1190), False, 'from datetime import datetime\n'), ((1215, 1232), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (1230, 1232), False, 'from datetime import datetime\n'), ((2317, 2334), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (2332, 2334), False, 'from datetime import datetime\n'), ((604, 616), 'sqlmodel.select', 'select', (['User'], {}), '(User)\n', (610, 616), False, 'from sqlmodel import select\n')]
|
from typing import List
from fastapi import APIRouter, Depends
from sqlmodel import select, Session
from app.models import *
from utils import get_session
router = APIRouter()
@router.get("/users", response_model=List[UserRead])
async def get_users(*, session: Session=Depends(get_session)):
statement = select(User)
results = session.exec(statement).all()
return results
@router.post("/tasks", response_model=List[TaskRead])
async def get_tasks(user: UserQuery, session: Session=Depends(get_session)):
statement = select(Task).where(Task.owner_id == user.id)
results = session.exec(statement).all()
return results
@router.post("/task", response_model=TaskRead)
async def get_task(task: TaskQuery, session: Session=Depends(get_session)):
    # chain .where() calls; Python's `and` cannot combine two SQL clauses
    statement = select(Task).where(Task.owner_id == task.owner_id).where(Task.id == task.id)
result = session.exec(statement).one_or_none()
return result
@router.post("/create/task", response_model=StandardResponse)
async def create_task(task: TaskCreate, session: Session=Depends(get_session)):
db_task = Task.from_orm(task)
session.add(db_task)
session.commit()
session.refresh(db_task)
return StandardResponse()
@router.post("/create/user", response_model=StandardResponse)
async def create_user(user: UserCreate, session: Session=Depends(get_session)):
db_user = User.from_orm(user)
session.add(db_user)
session.commit()
session.refresh(db_user)
return StandardResponse()
@router.post("/delete/task", response_model=StandardResponse)
async def delete_task(task: TaskQuery, session: Session=Depends(get_session)):
    # chain .where() calls; Python's `and` cannot combine two SQL clauses
    statement = select(Task).where(Task.id == task.id).where(Task.owner_id == task.owner_id)
result = session.exec(statement)
task = result.one_or_none()
if task:
session.delete(task)
session.commit()
return StandardResponse()
return StandardResponse(success="Failure", message="Invalid Task id or Owner id", code=400)
@router.post("/delete/user", response_model=StandardResponse)
async def delete_user(user: UserQuery, session: Session=Depends(get_session)):
statement = select(User).where(User.id == user.id)
result = session.exec(statement)
user = result.one_or_none()
if user:
session.delete(user)
session.commit()
return StandardResponse()
return StandardResponse(success="Failure", message="Invalid User id", code=400)
@router.post("/update/task", response_model=StandardResponse)
async def update_task(task: TaskRead, session: Session=Depends(get_session)):
task = Task.from_orm(task)
session.add(task)
session.commit()
session.refresh(task)
return StandardResponse()
|
[
"sqlmodel.select"
] |
[((165, 176), 'fastapi.APIRouter', 'APIRouter', ([], {}), '()\n', (174, 176), False, 'from fastapi import APIRouter, Depends\n'), ((271, 291), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (278, 291), False, 'from fastapi import APIRouter, Depends\n'), ((310, 322), 'sqlmodel.select', 'select', (['User'], {}), '(User)\n', (316, 322), False, 'from sqlmodel import select, Session\n'), ((504, 524), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (511, 524), False, 'from fastapi import APIRouter, Depends\n'), ((753, 773), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (760, 773), False, 'from fastapi import APIRouter, Depends\n'), ((1056, 1076), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (1063, 1076), False, 'from fastapi import APIRouter, Depends\n'), ((1339, 1359), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (1346, 1359), False, 'from fastapi import APIRouter, Depends\n'), ((1621, 1641), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (1628, 1641), False, 'from fastapi import APIRouter, Depends\n'), ((2119, 2139), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (2126, 2139), False, 'from fastapi import APIRouter, Depends\n'), ((2569, 2589), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (2576, 2589), False, 'from fastapi import APIRouter, Depends\n'), ((543, 555), 'sqlmodel.select', 'select', (['Task'], {}), '(Task)\n', (549, 555), False, 'from sqlmodel import select, Session\n'), ((792, 804), 'sqlmodel.select', 'select', (['Task'], {}), '(Task)\n', (798, 804), False, 'from sqlmodel import select, Session\n'), ((1660, 1672), 'sqlmodel.select', 'select', (['Task'], {}), '(Task)\n', (1666, 1672), False, 'from sqlmodel import select, Session\n'), ((2158, 2170), 'sqlmodel.select', 'select', (['User'], {}), '(User)\n', (2164, 2170), False, 'from sqlmodel import select, Session\n')]
|
from typing import List
from app.database import get_session
from app.models import Medication, MedicationUpdate
from fastapi import APIRouter, Depends, HTTPException
from sqlalchemy.ext.asyncio import AsyncSession
from sqlmodel import select
router = APIRouter(prefix="/medications", tags=["medications"])
@router.post("", response_model=Medication)
async def create_medication(
*, med: Medication, session: AsyncSession = Depends(get_session)
) -> Medication:
medication = Medication.from_orm(med)
session.add(medication)
await session.commit()
await session.refresh(medication)
return medication
@router.get("/{medication_id}", response_model=Medication)
async def retrieve_medication(
*, medication_id: str, session: AsyncSession = Depends(get_session)
) -> Medication:
result = await session.execute(
select(Medication).where(Medication.id == medication_id)
)
medication = result.scalar_one_or_none()
if not medication:
raise HTTPException(
status_code=404, detail=f"Medication {medication_id} not found"
)
return medication
@router.patch("/{medication_id}", response_model=Medication)
async def update_medication(
*,
medication_id: str,
patch: MedicationUpdate,
session: AsyncSession = Depends(get_session),
) -> Medication:
result = await session.execute(
select(Medication).where(Medication.id == medication_id)
)
medication = result.scalar_one_or_none()
if not medication:
raise HTTPException(
status_code=404, detail=f"Medication {medication_id} not found"
)
patch_data = patch.dict(exclude_unset=True)
for key, value in patch_data.items():
setattr(medication, key, value)
session.add(medication)
await session.commit()
await session.refresh(medication)
return medication
@router.post("/{medication_id}")
async def delete_medication(
*, medication_id: str, session: AsyncSession = Depends(get_session)
):
result = await session.execute(
select(Medication).where(Medication.id == medication_id)
)
medication = result.scalar_one_or_none()
if not medication:
raise HTTPException(
status_code=404, detail=f"Medication {medication_id} not found"
)
await session.delete(medication)
await session.commit()
return {"ok": True}
@router.get("", response_model=List[Medication])
async def list_medications(
*, session: AsyncSession = Depends(get_session)
) -> List[Medication]:
result = await session.execute(select(Medication))
medications = result.scalars().all()
return medications
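# Route map for this router (an overview, not part of the original file);
# note that deletion is exposed as POST on /medications/{medication_id},
# not as an HTTP DELETE:
#
#     POST  /medications                     -> create_medication
#     GET   /medications/{medication_id}     -> retrieve_medication
#     PATCH /medications/{medication_id}     -> update_medication
#     POST  /medications/{medication_id}     -> delete_medication
#     GET   /medications                     -> list_medications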
|
[
"sqlmodel.select"
] |
[((254, 308), 'fastapi.APIRouter', 'APIRouter', ([], {'prefix': '"""/medications"""', 'tags': "['medications']"}), "(prefix='/medications', tags=['medications'])\n", (263, 308), False, 'from fastapi import APIRouter, Depends, HTTPException\n'), ((432, 452), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (439, 452), False, 'from fastapi import APIRouter, Depends, HTTPException\n'), ((487, 511), 'app.models.Medication.from_orm', 'Medication.from_orm', (['med'], {}), '(med)\n', (506, 511), False, 'from app.models import Medication, MedicationUpdate\n'), ((770, 790), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (777, 790), False, 'from fastapi import APIRouter, Depends, HTTPException\n'), ((1300, 1320), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (1307, 1320), False, 'from fastapi import APIRouter, Depends, HTTPException\n'), ((1989, 2009), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (1996, 2009), False, 'from fastapi import APIRouter, Depends, HTTPException\n'), ((2501, 2521), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (2508, 2521), False, 'from fastapi import APIRouter, Depends, HTTPException\n'), ((997, 1075), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': '(404)', 'detail': 'f"""Medication {medication_id} not found"""'}), "(status_code=404, detail=f'Medication {medication_id} not found')\n", (1010, 1075), False, 'from fastapi import APIRouter, Depends, HTTPException\n'), ((1528, 1606), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': '(404)', 'detail': 'f"""Medication {medication_id} not found"""'}), "(status_code=404, detail=f'Medication {medication_id} not found')\n", (1541, 1606), False, 'from fastapi import APIRouter, Depends, HTTPException\n'), ((2202, 2280), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': '(404)', 'detail': 'f"""Medication {medication_id} not found"""'}), "(status_code=404, detail=f'Medication {medication_id} not found')\n", (2215, 2280), False, 'from fastapi import APIRouter, Depends, HTTPException\n'), ((2580, 2598), 'sqlmodel.select', 'select', (['Medication'], {}), '(Medication)\n', (2586, 2598), False, 'from sqlmodel import select\n'), ((852, 870), 'sqlmodel.select', 'select', (['Medication'], {}), '(Medication)\n', (858, 870), False, 'from sqlmodel import select\n'), ((1383, 1401), 'sqlmodel.select', 'select', (['Medication'], {}), '(Medication)\n', (1389, 1401), False, 'from sqlmodel import select\n'), ((2057, 2075), 'sqlmodel.select', 'select', (['Medication'], {}), '(Medication)\n', (2063, 2075), False, 'from sqlmodel import select\n')]
|
"""seed schools
Revision ID: 8d04b7943264
Revises: <PASSWORD>
Create Date: 2022-04-18 00:38:38.618682+00:00
"""
import json
from os import getcwd
from pathlib import Path
import sqlalchemy as sa
import sqlalchemy.sql as sql
import sqlmodel
from alembic import context, op
# revision identifiers, used by Alembic.
revision = "8d04b7943264"
down_revision = "0<PASSWORD>"
branch_labels = None
depends_on = None
# Ad-hoc schools table for bulk import
schools_table = sql.table(
"schools", sql.column("id", sa.String), sql.column("name", sa.String)
)
def load_schools():
migrations_dir = Path(getcwd(), context.script.dir)
schools_path = migrations_dir.joinpath("schools.json")
return json.load(open(schools_path, "r"))
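# Expected shape of ``schools.json`` (an assumption inferred from the
# bulk_insert in ``upgrade`` below): a list of objects carrying string ``id``
# and ``name`` keys, e.g.
#
#     [{"id": "example-id", "name": "Example School"}, ...]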
def upgrade():
# Change schools.id to a string
op.drop_constraint(
"applications_school_id_fkey", "applications", type_="foreignkey"
)
op.alter_column(
"applications",
"school_id",
type_=sqlmodel.sql.sqltypes.AutoString(),
nullable=False,
)
op.alter_column(
"schools", "id", type_=sqlmodel.sql.sqltypes.AutoString(), nullable=False
)
op.create_foreign_key(
None, "applications", "schools", ["school_id"], ["id"], ondelete="CASCADE"
)
# Insert stuff
schools = load_schools()
op.bulk_insert(schools_table, [{"id": s["id"], "name": s["name"]} for s in schools])
def downgrade():
# Delete added records
schools = load_schools()
for school in schools:
op.execute(
schools_table.delete().where(
schools_table.c.id == op.inline_literal(school["id"])
)
)
# Change schools.id back to an integer
op.drop_constraint(
"applications_school_id_fkey", "applications", type_="foreignkey"
)
op.alter_column(
"applications",
"school_id",
type_=sa.Integer(),
nullable=False,
postgresql_using="school_id::integer",
)
op.alter_column(
"schools",
"id",
type_=sa.Integer(),
nullable=False,
postgresql_using="id::integer",
)
op.create_foreign_key(
None, "applications", "schools", ["school_id"], ["id"], ondelete="CASCADE"
)
|
[
"sqlmodel.sql.sqltypes.AutoString"
] |
[((494, 521), 'sqlalchemy.sql.column', 'sql.column', (['"""id"""', 'sa.String'], {}), "('id', sa.String)\n", (504, 521), True, 'import sqlalchemy.sql as sql\n'), ((523, 552), 'sqlalchemy.sql.column', 'sql.column', (['"""name"""', 'sa.String'], {}), "('name', sa.String)\n", (533, 552), True, 'import sqlalchemy.sql as sql\n'), ((795, 885), 'alembic.op.drop_constraint', 'op.drop_constraint', (['"""applications_school_id_fkey"""', '"""applications"""'], {'type_': '"""foreignkey"""'}), "('applications_school_id_fkey', 'applications', type_=\n 'foreignkey')\n", (813, 885), False, 'from alembic import context, op\n'), ((1154, 1256), 'alembic.op.create_foreign_key', 'op.create_foreign_key', (['None', '"""applications"""', '"""schools"""', "['school_id']", "['id']"], {'ondelete': '"""CASCADE"""'}), "(None, 'applications', 'schools', ['school_id'], ['id'\n ], ondelete='CASCADE')\n", (1175, 1256), False, 'from alembic import context, op\n'), ((1319, 1407), 'alembic.op.bulk_insert', 'op.bulk_insert', (['schools_table', "[{'id': s['id'], 'name': s['name']} for s in schools]"], {}), "(schools_table, [{'id': s['id'], 'name': s['name']} for s in\n schools])\n", (1333, 1407), False, 'from alembic import context, op\n'), ((1710, 1800), 'alembic.op.drop_constraint', 'op.drop_constraint', (['"""applications_school_id_fkey"""', '"""applications"""'], {'type_': '"""foreignkey"""'}), "('applications_school_id_fkey', 'applications', type_=\n 'foreignkey')\n", (1728, 1800), False, 'from alembic import context, op\n'), ((2137, 2239), 'alembic.op.create_foreign_key', 'op.create_foreign_key', (['None', '"""applications"""', '"""schools"""', "['school_id']", "['id']"], {'ondelete': '"""CASCADE"""'}), "(None, 'applications', 'schools', ['school_id'], ['id'\n ], ondelete='CASCADE')\n", (2158, 2239), False, 'from alembic import context, op\n'), ((603, 611), 'os.getcwd', 'getcwd', ([], {}), '()\n', (609, 611), False, 'from os import getcwd\n'), ((975, 1009), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (1007, 1009), False, 'import sqlmodel\n'), ((1093, 1127), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (1125, 1127), False, 'import sqlmodel\n'), ((1890, 1902), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (1900, 1902), True, 'import sqlalchemy as sa\n'), ((2049, 2061), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (2059, 2061), True, 'import sqlalchemy as sa\n'), ((1606, 1637), 'alembic.op.inline_literal', 'op.inline_literal', (["school['id']"], {}), "(school['id'])\n", (1623, 1637), False, 'from alembic import context, op\n')]
|
from datetime import datetime
from typing import Optional
from pydantic import BaseSettings, HttpUrl
from sqlmodel import Field, SQLModel # pyright: ignore[reportUnknownVariableType]
class Post(SQLModel):
id: int
text: Optional[str]
photos: list[HttpUrl]
date: datetime
class PostDB(SQLModel, table=True):
id: int = Field(default=None, primary_key=True)
class Settings(BaseSettings):
vk_token: str
vk_owner_id: int
tg_token: str
tg_chat_id: int
db_path: str = "/tmp/database.db"
sentry_dsn: Optional[str]
class LambdaSettings(Settings):
s3_bucket: str
s3_key: str
s3_endpoint: str
aws_access_key_id: str
aws_secret_access_key: str
|
[
"sqlmodel.Field"
] |
[((342, 379), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (347, 379), False, 'from sqlmodel import Field, SQLModel\n')]
|
#!/usr/bin/env python
# 12.01.2007, c
"""
Solve partial differential equations given in a SfePy problem definition file.
Example problem definition files can be found in ``examples/`` directory of the
SfePy top-level directory. This script works with all the examples except those
in ``examples/standalone/``.
Both normal and parametric study runs are supported. A parametric study allows
repeated runs for varying some of the simulation parameters - see
``examples/diffusion/poisson_parametric_study.py`` file.
"""
from __future__ import print_function
from __future__ import absolute_import
from argparse import ArgumentParser, RawDescriptionHelpFormatter
import sfepy
from sfepy.base.base import output
from sfepy.base.conf import ProblemConf, get_standard_keywords
from sfepy.applications import PDESolverApp
def print_terms():
import sfepy.terms as t
tt = t.term_table
print('Terms: %d available:' % len(tt))
print(sorted(tt.keys()))
def print_solvers():
from sfepy.solvers import solver_table
print('Solvers: %d available:' % len(solver_table))
print(sorted(solver_table.keys()))
helps = {
'debug':
'automatically start debugger when an exception is raised',
'conf' :
'override problem description file items, written as python'
' dictionary without surrounding braces',
'options' : 'override options item of problem description,'
' written as python dictionary without surrounding braces',
'define' : 'pass given arguments written as python dictionary'
' without surrounding braces to define() function of problem description'
' file',
'filename' :
'basename of output file(s) [default: <basename of input file>]',
'output_format' :
'output file format, one of: {vtk, h5} [default: vtk]',
'save_restart' :
'if given, save restart files according to the given mode.',
'load_restart' :
'if given, load the given restart file',
'log' :
'log all messages to specified file (existing file will be overwritten!)',
'quiet' :
'do not print any messages to screen',
'save_ebc' :
'save a zero solution with applied EBCs (Dirichlet boundary conditions)',
'save_ebc_nodes' :
'save a zero solution with added non-zeros in EBC (Dirichlet boundary'
' conditions) nodes - scalar variables are shown using colors,'
' vector variables using arrows with non-zero components corresponding'
' to constrained components',
'save_regions' :
'save problem regions as meshes',
'save_regions_as_groups' :
'save problem regions in a single mesh but mark them by using different'
' element/node group numbers',
'save_field_meshes' :
'save meshes of problem fields (with extra DOF nodes)',
'solve_not' :
'do not solve (use in connection with --save-*)',
'list' :
'list data, what can be one of: {terms, solvers}',
}
def main():
parser = ArgumentParser(description=__doc__,
formatter_class=RawDescriptionHelpFormatter)
parser.add_argument('--version', action='version',
version='%(prog)s ' + sfepy.__version__)
parser.add_argument('--debug',
action='store_true', dest='debug',
default=False, help=helps['debug'])
parser.add_argument('-c', '--conf', metavar='"key : value, ..."',
action='store', dest='conf', type=str,
                        default=None, help=helps['conf'])
parser.add_argument('-O', '--options', metavar='"key : value, ..."',
action='store', dest='app_options', type=str,
default=None, help=helps['options'])
parser.add_argument('-d', '--define', metavar='"key : value, ..."',
action='store', dest='define_args', type=str,
default=None, help=helps['define'])
parser.add_argument('-o', metavar='filename',
action='store', dest='output_filename_trunk',
default=None, help=helps['filename'])
parser.add_argument('--format', metavar='format',
action='store', dest='output_format',
default=None, help=helps['output_format'])
parser.add_argument('--save-restart', metavar='mode', type=int,
action='store', dest='save_restart',
default=None, help=helps['save_restart'])
parser.add_argument('--load-restart', metavar='filename',
action='store', dest='load_restart',
default=None, help=helps['load_restart'])
parser.add_argument('--log', metavar='file',
action='store', dest='log',
default=None, help=helps['log'])
parser.add_argument('-q', '--quiet',
action='store_true', dest='quiet',
default=False, help=helps['quiet'])
parser.add_argument('--save-ebc',
action='store_true', dest='save_ebc',
default=False, help=helps['save_ebc'])
parser.add_argument('--save-ebc-nodes',
action='store_true', dest='save_ebc_nodes',
default=False, help=helps['save_ebc_nodes'])
parser.add_argument('--save-regions',
action='store_true', dest='save_regions',
default=False, help=helps['save_regions'])
parser.add_argument('--save-regions-as-groups',
action='store_true', dest='save_regions_as_groups',
default=False, help=helps['save_regions_as_groups'])
parser.add_argument('--save-field-meshes',
action='store_true', dest='save_field_meshes',
default=False, help=helps['save_field_meshes'])
parser.add_argument('--solve-not',
action='store_true', dest='solve_not',
default=False, help=helps['solve_not'])
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--list', metavar='what',
action='store', dest='_list',
default=None, help=helps['list'])
group.add_argument('filename_in', nargs='?')
options, petsc_opts = parser.parse_known_args()
if options._list is not None:
if options._list == 'terms':
print_terms()
elif options._list == 'solvers':
print_solvers()
return
if options.debug:
from sfepy.base.base import debug_on_error; debug_on_error()
filename_in = options.filename_in
output.set_output(filename=options.log,
quiet=options.quiet,
combined=options.log is not None)
required, other = get_standard_keywords()
if options.solve_not:
required.remove('equations')
required.remove('solver_[0-9]+|solvers')
other.extend(['equations'])
conf = ProblemConf.from_file_and_options(filename_in, options,
required, other,
define_args=options.define_args)
opts = conf.options
output_prefix = opts.get('output_prefix', 'sfepy:')
opts.save_restart = options.save_restart
opts.load_restart = options.load_restart
app = PDESolverApp(conf, options, output_prefix)
if hasattr(opts, 'parametric_hook'): # Parametric study.
parametric_hook = conf.get_function(opts.parametric_hook)
app.parametrize(parametric_hook)
app()
if __name__ == '__main__':
main()
|
[
"sfepy.base.conf.get_standard_keywords",
"sfepy.solvers.solver_table.keys",
"sfepy.base.conf.ProblemConf.from_file_and_options",
"sfepy.base.base.output.set_output",
"sfepy.applications.PDESolverApp",
"sfepy.base.base.debug_on_error"
] |
[((2913, 2998), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '__doc__', 'formatter_class': 'RawDescriptionHelpFormatter'}), '(description=__doc__, formatter_class=RawDescriptionHelpFormatter\n )\n', (2927, 2998), False, 'from argparse import ArgumentParser, RawDescriptionHelpFormatter\n'), ((6685, 6784), 'sfepy.base.base.output.set_output', 'output.set_output', ([], {'filename': 'options.log', 'quiet': 'options.quiet', 'combined': '(options.log is not None)'}), '(filename=options.log, quiet=options.quiet, combined=\n options.log is not None)\n', (6702, 6784), False, 'from sfepy.base.base import output\n'), ((6847, 6870), 'sfepy.base.conf.get_standard_keywords', 'get_standard_keywords', ([], {}), '()\n', (6868, 6870), False, 'from sfepy.base.conf import ProblemConf, get_standard_keywords\n'), ((7031, 7140), 'sfepy.base.conf.ProblemConf.from_file_and_options', 'ProblemConf.from_file_and_options', (['filename_in', 'options', 'required', 'other'], {'define_args': 'options.define_args'}), '(filename_in, options, required, other,\n define_args=options.define_args)\n', (7064, 7140), False, 'from sfepy.base.conf import ProblemConf, get_standard_keywords\n'), ((7410, 7452), 'sfepy.applications.PDESolverApp', 'PDESolverApp', (['conf', 'options', 'output_prefix'], {}), '(conf, options, output_prefix)\n', (7422, 7452), False, 'from sfepy.applications import PDESolverApp\n'), ((6625, 6641), 'sfepy.base.base.debug_on_error', 'debug_on_error', ([], {}), '()\n', (6639, 6641), False, 'from sfepy.base.base import debug_on_error\n'), ((1097, 1116), 'sfepy.solvers.solver_table.keys', 'solver_table.keys', ([], {}), '()\n', (1114, 1116), False, 'from sfepy.solvers import solver_table\n')]
|
from typing import Optional
from sqlmodel import Session
from db.base import engine
from db.models import Plant
def create_plants():
plant_1 = Plant(name="Hebe")
plant_2 = Plant(name="Astilbe")
plant_3 = Plant(name="Sedum")
plant_4 = Plant(name="Helenium")
plant_5 = Plant(name="Heather")
session = Session(engine)
session.add(plant_1)
session.add(plant_2)
session.add(plant_3)
session.add(plant_4)
session.add(plant_5)
session.commit()
session.close()
def main():
create_plants()
if __name__ == "__main__":
main()
|
[
"sqlmodel.Session"
] |
[((150, 168), 'db.models.Plant', 'Plant', ([], {'name': '"""Hebe"""'}), "(name='Hebe')\n", (155, 168), False, 'from db.models import Plant\n'), ((183, 204), 'db.models.Plant', 'Plant', ([], {'name': '"""Astilbe"""'}), "(name='Astilbe')\n", (188, 204), False, 'from db.models import Plant\n'), ((219, 238), 'db.models.Plant', 'Plant', ([], {'name': '"""Sedum"""'}), "(name='Sedum')\n", (224, 238), False, 'from db.models import Plant\n'), ((253, 275), 'db.models.Plant', 'Plant', ([], {'name': '"""Helenium"""'}), "(name='Helenium')\n", (258, 275), False, 'from db.models import Plant\n'), ((290, 311), 'db.models.Plant', 'Plant', ([], {'name': '"""Heather"""'}), "(name='Heather')\n", (295, 311), False, 'from db.models import Plant\n'), ((327, 342), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (334, 342), False, 'from sqlmodel import Session\n')]
|
from typing import TYPE_CHECKING, Any, Dict, Optional
from uuid import UUID
from sqlalchemy.schema import Column, ForeignKey
from sqlmodel import Field, Relationship, select
from sqlmodel.sql.sqltypes import GUID
from joj.horse.models.base import BaseORMModel
from joj.horse.services.db import db_session
from joj.horse.services.oauth import OAuth2Profile
if TYPE_CHECKING:
from joj.horse.models import User
class UserOAuthAccount(BaseORMModel, table=True): # type: ignore[call-arg]
__tablename__ = "user_oauth_accounts"
oauth_name: str = Field()
access_token: str = Field()
refresh_token: Optional[str] = Field(None, nullable=True)
expires_at: Optional[int] = Field(None, nullable=True)
account_id: str = Field(index=True)
account_name: Optional[str] = Field(None, index=True, nullable=True)
account_email: str = Field(index=True)
user_id: Optional[UUID] = Field(
sa_column=Column(GUID, ForeignKey("users.id", ondelete="CASCADE"))
)
user: Optional["User"] = Relationship(back_populates="oauth_accounts")
@staticmethod
async def create_or_update(
oauth_name: str, token: Dict[str, Any], profile: OAuth2Profile
) -> "UserOAuthAccount":
access_token = token["access_token"]
refresh_token = token.get("refresh_token", None)
expires_at = token.get("expires_at", None)
async with db_session() as session:
statement = (
select(UserOAuthAccount)
.where(UserOAuthAccount.oauth_name == oauth_name)
.where(UserOAuthAccount.account_id == profile.account_id)
)
results = await session.exec(statement)
oauth_account: Optional[UserOAuthAccount] = results.one_or_none()
if oauth_account:
oauth_account.access_token = access_token
oauth_account.refresh_token = refresh_token
oauth_account.expires_at = expires_at
oauth_account.account_name = profile.account_name
else:
oauth_account = UserOAuthAccount(
oauth_name=oauth_name,
access_token=access_token,
refresh_token=refresh_token,
expires_at=expires_at,
account_id=profile.account_id,
account_name=profile.account_name,
account_email=profile.account_email,
)
session.sync_session.add(oauth_account)
await session.commit()
await session.refresh(oauth_account)
return oauth_account
|
[
"sqlmodel.Field",
"sqlmodel.select",
"sqlmodel.Relationship"
] |
[((558, 565), 'sqlmodel.Field', 'Field', ([], {}), '()\n', (563, 565), False, 'from sqlmodel import Field, Relationship, select\n'), ((590, 597), 'sqlmodel.Field', 'Field', ([], {}), '()\n', (595, 597), False, 'from sqlmodel import Field, Relationship, select\n'), ((633, 659), 'sqlmodel.Field', 'Field', (['None'], {'nullable': '(True)'}), '(None, nullable=True)\n', (638, 659), False, 'from sqlmodel import Field, Relationship, select\n'), ((692, 718), 'sqlmodel.Field', 'Field', (['None'], {'nullable': '(True)'}), '(None, nullable=True)\n', (697, 718), False, 'from sqlmodel import Field, Relationship, select\n'), ((741, 758), 'sqlmodel.Field', 'Field', ([], {'index': '(True)'}), '(index=True)\n', (746, 758), False, 'from sqlmodel import Field, Relationship, select\n'), ((793, 831), 'sqlmodel.Field', 'Field', (['None'], {'index': '(True)', 'nullable': '(True)'}), '(None, index=True, nullable=True)\n', (798, 831), False, 'from sqlmodel import Field, Relationship, select\n'), ((857, 874), 'sqlmodel.Field', 'Field', ([], {'index': '(True)'}), '(index=True)\n', (862, 874), False, 'from sqlmodel import Field, Relationship, select\n'), ((1023, 1068), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""oauth_accounts"""'}), "(back_populates='oauth_accounts')\n", (1035, 1068), False, 'from sqlmodel import Field, Relationship, select\n'), ((1393, 1405), 'joj.horse.services.db.db_session', 'db_session', ([], {}), '()\n', (1403, 1405), False, 'from joj.horse.services.db import db_session\n'), ((944, 986), 'sqlalchemy.schema.ForeignKey', 'ForeignKey', (['"""users.id"""'], {'ondelete': '"""CASCADE"""'}), "('users.id', ondelete='CASCADE')\n", (954, 986), False, 'from sqlalchemy.schema import Column, ForeignKey\n'), ((1460, 1484), 'sqlmodel.select', 'select', (['UserOAuthAccount'], {}), '(UserOAuthAccount)\n', (1466, 1484), False, 'from sqlmodel import Field, Relationship, select\n')]
|
from __future__ import absolute_import
from sfepy import data_dir
import six
filename_mesh = data_dir + '/meshes/3d/special/cube_cylinder.mesh'
if 0:
from sfepy.discrete.fem.utils import refine_mesh
refinement_level = 1
filename_mesh = refine_mesh(filename_mesh, refinement_level)
material_2 = {
'name' : 'coef',
'values' : {'val' : 1.0},
}
field_1 = {
'name' : 'temperature',
'dtype' : 'real',
'shape' : (1,),
'region' : 'Omega',
'approx_order' : 1,
}
variables = {
't' : ('unknown field', 'temperature', 0),
's' : ('test field', 'temperature', 't'),
}
regions = {
'Omega' : 'all',
'Gamma_Left' : ('vertices in (x < 0.0001)', 'facet'),
'Gamma_Right' : ('vertices in (x > 0.999)', 'facet'),
}
ebcs = {
't1' : ('Gamma_Left', {'t.0' : 2.0}),
't2' : ('Gamma_Right', {'t.0' : -2.0}),
}
integral_1 = {
'name' : 'i',
'order' : 1,
}
equations = {
'Temperature' : """dw_laplace.i.Omega(coef.val, s, t) = 0"""
}
class DiagPC(object):
"""
Diagonal (Jacobi) preconditioner.
Equivalent to setting `'precond' : 'jacobi'`.
"""
def setUp(self, pc):
A = pc.getOperators()[0]
self.idiag = 1.0 / A.getDiagonal()
def apply(self, pc, x, y):
y.pointwiseMult(x, self.idiag)
def setup_petsc_precond(mtx, problem):
return DiagPC()
solvers = {
'd00' : ('ls.scipy_direct',
{'method' : 'umfpack',
'warn' : True,}
),
'd01' : ('ls.scipy_direct',
{'method' : 'superlu',
'warn' : True,}
),
'd10' : ('ls.mumps', {}),
'i00' : ('ls.pyamg',
{'method' : 'ruge_stuben_solver',
'accel' : 'cg',
'eps_r' : 1e-12,
'method:max_levels' : 5,
'solve:cycle' : 'V',}
),
'i01' : ('ls.pyamg',
{'method' : 'smoothed_aggregation_solver',
'accel' : 'cg',
'eps_r' : 1e-12,}
),
'i02' : ('ls.pyamg_krylov',
{'method' : 'cg',
'eps_r' : 1e-12,
'i_max' : 1000,}
),
'i10' : ('ls.petsc',
{'method' : 'cg', # ksp_type
'precond' : 'none', # pc_type
'eps_a' : 1e-12, # abstol
'eps_r' : 1e-12, # rtol
'i_max' : 1000,} # maxits
),
'i11' : ('ls.petsc',
{'method' : 'cg', # ksp_type
'precond' : 'python', # just for output (unused)
'setup_precond' : setup_petsc_precond, # user-defined pc
'eps_a' : 1e-12, # abstol
'eps_r' : 1e-12, # rtol
'i_max' : 1000,} # maxits
),
'i12' : ('ls.petsc',
{'method' : 'cg', # ksp_type
'precond' : 'jacobi', # pc_type
'eps_a' : 1e-12, # abstol
'eps_r' : 1e-12, # rtol
'i_max' : 1000,} # maxits
),
'i13' : ('ls.petsc',
{'method' : 'cg', # ksp_type
'precond' : 'icc', # pc_type
'eps_a' : 1e-12, # abstol
'eps_r' : 1e-12, # rtol
'i_max' : 1000,} # maxits
),
'i20' : ('ls.scipy_iterative',
{'method' : 'cg',
'i_max' : 1000,
'eps_r' : 1e-12,}
),
'i21' : ('ls.scipy_iterative',
{'method' : 'bicgstab',
'i_max' : 1000,
'eps_r' : 1e-12,}
),
'i22' : ('ls.scipy_iterative',
{'method' : 'qmr',
'i_max' : 1000,
'eps_r' : 1e-12,}
),
'newton' : ('nls.newton', {
'i_max' : 1,
'eps_a' : 1e-10,
}),
}
options = {
'nls' : 'newton',
}
from sfepy.base.testing import TestCommon
output_name = 'test_linear_solvers_%s.vtk'
class Test(TestCommon):
can_fail = ['ls.pyamg', 'ls.pyamg_krylov', 'ls.petsc', 'ls.mumps',]
@staticmethod
def from_conf(conf, options):
from sfepy.discrete import Problem
problem = Problem.from_conf(conf)
problem.time_update()
test = Test(problem=problem, conf=conf, options=options)
return test
def _list_linear_solvers(self, confs):
d = []
for key, val in six.iteritems(confs):
if val.kind.find('ls.') == 0:
d.append(val)
d.sort(key=lambda a: a.name)
return d
def test_solvers(self):
from sfepy.base.base import IndexedStruct
import os.path as op
solver_confs = self._list_linear_solvers(self.problem.solver_confs)
ok = True
tt = []
for solver_conf in solver_confs:
method = solver_conf.get('method', '')
precond = solver_conf.get('precond', '')
name = ' '.join((solver_conf.name, solver_conf.kind,
method, precond)).rstrip()
self.report(name)
self.report('matrix size:', self.problem.mtx_a.shape)
self.report(' nnz:', self.problem.mtx_a.nnz)
status = IndexedStruct()
try:
self.problem.init_solvers(status=status,
ls_conf=solver_conf,
force=True)
state = self.problem.solve()
failed = status.nls_status.condition != 0
except Exception as aux:
failed = True
status = None
exc = aux
ok = ok and ((not failed) or (solver_conf.kind in self.can_fail))
if status is not None:
status = status.nls_status
for kv in six.iteritems(status.time_stats):
self.report('%10s: %7.2f [s]' % kv)
self.report('condition: %d, err0: %.3e, err: %.3e'
% (status.condition, status.err0, status.err))
tt.append([name,
status.time_stats['solve'],
status.ls_n_iter,
status.err])
aux = name.replace(' ', '_')
fname = op.join(self.options.out_dir,
op.split(self.conf.output_name)[1]) % aux
self.problem.save_state(fname, state)
else:
self.report('solver failed:')
self.report(exc)
tt.append([name, -1, 1e10, 1e10])
tt.sort(key=lambda a: a[1])
self.report('solution times / numbers of iterations (residual norms):')
for row in tt:
self.report('%.2f [s] / % 4d' % (row[1], row[2]),
'(%.3e)' % row[3], ':', row[0])
return ok
def test_ls_reuse(self):
import numpy as nm
from sfepy.solvers import Solver
from sfepy.discrete.state import State
self.problem.init_solvers(ls_conf=self.problem.solver_confs['d00'])
nls = self.problem.get_nls()
state0 = State(self.problem.equations.variables)
state0.apply_ebc()
vec0 = state0.get_reduced()
self.problem.update_materials()
rhs = nls.fun(vec0)
mtx = nls.fun_grad(vec0)
ok = True
for name in ['i12', 'i01']:
solver_conf = self.problem.solver_confs[name]
method = solver_conf.get('method', '')
precond = solver_conf.get('precond', '')
name = ' '.join((solver_conf.name, solver_conf.kind,
method, precond)).rstrip()
self.report(name)
try:
ls = Solver.any_from_conf(solver_conf)
except:
self.report('skipped!')
continue
conf = ls.conf.copy()
conf.force_reuse = True
sol00 = ls(rhs, mtx=mtx, conf=conf)
digest00 = ls.mtx_digest
sol0 = ls(rhs, mtx=mtx)
digest0 = ls.mtx_digest
sol1 = ls(rhs, mtx=2*mtx, conf=conf)
digest1 = ls.mtx_digest
sol2 = ls(rhs, mtx=2*mtx)
digest2 = ls.mtx_digest
ls(rhs, mtx=2*mtx)
digest3 = ls.mtx_digest
_ok = digest00 != digest0
self.report(digest00, '!=', digest0, ':', _ok); ok = ok and _ok
_ok = digest0 == digest1
self.report(digest0, '==', digest1, ':', _ok); ok = ok and _ok
_ok = digest1 != digest2
self.report(digest1, '!=', digest2, ':', _ok); ok = ok and _ok
_ok = digest2[1] == digest3[1]
self.report(digest2[1], '==', digest3[1], ':', _ok); ok = ok and _ok
_ok = nm.allclose(sol00, sol0, atol=1e-12, rtol=0.0)
self.report('sol00 == sol0:', _ok); ok = ok and _ok
_ok = nm.allclose(sol0, sol1, atol=1e-12, rtol=0.0)
self.report('sol0 == sol1:', _ok); ok = ok and _ok
_ok = nm.allclose(sol0, 2 * sol2, atol=1e-12, rtol=0.0)
self.report('sol0 == 2 * sol2:', _ok); ok = ok and _ok
return ok
|
[
"sfepy.discrete.Problem.from_conf",
"sfepy.discrete.fem.utils.refine_mesh",
"sfepy.solvers.Solver.any_from_conf",
"sfepy.discrete.state.State",
"sfepy.base.base.IndexedStruct"
] |
[((250, 294), 'sfepy.discrete.fem.utils.refine_mesh', 'refine_mesh', (['filename_mesh', 'refinement_level'], {}), '(filename_mesh, refinement_level)\n', (261, 294), False, 'from sfepy.discrete.fem.utils import refine_mesh\n'), ((4007, 4030), 'sfepy.discrete.Problem.from_conf', 'Problem.from_conf', (['conf'], {}), '(conf)\n', (4024, 4030), False, 'from sfepy.discrete import Problem\n'), ((4230, 4250), 'six.iteritems', 'six.iteritems', (['confs'], {}), '(confs)\n', (4243, 4250), False, 'import six\n'), ((6997, 7036), 'sfepy.discrete.state.State', 'State', (['self.problem.equations.variables'], {}), '(self.problem.equations.variables)\n', (7002, 7036), False, 'from sfepy.discrete.state import State\n'), ((5046, 5061), 'sfepy.base.base.IndexedStruct', 'IndexedStruct', ([], {}), '()\n', (5059, 5061), False, 'from sfepy.base.base import IndexedStruct\n'), ((8668, 8714), 'numpy.allclose', 'nm.allclose', (['sol00', 'sol0'], {'atol': '(1e-12)', 'rtol': '(0.0)'}), '(sol00, sol0, atol=1e-12, rtol=0.0)\n', (8679, 8714), True, 'import numpy as nm\n'), ((8797, 8842), 'numpy.allclose', 'nm.allclose', (['sol0', 'sol1'], {'atol': '(1e-12)', 'rtol': '(0.0)'}), '(sol0, sol1, atol=1e-12, rtol=0.0)\n', (8808, 8842), True, 'import numpy as nm\n'), ((8924, 8973), 'numpy.allclose', 'nm.allclose', (['sol0', '(2 * sol2)'], {'atol': '(1e-12)', 'rtol': '(0.0)'}), '(sol0, 2 * sol2, atol=1e-12, rtol=0.0)\n', (8935, 8973), True, 'import numpy as nm\n'), ((5663, 5695), 'six.iteritems', 'six.iteritems', (['status.time_stats'], {}), '(status.time_stats)\n', (5676, 5695), False, 'import six\n'), ((7609, 7642), 'sfepy.solvers.Solver.any_from_conf', 'Solver.any_from_conf', (['solver_conf'], {}), '(solver_conf)\n', (7629, 7642), False, 'from sfepy.solvers import Solver\n'), ((6200, 6231), 'os.path.split', 'op.split', (['self.conf.output_name'], {}), '(self.conf.output_name)\n', (6208, 6231), True, 'import os.path as op\n')]
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import io
import numpy as np
import pytest
import megengine.functional as F
import megengine.module as M
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, Tensor
from megengine.core.tensor import megbrain_graph as G
from megengine.jit.tracing import trace
from megengine.quantization.quantize import quantize, quantize_qat
from megengine.utils.naming import AutoNaming
def _dump_and_load(func, symbolic, keep_opr_name=True):
AutoNaming.clear()
func = trace(func, symbolic=symbolic, capture_as_const=True)
x = Tensor(np.ones(shape=(2, 3)))
func(x).numpy()
file = io.BytesIO()
func.dump(
file,
optimize_for_inference=False,
arg_names=("x",),
keep_opr_name=keep_opr_name,
keep_var_name=2,
)
file.seek(0)
outputs = G.load_graph(file).output_vars_list
ops = cgtools.get_oprs_seq(outputs)
return ops
@pytest.mark.parametrize("symbolic", [False, True])
def test_auto_naming(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
def forward(self, x):
return x + x
m = Simple("simple")
op = _dump_and_load(m, symbolic)[-1]
assert op.name == "simple.ADD"
assert op.outputs[0].name == "simple.ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_user_named_tensor(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.k = Parameter(1.0, name="k")
def forward(self, x):
x = x + x
x.name = "o_x"
return x
m = Simple("simple")
op = _dump_and_load(m, symbolic)[-1]
assert op.name == "simple.ADD"
assert op.outputs[0].name == "o_x"
@pytest.mark.parametrize("symbolic", [False, True])
def test_user_named_param(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.k = Parameter(2.0, name="k")
def forward(self, x):
return self.k * x
m = Simple("simple")
op = _dump_and_load(m, symbolic)[-1]
assert op.inputs[0].name == "x"
assert op.inputs[1].name == "simple.k"
@pytest.mark.parametrize("symbolic", [False, True])
def test_without_module(symbolic):
def f(x):
return 2 * x
op = _dump_and_load(f, symbolic)[-1]
assert op.name == "MUL"
@pytest.mark.parametrize("symbolic", [False, True])
def test_ignore_top_module(symbolic):
class Simple(M.Module):
def forward(self, x):
return x + x
m = Simple()
op = _dump_and_load(m, symbolic)[-1]
assert op.name == "ADD"
assert op.outputs[0].name == "ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_with_submodule(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.linear = M.Linear(3, 3)
def forward(self, x):
x = self.linear(x)
return x
m = Simple("simple")
ops = _dump_and_load(m, symbolic)
assert ops[-1].name == "simple.linear.ADD"
assert ops[-2].name == "simple.linear.MatrixMul"
assert ops[-1].outputs[0].name == "simple.linear.ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_with_submodule_in_container(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.l0 = [M.Linear(3, 3) for _ in range(2)]
self.l1 = tuple(self.l0)
self.l2 = dict(zip(["l2-0", "l2-1"], self.l0))
def forward(self, x):
for i in range(2):
x = self.l0[i](x)
x = self.l1[i](x)
x = self.l2["l2-%d" % i](x)
return x
m = Simple("simple")
ops = _dump_and_load(m, symbolic)
assert ops[-1].outputs[0].name == "simple.l0.1.ADD[2]"
assert ops[-1].name == "simple.l0.1.ADD[2]"
assert ops[-2].name == "simple.l0.1.MatrixMul[2]"
assert ops[-3].name == "simple.l0.1.ADD[1]"
assert ops[-4].name == "simple.l0.1.MatrixMul[1]"
assert ops[-5].name == "simple.l0.1.ADD[0]"
assert ops[-6].name == "simple.l0.1.MatrixMul[0]"
@pytest.mark.parametrize("symbolic", [False, True])
def test_named_submodule(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.linear = M.Linear(3, 3, name="x")
def forward(self, x):
x = self.linear(x)
return x
m = Simple("simple")
ops = _dump_and_load(m, symbolic)
assert ops[-1].name == "simple.x.ADD"
assert ops[-2].name == "simple.x.MatrixMul"
assert ops[-1].outputs[0].name == "simple.x.ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_with_same_operators(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
def forward(self, x):
x = F.relu(x)
x = F.relu(x)
return x
m = Simple("simple")
ops = _dump_and_load(m, symbolic)
assert ops[-1].name == "simple.RELU[1]"
assert ops[-2].name == "simple.RELU[0]"
@pytest.mark.parametrize("symbolic", [False, True])
def test_not_keep_opr_name(symbolic):
def f(x):
return 2 * x
op = _dump_and_load(f, symbolic, False)[-1]
assert op.name == "MUL(x,const<2>[2])[4]"
@pytest.mark.parametrize("tensor_name, var_name", [("data", "data"), (None, "arg_0")])
def test_catch_input_name(tensor_name, var_name):
def f(x):
return 2 * x
func = trace(f, symbolic=True, capture_as_const=True)
x = Tensor(np.ones(shape=(2, 3)), name=tensor_name)
func(x).numpy()
file = io.BytesIO()
func.dump(file, optimize_for_inference=False, keep_opr_name=True, keep_var_name=2)
file.seek(0)
outputs = G.load_graph(file).output_vars_list
op = cgtools.get_oprs_seq(outputs)[-1]
assert op.inputs[0].name == var_name
@pytest.mark.parametrize("symbolic", [False, True])
def test_quantized_module_auto_naming(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__(name=name)
self.quant = M.QuantStub()
self.linear = M.Linear(3, 3, bias=True)
self.dequant = M.DequantStub()
def forward(self, x):
out = self.quant(x)
out = self.linear(out)
out = self.dequant(out)
return out
m = Simple("simple")
quantize_qat(m)
quantize(m)
m.eval()
ops = _dump_and_load(m, symbolic)
ops_name = (
"x",
"simple.quant.TypeCvt",
"simple.linear.MatrixMul",
"simple.linear.ADD",
"simple.linear.TypeCvt",
"simple.dequant.TypeCvt",
)
for op, name in zip(ops, ops_name):
assert op.name == name
@pytest.mark.parametrize("symbolic", [False, True])
def test_quantized_module_user_naming(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__(name=name)
self.quant = M.QuantStub()
self.linear = M.Linear(3, 3, bias=True, name="user-linear")
self.dequant = M.DequantStub()
def forward(self, x):
out = self.quant(x)
out = self.linear(out)
out = self.dequant(out)
return out
m = Simple("simple")
quantize_qat(m)
quantize(m)
m.eval()
ops = _dump_and_load(m, symbolic)
ops_name = (
"x",
"simple.quant.TypeCvt",
"simple.user-linear.MatrixMul",
"simple.user-linear.ADD",
"simple.user-linear.TypeCvt",
"simple.dequant.TypeCvt",
)
for op, name in zip(ops, ops_name):
assert op.name == name
@pytest.mark.parametrize("symbolic", [False, True])
def test_quantized_module_user_naming_param(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__(name=name)
self.quant = M.QuantStub()
self.linear = M.Linear(3, 3, bias=True)
self.dequant = M.DequantStub()
self.linear.weight.name = "user-weight"
self.linear.bias.name = "user-bias"
def forward(self, x):
out = self.quant(x)
out = self.linear(out)
out = self.dequant(out)
return out
m = Simple("simple")
quantize_qat(m)
quantize(m)
m.eval()
ops = _dump_and_load(m, symbolic)
(matrix_mul_op,) = [op for op in ops if op.name == "simple.linear.MatrixMul"]
for var in matrix_mul_op.inputs:
assert var.name in ("simple.quant.TypeCvt", "simple.linear.user-weight")
# WONTFIX: bias' name does not meet expectations because of astype operator after quantization
|
[
"megengine.module.Linear",
"megengine.quantization.quantize.quantize_qat",
"megengine.module.QuantStub",
"megengine.Parameter",
"megengine.functional.relu",
"megengine.module.DequantStub",
"megengine.jit.tracing.trace",
"megengine.core.tensor.megbrain_graph.load_graph",
"megengine.utils.naming.AutoNaming.clear",
"megengine.quantization.quantize.quantize",
"megengine.utils.comp_graph_tools.get_oprs_seq"
] |
[((1296, 1346), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""symbolic"""', '[False, True]'], {}), "('symbolic', [False, True])\n", (1319, 1346), False, 'import pytest\n'), ((1708, 1758), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""symbolic"""', '[False, True]'], {}), "('symbolic', [False, True])\n", (1731, 1758), False, 'import pytest\n'), ((2211, 2261), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""symbolic"""', '[False, True]'], {}), "('symbolic', [False, True])\n", (2234, 2261), False, 'import pytest\n'), ((2678, 2728), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""symbolic"""', '[False, True]'], {}), "('symbolic', [False, True])\n", (2701, 2728), False, 'import pytest\n'), ((2872, 2922), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""symbolic"""', '[False, True]'], {}), "('symbolic', [False, True])\n", (2895, 2922), False, 'import pytest\n'), ((3173, 3223), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""symbolic"""', '[False, True]'], {}), "('symbolic', [False, True])\n", (3196, 3223), False, 'import pytest\n'), ((3731, 3781), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""symbolic"""', '[False, True]'], {}), "('symbolic', [False, True])\n", (3754, 3781), False, 'import pytest\n'), ((4733, 4783), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""symbolic"""', '[False, True]'], {}), "('symbolic', [False, True])\n", (4756, 4783), False, 'import pytest\n'), ((5287, 5337), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""symbolic"""', '[False, True]'], {}), "('symbolic', [False, True])\n", (5310, 5337), False, 'import pytest\n'), ((5760, 5810), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""symbolic"""', '[False, True]'], {}), "('symbolic', [False, True])\n", (5783, 5810), False, 'import pytest\n'), ((5982, 6071), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""tensor_name, var_name"""', "[('data', 'data'), (None, 'arg_0')]"], {}), "('tensor_name, var_name', [('data', 'data'), (None,\n 'arg_0')])\n", (6005, 6071), False, 'import pytest\n'), ((6553, 6603), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""symbolic"""', '[False, True]'], {}), "('symbolic', [False, True])\n", (6576, 6603), False, 'import pytest\n'), ((7433, 7483), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""symbolic"""', '[False, True]'], {}), "('symbolic', [False, True])\n", (7456, 7483), False, 'import pytest\n'), ((8348, 8398), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""symbolic"""', '[False, True]'], {}), "('symbolic', [False, True])\n", (8371, 8398), False, 'import pytest\n'), ((844, 862), 'megengine.utils.naming.AutoNaming.clear', 'AutoNaming.clear', ([], {}), '()\n', (860, 862), False, 'from megengine.utils.naming import AutoNaming\n'), ((874, 927), 'megengine.jit.tracing.trace', 'trace', (['func'], {'symbolic': 'symbolic', 'capture_as_const': '(True)'}), '(func, symbolic=symbolic, capture_as_const=True)\n', (879, 927), False, 'from megengine.jit.tracing import trace\n'), ((997, 1009), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (1007, 1009), False, 'import io\n'), ((1248, 1277), 'megengine.utils.comp_graph_tools.get_oprs_seq', 'cgtools.get_oprs_seq', (['outputs'], {}), '(outputs)\n', (1268, 1277), True, 'import megengine.utils.comp_graph_tools as cgtools\n'), ((6165, 6211), 'megengine.jit.tracing.trace', 'trace', (['f'], {'symbolic': '(True)', 'capture_as_const': '(True)'}), '(f, symbolic=True, capture_as_const=True)\n', (6170, 
6211), False, 'from megengine.jit.tracing import trace\n'), ((6299, 6311), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (6309, 6311), False, 'import io\n'), ((7076, 7091), 'megengine.quantization.quantize.quantize_qat', 'quantize_qat', (['m'], {}), '(m)\n', (7088, 7091), False, 'from megengine.quantization.quantize import quantize, quantize_qat\n'), ((7096, 7107), 'megengine.quantization.quantize.quantize', 'quantize', (['m'], {}), '(m)\n', (7104, 7107), False, 'from megengine.quantization.quantize import quantize, quantize_qat\n'), ((7976, 7991), 'megengine.quantization.quantize.quantize_qat', 'quantize_qat', (['m'], {}), '(m)\n', (7988, 7991), False, 'from megengine.quantization.quantize import quantize, quantize_qat\n'), ((7996, 8007), 'megengine.quantization.quantize.quantize', 'quantize', (['m'], {}), '(m)\n', (8004, 8007), False, 'from megengine.quantization.quantize import quantize, quantize_qat\n'), ((8978, 8993), 'megengine.quantization.quantize.quantize_qat', 'quantize_qat', (['m'], {}), '(m)\n', (8990, 8993), False, 'from megengine.quantization.quantize import quantize, quantize_qat\n'), ((8998, 9009), 'megengine.quantization.quantize.quantize', 'quantize', (['m'], {}), '(m)\n', (9006, 9009), False, 'from megengine.quantization.quantize import quantize, quantize_qat\n'), ((943, 964), 'numpy.ones', 'np.ones', ([], {'shape': '(2, 3)'}), '(shape=(2, 3))\n', (950, 964), True, 'import numpy as np\n'), ((1202, 1220), 'megengine.core.tensor.megbrain_graph.load_graph', 'G.load_graph', (['file'], {}), '(file)\n', (1214, 1220), True, 'from megengine.core.tensor import megbrain_graph as G\n'), ((6227, 6248), 'numpy.ones', 'np.ones', ([], {'shape': '(2, 3)'}), '(shape=(2, 3))\n', (6234, 6248), True, 'import numpy as np\n'), ((6430, 6448), 'megengine.core.tensor.megbrain_graph.load_graph', 'G.load_graph', (['file'], {}), '(file)\n', (6442, 6448), True, 'from megengine.core.tensor import megbrain_graph as G\n'), ((6475, 6504), 'megengine.utils.comp_graph_tools.get_oprs_seq', 'cgtools.get_oprs_seq', (['outputs'], {}), '(outputs)\n', (6495, 6504), True, 'import megengine.utils.comp_graph_tools as cgtools\n'), ((1940, 1964), 'megengine.Parameter', 'Parameter', (['(1.0)'], {'name': '"""k"""'}), "(1.0, name='k')\n", (1949, 1964), False, 'from megengine import Parameter, Tensor\n'), ((2442, 2466), 'megengine.Parameter', 'Parameter', (['(2.0)'], {'name': '"""k"""'}), "(2.0, name='k')\n", (2451, 2466), False, 'from megengine import Parameter, Tensor\n'), ((3407, 3421), 'megengine.module.Linear', 'M.Linear', (['(3)', '(3)'], {}), '(3, 3)\n', (3415, 3421), True, 'import megengine.module as M\n'), ((4968, 4992), 'megengine.module.Linear', 'M.Linear', (['(3)', '(3)'], {'name': '"""x"""'}), "(3, 3, name='x')\n", (4976, 4992), True, 'import megengine.module as M\n'), ((5547, 5556), 'megengine.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (5553, 5556), True, 'import megengine.functional as F\n'), ((5573, 5582), 'megengine.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (5579, 5582), True, 'import megengine.functional as F\n'), ((6780, 6793), 'megengine.module.QuantStub', 'M.QuantStub', ([], {}), '()\n', (6791, 6793), True, 'import megengine.module as M\n'), ((6820, 6845), 'megengine.module.Linear', 'M.Linear', (['(3)', '(3)'], {'bias': '(True)'}), '(3, 3, bias=True)\n', (6828, 6845), True, 'import megengine.module as M\n'), ((6873, 6888), 'megengine.module.DequantStub', 'M.DequantStub', ([], {}), '()\n', (6886, 6888), True, 'import megengine.module as M\n'), ((7660, 7673), 
'megengine.module.QuantStub', 'M.QuantStub', ([], {}), '()\n', (7671, 7673), True, 'import megengine.module as M\n'), ((7700, 7745), 'megengine.module.Linear', 'M.Linear', (['(3)', '(3)'], {'bias': '(True)', 'name': '"""user-linear"""'}), "(3, 3, bias=True, name='user-linear')\n", (7708, 7745), True, 'import megengine.module as M\n'), ((7773, 7788), 'megengine.module.DequantStub', 'M.DequantStub', ([], {}), '()\n', (7786, 7788), True, 'import megengine.module as M\n'), ((8581, 8594), 'megengine.module.QuantStub', 'M.QuantStub', ([], {}), '()\n', (8592, 8594), True, 'import megengine.module as M\n'), ((8621, 8646), 'megengine.module.Linear', 'M.Linear', (['(3)', '(3)'], {'bias': '(True)'}), '(3, 3, bias=True)\n', (8629, 8646), True, 'import megengine.module as M\n'), ((8674, 8689), 'megengine.module.DequantStub', 'M.DequantStub', ([], {}), '()\n', (8687, 8689), True, 'import megengine.module as M\n'), ((3975, 3989), 'megengine.module.Linear', 'M.Linear', (['(3)', '(3)'], {}), '(3, 3)\n', (3983, 3989), True, 'import megengine.module as M\n')]
|
from typing import Optional
from sqlmodel import Field, SQLModel
from pydantic import root_validator
from datetime import datetime
# {
# "user_id": 1,
# "start_time": "2022-01-19T08:30:00.000Z",
# "end_time": "2022-01-19T09:30:00.000Z",
# "client_id": 1,
# "epic_id": 1,
# "count_hours": 0,
# "count_days": 0,
# "month": 0,
# "year": 0
# }
class TimeLog(SQLModel, table=True):
"""Create an SQLModel for timelogs"""
id: Optional[int] = Field(default=None, primary_key=True)
user_id: int = Field(foreign_key="app_db.appuser.id")
start_time: datetime
end_time: datetime
epic_id: int = Field(foreign_key="app_db.epic.id")
count_hours: float
count_days: float
month: int
year: int
epic_area_id: int = Field(foreign_key="app_db.epicarea.id")
created_at: datetime
updated_at: datetime
is_locked: bool = False
__table_args__ = {"schema": "app_db"}
@root_validator(pre=True)
def check_time_delta(cls, values):
assert (
values["start_time"] < values["end_time"]
), "start_time must be smaller then end_time"
return values
# @validator("count_hours", always=True)
def daily_hours(cls, hours_input):
assert hours_input < 12, "user worked over 12 hours"
return hours_input
# @validator("year", always=True)
def valid_year(cls, year_input):
assert year_input in range(
2021, datetime.now().year + 1
), "year value not in range [2021, current year]"
return year_input
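# Illustrative usage sketch (not part of the original module; field values are
# hypothetical). SQLModel models declared with table=True skip pydantic
# validation in __init__, so the pre root_validator above fires when
# validation is invoked explicitly:
#
# from datetime import datetime
# TimeLog.validate({
#     "user_id": 1, "epic_id": 1, "epic_area_id": 1,
#     "start_time": datetime(2022, 1, 19, 9, 30),
#     "end_time": datetime(2022, 1, 19, 8, 30),  # earlier than start_time
# })  # raises a pydantic ValidationError: start_time must be earlier than end_time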
|
[
"sqlmodel.Field"
] |
[((473, 510), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (478, 510), False, 'from sqlmodel import Field, SQLModel, Field\n'), ((530, 568), 'sqlmodel.Field', 'Field', ([], {'foreign_key': '"""app_db.appuser.id"""'}), "(foreign_key='app_db.appuser.id')\n", (535, 568), False, 'from sqlmodel import Field, SQLModel, Field\n'), ((636, 671), 'sqlmodel.Field', 'Field', ([], {'foreign_key': '"""app_db.epic.id"""'}), "(foreign_key='app_db.epic.id')\n", (641, 671), False, 'from sqlmodel import Field, SQLModel, Field\n'), ((770, 809), 'sqlmodel.Field', 'Field', ([], {'foreign_key': '"""app_db.epicarea.id"""'}), "(foreign_key='app_db.epicarea.id')\n", (775, 809), False, 'from sqlmodel import Field, SQLModel, Field\n'), ((937, 961), 'pydantic.root_validator', 'root_validator', ([], {'pre': '(True)'}), '(pre=True)\n', (951, 961), False, 'from pydantic import root_validator\n'), ((1451, 1465), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1463, 1465), False, 'from datetime import datetime\n')]
|
from create_db import Student
from sqlmodel import Session, create_engine, select
sqlite_url = "sqlite:///school.db"
engine = create_engine(sqlite_url, echo=True)
# Read database
with Session(engine) as session:
statement = select(Student)
results = session.exec(statement)
for student in results:
print(student)
print("-" * 100)
# # Read one row
# with Session(engine) as session:
# statement = select(Student).where(Student.first_name=="Misal")
# results = session.exec(statement)
# for student in results:
# print(student)
|
[
"sqlmodel.select",
"sqlmodel.Session",
"sqlmodel.create_engine"
] |
[((127, 163), 'sqlmodel.create_engine', 'create_engine', (['sqlite_url'], {'echo': '(True)'}), '(sqlite_url, echo=True)\n', (140, 163), False, 'from sqlmodel import Session, create_engine, select\n'), ((186, 201), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (193, 201), False, 'from sqlmodel import Session, create_engine, select\n'), ((230, 245), 'sqlmodel.select', 'select', (['Student'], {}), '(Student)\n', (236, 245), False, 'from sqlmodel import Session, create_engine, select\n')]
|
# -*- coding: utf-8 -*-
# MIT License
#
# Copyright (c) 2021 coolbeam
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine.module as nn
import megengine.functional as F
from common.utils import flow_warp, upsample2d_flow_as
def conv(in_planes, out_planes, kernel_size=3, stride=1, dilation=1, isReLU=True, if_IN=False, IN_affine=False, if_BN=False):
if isReLU:
if if_IN:
return nn.Sequential(
nn.Conv2d(in_planes,
out_planes,
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=((kernel_size - 1) * dilation) // 2,
bias=True), nn.LeakyReLU(0.1), nn.InstanceNorm(out_planes, affine=IN_affine))
elif if_BN:
return nn.Sequential(
nn.Conv2d(in_planes,
out_planes,
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=((kernel_size - 1) * dilation) // 2,
bias=True), nn.LeakyReLU(0.1), nn.BatchNorm2d(out_planes, affine=IN_affine))
else:
return nn.Sequential(
nn.Conv2d(in_planes,
out_planes,
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=((kernel_size - 1) * dilation) // 2,
bias=True), nn.LeakyReLU(0.1))
else:
if if_IN:
return nn.Sequential(
nn.Conv2d(in_planes,
out_planes,
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=((kernel_size - 1) * dilation) // 2,
bias=True), nn.InstanceNorm(out_planes, affine=IN_affine))
elif if_BN:
return nn.Sequential(
nn.Conv2d(in_planes,
out_planes,
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=((kernel_size - 1) * dilation) // 2,
bias=True), nn.BatchNorm2d(out_planes, affine=IN_affine))
else:
return nn.Sequential(
nn.Conv2d(in_planes,
out_planes,
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=((kernel_size - 1) * dilation) // 2,
bias=True))
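# Illustrative sketch (not in the original file): conv() returns an
# nn.Sequential; e.g. the strided variants used by NeuralUpsampler below.
# block = conv(16, 32, kernel_size=3, stride=2)   # Conv2d + LeakyReLU
# block_in = conv(16, 32, if_IN=True)             # Conv2d + LeakyReLU + InstanceNorm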
class FlowEstimatorDense_temp(nn.Module):
def __init__(self, ch_in=64, f_channels=(128, 128, 96, 64, 32, 32), ch_out=2):
super(FlowEstimatorDense_temp, self).__init__()
N = 0
ind = 0
N += ch_in
self.conv1 = conv(N, f_channels[ind])
N += f_channels[ind]
ind += 1
self.conv2 = conv(N, f_channels[ind])
N += f_channels[ind]
ind += 1
self.conv3 = conv(N, f_channels[ind])
N += f_channels[ind]
ind += 1
self.conv4 = conv(N, f_channels[ind])
N += f_channels[ind]
ind += 1
self.conv5 = conv(N, f_channels[ind])
N += f_channels[ind]
self.num_feature_channel = N
ind += 1
self.conv_last = conv(N, ch_out, isReLU=False)
def forward(self, x):
x1 = F.concat([self.conv1(x), x], axis=1)
x2 = F.concat([self.conv2(x1), x1], axis=1)
x3 = F.concat([self.conv3(x2), x2], axis=1)
x4 = F.concat([self.conv4(x3), x3], axis=1)
x5 = F.concat([self.conv5(x4), x4], axis=1)
x_out = self.conv_last(x5)
return x5, x_out
class FlowMaskEstimator(FlowEstimatorDense_temp):
def __init__(self, ch_in, f_channels, ch_out):
super(FlowMaskEstimator, self).__init__(ch_in=ch_in, f_channels=f_channels, ch_out=ch_out)
class NeuralUpsampler(nn.Module):
def __init__(self):
super(NeuralUpsampler, self).__init__()
f_channels_es = (32, 32, 32, 16, 8)
in_C = 64
self.dense_estimator_mask = FlowEstimatorDense_temp(in_C, f_channels=f_channels_es, ch_out=3)
self.upsample_output_conv = nn.Sequential(
conv(3, 16, kernel_size=3, stride=1, dilation=1),
conv(16, 16, stride=2),
conv(16, 32, kernel_size=3, stride=1, dilation=1),
conv(32, 32, stride=2),
)
def forward(self, flow_init, feature_1, feature_2, output_level_flow=None):
n, c, h, w = flow_init.shape
n_f, c_f, h_f, w_f = feature_1.shape
if h != h_f or w != w_f:
flow_init = F.vision.interpolate(flow_init, scale_factor=2., mode='bilinear', align_corners=True) * 2
feature_2_warp = flow_warp(feature_2, flow_init)
input_feature = F.concat((feature_1, feature_2_warp), axis=1)
_, x_out = self.dense_estimator_mask(input_feature)
inter_flow = x_out[:, :2, :, :]
inter_mask = x_out[:, 2, :, :]
inter_mask = F.expand_dims(inter_mask, 1)
inter_mask = F.sigmoid(inter_mask)
if output_level_flow is not None:
inter_flow = upsample2d_flow_as(inter_flow, output_level_flow, mode="bilinear", if_rate=True)
inter_mask = upsample2d_flow_as(inter_mask, output_level_flow, mode="bilinear")
flow_init = output_level_flow
# blend the refined flow (initial flow warped by the predicted inter-flow)
# with the initial flow, weighted by the learned mask
flow_up = flow_warp(flow_init, inter_flow) * (1 - inter_mask) + flow_init * inter_mask
return flow_up
def output_conv(self, x):
return self.upsample_output_conv(x)
|
[
"megengine.functional.sigmoid",
"megengine.functional.expand_dims",
"megengine.module.BatchNorm2d",
"megengine.functional.vision.interpolate",
"megengine.module.InstanceNorm",
"megengine.functional.concat",
"megengine.module.Conv2d",
"megengine.module.LeakyReLU"
] |
[((5825, 5856), 'common.utils.flow_warp', 'flow_warp', (['feature_2', 'flow_init'], {}), '(feature_2, flow_init)\n', (5834, 5856), False, 'from common.utils import flow_warp, upsample2d_flow_as\n'), ((5881, 5926), 'megengine.functional.concat', 'F.concat', (['(feature_1, feature_2_warp)'], {'axis': '(1)'}), '((feature_1, feature_2_warp), axis=1)\n', (5889, 5926), True, 'import megengine.functional as F\n'), ((6088, 6116), 'megengine.functional.expand_dims', 'F.expand_dims', (['inter_mask', '(1)'], {}), '(inter_mask, 1)\n', (6101, 6116), True, 'import megengine.functional as F\n'), ((6138, 6159), 'megengine.functional.sigmoid', 'F.sigmoid', (['inter_mask'], {}), '(inter_mask)\n', (6147, 6159), True, 'import megengine.functional as F\n'), ((6228, 6313), 'common.utils.upsample2d_flow_as', 'upsample2d_flow_as', (['inter_flow', 'output_level_flow'], {'mode': '"""bilinear"""', 'if_rate': '(True)'}), "(inter_flow, output_level_flow, mode='bilinear', if_rate=True\n )\n", (6246, 6313), False, 'from common.utils import flow_warp, upsample2d_flow_as\n'), ((6334, 6400), 'common.utils.upsample2d_flow_as', 'upsample2d_flow_as', (['inter_mask', 'output_level_flow'], {'mode': '"""bilinear"""'}), "(inter_mask, output_level_flow, mode='bilinear')\n", (6352, 6400), False, 'from common.utils import flow_warp, upsample2d_flow_as\n'), ((1202, 1351), 'megengine.module.Conv2d', 'nn.Conv2d', (['in_planes', 'out_planes'], {'kernel_size': 'kernel_size', 'stride': 'stride', 'dilation': 'dilation', 'padding': '((kernel_size - 1) * dilation // 2)', 'bias': '(True)'}), '(in_planes, out_planes, kernel_size=kernel_size, stride=stride,\n dilation=dilation, padding=(kernel_size - 1) * dilation // 2, bias=True)\n', (1211, 1351), True, 'import megengine.module as nn\n'), ((1507, 1524), 'megengine.module.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {}), '(0.1)\n', (1519, 1524), True, 'import megengine.module as nn\n'), ((1526, 1571), 'megengine.module.InstanceNorm', 'nn.InstanceNorm', (['out_planes'], {'affine': 'IN_affine'}), '(out_planes, affine=IN_affine)\n', (1541, 1571), True, 'import megengine.module as nn\n'), ((2479, 2628), 'megengine.module.Conv2d', 'nn.Conv2d', (['in_planes', 'out_planes'], {'kernel_size': 'kernel_size', 'stride': 'stride', 'dilation': 'dilation', 'padding': '((kernel_size - 1) * dilation // 2)', 'bias': '(True)'}), '(in_planes, out_planes, kernel_size=kernel_size, stride=stride,\n dilation=dilation, padding=(kernel_size - 1) * dilation // 2, bias=True)\n', (2488, 2628), True, 'import megengine.module as nn\n'), ((2784, 2829), 'megengine.module.InstanceNorm', 'nn.InstanceNorm', (['out_planes'], {'affine': 'IN_affine'}), '(out_planes, affine=IN_affine)\n', (2799, 2829), True, 'import megengine.module as nn\n'), ((5710, 5800), 'megengine.functional.vision.interpolate', 'F.vision.interpolate', (['flow_init'], {'scale_factor': '(2.0)', 'mode': '"""bilinear"""', 'align_corners': '(True)'}), "(flow_init, scale_factor=2.0, mode='bilinear',\n align_corners=True)\n", (5730, 5800), True, 'import megengine.functional as F\n'), ((6462, 6494), 'common.utils.flow_warp', 'flow_warp', (['flow_init', 'inter_flow'], {}), '(flow_init, inter_flow)\n', (6471, 6494), False, 'from common.utils import flow_warp, upsample2d_flow_as\n'), ((1643, 1792), 'megengine.module.Conv2d', 'nn.Conv2d', (['in_planes', 'out_planes'], {'kernel_size': 'kernel_size', 'stride': 'stride', 'dilation': 'dilation', 'padding': '((kernel_size - 1) * dilation // 2)', 'bias': '(True)'}), '(in_planes, out_planes, kernel_size=kernel_size, stride=stride,\n 
dilation=dilation, padding=(kernel_size - 1) * dilation // 2, bias=True)\n', (1652, 1792), True, 'import megengine.module as nn\n'), ((1948, 1965), 'megengine.module.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {}), '(0.1)\n', (1960, 1965), True, 'import megengine.module as nn\n'), ((1967, 2011), 'megengine.module.BatchNorm2d', 'nn.BatchNorm2d', (['out_planes'], {'affine': 'IN_affine'}), '(out_planes, affine=IN_affine)\n', (1981, 2011), True, 'import megengine.module as nn\n'), ((2077, 2226), 'megengine.module.Conv2d', 'nn.Conv2d', (['in_planes', 'out_planes'], {'kernel_size': 'kernel_size', 'stride': 'stride', 'dilation': 'dilation', 'padding': '((kernel_size - 1) * dilation // 2)', 'bias': '(True)'}), '(in_planes, out_planes, kernel_size=kernel_size, stride=stride,\n dilation=dilation, padding=(kernel_size - 1) * dilation // 2, bias=True)\n', (2086, 2226), True, 'import megengine.module as nn\n'), ((2382, 2399), 'megengine.module.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {}), '(0.1)\n', (2394, 2399), True, 'import megengine.module as nn\n'), ((2901, 3050), 'megengine.module.Conv2d', 'nn.Conv2d', (['in_planes', 'out_planes'], {'kernel_size': 'kernel_size', 'stride': 'stride', 'dilation': 'dilation', 'padding': '((kernel_size - 1) * dilation // 2)', 'bias': '(True)'}), '(in_planes, out_planes, kernel_size=kernel_size, stride=stride,\n dilation=dilation, padding=(kernel_size - 1) * dilation // 2, bias=True)\n', (2910, 3050), True, 'import megengine.module as nn\n'), ((3206, 3250), 'megengine.module.BatchNorm2d', 'nn.BatchNorm2d', (['out_planes'], {'affine': 'IN_affine'}), '(out_planes, affine=IN_affine)\n', (3220, 3250), True, 'import megengine.module as nn\n'), ((3316, 3465), 'megengine.module.Conv2d', 'nn.Conv2d', (['in_planes', 'out_planes'], {'kernel_size': 'kernel_size', 'stride': 'stride', 'dilation': 'dilation', 'padding': '((kernel_size - 1) * dilation // 2)', 'bias': '(True)'}), '(in_planes, out_planes, kernel_size=kernel_size, stride=stride,\n dilation=dilation, padding=(kernel_size - 1) * dilation // 2, bias=True)\n', (3325, 3465), True, 'import megengine.module as nn\n')]
|
from datetime import datetime, date, time
from typing import Optional, List
from fastapi import APIRouter, Depends
from sqlmodel import Field, SQLModel
from ...db import get_session
from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession
router = APIRouter()
class HistoryAppointmentOr(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
history_id: int
appointment_or_id: int
state_from: str
state_to: str
created_at: datetime
updated_at: datetime
created_by: int
updated_by: Optional[int] = None
class HistoryAppointmentOrMap(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
appointment_or_id: int
procedure_id: int
created_at: datetime
updated_at: datetime
created_by: int
updated_by: Optional[int] = None
class AppointmentOr(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
state: str
date_procedure: date
date_admission: date
date_confirmation: date
time_start: time
time_end: time
disease: str
detail: str
is_special_tool_required: bool
is_icu_reserved: bool
is_date_recorded: bool
tool_note: str
icu_note: str
created_at: datetime
updated_at: datetime
created_by: int
updated_by: Optional[int] = None
class AppointmentOrReschedule(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
appointment_or_id: int
date_from: date
date_to: date
created_at: datetime
updated_at: datetime
created_by: int
updated_by: Optional[int] = None
class AppointmentOrDoctorMap(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
appointment_or_id: int
doctor_id: int
created_at: datetime
updated_at: datetime
created_by: int
updated_by: Optional[int] = None
@router.post("/history_appointment_or", response_model=HistoryAppointmentOr)
async def create_appointment_or(history_appointment_or: HistoryAppointmentOr, session: AsyncSession = Depends(get_session)):
session.add(history_appointment_or)
await session.commit()
await session.refresh(history_appointment_or)
return history_appointment_or
@router.post("/appointment_or", response_model=AppointmentOr)
async def create_history_appointment_or(appointment_or: AppointmentOr, session: AsyncSession = Depends(get_session)):
session.add(appointment_or)
await session.commit()
await session.refresh(appointment_or)
return appointment_or
@router.get("/history_appointment_or/{id}", response_model=HistoryAppointmentOr)
async def get_history_appointment_or(id: int, session: AsyncSession = Depends(get_session)):
history_appointments_or = await session.execute(select(HistoryAppointmentOr).where(HistoryAppointmentOr.id == id))
history_appointment_or = history_appointments_or.scalars().first()
return history_appointment_or
@router.get("/history_appointment_or/user/{user_id}", response_model=HistoryAppointmentOr)
async def get_history_appointment_or_user(user_id: int, session: AsyncSession = Depends(get_session)):
history_appointments_or = await session.execute(select(HistoryAppointmentOr).where(HistoryAppointmentOr.created_by == user_id))
history_appointment_or = history_appointments_or.scalars().first()
return history_appointment_or
@router.put("/history_appointment_or/{id}", response_model=HistoryAppointmentOr)
async def update_history_appointment_or(id: int, session: AsyncSession = Depends(get_session)):
return None
@router.delete("/history_appointment_or/{id}")
async def delete_history_appointment_or(session: AsyncSession = Depends(get_session)):
return None
@router.delete("/appointment_or/{id}")
async def delete_appointment_or(session: AsyncSession = Depends(get_session)):
return None
# @router.get("/history_appointment_or/history/{patient_id}", response_model=HistoryTravelReimburse)
# async def get_history_travel_reimburse_patient(patient_id: int, session: AsyncSession = Depends(get_session)):
# history_id = await session.execute(select(HistoryTravelReimburse.id).where(HistoryTravelReimburse.patient_id == patient_id))
# history_travel_reimburses = await session.execute(select(HistoryTravelReimburse).where(HistoryTravelReimburse.history_id == history_id))
# history_travel_reimburse = history_travel_reimburses.scalars().first()
# return history_travel_reimburse
|
[
"sqlmodel.Field"
] |
[((274, 285), 'fastapi.APIRouter', 'APIRouter', ([], {}), '()\n', (283, 285), False, 'from fastapi import APIRouter, Depends\n'), ((362, 399), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (367, 399), False, 'from sqlmodel import Field, SQLModel\n'), ((671, 708), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (676, 708), False, 'from sqlmodel import Field, SQLModel\n'), ((934, 971), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (939, 971), False, 'from sqlmodel import Field, SQLModel\n'), ((1449, 1486), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (1454, 1486), False, 'from sqlmodel import Field, SQLModel\n'), ((1737, 1774), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (1742, 1774), False, 'from sqlmodel import Field, SQLModel\n'), ((2109, 2129), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (2116, 2129), False, 'from fastapi import APIRouter, Depends\n'), ((2442, 2462), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (2449, 2462), False, 'from fastapi import APIRouter, Depends\n'), ((2745, 2765), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (2752, 2765), False, 'from fastapi import APIRouter, Depends\n'), ((3165, 3185), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (3172, 3185), False, 'from fastapi import APIRouter, Depends\n'), ((3581, 3601), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (3588, 3601), False, 'from fastapi import APIRouter, Depends\n'), ((3733, 3753), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (3740, 3753), False, 'from fastapi import APIRouter, Depends\n'), ((3869, 3889), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (3876, 3889), False, 'from fastapi import APIRouter, Depends\n'), ((2820, 2848), 'sqlalchemy.select', 'select', (['HistoryAppointmentOr'], {}), '(HistoryAppointmentOr)\n', (2826, 2848), False, 'from sqlalchemy import select\n'), ((3240, 3268), 'sqlalchemy.select', 'select', (['HistoryAppointmentOr'], {}), '(HistoryAppointmentOr)\n', (3246, 3268), False, 'from sqlalchemy import select\n')]
|
from typing import Optional
from pydantic import condecimal
from sqlalchemy.orm import declared_attr
from sqlmodel import Field, SQLModel
class Reward(SQLModel, table=True):
tx_hash: Optional[str] = Field(primary_key=True)
address: Optional[str] = Field(..., index=True)
block: Optional[int]
timestamp: Optional[int]
# Come from Tx logs
value: condecimal(max_digits=10, decimal_places=3) = None
iscore: condecimal(max_digits=13, decimal_places=3) = None
@declared_attr
def __tablename__(cls) -> str: # noqa: N805
return "rewards"
|
[
"sqlmodel.Field"
] |
[((206, 229), 'sqlmodel.Field', 'Field', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (211, 229), False, 'from sqlmodel import Field, SQLModel\n'), ((259, 281), 'sqlmodel.Field', 'Field', (['...'], {'index': '(True)'}), '(..., index=True)\n', (264, 281), False, 'from sqlmodel import Field, SQLModel\n'), ((372, 415), 'pydantic.condecimal', 'condecimal', ([], {'max_digits': '(10)', 'decimal_places': '(3)'}), '(max_digits=10, decimal_places=3)\n', (382, 415), False, 'from pydantic import condecimal\n'), ((435, 478), 'pydantic.condecimal', 'condecimal', ([], {'max_digits': '(13)', 'decimal_places': '(3)'}), '(max_digits=13, decimal_places=3)\n', (445, 478), False, 'from pydantic import condecimal\n')]
|
from typing import Optional
from sqlmodel import Field, SQLModel
class UserBase(SQLModel):
name: str
class User(UserBase, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
class UserCreate(UserBase):
pass
class UserRead(UserBase):
id: int
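# Illustrative sketch (assumed usage, not part of the original file): the
# Base/Create/Read split is the usual SQLModel pattern; UserCreate is the
# request body (no id), UserRead the response shape (id required).
# from sqlmodel import Session, create_engine
# engine = create_engine("sqlite://")
# SQLModel.metadata.create_all(engine)
# with Session(engine) as session:
#     user = User.from_orm(UserCreate(name="alice"))
#     session.add(user)
#     session.commit()
#     session.refresh(user)
#     print(UserRead.from_orm(user))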
|
[
"sqlmodel.Field"
] |
[((168, 205), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (173, 205), False, 'from sqlmodel import Field, SQLModel\n')]
|
import asyncio
import pytest
from sqlalchemy.ext.asyncio import create_async_engine
from sqlalchemy.ext.asyncio.engine import AsyncConnection
from sqlmodel.ext.asyncio.session import AsyncSession
from basesqlmodel import Base
engine = create_async_engine("sqlite+aiosqlite:///:memory:")
@pytest.fixture()
async def connection() -> AsyncConnection:
async with engine.begin() as conn:
yield conn
await conn.rollback()
@pytest.fixture()
async def session(connection: AsyncConnection):
async with AsyncSession(connection, expire_on_commit=False) as _session:
yield _session
@pytest.fixture(scope="session", autouse=True)
def event_loop():
"""Reference: https://github.com/pytest-dev/pytest-asyncio/issues/38#issuecomment-264418154"""
loop = asyncio.get_event_loop_policy().new_event_loop()
yield loop
loop.close()
@pytest.fixture(scope="session", autouse=True)
async def init_database():
import tests.utils # noqa
async with engine.begin() as conn:
await conn.run_sync(Base.metadata.create_all)
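# Illustrative sketch (assumed usage, not part of the original file): a test
# consuming the session fixture above; pytest-asyncio collects the coroutine.
#
# @pytest.mark.asyncio
# async def test_session_is_usable(session: AsyncSession):
#     assert session.is_active  # runs inside the connection rolled back above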
|
[
"sqlmodel.ext.asyncio.session.AsyncSession"
] |
[((238, 289), 'sqlalchemy.ext.asyncio.create_async_engine', 'create_async_engine', (['"""sqlite+aiosqlite:///:memory:"""'], {}), "('sqlite+aiosqlite:///:memory:')\n", (257, 289), False, 'from sqlalchemy.ext.asyncio import create_async_engine\n'), ((293, 309), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (307, 309), False, 'import pytest\n'), ((444, 460), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (458, 460), False, 'import pytest\n'), ((612, 657), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""', 'autouse': '(True)'}), "(scope='session', autouse=True)\n", (626, 657), False, 'import pytest\n'), ((870, 915), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""', 'autouse': '(True)'}), "(scope='session', autouse=True)\n", (884, 915), False, 'import pytest\n'), ((524, 572), 'sqlmodel.ext.asyncio.session.AsyncSession', 'AsyncSession', (['connection'], {'expire_on_commit': '(False)'}), '(connection, expire_on_commit=False)\n', (536, 572), False, 'from sqlmodel.ext.asyncio.session import AsyncSession\n'), ((786, 817), 'asyncio.get_event_loop_policy', 'asyncio.get_event_loop_policy', ([], {}), '()\n', (815, 817), False, 'import asyncio\n')]
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Copyright (c) Megvii, Inc. and its affiliates.
import argparse
import megengine as mge
import numpy as np
from megengine import jit
from ..build import build_and_load
def make_parser():
parser = argparse.ArgumentParser("YOLOX Demo Dump")
parser.add_argument("-n", "--name", type=str, default="yolox-s", help="model name")
parser.add_argument("-c", "--ckpt", default=None, type=str, help="ckpt for eval")
parser.add_argument(
"--dump_path", default="model.mge", help="path to save the dumped model"
)
return parser
def dump_static_graph(model, graph_name="model.mge"):
model.eval()
model.head.decode_in_inference = False
data = mge.Tensor(np.random.random((1, 3, 640, 640)))
@jit.trace(capture_as_const=True)
def pred_func(data):
outputs = model(data)
return outputs
pred_func(data)
pred_func.dump(
graph_name,
arg_names=["data"],
optimize_for_inference=True,
enable_fuse_conv_bias_nonlinearity=True,
)
def main(args):
model = build_and_load(args.ckpt, name=args.name)
dump_static_graph(model, args.dump_path)
if __name__ == "__main__":
args = make_parser().parse_args()
main(args)
|
[
"megengine.jit.trace"
] |
[((252, 294), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""YOLOX Demo Dump"""'], {}), "('YOLOX Demo Dump')\n", (275, 294), False, 'import argparse\n'), ((780, 812), 'megengine.jit.trace', 'jit.trace', ([], {'capture_as_const': '(True)'}), '(capture_as_const=True)\n', (789, 812), False, 'from megengine import jit\n'), ((738, 772), 'numpy.random.random', 'np.random.random', (['(1, 3, 640, 640)'], {}), '((1, 3, 640, 640))\n', (754, 772), True, 'import numpy as np\n')]
|
"""
Node related APIs.
"""
import logging
from datetime import datetime
from typing import List, Optional
from fastapi import APIRouter, Depends
from sqlmodel import Session, SQLModel, select
from datajunction.models.column import ColumnType
from datajunction.models.node import Node, NodeType
from datajunction.utils import get_session
_logger = logging.getLogger(__name__)
router = APIRouter()
class SimpleColumn(SQLModel):
"""
A simplified column schema, without ID or dimensions.
"""
name: str
type: ColumnType
class NodeMetadata(SQLModel):
"""
A node with information about columns and if it is a metric.
"""
id: int
name: str
description: str = ""
created_at: datetime
updated_at: datetime
type: NodeType
expression: Optional[str] = None
columns: List[SimpleColumn]
@router.get("/nodes/", response_model=List[NodeMetadata])
def read_nodes(*, session: Session = Depends(get_session)) -> List[NodeMetadata]:
"""
List the available nodes.
"""
return session.exec(select(Node)).all()
|
[
"sqlmodel.select"
] |
[((351, 378), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (368, 378), False, 'import logging\n'), ((388, 399), 'fastapi.APIRouter', 'APIRouter', ([], {}), '()\n', (397, 399), False, 'from fastapi import APIRouter, Depends\n'), ((946, 966), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (953, 966), False, 'from fastapi import APIRouter, Depends\n'), ((1061, 1073), 'sqlmodel.select', 'select', (['Node'], {}), '(Node)\n', (1067, 1073), False, 'from sqlmodel import Session, SQLModel, select\n')]
|
import numpy as np
from sfepy.base.conf import ProblemConf
from sfepy.discrete.problem import Problem
from sfepy.mesh.mesh_generators import gen_block_mesh
from sfepy.postprocess.viewer import Viewer
from scipy.interpolate import interpn
import matplotlib.pyplot as plt
from matplotlib import ticker
class Block2d:
"""
Small wrapper for the sfepy problem class.
Constructs a rectangle of specified dimensions with a rectangular FEM mesh.
The rectangle is gripped at one end and pulled away at the other end by a
specified distance. The FEM mesh size and the centre of the rectangle can
be specified. Having done this, this class exposes the stretch amount and
the distribution of the stiffness tensor D as parameters that can be set,
and allows querying of the displacement at a set of points in the domain.
The rectangle is stretched parallel to the y-axis; its short side lies
along the x-axis. Also supports drawing the solved state. """
def __init__(self, dims, center_location, cell_sizes=np.array([2,2]),
prob_file="C:\\Users\\wbald\\sfepythings\\blocks\\fem\\prob_desc_2d.py",
put_mesh="C:\\Users\\wbald\\sfepythings"):
"""
dims: array, dimensions of rectangle [x,y]
center_location: array, centre of rectangle [x,y]
cell_sizes: array, x and y side lengths of the FEM rectangular elements
(there is no stretch argument: the model strain is fixed at 0.001 below
and results are rescaled linearly when queried)
prob_file: problem description file
put_mesh: where to save the mesh file
"""
assert(dims.shape[0] == 2)
assert(cell_sizes.shape[0] == 2)
# assume linear elasticity. Fix strain of rectangle to 0.001 and query
# at different strains by scaling linearly
self.dims = dims
self.prob_file = prob_file
self.FEM_model_strain = 0.001
self.elongation= self.FEM_model_strain * self.dims[1]
nums = np.divide(dims, cell_sizes)
nums = np.around(nums).astype(int) + np.array([1,1])
blockmesh = gen_block_mesh(dims, nums, center_location)
blockmesh.write(put_mesh + '\\mesh.vtk')
conf = ProblemConf.from_file(prob_file)
# 'region_ylower__1' holds the edge to be fixed
# 'region_yupper__2' holds the edge to be displaced
conf.regions['region_ylower__1'].select = 'vertices in (y < ' + str(0.01) + ')'
conf.regions['region_yupper__2'].select = 'vertices in (y > ' + str(dims[1] - 0.01) + ')'
conf.ebcs['ebc_Displaced__1'].dofs['u.1'] = self.elongation
self.prob = Problem.from_conf(conf)
# for reshaping sfepy output into xyz displacements
self.reshape_tuple = ((self.prob.fields['displacement'].n_nod, self.prob.fields['displacement'].n_components))
def _set_param_field(self, query):
# the stiffness is specified by the voigt notation tensor D, which is
# passed to the problem class as a queryable function of position
def fun(ts, coors, mode=None, **kwargs):
if mode=='qp':
return {'D' : query(coors)}
self.prob.functions._objs[0].function = fun
def get_disp_at_coords(self, D_query_function, strain, coords):
# get the displacement given a stiffness field D and an applied strain.
self._set_param_field(D_query_function)
state = self.prob.solve()
displacement = self.prob.fields['displacement'].evaluate_at(coords, state.vec.reshape(self.reshape_tuple))
displacement *= strain / self.FEM_model_strain
return displacement
def drawstate(self, state):
# draw the object with sfepy's viewer
# state can be obtained from prob.solve()
self.prob.save_state('curr_run_demo.vtk', state)
view = Viewer('curr_run_demo.vtk')
view(vector_mode='warp_norm', rel_scaling=2, is_scalar_bar=True, is_wireframe=True)
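# Illustrative sketch (hypothetical values, not part of the original file):
# a 20 x 100 strip meshed with 2 x 2 elements, queried with a uniform
# stiffness field that returns one 3x3 D matrix per quadrature point.
#
# block = Block2d(np.array([20.0, 100.0]), np.array([0.0, 50.0]))
# D_query = lambda coors: np.tile(np.eye(3), (coors.shape[0], 1, 1))
# points = np.array([[0.0, 50.0], [5.0, 25.0]])
# disp = block.get_disp_at_coords(D_query, 0.001, points)  # one [ux, uy] per point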
class Experiment_ortho2d_extra:
"""
Class for grouping a FEM model and a stiffness distribution model together,
performing reconstructions, visualising measurements and stiffness.
This class deals with 4 material properties, ['E_x', 'E_y', 'nu_{xy}', 'G_{xy}'],
independently defined on a grid.
The class has a measurement scheme. This is specified by the dataobj object it
holds, which holds the details of a particular experiment (dimensions, strains,
location of measurement points, displacement measurements, stresses).
A linear model can be specified which reduces the number of free parameters.
The class allows the displacement at any point to be queried.
"""
def __init__(self, dataobj, nx, ny, method='linear'):
# block and calcs
# fringe is used to describe the fictitious material at each end of the coupon
# block refers to the Block2d object holding the FEM model of the strip
# strip_dimensions is the dimensions of the strip
# the strip is placed centered on the y-axis, with the fixed edge coincident
# with the x-axis.
self.fringe = dataobj.fringe
self.strip_dimensions = dataobj.object_estimated_dimensions + np.array([0, self.fringe*2])
self.block = Block2d(self.strip_dimensions, np.array([0, self.strip_dimensions[1]/2]))
# all_coordinates is the coordinates of all the measured locations.
# 2d
self.all_coordinates = dataobj.centered_trunc_coords.copy()
self.dshape = self.all_coordinates.shape
self.n_points = self.all_coordinates.shape[0]
# a measurement is a subset of the full list of measured coordinates
self._measurement_scheme = np.array(range(self.n_points*2))
# D tensor distribution.
# generated by bilinearly interpolating from a (nx by ny) grid.
# tensor is specified by a 1d array called b_vector which is 4*nx*ny elements large
self.nx = nx
self.ny = ny
self.x_vals = []
self.y_vals = []
self.interp_method = method
self.D_interpolator_space, self.default_b_vector = self._setup_D_field_interpolation()
# model
self.model_matrix = np.eye(4*self.nx*self.ny)
self.default_p_vector = np.ones(4*self.nx*self.ny)
self.coef_names = ['E_x', 'E_y', r'\nu_{xy}', 'G_{xy}']  # raw string: keep '\n' from being read as a newline
# inversion
self.inverted_at = []
self.derivative = []
self.param_s_vecs = []
self.measurement_s_vecs = []
self.s_vals = []
""" ========================= D field function, models ======================= """
def cs_from_ps(self, ps):
# calc the D tensor elements from the four parameters.
cs = np.zeros(ps.shape)
f_top = ps[0]
f_bottom = ps[0] - np.multiply(ps[1],np.square(ps[2]))
f = np.divide(f_top, f_bottom)
cs[0] = np.multiply(ps[0] ,f)
cs[1] = np.multiply(ps[1] ,f)
cs[2] = ps[3]
cs[3] = np.multiply(np.multiply(ps[1], f), ps[2])
return cs
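# Reading of the mapping above (stated for reference, not in the original):
# with ps = (E_x, E_y, nu_xy, G_xy) and f = E_x / (E_x - E_y * nu_xy**2),
#   C11 = E_x * f,  C22 = E_y * f,  C33 = G_xy,  C12 = nu_xy * E_y * f,
# i.e. the usual plane-stress orthotropic stiffness in Voigt notation.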
def _setup_D_field_interpolation(self):
self.x_vals = np.linspace(-self.strip_dimensions[0]/2, self.strip_dimensions[0]/2, self.nx)
self.y_vals = np.linspace(0, self.strip_dimensions[1], self.ny)
def func(query_coords, b_vec):
# will reshape b_vec into params which is d*nx*ny
# convention is to use the 'C' ordering
# this means we will count in y before in x.
# parameters: p1 ... p4.
params = b_vec.reshape((4, self.nx, self.ny))
oneoneT = np.array([[1,0,0],[0,0,0],[0,0,0]])
twotwoT = np.array([[0,0,0],[0,1,0],[0,0,0]])
thrthrT = np.array([[0,0,0],[0,0,0],[0,0,1]])
symparT = np.array([[0,1,0],[1,0,0],[0,0,0]])
x_vals = self.x_vals
y_vals = self.y_vals
cs = self.cs_from_ps(params)
C11 = interpn((x_vals, y_vals), cs[0, :, :], query_coords, method=self.interp_method)
C22 = interpn((x_vals, y_vals), cs[1, :, :], query_coords, method=self.interp_method)
C33 = interpn((x_vals, y_vals), cs[2, :, :], query_coords, method=self.interp_method)
C12 = interpn((x_vals, y_vals), cs[3, :, :], query_coords, method=self.interp_method)
C11enlarge = np.array(C11)[...,None,None]
C22enlarge = np.array(C22)[...,None,None]
C33enlarge = np.array(C33)[...,None,None]
C12enlarge = np.array(C12)[...,None,None]
Mat = C11enlarge*oneoneT + C22enlarge*twotwoT + C33enlarge*thrthrT + C12enlarge*symparT
return Mat
# get default params
psdef = np.array([1, 1, 0.3, 0.38])
ones_shape = np.ones((self.nx, self.ny))
c11def = psdef[0]*ones_shape
c22def = psdef[1]*ones_shape
c33def = psdef[2]*ones_shape
c12def = psdef[3]*ones_shape
default = np.array([c11def, c22def, c33def, c12def])
default = default.reshape(4*self.nx*self.ny)
return func, default
def make_D_interpolator(self, b_vector):
return lambda coords : self.D_interpolator_space(coords, b_vector)
def params_to_b(self, params):
return self.model_matrix.dot(params)
def set_model_matrix(self, matrix):
self.model_matrix = matrix
self.default_p_vector = np.ones(matrix.shape[1])
""" ========================= displacements and measeurements ========================= """
def get_displacement(self, strain, b_vector):
D_interpolator = self.make_D_interpolator(b_vector)
displacement = self.block.get_disp_at_coords(D_interpolator, strain, self.all_coordinates)
return displacement
def get_measurement(self, strain, b_vec):
disp = self.get_displacement(strain, b_vec)
return self.measurement_from_displacement(disp)
def set_measurement_scheme(self, indices):
self._measurement_scheme = indices
def set_special_measurement(self, measurement_type):
accepted = ['all_x', 'all_y']
assert measurement_type in accepted
if measurement_type == 'all_x':
indices = np.arange(0, 2*self.n_points, 2)
else:
indices = np.arange(1, 2*self.n_points, 2)
self.set_measurement_scheme(indices)
def measurement_from_displacement(self, displacement):
# extracts from a displacement array the data correpsonding to the
# measured degrees of freedom as specified by the current measurement scheme.
flat_displacement = displacement.flatten()
return flat_displacement[self._measurement_scheme]
def embed_measurement_in_displacement(self, measurement):
assert measurement.shape == self._measurement_scheme.shape
displacement = np.zeros(self.n_points*2)
displacement[self._measurement_scheme] = measurement
displacement = displacement.reshape(self.dshape)
return displacement
""" ========================= inverting ========================= """
def compute_derivative(self, b_vec0, strain):
        # Finite-difference derivative of the measurement w.r.t. the model
        # parameters, under the current measurement scheme.
measurement0 = self.get_measurement(strain, b_vec0)
num_params = self.model_matrix.shape[1]
derivative = np.zeros((self._measurement_scheme.shape[0], num_params))
delta = 0.001
for index in range(num_params):
p_perturbed = np.zeros(num_params)
p_perturbed[index] = delta
b_perturbed = b_vec0.copy() + self.model_matrix.dot(p_perturbed)
m_perturbation = self.get_measurement(strain, b_perturbed) - measurement0
derivative[:, index] = m_perturbation / delta
return derivative, measurement0
def restricted_derivative_wrt_svecs(self, b, num_svecs, strain):
        # Finite-difference derivative of the measurement w.r.t. the model,
        # evaluated at b and restricted to the subspace spanned by the first
        # num_svecs parameter singular vectors; each column corresponds to one
        # singular vector.
measurement0 = self.get_measurement(strain, b)
derivative = np.zeros((self._measurement_scheme.shape[0], num_svecs))
delta = 0.01
for index in range(num_svecs):
p_perturbed = delta*self.param_s_vecs[:, index]
b_perturbed = b.copy() + self.model_matrix.dot(p_perturbed)
m_perturbation = self.get_measurement(strain, b_perturbed) - measurement0
derivative[:, index] = m_perturbation / delta
return derivative
def compute_svd_at_b0(self, b_vec, strain):
self.inverted_at = b_vec
der_mat, measurement0 = self.compute_derivative(b_vec, strain)
self.derivative = der_mat.copy()
U, s, Vt = np.linalg.svd(der_mat)
self.measurement_s_vecs = U
self.s_vals = s
self.param_s_vecs = Vt.transpose()
return measurement0
def double_sides_metric(self):
Q = np.eye(self.default_b_vector.shape[0])
        # Parameters on the left/right walls are weighted by sqrt(2), so their
        # squared contribution is doubled; corners are not treated specially.
for index in range(self.default_b_vector.shape[0]):
xy_index = index % (self.nx*self.ny)
            # yindex = xy_index % self.ny  (unused; y varies fastest under 'C' ordering)
xindex = xy_index // self.ny
if xindex == 0 or xindex == self.nx - 1:
Q[index, index] = 2.0**0.5
return Q
def invert_measurement(self, rank, strain, measurement):
""" performs the linear FEMU inversion """
# reduced rank matrices
U_red = self.measurement_s_vecs[:, :rank]
s_inv = np.reciprocal(self.s_vals)
S_inv_red = np.diag(s_inv[:rank])
V_red = self.param_s_vecs[:, :rank]
# d side perturbation
measurement_perturbation = measurement - self.get_measurement(strain, self.inverted_at)
# reduced rank expansion coefs
d_side_coefs = U_red.transpose().dot(measurement_perturbation)
p_side_coefs = S_inv_red.dot(d_side_coefs)
# param perturbation
params_perturbation = V_red.dot(p_side_coefs)
inferred_b_vec = self.inverted_at + self.model_matrix.dot(params_perturbation)
return inferred_b_vec, d_side_coefs, U_red, self.s_vals[:rank], p_side_coefs, V_red
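    # Sketch of the intended call sequence (names as defined in this class):
    #   m0 = self.compute_svd_at_b0(b_vec0, strain)      # linearise + SVD of the derivative
    #   b_hat, *rest = self.invert_measurement(rank, strain, measurement)
    # which applies the rank-r pseudo-inverse update
    #   delta_b = M . V_r . S_r^{-1} . U_r^T . (measurement - m0),
    # with M the model matrix mapping parameter vectors to b-vectors.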
def iterate_stiffness(self, rank, strain, measurement, num_iterations, step):
        # Iterates towards the solution with the singular vectors frozen at the
        # linearisation point; `step` damps each parameter update.
current_b = self.default_b_vector
bs = []
ms = []
for itr in range(num_iterations):
current_m = self.get_measurement(strain, current_b)
ms.append(current_m)
bs.append(current_b)
delta_m = measurement - current_m
A = self.restricted_derivative_wrt_svecs(current_b, rank, strain)
            trunc_vecs_coefs_prime, _, _, _ = np.linalg.lstsq(A, delta_m, rcond=None)
V = self.param_s_vecs
delta_b = np.zeros(self.default_b_vector.shape[0])
for ind in range(trunc_vecs_coefs_prime.shape[0]):
delta_b += self.params_to_b( V[:, ind] * trunc_vecs_coefs_prime[ind] )
current_b = current_b + delta_b*step
return bs, ms
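    # The loop above is a damped Gauss-Newton-style iteration restricted to the
    # subspace of the (frozen) leading singular vectors; step < 1 damps each update.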
def invert_tikhonov(self, rank, strain, measurement, alpha, return_t='b'):
assert return_t in ['b', 'p', 'all']
# reduce the rank
U_red = self.measurement_s_vecs[:, :rank]
s_inv = np.reciprocal(self.s_vals)
S_inv_red = np.diag(s_inv[:rank])
V_red = self.param_s_vecs[:, :rank]
measurement_perturbation = measurement - self.get_measurement(strain, self.inverted_at)
phis = np.zeros(rank)
for i in range(rank):
phis[i] = self.s_vals[i]**2 / (self.s_vals[i]**2 + alpha**2)
d_side_coefs = U_red.transpose().dot(measurement_perturbation)
p_side_coefs = S_inv_red.dot(d_side_coefs)
p_side_coefs = np.multiply(p_side_coefs, phis)
params_perturbation = V_red.dot(p_side_coefs)
if return_t=='b':
inferred_b_vec = self.inverted_at + self.model_matrix.dot(params_perturbation)
return inferred_b_vec
elif return_t=='p':
return params_perturbation
else:
list_of_svecs = [vec for vec in V_red.transpose()]
inferred_b_vec = self.inverted_at + self.model_matrix.dot(params_perturbation)
return inferred_b_vec, params_perturbation, list_of_svecs, self.s_vals, p_side_coefs, d_side_coefs
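    # The phis above are Tikhonov filter factors, phi_i = s_i**2 / (s_i**2 + alpha**2):
    # directions with s_i >> alpha pass essentially unchanged, while noise-dominated
    # directions with s_i << alpha are damped by roughly (s_i / alpha)**2.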
""" ========================= plotting ========================= """
def _calc_xy_measured(self, dof_measurement_indices):
        # Determine, for plotting, which nodes have an x- or y-displacement DOF
        # active in the measurement scheme.
measured_x_points = []
measured_y_points = []
for index in dof_measurement_indices:
if index % 2 == 0:
measured_x_points.append(index//2)
if index % 2 == 1:
measured_y_points.append(index//2)
return np.array(measured_x_points), np.array(measured_y_points)
def scatter_displacement(self, displacement, direction):
plt.figure()
plt.scatter(self.all_coordinates[:,1], self.all_coordinates[:,0], c=displacement[:,direction])
plt.xlim([0, self.strip_dimensions[1]])
plt.ylim([-self.strip_dimensions[0]/2, self.strip_dimensions[0]/2])
plt.colorbar()
plt.show()
def scatter_measurement(self, measurement, direction, fringe=True, cbar=None, orientation='horizontal', **kwargs):
assert direction in [0,1]
assert measurement.shape == self._measurement_scheme.shape
all_displacements = self.embed_measurement_in_displacement(measurement)
measured_x_points, measured_y_points = self._calc_xy_measured(self._measurement_scheme)
if direction == 0:
assert measured_x_points.shape[0] > 0
coordinates = self.all_coordinates[measured_x_points, :]
used_displacements = all_displacements[measured_x_points, 0]
if direction == 1:
assert measured_y_points.shape[0] > 0
coordinates = self.all_coordinates[measured_y_points, :]
used_displacements = all_displacements[measured_y_points, 1]
fig, ax = plt.subplots(figsize=(15,4))
thing = ax.scatter(coordinates[:,1], coordinates[:,0], c=used_displacements, s=5, cmap='plasma', **kwargs)
plt.xlim([0, self.strip_dimensions[1]])
plt.ylim([-self.strip_dimensions[0]/2, self.strip_dimensions[0]/2])
if not fringe:
plt.xlim([self.fringe, self.strip_dimensions[1]-self.fringe])
ax.set_xlabel('y (mm)')
ax.set_ylabel('x (mm)')
if cbar is None:
fig.colorbar(thing, orientation=orientation)
ax.set_aspect('equal')
else:
fig.colorbar(thing, orientation=orientation)
thing.set_clim(cbar.vmin, cbar.vmax)
ax.set_aspect('equal')
        # Label the component actually plotted (x for direction 0, y for direction 1).
        label = 'x-displacement (mm)' if direction == 0 else 'y-displacement (mm)'
        if orientation == 'vertical':
            thing.colorbar.ax.set_ylabel(label, labelpad=15)
        else:
            thing.colorbar.ax.set_xlabel(label, labelpad=15)
plt.show()
return thing.colorbar
def scatter_measurement_vertical(self, measurement, direction, fringe=True, cbar=None, orientation='horizontal', **kwargs):
assert direction in [0,1]
assert measurement.shape == self._measurement_scheme.shape
all_displacements = self.embed_measurement_in_displacement(measurement)
measured_x_points, measured_y_points = self._calc_xy_measured(self._measurement_scheme)
if direction == 0:
assert measured_x_points.shape[0] > 0
coordinates = self.all_coordinates[measured_x_points, :]
used_displacements = all_displacements[measured_x_points, 0]
if direction == 1:
assert measured_y_points.shape[0] > 0
coordinates = self.all_coordinates[measured_y_points, :]
used_displacements = all_displacements[measured_y_points, 1]
fig, ax = plt.subplots(figsize=(15,4))
thing = ax.scatter(coordinates[:,0], coordinates[:,1], c=used_displacements, s=5, cmap='plasma', **kwargs)
plt.ylim([0, self.strip_dimensions[1]])
plt.xlim([-self.strip_dimensions[0]/2, self.strip_dimensions[0]/2])
if not fringe:
plt.ylim([self.fringe, self.strip_dimensions[1]-self.fringe])
ax.set_xlabel('x (mm)')
ax.set_ylabel('y (mm)')
if cbar is None:
fig.colorbar(thing, orientation=orientation)
ax.set_aspect('equal')
else:
fig.colorbar(thing, orientation=orientation)
thing.set_clim(cbar.vmin, cbar.vmax)
ax.set_aspect('equal')
        # Label the component actually plotted (x for direction 0, y for direction 1).
        label = 'x-displacement (mm)' if direction == 0 else 'y-displacement (mm)'
        if orientation == 'vertical':
            thing.colorbar.ax.set_ylabel(label, labelpad=15)
        else:
            thing.colorbar.ax.set_xlabel(label, labelpad=15)
plt.show()
return thing.colorbar
def scatter_measurement_1d(self, measurement1, measurement2, direction):
assert direction in [0,1]
assert measurement1.shape == self._measurement_scheme.shape
measured_x_points, measured_y_points = self._calc_xy_measured(self._measurement_scheme)
all_displacements = self.embed_measurement_in_displacement(measurement1)
if direction == 0:
assert measured_x_points.shape[0] > 0
coordinates = self.all_coordinates[measured_x_points, :]
used_displacements = all_displacements[measured_x_points, 0]
if direction == 1:
assert measured_y_points.shape[0] > 0
coordinates = self.all_coordinates[measured_y_points, :]
used_displacements = all_displacements[measured_y_points, 1]
plt.figure()
plt.scatter(coordinates[:,1], used_displacements, s=0.1)
assert measurement2.shape == self._measurement_scheme.shape
all_displacements = self.embed_measurement_in_displacement(measurement2)
if direction == 0:
assert measured_x_points.shape[0] > 0
coordinates = self.all_coordinates[measured_x_points, :]
used_displacements = all_displacements[measured_x_points, 0]
if direction == 1:
assert measured_y_points.shape[0] > 0
coordinates = self.all_coordinates[measured_y_points, :]
used_displacements = all_displacements[measured_y_points, 1]
plt.scatter(coordinates[:,1], used_displacements, s=0.1)
plt.xlim([0, self.strip_dimensions[1]])
plt.show()
def plot_b_vec(self, b_vec, coef_index, fringe=False):
field = b_vec.reshape((4, self.nx, self.ny))[coef_index]
plot_density = 1.
if fringe:
plot_ranges = [-self.strip_dimensions[0]/2, self.strip_dimensions[0]/2, 0, self.strip_dimensions[1]]
else:
plot_ranges = [-self.strip_dimensions[0]/2, self.strip_dimensions[0]/2, self.fringe, self.strip_dimensions[1] - self.fringe]
xr = np.arange(plot_ranges[0], plot_ranges[1], plot_density)
yr = np.arange(plot_ranges[2], plot_ranges[3], plot_density)
X, Y = np.meshgrid(xr, yr)
vals = interpn((self.x_vals, self.y_vals), field, np.array([X.flatten(), Y.flatten()]).transpose())
fig, ax = plt.subplots()
thing = ax.contourf(Y,X,vals.reshape(X.shape), 100)
fig.colorbar(thing)
ax.set_aspect('equal')
plt.show()
def contour_b_vec(self, b_vec, coef_index, cbar=None, fringe=False, title='', keep_boundaries=0, orientation='horizontal', **kwargs):
field = b_vec.reshape((4, self.nx, self.ny))[coef_index]
plot_density = 1.
if fringe:
plot_ranges = [-self.strip_dimensions[0]/2, self.strip_dimensions[0]/2, 0, self.strip_dimensions[1]]
else:
plot_ranges = [-self.strip_dimensions[0]/2, self.strip_dimensions[0]/2, self.fringe, self.strip_dimensions[1] - self.fringe]
xr = np.arange(plot_ranges[0], plot_ranges[1], plot_density)
yr = np.arange(plot_ranges[2], plot_ranges[3], plot_density)
X, Y = np.meshgrid(xr, yr)
vals = interpn((self.x_vals, self.y_vals), field, np.array([X.flatten(), Y.flatten()]).transpose())
fig, ax = plt.subplots(figsize=(15,4))
ax.set_aspect('equal')
levels = 15
if (cbar is not None) and (keep_boundaries):
levels = cbar._boundaries
thing = ax.contourf(Y,X,vals.reshape(X.shape), **kwargs, levels=levels)
fig.colorbar(thing, orientation=orientation)
if (cbar is not None) and (not keep_boundaries):
thing.set_clim(cbar.vmin, cbar.vmax)
ax.set_xlabel('y (mm)')
ax.set_ylabel('x (mm)')
        thing.colorbar.ax.set_xlabel("Axial Young's Modulus")
plt.title(title)
plt.show()
return thing.colorbar
def contour_b_vec_vertical(self, b_vec, coef_index, cbar=None, fringe=False, keep_boundaries=0, orientation='horizontal', **kwargs):
field = b_vec.reshape((4, self.nx, self.ny))[coef_index]
plot_density = 1.
if fringe:
plot_ranges = [-self.strip_dimensions[0]/2, self.strip_dimensions[0]/2, 0, self.strip_dimensions[1]]
else:
plot_ranges = [-self.strip_dimensions[0]/2, self.strip_dimensions[0]/2, self.fringe, self.strip_dimensions[1] - self.fringe]
xr = np.arange(plot_ranges[0], plot_ranges[1], plot_density)
yr = np.arange(plot_ranges[2], plot_ranges[3], plot_density)
X, Y = np.meshgrid(xr, yr)
vals = interpn((self.x_vals, self.y_vals), field, np.array([X.flatten(), Y.flatten()]).transpose())
fig, ax = plt.subplots(figsize=(8,10))
ax.set_aspect('equal')
levels = 15
if (cbar is not None) and (keep_boundaries):
levels = cbar._boundaries
thing = ax.contourf(X,Y,vals.reshape(X.shape), **kwargs, levels=levels)
        fig.colorbar(thing, orientation=orientation)
if (cbar is not None) and (not keep_boundaries):
thing.set_clim(cbar.vmin, cbar.vmax)
ax.set_xlabel('x (mm)')
ax.set_ylabel('y (mm)')
plt.show()
return thing.colorbar
def plot_p_vec(self, p_vec, coef_index, **kwargs):
b_vec = self.model_matrix.dot(p_vec)
self.plot_b_vec(b_vec, coef_index, **kwargs)
|
[
"sfepy.base.conf.ProblemConf.from_file",
"sfepy.mesh.mesh_generators.gen_block_mesh",
"sfepy.postprocess.viewer.Viewer",
"sfepy.discrete.problem.Problem.from_conf"
] |
[((1057, 1073), 'numpy.array', 'np.array', (['[2, 2]'], {}), '([2, 2])\n', (1065, 1073), True, 'import numpy as np\n'), ((2094, 2121), 'numpy.divide', 'np.divide', (['dims', 'cell_sizes'], {}), '(dims, cell_sizes)\n', (2103, 2121), True, 'import numpy as np\n'), ((2212, 2255), 'sfepy.mesh.mesh_generators.gen_block_mesh', 'gen_block_mesh', (['dims', 'nums', 'center_location'], {}), '(dims, nums, center_location)\n', (2226, 2255), False, 'from sfepy.mesh.mesh_generators import gen_block_mesh\n'), ((2329, 2361), 'sfepy.base.conf.ProblemConf.from_file', 'ProblemConf.from_file', (['prob_file'], {}), '(prob_file)\n', (2350, 2361), False, 'from sfepy.base.conf import ProblemConf\n'), ((2779, 2802), 'sfepy.discrete.problem.Problem.from_conf', 'Problem.from_conf', (['conf'], {}), '(conf)\n', (2796, 2802), False, 'from sfepy.discrete.problem import Problem\n'), ((4023, 4050), 'sfepy.postprocess.viewer.Viewer', 'Viewer', (['"""curr_run_demo.vtk"""'], {}), "('curr_run_demo.vtk')\n", (4029, 4050), False, 'from sfepy.postprocess.viewer import Viewer\n'), ((6477, 6506), 'numpy.eye', 'np.eye', (['(4 * self.nx * self.ny)'], {}), '(4 * self.nx * self.ny)\n', (6483, 6506), True, 'import numpy as np\n'), ((6535, 6565), 'numpy.ones', 'np.ones', (['(4 * self.nx * self.ny)'], {}), '(4 * self.nx * self.ny)\n', (6542, 6565), True, 'import numpy as np\n'), ((7023, 7041), 'numpy.zeros', 'np.zeros', (['ps.shape'], {}), '(ps.shape)\n', (7031, 7041), True, 'import numpy as np\n'), ((7139, 7165), 'numpy.divide', 'np.divide', (['f_top', 'f_bottom'], {}), '(f_top, f_bottom)\n', (7148, 7165), True, 'import numpy as np\n'), ((7191, 7212), 'numpy.multiply', 'np.multiply', (['ps[0]', 'f'], {}), '(ps[0], f)\n', (7202, 7212), True, 'import numpy as np\n'), ((7229, 7250), 'numpy.multiply', 'np.multiply', (['ps[1]', 'f'], {}), '(ps[1], f)\n', (7240, 7250), True, 'import numpy as np\n'), ((7431, 7516), 'numpy.linspace', 'np.linspace', (['(-self.strip_dimensions[0] / 2)', '(self.strip_dimensions[0] / 2)', 'self.nx'], {}), '(-self.strip_dimensions[0] / 2, self.strip_dimensions[0] / 2,\n self.nx)\n', (7442, 7516), True, 'import numpy as np\n'), ((7531, 7580), 'numpy.linspace', 'np.linspace', (['(0)', 'self.strip_dimensions[1]', 'self.ny'], {}), '(0, self.strip_dimensions[1], self.ny)\n', (7542, 7580), True, 'import numpy as np\n'), ((9141, 9168), 'numpy.array', 'np.array', (['[1, 1, 0.3, 0.38]'], {}), '([1, 1, 0.3, 0.38])\n', (9149, 9168), True, 'import numpy as np\n'), ((9190, 9217), 'numpy.ones', 'np.ones', (['(self.nx, self.ny)'], {}), '((self.nx, self.ny))\n', (9197, 9217), True, 'import numpy as np\n'), ((9401, 9443), 'numpy.array', 'np.array', (['[c11def, c22def, c33def, c12def]'], {}), '([c11def, c22def, c33def, c12def])\n', (9409, 9443), True, 'import numpy as np\n'), ((9857, 9881), 'numpy.ones', 'np.ones', (['matrix.shape[1]'], {}), '(matrix.shape[1])\n', (9864, 9881), True, 'import numpy as np\n'), ((11363, 11390), 'numpy.zeros', 'np.zeros', (['(self.n_points * 2)'], {}), '(self.n_points * 2)\n', (11371, 11390), True, 'import numpy as np\n'), ((11872, 11929), 'numpy.zeros', 'np.zeros', (['(self._measurement_scheme.shape[0], num_params)'], {}), '((self._measurement_scheme.shape[0], num_params))\n', (11880, 11929), True, 'import numpy as np\n'), ((12692, 12748), 'numpy.zeros', 'np.zeros', (['(self._measurement_scheme.shape[0], num_svecs)'], {}), '((self._measurement_scheme.shape[0], num_svecs))\n', (12700, 12748), True, 'import numpy as np\n'), ((13350, 13372), 'numpy.linalg.svd', 'np.linalg.svd', (['der_mat'], {}), 
'(der_mat)\n', (13363, 13372), True, 'import numpy as np\n'), ((13569, 13607), 'numpy.eye', 'np.eye', (['self.default_b_vector.shape[0]'], {}), '(self.default_b_vector.shape[0])\n', (13575, 13607), True, 'import numpy as np\n'), ((14212, 14238), 'numpy.reciprocal', 'np.reciprocal', (['self.s_vals'], {}), '(self.s_vals)\n', (14225, 14238), True, 'import numpy as np\n'), ((14259, 14280), 'numpy.diag', 'np.diag', (['s_inv[:rank]'], {}), '(s_inv[:rank])\n', (14266, 14280), True, 'import numpy as np\n'), ((16164, 16190), 'numpy.reciprocal', 'np.reciprocal', (['self.s_vals'], {}), '(self.s_vals)\n', (16177, 16190), True, 'import numpy as np\n'), ((16211, 16232), 'numpy.diag', 'np.diag', (['s_inv[:rank]'], {}), '(s_inv[:rank])\n', (16218, 16232), True, 'import numpy as np\n'), ((16397, 16411), 'numpy.zeros', 'np.zeros', (['rank'], {}), '(rank)\n', (16405, 16411), True, 'import numpy as np\n'), ((16677, 16708), 'numpy.multiply', 'np.multiply', (['p_side_coefs', 'phis'], {}), '(p_side_coefs, phis)\n', (16688, 16708), True, 'import numpy as np\n'), ((17963, 17975), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (17973, 17975), True, 'import matplotlib.pyplot as plt\n'), ((17984, 18086), 'matplotlib.pyplot.scatter', 'plt.scatter', (['self.all_coordinates[:, 1]', 'self.all_coordinates[:, 0]'], {'c': 'displacement[:, direction]'}), '(self.all_coordinates[:, 1], self.all_coordinates[:, 0], c=\n displacement[:, direction])\n', (17995, 18086), True, 'import matplotlib.pyplot as plt\n'), ((18087, 18126), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0, self.strip_dimensions[1]]'], {}), '([0, self.strip_dimensions[1]])\n', (18095, 18126), True, 'import matplotlib.pyplot as plt\n'), ((18135, 18206), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[-self.strip_dimensions[0] / 2, self.strip_dimensions[0] / 2]'], {}), '([-self.strip_dimensions[0] / 2, self.strip_dimensions[0] / 2])\n', (18143, 18206), True, 'import matplotlib.pyplot as plt\n'), ((18211, 18225), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (18223, 18225), True, 'import matplotlib.pyplot as plt\n'), ((18234, 18244), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (18242, 18244), True, 'import matplotlib.pyplot as plt\n'), ((19270, 19299), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(15, 4)'}), '(figsize=(15, 4))\n', (19282, 19299), True, 'import matplotlib.pyplot as plt\n'), ((19431, 19470), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0, self.strip_dimensions[1]]'], {}), '([0, self.strip_dimensions[1]])\n', (19439, 19470), True, 'import matplotlib.pyplot as plt\n'), ((19479, 19550), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[-self.strip_dimensions[0] / 2, self.strip_dimensions[0] / 2]'], {}), '([-self.strip_dimensions[0] / 2, self.strip_dimensions[0] / 2])\n', (19487, 19550), True, 'import matplotlib.pyplot as plt\n'), ((20242, 20252), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (20250, 20252), True, 'import matplotlib.pyplot as plt\n'), ((21314, 21343), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(15, 4)'}), '(figsize=(15, 4))\n', (21326, 21343), True, 'import matplotlib.pyplot as plt\n'), ((21475, 21514), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, self.strip_dimensions[1]]'], {}), '([0, self.strip_dimensions[1]])\n', (21483, 21514), True, 'import matplotlib.pyplot as plt\n'), ((21523, 21594), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[-self.strip_dimensions[0] / 2, self.strip_dimensions[0] / 2]'], {}), '([-self.strip_dimensions[0] / 2, self.strip_dimensions[0] / 
2])\n', (21531, 21594), True, 'import matplotlib.pyplot as plt\n'), ((22286, 22296), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (22294, 22296), True, 'import matplotlib.pyplot as plt\n'), ((23229, 23241), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (23239, 23241), True, 'import matplotlib.pyplot as plt\n'), ((23250, 23307), 'matplotlib.pyplot.scatter', 'plt.scatter', (['coordinates[:, 1]', 'used_displacements'], {'s': '(0.1)'}), '(coordinates[:, 1], used_displacements, s=0.1)\n', (23261, 23307), True, 'import matplotlib.pyplot as plt\n'), ((23974, 24031), 'matplotlib.pyplot.scatter', 'plt.scatter', (['coordinates[:, 1]', 'used_displacements'], {'s': '(0.1)'}), '(coordinates[:, 1], used_displacements, s=0.1)\n', (23985, 24031), True, 'import matplotlib.pyplot as plt\n'), ((24048, 24087), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0, self.strip_dimensions[1]]'], {}), '([0, self.strip_dimensions[1]])\n', (24056, 24087), True, 'import matplotlib.pyplot as plt\n'), ((24096, 24110), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (24108, 24110), True, 'import matplotlib.pyplot as plt\n'), ((24119, 24129), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (24127, 24129), True, 'import matplotlib.pyplot as plt\n'), ((24610, 24665), 'numpy.arange', 'np.arange', (['plot_ranges[0]', 'plot_ranges[1]', 'plot_density'], {}), '(plot_ranges[0], plot_ranges[1], plot_density)\n', (24619, 24665), True, 'import numpy as np\n'), ((24679, 24734), 'numpy.arange', 'np.arange', (['plot_ranges[2]', 'plot_ranges[3]', 'plot_density'], {}), '(plot_ranges[2], plot_ranges[3], plot_density)\n', (24688, 24734), True, 'import numpy as np\n'), ((24750, 24769), 'numpy.meshgrid', 'np.meshgrid', (['xr', 'yr'], {}), '(xr, yr)\n', (24761, 24769), True, 'import numpy as np\n'), ((24905, 24919), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (24917, 24919), True, 'import matplotlib.pyplot as plt\n'), ((25047, 25057), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (25055, 25057), True, 'import matplotlib.pyplot as plt\n'), ((25614, 25669), 'numpy.arange', 'np.arange', (['plot_ranges[0]', 'plot_ranges[1]', 'plot_density'], {}), '(plot_ranges[0], plot_ranges[1], plot_density)\n', (25623, 25669), True, 'import numpy as np\n'), ((25683, 25738), 'numpy.arange', 'np.arange', (['plot_ranges[2]', 'plot_ranges[3]', 'plot_density'], {}), '(plot_ranges[2], plot_ranges[3], plot_density)\n', (25692, 25738), True, 'import numpy as np\n'), ((25754, 25773), 'numpy.meshgrid', 'np.meshgrid', (['xr', 'yr'], {}), '(xr, yr)\n', (25765, 25773), True, 'import numpy as np\n'), ((25909, 25938), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(15, 4)'}), '(figsize=(15, 4))\n', (25921, 25938), True, 'import matplotlib.pyplot as plt\n'), ((26525, 26541), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (26534, 26541), True, 'import matplotlib.pyplot as plt\n'), ((26559, 26569), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (26567, 26569), True, 'import matplotlib.pyplot as plt\n'), ((27159, 27214), 'numpy.arange', 'np.arange', (['plot_ranges[0]', 'plot_ranges[1]', 'plot_density'], {}), '(plot_ranges[0], plot_ranges[1], plot_density)\n', (27168, 27214), True, 'import numpy as np\n'), ((27228, 27283), 'numpy.arange', 'np.arange', (['plot_ranges[2]', 'plot_ranges[3]', 'plot_density'], {}), '(plot_ranges[2], plot_ranges[3], plot_density)\n', (27237, 27283), True, 'import numpy as np\n'), ((27299, 27318), 'numpy.meshgrid', 'np.meshgrid', 
(['xr', 'yr'], {}), '(xr, yr)\n', (27310, 27318), True, 'import numpy as np\n'), ((27454, 27483), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(8, 10)'}), '(figsize=(8, 10))\n', (27466, 27483), True, 'import matplotlib.pyplot as plt\n'), ((28078, 28088), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (28086, 28088), True, 'import matplotlib.pyplot as plt\n'), ((2167, 2183), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (2175, 2183), True, 'import numpy as np\n'), ((5435, 5465), 'numpy.array', 'np.array', (['[0, self.fringe * 2]'], {}), '([0, self.fringe * 2])\n', (5443, 5465), True, 'import numpy as np\n'), ((5516, 5559), 'numpy.array', 'np.array', (['[0, self.strip_dimensions[1] / 2]'], {}), '([0, self.strip_dimensions[1] / 2])\n', (5524, 5559), True, 'import numpy as np\n'), ((7301, 7322), 'numpy.multiply', 'np.multiply', (['ps[1]', 'f'], {}), '(ps[1], f)\n', (7312, 7322), True, 'import numpy as np\n'), ((7956, 7999), 'numpy.array', 'np.array', (['[[1, 0, 0], [0, 0, 0], [0, 0, 0]]'], {}), '([[1, 0, 0], [0, 0, 0], [0, 0, 0]])\n', (7964, 7999), True, 'import numpy as np\n'), ((8014, 8057), 'numpy.array', 'np.array', (['[[0, 0, 0], [0, 1, 0], [0, 0, 0]]'], {}), '([[0, 0, 0], [0, 1, 0], [0, 0, 0]])\n', (8022, 8057), True, 'import numpy as np\n'), ((8072, 8115), 'numpy.array', 'np.array', (['[[0, 0, 0], [0, 0, 0], [0, 0, 1]]'], {}), '([[0, 0, 0], [0, 0, 0], [0, 0, 1]])\n', (8080, 8115), True, 'import numpy as np\n'), ((8130, 8173), 'numpy.array', 'np.array', (['[[0, 1, 0], [1, 0, 0], [0, 0, 0]]'], {}), '([[0, 1, 0], [1, 0, 0], [0, 0, 0]])\n', (8138, 8173), True, 'import numpy as np\n'), ((8330, 8409), 'scipy.interpolate.interpn', 'interpn', (['(x_vals, y_vals)', 'cs[0, :, :]', 'query_coords'], {'method': 'self.interp_method'}), '((x_vals, y_vals), cs[0, :, :], query_coords, method=self.interp_method)\n', (8337, 8409), False, 'from scipy.interpolate import interpn\n'), ((8428, 8507), 'scipy.interpolate.interpn', 'interpn', (['(x_vals, y_vals)', 'cs[1, :, :]', 'query_coords'], {'method': 'self.interp_method'}), '((x_vals, y_vals), cs[1, :, :], query_coords, method=self.interp_method)\n', (8435, 8507), False, 'from scipy.interpolate import interpn\n'), ((8526, 8605), 'scipy.interpolate.interpn', 'interpn', (['(x_vals, y_vals)', 'cs[2, :, :]', 'query_coords'], {'method': 'self.interp_method'}), '((x_vals, y_vals), cs[2, :, :], query_coords, method=self.interp_method)\n', (8533, 8605), False, 'from scipy.interpolate import interpn\n'), ((8624, 8703), 'scipy.interpolate.interpn', 'interpn', (['(x_vals, y_vals)', 'cs[3, :, :]', 'query_coords'], {'method': 'self.interp_method'}), '((x_vals, y_vals), cs[3, :, :], query_coords, method=self.interp_method)\n', (8631, 8703), False, 'from scipy.interpolate import interpn\n'), ((10713, 10747), 'numpy.arange', 'np.arange', (['(0)', '(2 * self.n_points)', '(2)'], {}), '(0, 2 * self.n_points, 2)\n', (10722, 10747), True, 'import numpy as np\n'), ((10782, 10816), 'numpy.arange', 'np.arange', (['(1)', '(2 * self.n_points)', '(2)'], {}), '(1, 2 * self.n_points, 2)\n', (10791, 10816), True, 'import numpy as np\n'), ((12018, 12038), 'numpy.zeros', 'np.zeros', (['num_params'], {}), '(num_params)\n', (12026, 12038), True, 'import numpy as np\n'), ((15510, 15537), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['A', 'delta_m'], {}), '(A, delta_m)\n', (15525, 15537), True, 'import numpy as np\n'), ((15607, 15647), 'numpy.zeros', 'np.zeros', (['self.default_b_vector.shape[0]'], {}), '(self.default_b_vector.shape[0])\n', (15615, 
15647), True, 'import numpy as np\n'), ((17828, 17855), 'numpy.array', 'np.array', (['measured_x_points'], {}), '(measured_x_points)\n', (17836, 17855), True, 'import numpy as np\n'), ((17857, 17884), 'numpy.array', 'np.array', (['measured_y_points'], {}), '(measured_y_points)\n', (17865, 17884), True, 'import numpy as np\n'), ((19582, 19645), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[self.fringe, self.strip_dimensions[1] - self.fringe]'], {}), '([self.fringe, self.strip_dimensions[1] - self.fringe])\n', (19590, 19645), True, 'import matplotlib.pyplot as plt\n'), ((21626, 21689), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[self.fringe, self.strip_dimensions[1] - self.fringe]'], {}), '([self.fringe, self.strip_dimensions[1] - self.fringe])\n', (21634, 21689), True, 'import matplotlib.pyplot as plt\n'), ((7109, 7125), 'numpy.square', 'np.square', (['ps[2]'], {}), '(ps[2])\n', (7118, 7125), True, 'import numpy as np\n'), ((8742, 8755), 'numpy.array', 'np.array', (['C11'], {}), '(C11)\n', (8750, 8755), True, 'import numpy as np\n'), ((8796, 8809), 'numpy.array', 'np.array', (['C22'], {}), '(C22)\n', (8804, 8809), True, 'import numpy as np\n'), ((8850, 8863), 'numpy.array', 'np.array', (['C33'], {}), '(C33)\n', (8858, 8863), True, 'import numpy as np\n'), ((8904, 8917), 'numpy.array', 'np.array', (['C12'], {}), '(C12)\n', (8912, 8917), True, 'import numpy as np\n'), ((2137, 2152), 'numpy.around', 'np.around', (['nums'], {}), '(nums)\n', (2146, 2152), True, 'import numpy as np\n')]
|
# Copyright (c) 2020 <NAME>
# This code is licensed under MIT license
# (https://github.com/kwotsin/mimicry/blob/master/LICENSE)
# ------------------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2019 Megvii Inc. All rights reserved.
# ------------------------------------------------------------------------------
import math
import megengine.functional as F
import megengine.module as M
class GBlock(M.Module):
r"""
Residual block for generator.
    Uses bilinear (rather than nearest) interpolation with align_corners=False,
    matching how torchvision performs upsampling, as seen in:
    https://github.com/pytorch/vision/blob/master/torchvision/models/segmentation/_utils.py
Attributes:
in_channels (int): The channel size of input feature map.
out_channels (int): The channel size of output feature map.
hidden_channels (int): The channel size of intermediate feature maps.
upsample (bool): If True, upsamples the input feature map.
"""
def __init__(self,
in_channels,
out_channels,
hidden_channels=None,
upsample=False):
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.hidden_channels = hidden_channels if hidden_channels is not None else out_channels
self.learnable_sc = in_channels != out_channels or upsample
self.upsample = upsample
self.c1 = M.Conv2d(self.in_channels,
self.hidden_channels,
3,
1,
padding=1)
self.c2 = M.Conv2d(self.hidden_channels,
self.out_channels,
3,
1,
padding=1)
self.b1 = M.BatchNorm2d(self.in_channels)
self.b2 = M.BatchNorm2d(self.hidden_channels)
self.activation = M.ReLU()
M.init.xavier_uniform_(self.c1.weight, math.sqrt(2.0))
M.init.xavier_uniform_(self.c2.weight, math.sqrt(2.0))
# Shortcut layer
if self.learnable_sc:
self.c_sc = M.Conv2d(in_channels,
out_channels,
1,
1,
padding=0)
M.init.xavier_uniform_(self.c_sc.weight, 1.0)
def _upsample_conv(self, x, conv):
r"""
Helper function for performing convolution after upsampling.
"""
return conv(
F.interpolate(x,
scale_factor=2,
mode='bilinear',
align_corners=False))
def _residual(self, x):
r"""
Helper function for feedforwarding through main layers.
"""
h = x
h = self.b1(h)
h = self.activation(h)
h = self._upsample_conv(h, self.c1) if self.upsample else self.c1(h)
h = self.b2(h)
h = self.activation(h)
h = self.c2(h)
return h
def _shortcut(self, x):
r"""
Helper function for feedforwarding through shortcut layers.
"""
if self.learnable_sc:
x = self._upsample_conv(
x, self.c_sc) if self.upsample else self.c_sc(x)
return x
else:
return x
def forward(self, x):
r"""
Residual block feedforward function.
"""
return self._residual(x) + self._shortcut(x)
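# Usage sketch (hypothetical shapes; assumes an NCHW float32 input):
#   import numpy as np
#   import megengine as mge
#   block = GBlock(in_channels=64, out_channels=32, upsample=True)
#   y = block(mge.tensor(np.random.randn(1, 64, 8, 8).astype("float32")))
#   # y.shape == (1, 32, 16, 16): channels remapped, spatial size doubled.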
class DBlock(M.Module):
"""
Residual block for discriminator.
Attributes:
in_channels (int): The channel size of input feature map.
out_channels (int): The channel size of output feature map.
hidden_channels (int): The channel size of intermediate feature maps.
downsample (bool): If True, downsamples the input feature map.
"""
def __init__(self,
in_channels,
out_channels,
hidden_channels=None,
downsample=False):
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.hidden_channels = hidden_channels if hidden_channels is not None else in_channels
self.downsample = downsample
self.learnable_sc = (in_channels != out_channels) or downsample
# Build the layers
self.c1 = M.Conv2d(self.in_channels, self.hidden_channels, 3, 1,
1)
self.c2 = M.Conv2d(self.hidden_channels, self.out_channels, 3, 1,
1)
self.activation = M.ReLU()
M.init.xavier_uniform_(self.c1.weight, math.sqrt(2.0))
M.init.xavier_uniform_(self.c2.weight, math.sqrt(2.0))
# Shortcut layer
if self.learnable_sc:
self.c_sc = M.Conv2d(in_channels, out_channels, 1, 1, 0)
M.init.xavier_uniform_(self.c_sc.weight, 1.0)
def _residual(self, x):
"""
Helper function for feedforwarding through main layers.
"""
h = x
h = self.activation(h)
h = self.c1(h)
h = self.activation(h)
h = self.c2(h)
if self.downsample:
h = F.avg_pool2d(h, 2)
return h
def _shortcut(self, x):
"""
Helper function for feedforwarding through shortcut layers.
"""
if self.learnable_sc:
x = self.c_sc(x)
return F.avg_pool2d(x, 2) if self.downsample else x
else:
return x
def forward(self, x):
"""
Residual block feedforward function.
"""
        # NOTE: to exactly reproduce the PyTorch reference we pass F.relu(x) to the
        # shortcut in place of x, since PyTorch applies an inplace ReLU in the
        # residual branch (which mutates x).
return self._residual(x) + self._shortcut(F.relu(x))
class DBlockOptimized(M.Module):
"""
    Optimized residual block for the discriminator. Used as the first residual
    block, where downsampling always occurs. Follows the official SNGAN reference
    implementation in Chainer.
Attributes:
in_channels (int): The channel size of input feature map.
out_channels (int): The channel size of output feature map.
spectral_norm (bool): If True, uses spectral norm for convolutional layers.
"""
def __init__(self, in_channels, out_channels, spectral_norm=False):
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.spectral_norm = spectral_norm
# Build the layers
self.c1 = M.Conv2d(self.in_channels, self.out_channels, 3, 1, 1)
self.c2 = M.Conv2d(self.out_channels, self.out_channels, 3, 1, 1)
self.c_sc = M.Conv2d(self.in_channels, self.out_channels, 1, 1, 0)
self.activation = M.ReLU()
M.init.xavier_uniform_(self.c1.weight, math.sqrt(2.0))
M.init.xavier_uniform_(self.c2.weight, math.sqrt(2.0))
M.init.xavier_uniform_(self.c_sc.weight, 1.0)
def _residual(self, x):
"""
Helper function for feedforwarding through main layers.
"""
h = x
h = self.c1(h)
h = self.activation(h)
h = self.c2(h)
h = F.avg_pool2d(h, 2)
return h
def _shortcut(self, x):
"""
Helper function for feedforwarding through shortcut layers.
"""
return self.c_sc(F.avg_pool2d(x, 2))
def forward(self, x):
"""
Residual block feedforward function.
"""
return self._residual(x) + self._shortcut(x)
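# Usage sketch (hypothetical): an SNGAN-style discriminator stem typically stacks
#   DBlockOptimized(3, 64) -> DBlock(64, 128, downsample=True) -> ...
# with each downsampling block halving the spatial resolution via avg_pool2d.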
|
[
"megengine.module.ReLU",
"megengine.module.BatchNorm2d",
"megengine.functional.avg_pool2d",
"megengine.functional.relu",
"megengine.module.init.xavier_uniform_",
"megengine.functional.interpolate",
"megengine.module.Conv2d"
] |
[((2136, 2201), 'megengine.module.Conv2d', 'M.Conv2d', (['self.in_channels', 'self.hidden_channels', '(3)', '(1)'], {'padding': '(1)'}), '(self.in_channels, self.hidden_channels, 3, 1, padding=1)\n', (2144, 2201), True, 'import megengine.module as M\n'), ((2328, 2394), 'megengine.module.Conv2d', 'M.Conv2d', (['self.hidden_channels', 'self.out_channels', '(3)', '(1)'], {'padding': '(1)'}), '(self.hidden_channels, self.out_channels, 3, 1, padding=1)\n', (2336, 2394), True, 'import megengine.module as M\n'), ((2522, 2553), 'megengine.module.BatchNorm2d', 'M.BatchNorm2d', (['self.in_channels'], {}), '(self.in_channels)\n', (2535, 2553), True, 'import megengine.module as M\n'), ((2572, 2607), 'megengine.module.BatchNorm2d', 'M.BatchNorm2d', (['self.hidden_channels'], {}), '(self.hidden_channels)\n', (2585, 2607), True, 'import megengine.module as M\n'), ((2635, 2643), 'megengine.module.ReLU', 'M.ReLU', ([], {}), '()\n', (2641, 2643), True, 'import megengine.module as M\n'), ((5209, 5266), 'megengine.module.Conv2d', 'M.Conv2d', (['self.in_channels', 'self.hidden_channels', '(3)', '(1)', '(1)'], {}), '(self.in_channels, self.hidden_channels, 3, 1, 1)\n', (5217, 5266), True, 'import megengine.module as M\n'), ((5312, 5370), 'megengine.module.Conv2d', 'M.Conv2d', (['self.hidden_channels', 'self.out_channels', '(3)', '(1)', '(1)'], {}), '(self.hidden_channels, self.out_channels, 3, 1, 1)\n', (5320, 5370), True, 'import megengine.module as M\n'), ((5425, 5433), 'megengine.module.ReLU', 'M.ReLU', ([], {}), '()\n', (5431, 5433), True, 'import megengine.module as M\n'), ((7418, 7472), 'megengine.module.Conv2d', 'M.Conv2d', (['self.in_channels', 'self.out_channels', '(3)', '(1)', '(1)'], {}), '(self.in_channels, self.out_channels, 3, 1, 1)\n', (7426, 7472), True, 'import megengine.module as M\n'), ((7491, 7546), 'megengine.module.Conv2d', 'M.Conv2d', (['self.out_channels', 'self.out_channels', '(3)', '(1)', '(1)'], {}), '(self.out_channels, self.out_channels, 3, 1, 1)\n', (7499, 7546), True, 'import megengine.module as M\n'), ((7567, 7621), 'megengine.module.Conv2d', 'M.Conv2d', (['self.in_channels', 'self.out_channels', '(1)', '(1)', '(0)'], {}), '(self.in_channels, self.out_channels, 1, 1, 0)\n', (7575, 7621), True, 'import megengine.module as M\n'), ((7649, 7657), 'megengine.module.ReLU', 'M.ReLU', ([], {}), '()\n', (7655, 7657), True, 'import megengine.module as M\n'), ((7793, 7838), 'megengine.module.init.xavier_uniform_', 'M.init.xavier_uniform_', (['self.c_sc.weight', '(1.0)'], {}), '(self.c_sc.weight, 1.0)\n', (7815, 7838), True, 'import megengine.module as M\n'), ((8059, 8077), 'megengine.functional.avg_pool2d', 'F.avg_pool2d', (['h', '(2)'], {}), '(h, 2)\n', (8071, 8077), True, 'import megengine.functional as F\n'), ((2692, 2706), 'math.sqrt', 'math.sqrt', (['(2.0)'], {}), '(2.0)\n', (2701, 2706), False, 'import math\n'), ((2755, 2769), 'math.sqrt', 'math.sqrt', (['(2.0)'], {}), '(2.0)\n', (2764, 2769), False, 'import math\n'), ((2851, 2903), 'megengine.module.Conv2d', 'M.Conv2d', (['in_channels', 'out_channels', '(1)', '(1)'], {'padding': '(0)'}), '(in_channels, out_channels, 1, 1, padding=0)\n', (2859, 2903), True, 'import megengine.module as M\n'), ((3048, 3093), 'megengine.module.init.xavier_uniform_', 'M.init.xavier_uniform_', (['self.c_sc.weight', '(1.0)'], {}), '(self.c_sc.weight, 1.0)\n', (3070, 3093), True, 'import megengine.module as M\n'), ((3261, 3331), 'megengine.functional.interpolate', 'F.interpolate', (['x'], {'scale_factor': '(2)', 'mode': '"""bilinear"""', 'align_corners': 
'(False)'}), "(x, scale_factor=2, mode='bilinear', align_corners=False)\n", (3274, 3331), True, 'import megengine.functional as F\n'), ((5482, 5496), 'math.sqrt', 'math.sqrt', (['(2.0)'], {}), '(2.0)\n', (5491, 5496), False, 'import math\n'), ((5545, 5559), 'math.sqrt', 'math.sqrt', (['(2.0)'], {}), '(2.0)\n', (5554, 5559), False, 'import math\n'), ((5641, 5685), 'megengine.module.Conv2d', 'M.Conv2d', (['in_channels', 'out_channels', '(1)', '(1)', '(0)'], {}), '(in_channels, out_channels, 1, 1, 0)\n', (5649, 5685), True, 'import megengine.module as M\n'), ((5698, 5743), 'megengine.module.init.xavier_uniform_', 'M.init.xavier_uniform_', (['self.c_sc.weight', '(1.0)'], {}), '(self.c_sc.weight, 1.0)\n', (5720, 5743), True, 'import megengine.module as M\n'), ((6027, 6045), 'megengine.functional.avg_pool2d', 'F.avg_pool2d', (['h', '(2)'], {}), '(h, 2)\n', (6039, 6045), True, 'import megengine.functional as F\n'), ((7706, 7720), 'math.sqrt', 'math.sqrt', (['(2.0)'], {}), '(2.0)\n', (7715, 7720), False, 'import math\n'), ((7769, 7783), 'math.sqrt', 'math.sqrt', (['(2.0)'], {}), '(2.0)\n', (7778, 7783), False, 'import math\n'), ((8242, 8260), 'megengine.functional.avg_pool2d', 'F.avg_pool2d', (['x', '(2)'], {}), '(x, 2)\n', (8254, 8260), True, 'import megengine.functional as F\n'), ((6263, 6281), 'megengine.functional.avg_pool2d', 'F.avg_pool2d', (['x', '(2)'], {}), '(x, 2)\n', (6275, 6281), True, 'import megengine.functional as F\n'), ((6642, 6651), 'megengine.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (6648, 6651), True, 'import megengine.functional as F\n')]
|