code | apis | extract_api
---|---|---
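Each row below pairs a Python source file (code) with the fully qualified APIs it uses (apis) and a list of extracted call records (extract_api). Each record appears to carry a (start, end) character span into the source, the dotted API name, the parsed arguments, the rendered call text, and the originating import statement. The extraction pipeline itself is not documented here; the sketch below shows one way such span/name pairs could be recovered with the standard ast module (the helper name is hypothetical, and the field layout is inferred from the rows, not confirmed):

import ast

def extract_api_calls(code: str):
    """Recover ((start, end), dotted_name) pairs for every call whose callee
    is a plain name or attribute chain -- an approximation of extract_api."""
    # Absolute offset of each line start, so AST (lineno, col) positions can
    # be converted into character spans over the whole source string.
    line_starts = [0]
    for line in code.splitlines(keepends=True):
        line_starts.append(line_starts[-1] + len(line))

    def offset(lineno, col):
        return line_starts[lineno - 1] + col

    calls = []
    for node in ast.walk(ast.parse(code)):
        if not isinstance(node, ast.Call):
            continue
        # Rebuild the dotted callee name, e.g. dist.functional.all_reduce_sum.
        parts, target = [], node.func
        while isinstance(target, ast.Attribute):
            parts.append(target.attr)
            target = target.value
        if isinstance(target, ast.Name):
            parts.append(target.id)
            span = (offset(node.lineno, node.col_offset),
                    offset(node.end_lineno, node.end_col_offset))
            calls.append((span, '.'.join(reversed(parts))))
    return calls

# e.g. extract_api_calls("ensure_dir(cfg.output_dir)") == [((0, 26), 'ensure_dir')]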
import os
import os.path as osp
import bisect
import argparse
import multiprocessing as mp
import numpy as np
from tqdm import tqdm
import megengine as mge
from megengine import distributed as dist
from megengine import optimizer as optim
import megengine.autodiff as autodiff
from megengine import jit
import dataset, network
from config import config as cfg
from misc_utils import ensure_dir
import socket
import pdb
ensure_dir(cfg.output_dir)
logger = mge.get_logger(__name__)
log_path = osp.join(cfg.output_dir, 'logger.log')
mge.set_log_file(log_path, mode='a')
def find_free_port():
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(("", 0))
port = sock.getsockname()[1]
sock.close()
return port
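# Gradient callback for distributed training: sum each parameter's gradient
# across all ranks, then divide by the world size so every worker applies the
# same averaged gradient.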
def allreduce_cb(param, grad, group=dist.WORLD):
return dist.functional.all_reduce_sum(grad, group) / group.size
def train_one_epoch(model, gm, data_iter, opt, max_steps, rank, epoch_id, gpu_num):
# @jit.trace(symbolic=False,)
def propagate():
with gm:
loss_dict = model(model.inputs)
total_loss = sum([loss_dict[key].mean() for key in loss_dict.keys()])
gm.backward(total_loss)
opt.step().clear_grad()
loss_dict['total_loss'] = total_loss
return loss_dict
for step in range(max_steps):
        # learning rate: linear warmup over the first cfg.warm_iters steps of epoch 0
if epoch_id == 0 and step < cfg.warm_iters:
base_lr = (
cfg.basic_lr * gpu_num * cfg.batch_per_gpu
* (cfg.lr_decay_rate ** bisect.bisect_right(cfg.lr_decay_sates, epoch_id)
)
)
lr_factor = (step + 1.0) / cfg.warm_iters
for param_group in opt.param_groups:
param_group['lr'] = (0.33 + 0.67 * lr_factor) * base_lr
mini_batch = next(data_iter)
im_info = mini_batch["im_info"]
image = mini_batch["data"][:, :, :int(im_info[0, 0]), :int(im_info[0, 1])]
model.inputs["image"].set_value(image)
model.inputs["gt_boxes"].set_value(mini_batch["boxes"])
model.inputs["im_info"].set_value(mini_batch["im_info"])
del mini_batch
losses = propagate()
print_str = ' '
for loss_name, loss_value in losses.items():
print_str += ', {}: {:.4f}'.format(loss_name, loss_value.numpy())
if rank == 0:
if step % cfg.log_dump_interval == 0:
logger.info(
"epoch-{}, {}/{}, lr: {:.4f}{}.\n{}".format(
epoch_id,
step,
max_steps,
opt.param_groups[0]["lr"],
print_str,
cfg.workspace,
)
)
def worker(rank, gpu_num, args):
# using sublinear
os.environ["MGB_COMP_GRAPH_OPT"] = "enable_sublinear_memory_opt=1;seq_opt.enable_seq_comp_node_opt=0"
os.environ["MGB_SUBLINEAR_MEMORY_GENETIC_NR_ITER"] = '10'
os.environ['MGB_CUDA_RESERVE_MEMORY'] = '1'
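    # Sublinear memory trades extra recomputation for a smaller activation
    # footprint; reserving CUDA memory up front reduces allocator churn.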
# establish the server if is the master
dist_port = args.port
if rank == 0:
dist.Server(port=dist_port)
    if gpu_num > 1:
dist.init_process_group(
master_ip="localhost",
port=dist_port,
world_size=gpu_num,
rank=rank,
device=rank,
)
logger.info("Init process group for gpu%d done", rank)
model = network.Network()
params = model.parameters(requires_grad=True)
model.train()
# Autodiff gradient manager
gm = autodiff.GradManager().attach(
model.parameters(),
callbacks=allreduce_cb,
)
opt = optim.SGD(
params,
lr=cfg.basic_lr * gpu_num * cfg.batch_per_gpu,
momentum=cfg.momentum,
weight_decay=cfg.weight_decay,
)
if cfg.pretrain_weight is not None:
weights = mge.load(cfg.pretrain_weight)
del weights['fc.weight']
del weights['fc.bias']
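        # The classification head (fc) does not match the detection task, so
        # only the backbone weights are loaded.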
model.resnet50.load_state_dict(weights)
start_epoch = 0
if args.resume_weights is not None:
assert osp.exists(args.resume_weights)
model_file = args.resume_weights
model_dict = mge.load(model_file)
start_epoch, weights = model_dict['epoch'] + 1, model_dict['state_dict']
model.load_state_dict(weights, strict=False)
logger.info("Prepare dataset")
train_loader = dataset.train_dataset(rank)
logger.info("Training...")
for epoch_id in range(start_epoch, cfg.max_epoch):
for param_group in opt.param_groups:
param_group["lr"] = (
cfg.basic_lr * gpu_num * cfg.batch_per_gpu
* (cfg.lr_decay_rate ** bisect.bisect_right(cfg.lr_decay_sates, epoch_id))
)
max_steps = cfg.nr_images_epoch // (cfg.batch_per_gpu * gpu_num)
train_one_epoch(model, gm, train_loader, opt, max_steps, rank, epoch_id, gpu_num)
if rank == 0:
save_path = osp.join(cfg.model_dir, 'epoch-{}.pkl'.format(epoch_id + 1))
state_dict = model.state_dict()
names = [k for k, _ in state_dict.items()]
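            # Input placeholder tensors are not learned parameters; drop them
            # from the checkpoint.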
for name in names:
if name.startswith('inputs.'):
del state_dict[name]
mge.save(
{"epoch": epoch_id, "state_dict": state_dict}, save_path,
)
logger.info("dump weights to %s", save_path)
def train(args):
# ------------------------ begin training -------------------------- #
valid_nr_dev = mge.get_device_count("gpu")
gpu_num = min(valid_nr_dev, args.num_gpus)
assert gpu_num > 0
logger.info('Device Count: {}'.format(gpu_num))
ensure_dir(cfg.model_dir)
if not osp.exists('output'):
        os.symlink(cfg.output_dir, 'output')
if gpu_num > 1:
        args.port = find_free_port()
mp.set_start_method("spawn")
processes = list()
for i in range(gpu_num):
process = mp.Process(target=worker, args=(i, gpu_num, args))
process.start()
processes.append(process)
for p in processes:
p.join()
else:
worker(0, 1, args)
def run_train():
parser = argparse.ArgumentParser()
parser.add_argument("--num_gpus", "-d", default=-1, type=int, help="total number of gpus for training")
parser.add_argument('--resume_weights', '-r', default=None, type=str)
parser.add_argument('--progressbar', '-p', action='store_true', default=False)
parser.add_argument('--port', '-pt', type=int, default=11123)
args = parser.parse_args()
train(args)
if __name__ == '__main__':
run_train()
|
[
"megengine.set_log_file",
"megengine.get_device_count",
"megengine.save",
"megengine.get_logger",
"megengine.optimizer.SGD",
"megengine.distributed.functional.all_reduce_sum",
"megengine.distributed.init_process_group",
"megengine.autodiff.GradManager",
"megengine.distributed.Server",
"megengine.load"
] |
[((420, 446), 'misc_utils.ensure_dir', 'ensure_dir', (['cfg.output_dir'], {}), '(cfg.output_dir)\n', (430, 446), False, 'from misc_utils import ensure_dir\n'), ((456, 480), 'megengine.get_logger', 'mge.get_logger', (['__name__'], {}), '(__name__)\n', (470, 480), True, 'import megengine as mge\n'), ((492, 530), 'os.path.join', 'osp.join', (['cfg.output_dir', '"""logger.log"""'], {}), "(cfg.output_dir, 'logger.log')\n", (500, 530), True, 'import os.path as osp\n'), ((531, 567), 'megengine.set_log_file', 'mge.set_log_file', (['log_path'], {'mode': '"""a"""'}), "(log_path, mode='a')\n", (547, 567), True, 'import megengine as mge\n'), ((603, 652), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (616, 652), False, 'import socket\n'), ((3444, 3461), 'network.Network', 'network.Network', ([], {}), '()\n', (3459, 3461), False, 'import dataset, network\n'), ((3680, 3803), 'megengine.optimizer.SGD', 'optim.SGD', (['params'], {'lr': '(cfg.basic_lr * gpu_num * cfg.batch_per_gpu)', 'momentum': 'cfg.momentum', 'weight_decay': 'cfg.weight_decay'}), '(params, lr=cfg.basic_lr * gpu_num * cfg.batch_per_gpu, momentum=\n cfg.momentum, weight_decay=cfg.weight_decay)\n', (3689, 3803), True, 'from megengine import optimizer as optim\n'), ((4423, 4450), 'dataset.train_dataset', 'dataset.train_dataset', (['rank'], {}), '(rank)\n', (4444, 4450), False, 'import dataset, network\n'), ((5549, 5576), 'megengine.get_device_count', 'mge.get_device_count', (['"""gpu"""'], {}), "('gpu')\n", (5569, 5576), True, 'import megengine as mge\n'), ((5704, 5729), 'misc_utils.ensure_dir', 'ensure_dir', (['cfg.model_dir'], {}), '(cfg.model_dir)\n', (5714, 5729), False, 'from misc_utils import ensure_dir\n'), ((6224, 6249), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (6247, 6249), False, 'import argparse\n'), ((803, 846), 'megengine.distributed.functional.all_reduce_sum', 'dist.functional.all_reduce_sum', (['grad', 'group'], {}), '(grad, group)\n', (833, 846), True, 'from megengine import distributed as dist\n'), ((3134, 3161), 'megengine.distributed.Server', 'dist.Server', ([], {'port': 'dist_port'}), '(port=dist_port)\n', (3145, 3161), True, 'from megengine import distributed as dist\n'), ((3190, 3301), 'megengine.distributed.init_process_group', 'dist.init_process_group', ([], {'master_ip': '"""localhost"""', 'port': 'dist_port', 'world_size': 'gpu_num', 'rank': 'rank', 'device': 'rank'}), "(master_ip='localhost', port=dist_port, world_size=\n gpu_num, rank=rank, device=rank)\n", (3213, 3301), True, 'from megengine import distributed as dist\n'), ((3897, 3926), 'megengine.load', 'mge.load', (['cfg.pretrain_weight'], {}), '(cfg.pretrain_weight)\n', (3905, 3926), True, 'import megengine as mge\n'), ((4115, 4146), 'os.path.exists', 'osp.exists', (['args.resume_weights'], {}), '(args.resume_weights)\n', (4125, 4146), True, 'import os.path as osp\n'), ((4209, 4229), 'megengine.load', 'mge.load', (['model_file'], {}), '(model_file)\n', (4217, 4229), True, 'import megengine as mge\n'), ((5742, 5762), 'os.path.exists', 'osp.exists', (['"""output"""'], {}), "('output')\n", (5752, 5762), True, 'import os.path as osp\n'), ((5772, 5808), 'os.symlink', 'os.symlink', (['cfg.output_dir', '"""output"""'], {}), "(cfg.output_dir, 'output')\n", (5782, 5808), False, 'import os\n'), ((5873, 5901), 'multiprocessing.set_start_method', 'mp.set_start_method', (['"""spawn"""'], {}), "('spawn')\n", (5892, 5901), True, 'import multiprocessing as mp\n'), 
((3572, 3594), 'megengine.autodiff.GradManager', 'autodiff.GradManager', ([], {}), '()\n', (3592, 3594), True, 'import megengine.autodiff as autodiff\n'), ((5282, 5348), 'megengine.save', 'mge.save', (["{'epoch': epoch_id, 'state_dict': state_dict}", 'save_path'], {}), "({'epoch': epoch_id, 'state_dict': state_dict}, save_path)\n", (5290, 5348), True, 'import megengine as mge\n'), ((5984, 6034), 'multiprocessing.Process', 'mp.Process', ([], {'target': 'worker', 'args': '(i, gpu_num, args)'}), '(target=worker, args=(i, gpu_num, args))\n', (5994, 6034), True, 'import multiprocessing as mp\n'), ((1531, 1580), 'bisect.bisect_right', 'bisect.bisect_right', (['cfg.lr_decay_sates', 'epoch_id'], {}), '(cfg.lr_decay_sates, epoch_id)\n', (1550, 1580), False, 'import bisect\n'), ((4715, 4764), 'bisect.bisect_right', 'bisect.bisect_right', (['cfg.lr_decay_sates', 'epoch_id'], {}), '(cfg.lr_decay_sates, epoch_id)\n', (4734, 4764), False, 'import bisect\n')]
|
#!/usr/bin/env python
"""
Plot logs of variables saved in a text file by sfepy.base.log.Log class.
The plot should be almost the same as the plot that would be generated by the
Log directly.
"""
from optparse import OptionParser
import matplotlib.pyplot as plt
from sfepy.base.log import read_log, plot_log
usage = '%prog [options] filename\n' + __doc__.rstrip()
def parse_rc(option, opt, value, parser):
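    # Parse "key=val,key=val" into a dict; eval() lets values be numbers,
    # tuples, or other Python literals.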
pars = {}
for pair in value.split(','):
key, val = pair.split('=')
pars[key] = eval(val)
setattr(parser.values, option.dest, pars)
helps = {
'output_filename' :
'save the figure using the given file name',
'rc' : 'matplotlib resources',
'no_show' :
'do not show the figure',
}
def main():
parser = OptionParser(usage=usage)
parser.add_option('-o', '--output', metavar='filename',
action='store', dest='output_filename',
default=None, help=helps['output_filename'])
parser.add_option('--rc', type='str', metavar='key=val,...',
action='callback', dest='rc',
callback=parse_rc, default={}, help=helps['rc'])
parser.add_option('-n', '--no-show',
action='store_true', dest='no_show',
default=False, help=helps['no_show'])
options, args = parser.parse_args()
if len(args) == 1:
filename = args[0]
else:
parser.print_help()
return
log, info = read_log(filename)
plt.rcParams.update(options.rc)
plot_log(1, log, info)
if options.output_filename:
plt.savefig(options.output_filename)
if not options.no_show:
plt.show()
if __name__ == '__main__':
main()
|
[
"sfepy.base.log.plot_log",
"sfepy.base.log.read_log"
] |
[((763, 788), 'optparse.OptionParser', 'OptionParser', ([], {'usage': 'usage'}), '(usage=usage)\n', (775, 788), False, 'from optparse import OptionParser\n'), ((1488, 1506), 'sfepy.base.log.read_log', 'read_log', (['filename'], {}), '(filename)\n', (1496, 1506), False, 'from sfepy.base.log import read_log, plot_log\n'), ((1512, 1543), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (['options.rc'], {}), '(options.rc)\n', (1531, 1543), True, 'import matplotlib.pyplot as plt\n'), ((1549, 1571), 'sfepy.base.log.plot_log', 'plot_log', (['(1)', 'log', 'info'], {}), '(1, log, info)\n', (1557, 1571), False, 'from sfepy.base.log import read_log, plot_log\n'), ((1613, 1649), 'matplotlib.pyplot.savefig', 'plt.savefig', (['options.output_filename'], {}), '(options.output_filename)\n', (1624, 1649), True, 'import matplotlib.pyplot as plt\n'), ((1687, 1697), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1695, 1697), True, 'import matplotlib.pyplot as plt\n')]
|
from typing import Optional
from sqlmodel import Field, Relationship, SQLModel
from fastapi_server.models.user import User
class ChatMessage(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
timestamp: int
message: str
user_id: int = Field(foreign_key='user.id')
    # Assumption: `user` is meant as an ORM relationship; a bare `user: User`
    # annotation on a table model would be treated as a column and fail.
    user: Optional[User] = Relationship()
|
[
"sqlmodel.Field"
] |
[((178, 215), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (183, 215), False, 'from sqlmodel import Field, SQLModel\n'), ((271, 299), 'sqlmodel.Field', 'Field', ([], {'foreign_key': '"""user.id"""'}), "(foreign_key='user.id')\n", (276, 299), False, 'from sqlmodel import Field, SQLModel\n')]
|
from fastapi import APIRouter, Depends
from ..utils import engine, get_session
from sqlmodel import Session, select
from sqlalchemy.exc import NoResultFound
from ..models.client import Client
from ..models.epic import Epic
from datetime import datetime
router = APIRouter(prefix="/api/clients", tags=["client"])
@router.post("/")
async def post_client(*, client: Client, session: Session = Depends(get_session)):
"""
Post a new client.
Parameters
----------
client : Client
Client that is to be added to the database.
session : Session
SQL session that is to be used to add the client.
Defaults to creating a dependency on the running SQL model session.
"""
statement = select(Client).where(Client.name == client.name)
try:
result = session.exec(statement).one()
return False
except NoResultFound:
session.add(client)
session.commit()
session.refresh(client)
return client
@router.get("/")
async def read_clients(session: Session = Depends(get_session)):
"""
Get a list of all clients.
Parameters
----------
session : Session
SQL session that is to be used to get a list of the clients.
Defaults to creating a dependency on the running SQL model session.
"""
statement = select(Client)
results = session.exec(statement).all()
return results
@router.get("/active")
async def read_active_clients(session: Session = Depends(get_session)):
"""
Get a list of all active clients.
Parameters
----------
session : Session
SQL session that is to be used to get a list of all of the active clients.
Defaults to creating a dependency on the running SQL model session.
"""
statement = select(Client).where(Client.is_active == True).order_by(Client.id.asc())
results = session.exec(statement).all()
return results
@router.get("/{client_id}")
async def read_client(
*, client_id: int = None, session: Session = Depends(get_session)
):
"""
Get a client by client_id.
Parameters
----------
client_id : int
ID of client that is to be read.
session : Session
SQL session that is to be used to read a client.
Defaults to creating a dependency on the running SQL model session.
"""
statement = select(Client).where(Client.id == client_id)
try:
result = session.exec(statement).one()
return result
except NoResultFound:
msg = f"""There is no client with id = {client_id}"""
return msg
@router.get("/names/{name}")
async def read_clients_by_name(
*, name: str = None, session: Session = Depends(get_session)
):
"""
Get a client by client_name.
Parameters
----------
name : str
Name of client to be read.
session : Session
SQL session that is to be used to read a client.
Defaults to creating a dependency on the running SQL model session.
"""
statement = select(Client).where(Client.name == name)
    try:
        result = session.exec(statement).one()
        return result
    except NoResultFound:
        return f"There is no client with name = {name}"
@router.get("/{client_id}/epics/")
async def read_clients_epics(
client_id: int = None, session: Session = Depends(get_session)
):
"""
Get epics from a client_id.
Parameters
----------
client_id : int
ID of client that is to be used to pull epics from.
session : Session
SQL session that is to be used to pull the epics.
Defaults to creating a dependency on the running SQL model session.
"""
statement = (
select(Client.id, Client.name, Epic.name)
.select_from(Client)
.join(Epic)
.where(Client.id == client_id)
)
results = session.exec(statement).all()
return results
@router.put("/{client_id}/deactivate-client")
async def deactivate_client(
*,
client_id: int,
session: Session = Depends(get_session),
):
"""Deactivate a client"""
statement = select(Client).where(Client.id == client_id)
client_to_update = session.exec(statement).one()
    client_to_update.is_active = False
session.add(client_to_update)
session.commit()
session.refresh(client_to_update)
return client_to_update
@router.put("/{client_id}/activate")
async def activate_clients(
*,
client_id: int,
session: Session = Depends(get_session),
):
"""
Activate a client using its id as a key.
Parameters
----------
client_id : int
ID of the client to be activated.
session : Session
SQL session that is to be used to activate a client.
Defaults to creating a dependency on the running SQL model session.
"""
statement = select(Client).where(Client.id == client_id)
client_to_update = session.exec(statement).one()
client_to_update.is_active = True
client_to_update.updated_at = datetime.now()
session.add(client_to_update)
session.commit()
session.refresh(client_to_update)
return client_to_update
@router.put("/{client_id}/deactivate")
async def deactivate_clients(
*,
client_id: int,
session: Session = Depends(get_session),
):
"""
Deactivate a client using its id as a key.
Parameters
----------
client_id : int
ID of the client to be deactivated.
session : Session
SQL session that is to be used to deactivate a client.
Defaults to creating a dependency on the running SQL model session.
"""
statement = select(Client).where(Client.id == client_id)
client_to_update = session.exec(statement).one()
client_to_update.is_active = False
client_to_update.updated_at = datetime.now()
session.add(client_to_update)
session.commit()
session.refresh(client_to_update)
return client_to_update
@router.put("/{client_id}/deactivate-epics")
async def update_clients_and_epics(
*,
client_id: int,
session: Session = Depends(get_session),
):
"""Deactivate a client and its epics"""
"""
Deactivate a client and its epics using the client's ID as a key.
Parameters
----------
client_id : int
ID of the client to deactivate.
session : Session
SQL session that is to be used to deactivate the client and its respective epics.
Defaults to creating a dependency on the running SQL model session.
"""
statement1 = select(Client).where(Client.id == client_id)
client_to_update = session.exec(statement1).one()
client_to_update.is_active = False
client_to_update.updated_at = datetime.now()
session.add(client_to_update)
statement2 = select(Epic).where(Epic.client_id == client_id)
epics_to_update = session.exec(statement2).all()
for epic in epics_to_update:
epic.is_active = False
session.add(epic)
session.commit()
return True
@router.put("/{client_id}")
async def update_clients(
*,
client_id: int = None,
new_client_name: str = None,
is_active: bool = None,
session: Session = Depends(get_session),
):
"""
Update a client from a client_id.
Parameters
----------
client_id : int
ID of the client to update.
new_client_name : str
New name of the client.
session : Session
SQL session that is to be used to update a client.
Defaults to creating a dependency on the running SQL model session.
"""
statement = select(Client).where(Client.id == client_id)
client_to_update = session.exec(statement).one()
    if new_client_name is not None:
        client_to_update.name = new_client_name
    if is_active is not None:
        client_to_update.is_active = is_active
client_to_update.updated_at = datetime.now()
session.add(client_to_update)
session.commit()
session.refresh(client_to_update)
return client_to_update
|
[
"sqlmodel.select"
] |
[((263, 312), 'fastapi.APIRouter', 'APIRouter', ([], {'prefix': '"""/api/clients"""', 'tags': "['client']"}), "(prefix='/api/clients', tags=['client'])\n", (272, 312), False, 'from fastapi import APIRouter, Depends\n'), ((393, 413), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (400, 413), False, 'from fastapi import APIRouter, Depends\n'), ((1050, 1070), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (1057, 1070), False, 'from fastapi import APIRouter, Depends\n'), ((1334, 1348), 'sqlmodel.select', 'select', (['Client'], {}), '(Client)\n', (1340, 1348), False, 'from sqlmodel import Session, select\n'), ((1479, 1499), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (1486, 1499), False, 'from fastapi import APIRouter, Depends\n'), ((2023, 2043), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (2030, 2043), False, 'from fastapi import APIRouter, Depends\n'), ((2694, 2714), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (2701, 2714), False, 'from fastapi import APIRouter, Depends\n'), ((3235, 3255), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (3242, 3255), False, 'from fastapi import APIRouter, Depends\n'), ((3923, 3943), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (3930, 3943), False, 'from fastapi import APIRouter, Depends\n'), ((4499, 4519), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (4506, 4519), False, 'from fastapi import APIRouter, Depends\n'), ((5023, 5037), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5035, 5037), False, 'from datetime import datetime\n'), ((5280, 5300), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (5287, 5300), False, 'from fastapi import APIRouter, Depends\n'), ((5811, 5825), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5823, 5825), False, 'from datetime import datetime\n'), ((6081, 6101), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (6088, 6101), False, 'from fastapi import APIRouter, Depends\n'), ((6704, 6718), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (6716, 6718), False, 'from datetime import datetime\n'), ((7172, 7192), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (7179, 7192), False, 'from fastapi import APIRouter, Depends\n'), ((7855, 7869), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (7867, 7869), False, 'from datetime import datetime\n'), ((730, 744), 'sqlmodel.select', 'select', (['Client'], {}), '(Client)\n', (736, 744), False, 'from sqlmodel import Session, select\n'), ((2357, 2371), 'sqlmodel.select', 'select', (['Client'], {}), '(Client)\n', (2363, 2371), False, 'from sqlmodel import Session, select\n'), ((3019, 3033), 'sqlmodel.select', 'select', (['Client'], {}), '(Client)\n', (3025, 3033), False, 'from sqlmodel import Session, select\n'), ((3994, 4008), 'sqlmodel.select', 'select', (['Client'], {}), '(Client)\n', (4000, 4008), False, 'from sqlmodel import Session, select\n'), ((4145, 4157), 'sqlmodel.select', 'select', (['Epic'], {}), '(Epic)\n', (4151, 4157), False, 'from sqlmodel import Session, select\n'), ((4853, 4867), 'sqlmodel.select', 'select', (['Client'], {}), '(Client)\n', (4859, 4867), False, 'from sqlmodel import Session, select\n'), ((5640, 5654), 'sqlmodel.select', 'select', (['Client'], {}), '(Client)\n', (5646, 5654), False, 'from sqlmodel import Session, select\n'), ((6532, 6546), 
'sqlmodel.select', 'select', (['Client'], {}), '(Client)\n', (6538, 6546), False, 'from sqlmodel import Session, select\n'), ((6770, 6782), 'sqlmodel.select', 'select', (['Epic'], {}), '(Epic)\n', (6776, 6782), False, 'from sqlmodel import Session, select\n'), ((7570, 7584), 'sqlmodel.select', 'select', (['Client'], {}), '(Client)\n', (7576, 7584), False, 'from sqlmodel import Session, select\n'), ((1784, 1798), 'sqlmodel.select', 'select', (['Client'], {}), '(Client)\n', (1790, 1798), False, 'from sqlmodel import Session, select\n'), ((3600, 3641), 'sqlmodel.select', 'select', (['Client.id', 'Client.name', 'Epic.name'], {}), '(Client.id, Client.name, Epic.name)\n', (3606, 3641), False, 'from sqlmodel import Session, select\n')]
|
"""add messages
Revision ID: d2388da5bbfd
Revises: <PASSWORD>
Create Date: 2022-05-04 21:49:29.234380+00:00
"""
import sqlalchemy as sa
import sqlmodel
from alembic import op
from common.database.tables.types import TimeStamp
# revision identifiers, used by Alembic.
revision = "d2388da5bbfd"
down_revision = "3<PASSWORD>"
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"messages",
sa.Column(
"created_at",
TimeStamp(),
server_default=sa.func.now(),
nullable=False,
),
sa.Column(
"updated_at",
TimeStamp(),
server_default=sa.func.now(),
nullable=False,
),
sa.Column("sent", sa.Boolean(), nullable=False),
sa.Column("subject", sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column("content", sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column("id", sa.Integer(), nullable=False),
sa.PrimaryKeyConstraint("id"),
)
op.create_table(
"recipients",
sa.Column("message_id", sa.Integer(), nullable=True),
sa.Column(
"group",
sa.Enum(
"EVERYONE",
"APPLICATION_COMPLETE",
"APPLICATION_INCOMPLETE",
"STATUS_ACCEPTED",
"STATUS_DENIED",
"STATUS_PENDING",
name="group",
),
nullable=False,
),
sa.Column("id", sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(["message_id"], ["messages.id"], ondelete="CASCADE"),
sa.PrimaryKeyConstraint("id"),
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table("recipients")
op.drop_table("messages")
# ### end Alembic commands ###
|
[
"sqlmodel.sql.sqltypes.AutoString"
] |
[((1885, 1912), 'alembic.op.drop_table', 'op.drop_table', (['"""recipients"""'], {}), "('recipients')\n", (1898, 1912), False, 'from alembic import op\n'), ((1917, 1942), 'alembic.op.drop_table', 'op.drop_table', (['"""messages"""'], {}), "('messages')\n", (1930, 1942), False, 'from alembic import op\n'), ((1076, 1105), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""'], {}), "('id')\n", (1099, 1105), True, 'import sqlalchemy as sa\n'), ((1638, 1714), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['message_id']", "['messages.id']"], {'ondelete': '"""CASCADE"""'}), "(['message_id'], ['messages.id'], ondelete='CASCADE')\n", (1661, 1714), True, 'import sqlalchemy as sa\n'), ((1724, 1753), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""'], {}), "('id')\n", (1747, 1753), True, 'import sqlalchemy as sa\n'), ((547, 558), 'common.database.tables.types.TimeStamp', 'TimeStamp', ([], {}), '()\n', (556, 558), False, 'from common.database.tables.types import TimeStamp\n'), ((698, 709), 'common.database.tables.types.TimeStamp', 'TimeStamp', ([], {}), '()\n', (707, 709), False, 'from common.database.tables.types import TimeStamp\n'), ((818, 830), 'sqlalchemy.Boolean', 'sa.Boolean', ([], {}), '()\n', (828, 830), True, 'import sqlalchemy as sa\n'), ((878, 912), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (910, 912), False, 'import sqlmodel\n'), ((960, 994), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (992, 994), False, 'import sqlmodel\n'), ((1037, 1049), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (1047, 1049), True, 'import sqlalchemy as sa\n'), ((1188, 1200), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (1198, 1200), True, 'import sqlalchemy as sa\n'), ((1270, 1411), 'sqlalchemy.Enum', 'sa.Enum', (['"""EVERYONE"""', '"""APPLICATION_COMPLETE"""', '"""APPLICATION_INCOMPLETE"""', '"""STATUS_ACCEPTED"""', '"""STATUS_DENIED"""', '"""STATUS_PENDING"""'], {'name': '"""group"""'}), "('EVERYONE', 'APPLICATION_COMPLETE', 'APPLICATION_INCOMPLETE',\n 'STATUS_ACCEPTED', 'STATUS_DENIED', 'STATUS_PENDING', name='group')\n", (1277, 1411), True, 'import sqlalchemy as sa\n'), ((1599, 1611), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (1609, 1611), True, 'import sqlalchemy as sa\n'), ((587, 600), 'sqlalchemy.func.now', 'sa.func.now', ([], {}), '()\n', (598, 600), True, 'import sqlalchemy as sa\n'), ((738, 751), 'sqlalchemy.func.now', 'sa.func.now', ([], {}), '()\n', (749, 751), True, 'import sqlalchemy as sa\n')]
|
# Vibroacoustics
#
# E.Rohan, V.Lukeš
# Homogenization of the vibro–acoustic transmission on periodically
# perforated elastic plates with arrays of resonators.
# https://arxiv.org/abs/2104.01367 (arXiv:2104.01367v1)
import os.path as op
import numpy as nm
from collections.abc import Iterable
from scipy.io import savemat, loadmat
from sfepy.base.base import output, debug, Struct
from sfepy import data_dir
from sfepy.discrete.fem.periodic import match_y_plane, match_x_plane
from acoustics_macro_utils import eval_phi, post_process,\
generate_plate_mesh, get_region_entities
from sfepy.discrete.projections import project_by_component
from sfepy.discrete.fem import Mesh, FEDomain
wdir = op.dirname(__file__)
def post_process_macro(out, pb, state, extend=False):
pbvars = pb.get_variables()
n1, ng1, c1, cg1, ds1, nmap1 = get_region_entities(pbvars['p1'])
noff = n1.shape[0]
n2, ng2, c2, cg2, _, nmap2 = get_region_entities(pbvars['p2'], noff=noff)
nend = nm.max(c2) + 1
nmap = nm.hstack([nmap1, nmap2])
n1[:, 2] += pb.conf.eps0 * 0.5
n2[:, 2] -= pb.conf.eps0 * 0.5
mesh2 = Mesh.from_data('m2', nm.vstack([n1, n2]), nm.hstack([ng1, ng2]),
[nm.vstack([c1, c2])], [nm.hstack([cg1, cg2])],
[ds1])
oname = op.join(pb.output_dir, pb.ofn_trunk + '_p.vtk')
out2 = {}
for ir in ['real.', 'imag.']:
pdata = nm.zeros((nmap.shape[0], 1), dtype=nm.float64)
for v, idxs in [('p1', slice(0, noff)), ('p2', slice(noff, nend))]:
pdata[idxs, :] = out[ir + v].data
out2[ir + 'p'] = Struct(name='p', mode='vertex', data=pdata)
mesh2.write(oname, out=out2)
post_process(out, pb, state, save_var0='dp0')
for k1 in ['g01', 'imag.g01', 'real.g01']:
o = out[k1]
k0 = k1.replace('01', '0')
k2 = k1.replace('01', '02')
out[k0] = Struct(name=o.name,
mode=o.mode,
dofs=o.dofs,
var_name=o.var_name,
data=(out[k1].data - out[k2].data) / pb.conf.eps0)
for k in ['', 'imag.', 'real.']:
o = out[k + 'dp0']
k0 = k + 'jP1'
out[k0] = Struct(name=o.name,
mode=o.mode,
dofs=o.dofs,
var_name=o.var_name,
data=o.data / pb.conf.eps0)
o = out[k + 'g01']
o2 = out[k + 'g02']
out[k + 'G1'] = Struct(name=o.name,
mode=o.mode,
dofs=o.dofs,
var_name=o.var_name,
data=(o.data - o2.data) / pb.conf.eps0)
return out
def get_mat(coors, mode, pb):
if mode == 'qp':
conf = pb.conf
c = conf.sound_speed
w = conf.wave_num * c
nqp = coors.shape[0]
aux = nm.ones((nqp, 1, 1), dtype=nm.float64)
out = {
'c2': aux * c**2,
'w2': aux * w**2,
'wc': aux * w * c,
'wc2': aux * w * c**2,
}
print('### material: wave number = ', conf.wave_num)
return out
def param_w(pb):
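    # Used as the problem's parametric_hook: a generator that re-yields the
    # problem once per wave number and evaluates the transmission loss from
    # the solved state after each pass.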
out = []
tl_out = []
conf = pb.conf
ofn_trunk = pb.ofn_trunk
for k in conf.wave_nums:
print('### wave number: ', k)
conf.wave_num = k
pb.ofn_trunk = ofn_trunk + '_w%d' % (k * pb.conf.sound_speed)
pb.conf.ofn_trunk = pb.ofn_trunk
yield pb, out
state = out[-1][1].get_parts()
tl_out.append(eval_phi(pb, state['p1'], state['p2'], conf.p_inc))
print('>>> TL: ', tl_out[-1])
yield None
savemat(op.join(wdir, 'results', 'tloss.mat'), {'k': conf.wave_nums, 'tl': tl_out})
############################################################
def define(filename_mesh=None, sound_speed=None, rho0=None,
freqs=None, p_inc=None, eps0=None,
coefs_filename=None, coefs_filename_plate=None):
# generate mid mesh
filename_mesh_plate = generate_plate_mesh(op.join(wdir, filename_mesh))
wave_num = nm.array(freqs) / sound_speed
wave_nums, wave_num = wave_num, wave_num[0]
regions = {
'Omega1': 'cells of group 1',
'Omega2': 'cells of group 2',
'GammaIn': ('vertices of group 1', 'facet'),
'GammaOut': ('vertices of group 2', 'facet'),
'Gamma_aux': ('r.Omega1 *v r.Omega2', 'facet'),
'Gamma0_1': ('copy r.Gamma_aux', 'facet', 'Omega1'),
'Gamma0_2': ('copy r.Gamma_aux', 'facet', 'Omega2'),
'Recovery': ('copy r.Gamma0_1', 'facet'),
}
fields = {
'pressure1': ('complex', 'scalar', 'Omega1', 1),
'pressure2': ('complex', 'scalar', 'Omega2', 1),
'tvelocity0': ('complex', 'scalar', 'Gamma0_1', 1),
'pressure0': ('complex', 'scalar', 'Gamma0_1', 1),
'vfield1': ('complex', 'vector', 'Omega1', 1),
'vfield2': ('complex', 'vector', 'Omega2', 1),
}
variables = {
'p1': ('unknown field', 'pressure1', 0),
'q1': ('test field', 'pressure1', 'p1'),
'p2': ('unknown field', 'pressure2', 1),
'q2': ('test field', 'pressure2', 'p2'),
'sp0': ('unknown field', 'pressure0', 2),
'sq0': ('test field', 'pressure0', 'sp0'),
'dp0': ('unknown field', 'pressure0', 3),
'dq0': ('test field', 'pressure0', 'dp0'),
'g01': ('unknown field', 'tvelocity0', 4),
'f01': ('test field', 'tvelocity0', 'g01'),
'g02': ('unknown field', 'tvelocity0', 5),
'f02': ('test field', 'tvelocity0', 'g02'),
'P1': ('parameter field', 'pressure1', '(set-to-None)'),
'P2': ('parameter field', 'pressure2', '(set-to-None)'),
's1': ('parameter field', 'pressure1', '(set-to-None)'),
's2': ('parameter field', 'pressure2', '(set-to-None)'),
'v1': ('parameter field', 'vfield1', '(set-to-None)'),
'v2': ('parameter field', 'vfield2', '(set-to-None)'),
}
integrals = {
'i': 2,
}
ebcs = {}
functions = {
'get_mat': (lambda ts, coors, mode=None, problem=None, **kwargs:
get_mat(coors, mode, problem),),
'match_y_plane': (match_y_plane,),
}
materials = {
'ac': 'get_mat',
}
regions.update({
'Near': ('vertices of group 3', 'facet'),
'Far': ('vertices of group 4', 'facet'),
})
epbcs = {
'per_p1': (['Near', 'Far'], {'p1.0': 'p1.0'}, 'match_y_plane'),
'per_p2': (['Near', 'Far'], {'p2.0': 'p2.0'}, 'match_y_plane'),
}
options = {
'output_dir': op.join(wdir, 'results'),
'file_per_var': True,
'post_process_hook': 'post_process_macro',
'parametric_hook': 'param_w',
}
# p1 = P^+, p2 = P^-
equations = {
'eq_p1': """
dw_laplace.i.Omega1(ac.c2, q1, p1)
- dw_volume_dot.i.Omega1(ac.w2, q1, p1)
+ %s * dw_surface_dot.i.GammaOut(ac.wc, q1, p1)
- %s * dw_surface_dot.i.Gamma0_1(ac.wc2, q1, g01)
= 0""" % (1j, 1j),
'eq_p2': """
dw_laplace.i.Omega2(ac.c2, q2, p2)
- dw_volume_dot.i.Omega2(ac.w2, q2, p2)
+ %s * dw_surface_dot.i.GammaIn(ac.wc, q2, p2)
+ %s * dw_surface_dot.i.Gamma0_2(ac.wc2, q2, tr(g02))
= %s * dw_surface_integrate.i.GammaIn(ac.wc, q2)"""
% (1j, 1j, 2j * p_inc),
'eq_dp': """
dw_surface_dot.i.Gamma0_1(dq0, p1)
- dw_surface_dot.i.Gamma0_1(dq0, tr(p2))
- dw_surface_dot.i.Gamma0_1(dq0, dp0)
= 0""",
'eq_sp': """
dw_surface_dot.i.Gamma0_1(sq0, p1)
+ dw_surface_dot.i.Gamma0_1(sq0, tr(p2))
- dw_surface_dot.i.Gamma0_1(sq0, sp0)
= 0""",
}
solvers = {
'nls': ('nls.newton', {'i_max': 1,
'eps_a': 1e-6,
'eps_r': 1e-6,
'problem': 'nonlinear', })
}
mid_file = op.join(wdir, 'acoustics_macro_plate.py')
solvers.update({
'ls': ('ls.cm_pb',
{'others': [mid_file],
'coupling_variables': ['g01', 'g02', 'dp0', 'sp0'],
'needs_problem_instance': True,
})
})
return locals()
|
[
"sfepy.base.base.Struct"
] |
[((697, 717), 'os.path.dirname', 'op.dirname', (['__file__'], {}), '(__file__)\n', (707, 717), True, 'import os.path as op\n'), ((842, 875), 'acoustics_macro_utils.get_region_entities', 'get_region_entities', (["pbvars['p1']"], {}), "(pbvars['p1'])\n", (861, 875), False, 'from acoustics_macro_utils import eval_phi, post_process, generate_plate_mesh, get_region_entities\n'), ((932, 976), 'acoustics_macro_utils.get_region_entities', 'get_region_entities', (["pbvars['p2']"], {'noff': 'noff'}), "(pbvars['p2'], noff=noff)\n", (951, 976), False, 'from acoustics_macro_utils import eval_phi, post_process, generate_plate_mesh, get_region_entities\n'), ((1014, 1039), 'numpy.hstack', 'nm.hstack', (['[nmap1, nmap2]'], {}), '([nmap1, nmap2])\n', (1023, 1039), True, 'import numpy as nm\n'), ((1310, 1357), 'os.path.join', 'op.join', (['pb.output_dir', "(pb.ofn_trunk + '_p.vtk')"], {}), "(pb.output_dir, pb.ofn_trunk + '_p.vtk')\n", (1317, 1357), True, 'import os.path as op\n'), ((1701, 1746), 'acoustics_macro_utils.post_process', 'post_process', (['out', 'pb', 'state'], {'save_var0': '"""dp0"""'}), "(out, pb, state, save_var0='dp0')\n", (1713, 1746), False, 'from acoustics_macro_utils import eval_phi, post_process, generate_plate_mesh, get_region_entities\n'), ((8070, 8111), 'os.path.join', 'op.join', (['wdir', '"""acoustics_macro_plate.py"""'], {}), "(wdir, 'acoustics_macro_plate.py')\n", (8077, 8111), True, 'import os.path as op\n'), ((988, 998), 'numpy.max', 'nm.max', (['c2'], {}), '(c2)\n', (994, 998), True, 'import numpy as nm\n'), ((1144, 1163), 'numpy.vstack', 'nm.vstack', (['[n1, n2]'], {}), '([n1, n2])\n', (1153, 1163), True, 'import numpy as nm\n'), ((1165, 1186), 'numpy.hstack', 'nm.hstack', (['[ng1, ng2]'], {}), '([ng1, ng2])\n', (1174, 1186), True, 'import numpy as nm\n'), ((1423, 1469), 'numpy.zeros', 'nm.zeros', (['(nmap.shape[0], 1)'], {'dtype': 'nm.float64'}), '((nmap.shape[0], 1), dtype=nm.float64)\n', (1431, 1469), True, 'import numpy as nm\n'), ((1618, 1661), 'sfepy.base.base.Struct', 'Struct', ([], {'name': '"""p"""', 'mode': '"""vertex"""', 'data': 'pdata'}), "(name='p', mode='vertex', data=pdata)\n", (1624, 1661), False, 'from sfepy.base.base import output, debug, Struct\n'), ((1904, 2026), 'sfepy.base.base.Struct', 'Struct', ([], {'name': 'o.name', 'mode': 'o.mode', 'dofs': 'o.dofs', 'var_name': 'o.var_name', 'data': '((out[k1].data - out[k2].data) / pb.conf.eps0)'}), '(name=o.name, mode=o.mode, dofs=o.dofs, var_name=o.var_name, data=(\n out[k1].data - out[k2].data) / pb.conf.eps0)\n', (1910, 2026), False, 'from sfepy.base.base import output, debug, Struct\n'), ((2228, 2327), 'sfepy.base.base.Struct', 'Struct', ([], {'name': 'o.name', 'mode': 'o.mode', 'dofs': 'o.dofs', 'var_name': 'o.var_name', 'data': '(o.data / pb.conf.eps0)'}), '(name=o.name, mode=o.mode, dofs=o.dofs, var_name=o.var_name, data=o.\n data / pb.conf.eps0)\n', (2234, 2327), False, 'from sfepy.base.base import output, debug, Struct\n'), ((2503, 2614), 'sfepy.base.base.Struct', 'Struct', ([], {'name': 'o.name', 'mode': 'o.mode', 'dofs': 'o.dofs', 'var_name': 'o.var_name', 'data': '((o.data - o2.data) / pb.conf.eps0)'}), '(name=o.name, mode=o.mode, dofs=o.dofs, var_name=o.var_name, data=(o.\n data - o2.data) / pb.conf.eps0)\n', (2509, 2614), False, 'from sfepy.base.base import output, debug, Struct\n'), ((2928, 2966), 'numpy.ones', 'nm.ones', (['(nqp, 1, 1)'], {'dtype': 'nm.float64'}), '((nqp, 1, 1), dtype=nm.float64)\n', (2935, 2966), True, 'import numpy as nm\n'), ((3711, 3748), 'os.path.join', 'op.join', (['wdir', 
'"""results"""', '"""tloss.mat"""'], {}), "(wdir, 'results', 'tloss.mat')\n", (3718, 3748), True, 'import os.path as op\n'), ((4087, 4115), 'os.path.join', 'op.join', (['wdir', 'filename_mesh'], {}), '(wdir, filename_mesh)\n', (4094, 4115), True, 'import os.path as op\n'), ((4132, 4147), 'numpy.array', 'nm.array', (['freqs'], {}), '(freqs)\n', (4140, 4147), True, 'import numpy as nm\n'), ((6659, 6683), 'os.path.join', 'op.join', (['wdir', '"""results"""'], {}), "(wdir, 'results')\n", (6666, 6683), True, 'import os.path as op\n'), ((1216, 1235), 'numpy.vstack', 'nm.vstack', (['[c1, c2]'], {}), '([c1, c2])\n', (1225, 1235), True, 'import numpy as nm\n'), ((1239, 1260), 'numpy.hstack', 'nm.hstack', (['[cg1, cg2]'], {}), '([cg1, cg2])\n', (1248, 1260), True, 'import numpy as nm\n'), ((3588, 3638), 'acoustics_macro_utils.eval_phi', 'eval_phi', (['pb', "state['p1']", "state['p2']", 'conf.p_inc'], {}), "(pb, state['p1'], state['p2'], conf.p_inc)\n", (3596, 3638), False, 'from acoustics_macro_utils import eval_phi, post_process, generate_plate_mesh, get_region_entities\n')]
|
import numpy as np
import pytest
import megengine.functional as F
from megengine import tensor
from megengine.test import assertTensorClose
def test_onehot_low_dimension():
inp = tensor(np.arange(1, 4, dtype=np.int32))
out = F.one_hot(inp)
assertTensorClose(
out.numpy(), np.eye(4, dtype=np.int32)[np.arange(1, 4, dtype=np.int32)]
)
def test_onehot_high_dimension():
arr = np.array(
[[3, 2, 4, 4, 2, 4, 0, 4, 4, 1], [4, 1, 1, 3, 2, 2, 4, 2, 4, 3]], dtype=np.int32
)
inp = tensor(arr)
out = F.one_hot(inp, 10)
assertTensorClose(out.numpy(), np.eye(10, dtype=np.int32)[arr])
|
[
"megengine.functional.one_hot",
"megengine.tensor"
] |
[((236, 250), 'megengine.functional.one_hot', 'F.one_hot', (['inp'], {}), '(inp)\n', (245, 250), True, 'import megengine.functional as F\n'), ((407, 501), 'numpy.array', 'np.array', (['[[3, 2, 4, 4, 2, 4, 0, 4, 4, 1], [4, 1, 1, 3, 2, 2, 4, 2, 4, 3]]'], {'dtype': 'np.int32'}), '([[3, 2, 4, 4, 2, 4, 0, 4, 4, 1], [4, 1, 1, 3, 2, 2, 4, 2, 4, 3]],\n dtype=np.int32)\n', (415, 501), True, 'import numpy as np\n'), ((523, 534), 'megengine.tensor', 'tensor', (['arr'], {}), '(arr)\n', (529, 534), False, 'from megengine import tensor\n'), ((545, 563), 'megengine.functional.one_hot', 'F.one_hot', (['inp', '(10)'], {}), '(inp, 10)\n', (554, 563), True, 'import megengine.functional as F\n'), ((193, 224), 'numpy.arange', 'np.arange', (['(1)', '(4)'], {'dtype': 'np.int32'}), '(1, 4, dtype=np.int32)\n', (202, 224), True, 'import numpy as np\n'), ((296, 321), 'numpy.eye', 'np.eye', (['(4)'], {'dtype': 'np.int32'}), '(4, dtype=np.int32)\n', (302, 321), True, 'import numpy as np\n'), ((322, 353), 'numpy.arange', 'np.arange', (['(1)', '(4)'], {'dtype': 'np.int32'}), '(1, 4, dtype=np.int32)\n', (331, 353), True, 'import numpy as np\n'), ((600, 626), 'numpy.eye', 'np.eye', (['(10)'], {'dtype': 'np.int32'}), '(10, dtype=np.int32)\n', (606, 626), True, 'import numpy as np\n')]
|
from sqlmodel import SQLModel, create_engine
sqlite_file_name = "database.db"
sqlite_url = f"sqlite:///{sqlite_file_name}"
engine = create_engine(sqlite_url)
def create_db_and_tables():
SQLModel.metadata.create_all(engine)
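

# Minimal usage sketch (assumption: running this module directly should just
# initialize the schema):
if __name__ == "__main__":
    create_db_and_tables()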
|
[
"sqlmodel.create_engine",
"sqlmodel.SQLModel.metadata.create_all"
] |
[((134, 159), 'sqlmodel.create_engine', 'create_engine', (['sqlite_url'], {}), '(sqlite_url)\n', (147, 159), False, 'from sqlmodel import SQLModel, create_engine\n'), ((194, 230), 'sqlmodel.SQLModel.metadata.create_all', 'SQLModel.metadata.create_all', (['engine'], {}), '(engine)\n', (222, 230), False, 'from sqlmodel import SQLModel, create_engine\n')]
|
import math
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
def bias_act(x, b=None, dim=1, act='linear', alpha=None, gain=None, clamp=None):
assert clamp is None or clamp >= 0
def_gain = 1.0
    if act in ['relu', 'lrelu', 'swish']:  # these activations use def_gain = np.sqrt(2); all others keep def_gain = 1.0
def_gain = np.sqrt(2)
def_alpha = 0.0
    if act in ['lrelu']:  # lrelu uses def_alpha = 0.2; all others keep def_alpha = 0.0
def_alpha = 0.2
alpha = float(alpha if alpha is not None else def_alpha)
gain = float(gain if gain is not None else def_gain)
clamp = float(clamp if clamp is not None else -1)
    # add the bias
if b is not None:
new_shape = [-1 if i == dim else 1 for i in range(x.ndim)]
b_ = F.reshape(b, new_shape)
x = x + b_
x_add_b = x
    # apply the activation function
    alpha = float(alpha)  # only needed by leaky_relu
if act == 'linear':
pass
elif act == 'relu':
x = F.relu(x)
elif act == 'lrelu':
x = F.leaky_relu(x, alpha)
elif act == 'tanh':
x = F.tanh(x)
elif act == 'sigmoid':
x = F.sigmoid(x)
elif act == 'elu':
x = F.elu(x)
elif act == 'selu':
x = F.selu(x)
elif act == 'softplus':
x = F.softplus(x)
elif act == 'swish':
x = F.sigmoid(x) * x
else:
raise NotImplementedError("activation \'{}\' is not implemented.".format(act))
act_x = x
    # multiply by the gain factor
gain = float(gain)
if gain != 1:
x = x * gain
gain_x = x
    # clamp the output range
if clamp >= 0:
x = F.clip(x, -clamp, clamp)
clamp_x = x
return clamp_x
class FullyConnectedLayer(M.Module):
def __init__(self,
in_features, # Number of input features.
out_features, # Number of output features.
bias = True, # Apply additive bias before the activation function?
activation = 'linear', # Activation function: 'relu', 'lrelu', etc.
lr_multiplier = 1, # Learning rate multiplier.
bias_init = 0, # Initial value for the additive bias.
):
super().__init__()
self.activation = activation
self.weight = mge.Parameter(mge.tensor(np.random.randn(out_features, in_features).astype(np.float32)) / lr_multiplier)
self.bias = mge.Parameter(mge.tensor(np.ones(out_features, ).astype(np.float32) * bias_init)) if bias else None
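        # Equalized-learning-rate-style scaling (as in StyleGAN): weights are
        # kept at unit variance and rescaled by lr_multiplier / sqrt(fan_in)
        # at runtime instead of being scaled at initialization.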
self.weight_gain = lr_multiplier / np.sqrt(in_features)
self.weight_gain = float(self.weight_gain)
self.bias_gain = lr_multiplier
def forward(self, x):
# w = self.weight.to(x.dtype) * self.weight_gain
w = self.weight * self.weight_gain
b = self.bias
if b is not None:
# b = b.to(x.dtype)
if self.bias_gain != 1:
b = b * self.bias_gain
if self.activation == 'linear' and b is not None:
out = F.matmul(x, w, transpose_b=True) + F.expand_dims(b, 0)
else:
# r = x.matmul(w.t())
r = F.matmul(x, w, transpose_b=True)
out = bias_act(r, b, act=self.activation)
return out
|
[
"megengine.functional.sigmoid",
"megengine.functional.softplus",
"megengine.functional.matmul",
"megengine.functional.expand_dims",
"megengine.functional.relu",
"megengine.functional.leaky_relu",
"megengine.functional.elu",
"megengine.functional.clip",
"megengine.functional.selu",
"megengine.functional.reshape",
"megengine.functional.tanh"
] |
[((379, 389), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (386, 389), True, 'import numpy as np\n'), ((797, 820), 'megengine.functional.reshape', 'F.reshape', (['b', 'new_shape'], {}), '(b, new_shape)\n', (806, 820), True, 'import megengine.functional as F\n'), ((1597, 1621), 'megengine.functional.clip', 'F.clip', (['x', '(-clamp)', 'clamp'], {}), '(x, -clamp, clamp)\n', (1603, 1621), True, 'import megengine.functional as F\n'), ((986, 995), 'megengine.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (992, 995), True, 'import megengine.functional as F\n'), ((2521, 2541), 'numpy.sqrt', 'np.sqrt', (['in_features'], {}), '(in_features)\n', (2528, 2541), True, 'import numpy as np\n'), ((3110, 3142), 'megengine.functional.matmul', 'F.matmul', (['x', 'w'], {'transpose_b': '(True)'}), '(x, w, transpose_b=True)\n', (3118, 3142), True, 'import megengine.functional as F\n'), ((1033, 1055), 'megengine.functional.leaky_relu', 'F.leaky_relu', (['x', 'alpha'], {}), '(x, alpha)\n', (1045, 1055), True, 'import megengine.functional as F\n'), ((2991, 3023), 'megengine.functional.matmul', 'F.matmul', (['x', 'w'], {'transpose_b': '(True)'}), '(x, w, transpose_b=True)\n', (2999, 3023), True, 'import megengine.functional as F\n'), ((3026, 3045), 'megengine.functional.expand_dims', 'F.expand_dims', (['b', '(0)'], {}), '(b, 0)\n', (3039, 3045), True, 'import megengine.functional as F\n'), ((1092, 1101), 'megengine.functional.tanh', 'F.tanh', (['x'], {}), '(x)\n', (1098, 1101), True, 'import megengine.functional as F\n'), ((1141, 1153), 'megengine.functional.sigmoid', 'F.sigmoid', (['x'], {}), '(x)\n', (1150, 1153), True, 'import megengine.functional as F\n'), ((1189, 1197), 'megengine.functional.elu', 'F.elu', (['x'], {}), '(x)\n', (1194, 1197), True, 'import megengine.functional as F\n'), ((2278, 2320), 'numpy.random.randn', 'np.random.randn', (['out_features', 'in_features'], {}), '(out_features, in_features)\n', (2293, 2320), True, 'import numpy as np\n'), ((1234, 1243), 'megengine.functional.selu', 'F.selu', (['x'], {}), '(x)\n', (1240, 1243), True, 'import megengine.functional as F\n'), ((2403, 2424), 'numpy.ones', 'np.ones', (['out_features'], {}), '(out_features)\n', (2410, 2424), True, 'import numpy as np\n'), ((1284, 1297), 'megengine.functional.softplus', 'F.softplus', (['x'], {}), '(x)\n', (1294, 1297), True, 'import megengine.functional as F\n'), ((1335, 1347), 'megengine.functional.sigmoid', 'F.sigmoid', (['x'], {}), '(x)\n', (1344, 1347), True, 'import megengine.functional as F\n')]
|
"""add events
Revision ID: 02338256c6aa
Revises: 108677b68119
Create Date: 2022-06-01 03:17:51.063172+00:00
"""
import sqlalchemy as sa
import sqlmodel
from alembic import op
from common.database.tables.types import TimeStamp
# revision identifiers, used by Alembic.
revision = "02338256c6aa"
down_revision = "108677b68119"
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"events",
sa.Column(
"valid_from",
TimeStamp(timezone=True),
nullable=False,
),
sa.Column(
"valid_until",
TimeStamp(timezone=True),
nullable=False,
),
sa.Column("name", sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column("code", sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column("enabled", sa.Boolean(), nullable=False),
sa.Column("id", sa.Integer(), nullable=False),
sa.PrimaryKeyConstraint("id"),
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table("events")
# ### end Alembic commands ###
|
[
"sqlmodel.sql.sqltypes.AutoString"
] |
[((1176, 1199), 'alembic.op.drop_table', 'op.drop_table', (['"""events"""'], {}), "('events')\n", (1189, 1199), False, 'from alembic import op\n'), ((1015, 1044), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""'], {}), "('id')\n", (1038, 1044), True, 'import sqlalchemy as sa\n'), ((546, 570), 'common.database.tables.types.TimeStamp', 'TimeStamp', ([], {'timezone': '(True)'}), '(timezone=True)\n', (555, 570), False, 'from common.database.tables.types import TimeStamp\n'), ((669, 693), 'common.database.tables.types.TimeStamp', 'TimeStamp', ([], {'timezone': '(True)'}), '(timezone=True)\n', (678, 693), False, 'from common.database.tables.types import TimeStamp\n'), ((760, 794), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (792, 794), False, 'import sqlmodel\n'), ((839, 873), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (871, 873), False, 'import sqlmodel\n'), ((921, 933), 'sqlalchemy.Boolean', 'sa.Boolean', ([], {}), '()\n', (931, 933), True, 'import sqlalchemy as sa\n'), ((976, 988), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (986, 988), True, 'import sqlalchemy as sa\n')]
|
from datetime import datetime
from typing import Optional
from fastapi import APIRouter, Depends
from sqlmodel import Field, SQLModel
from ...db import get_session
from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession
router = APIRouter()
class HistoryEntPlasticConference(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
history_id_order: int
history_id_conference: int
ent_plastic_conference_id: int
state: str
created_at: datetime
updated_at: datetime
created_by: int
updated_by: Optional[int] = None
class EntPlasticConference(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
problem: str
question: str
ent_plan: str
surgeon_plant: str
post_plan: str
surgeon_post_plan: str
created_at: datetime
updated_at: datetime
created_by: int
updated_by: Optional[int] = None
class EntPlasticConferenceDoctorMap(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
ent_plastic_conference_id: int
doctor_id: int
created_at: datetime
updated_at: datetime
created_by: int
updated_by: Optional[int] = None
@router.post("/history_ent_conference", response_model=HistoryEntPlasticConference)
async def create_history_ent_conference(history_ent_conference: HistoryEntPlasticConference, session: AsyncSession = Depends(get_session)):
session.add(history_ent_conference)
await session.commit()
await session.refresh(history_ent_conference)
return history_ent_conference
@router.post("/ent_conference", response_model=EntPlasticConference)
async def create_ent_conference(ent_conference: EntPlasticConference, session: AsyncSession = Depends(get_session)):
session.add(ent_conference)
await session.commit()
await session.refresh(ent_conference)
return ent_conference
@router.get("/history_ent_conference/{id}", response_model=HistoryEntPlasticConference)
async def get_history_ent_conference(id: int, session: AsyncSession = Depends(get_session)):
history_ent_conferences = await session.execute(select(HistoryEntPlasticConference).where(HistoryEntPlasticConference.id == id))
history_ent_conference = history_ent_conferences.scalars().first()
return history_ent_conference
@router.put("/history_ent_conference/{id}", response_model=HistoryEntPlasticConference)
async def update_history_ent_conference(id: int, session: AsyncSession = Depends(get_session)):
return None
@router.delete("/history_ent_conference/{id}")
async def delete_history_ent_conference(session: AsyncSession = Depends(get_session)):
return None
@router.delete("/history_ent_conference/{id}")
async def delete_ent_conference(session: AsyncSession = Depends(get_session)):
return None
|
[
"sqlmodel.Field"
] |
[((256, 267), 'fastapi.APIRouter', 'APIRouter', ([], {}), '()\n', (265, 267), False, 'from fastapi import APIRouter, Depends\n'), ((351, 388), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (356, 388), False, 'from sqlmodel import Field, SQLModel\n'), ((679, 716), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (684, 716), False, 'from sqlmodel import Field, SQLModel\n'), ((1031, 1068), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (1036, 1068), False, 'from sqlmodel import Field, SQLModel\n'), ((1433, 1453), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (1440, 1453), False, 'from fastapi import APIRouter, Depends\n'), ((1772, 1792), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (1779, 1792), False, 'from fastapi import APIRouter, Depends\n'), ((2082, 2102), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (2089, 2102), False, 'from fastapi import APIRouter, Depends\n'), ((2507, 2527), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (2514, 2527), False, 'from fastapi import APIRouter, Depends\n'), ((2659, 2679), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (2666, 2679), False, 'from fastapi import APIRouter, Depends\n'), ((2803, 2823), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (2810, 2823), False, 'from fastapi import APIRouter, Depends\n'), ((2157, 2192), 'sqlalchemy.select', 'select', (['HistoryEntPlasticConference'], {}), '(HistoryEntPlasticConference)\n', (2163, 2192), False, 'from sqlalchemy import select\n')]
|
from unittest.mock import patch
import pytest
from sqlalchemy.exc import NoResultFound
from sqlmodel import Session, create_engine, delete
from ...conftest import get_testing_print_function
def test_tutorial(clear_sqlmodel):
from docs_src.tutorial.one import tutorial005 as mod
mod.sqlite_url = "sqlite://"
mod.engine = create_engine(mod.sqlite_url)
with pytest.raises(NoResultFound):
mod.main()
with Session(mod.engine) as session:
# TODO: create delete() function
# TODO: add overloads for .exec() with delete object
session.exec(delete(mod.Hero))
session.add(mod.Hero(name="<NAME>", secret_name="<NAME>", age=24))
session.commit()
calls = []
new_print = get_testing_print_function(calls)
with patch("builtins.print", new=new_print):
mod.select_heroes()
assert calls == [
[
"Hero:",
{
"id": 1,
"name": "<NAME>",
"secret_name": "<NAME>",
"age": 24,
},
]
]
|
[
"sqlmodel.delete",
"sqlmodel.Session",
"sqlmodel.create_engine"
] |
[((337, 366), 'sqlmodel.create_engine', 'create_engine', (['mod.sqlite_url'], {}), '(mod.sqlite_url)\n', (350, 366), False, 'from sqlmodel import Session, create_engine, delete\n'), ((376, 404), 'pytest.raises', 'pytest.raises', (['NoResultFound'], {}), '(NoResultFound)\n', (389, 404), False, 'import pytest\n'), ((414, 424), 'docs_src.tutorial.one.tutorial005.main', 'mod.main', ([], {}), '()\n', (422, 424), True, 'from docs_src.tutorial.one import tutorial005 as mod\n'), ((434, 453), 'sqlmodel.Session', 'Session', (['mod.engine'], {}), '(mod.engine)\n', (441, 453), False, 'from sqlmodel import Session, create_engine, delete\n'), ((784, 822), 'unittest.mock.patch', 'patch', (['"""builtins.print"""'], {'new': 'new_print'}), "('builtins.print', new=new_print)\n", (789, 822), False, 'from unittest.mock import patch\n'), ((832, 851), 'docs_src.tutorial.one.tutorial005.select_heroes', 'mod.select_heroes', ([], {}), '()\n', (849, 851), True, 'from docs_src.tutorial.one import tutorial005 as mod\n'), ((589, 605), 'sqlmodel.delete', 'delete', (['mod.Hero'], {}), '(mod.Hero)\n', (595, 605), False, 'from sqlmodel import Session, create_engine, delete\n'), ((627, 680), 'docs_src.tutorial.one.tutorial005.Hero', 'mod.Hero', ([], {'name': '"""<NAME>"""', 'secret_name': '"""<NAME>"""', 'age': '(24)'}), "(name='<NAME>', secret_name='<NAME>', age=24)\n", (635, 680), True, 'from docs_src.tutorial.one import tutorial005 as mod\n')]
|
import requests
import logging
import us
from pydantic import validator
from sqlmodel import Field, SQLModel, Session, create_engine
import json
from os.path import exists
from typing import Optional
# Define the Store model
# The model is a SQLModel so it gets Pydantic and SQLAlchemy methods
# It will be a SQL table with the name 'store'
class Store(SQLModel, table=True):
id: str = Field(default=None, primary_key=True)
name: str
state: str
latitude: float
longitude: float
    # Grab the needed fields from the API payload
    def __init__(self, source_data):
        # a SQLModel instance must be initialised via super().__init__ so the
        # SQLAlchemy instance state is set up; assigning attributes directly
        # on an uninitialised model fails
        super().__init__(
            id=source_data['key']['id'],
            name=source_data['name']['label'],
            state=source_data['location']['address']['administrativeArea'],
            latitude=source_data['location']['geo']['latitude'],
            longitude=source_data['location']['geo']['longitude'],
        )
    # Verify that the state is a valid US state or territory
    @validator('state')
    def validate_state(cls, v):
        state = us.states.lookup(v)
        if state not in us.states.STATES_AND_TERRITORIES:
            raise ValueError(f'{v} is not a valid state')
        # return the abbreviation string rather than the State object,
        # matching the declared str field type
        return state.abbr
# Verify that the latitude is a float and between -90 and 90
@validator('latitude')
def validate_latitude(cls, v):
v = float(v)
if v < -90 or v > 90:
raise ValueError(f'{v} is not a valid latitude')
return v
# Verify that the longitude is a float and between -180 and 180
@validator('longitude')
def validate_longitude(cls, v):
v = float(v)
if v < -180 or v > 180:
raise ValueError(f'{v} is not a valid longitude')
return v
# Verify that the name is a string and not empty
@validator('name')
def validate_name(cls, v):
if len(v) < 1:
raise ValueError(f'{v} is not a valid name')
return v
# read the store data from the provided filepath, or pull it from the API if the file does not exist
def extract(filepath: str, url: str) -> Optional[dict]:
# try filepath
if exists(filepath):
with open(filepath, 'r') as f:
return json.load(f)
# try API
try:
r = requests.get(url)
r.raise_for_status()
logging.info('Successfully retrieved stores data')
return r.json()
except Exception as e:
logging.warning(f'Error getting stores data: {e}')
return None
# transform the raw data into a list of Store objects
def transform(data: dict) -> Optional[list]:
stores = []
if 'store' in data:
for s in data['store']:
try:
stores.append(Store(s))
except Exception as e:
logging.warning(f'Error transforming store: {e}')
return stores
return None
# load the data into the database
def load(stores: list, engine) -> None:
if not stores:
logging.warning('No stores to load')
return
try:
# Create a session to connect to the database
with Session(engine) as session:
# add the stores to the database
session.add_all(stores)
session.commit()
logging.info(f'Successfully loaded {len(stores)} stores')
except Exception as e:
logging.warning(f'Error loading stores: {e}')
# main function to be called by the pipeline
def run(filepath: str, stores_url: str, engine):
    data = extract(filepath, stores_url)
    if data is None:
        logging.warning('No store data extracted; aborting run')
        return
    logging.debug(data.keys())
    stores = transform(data)
    load(stores, engine)
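# Example wiring (a sketch only: the file path, URL, and SQLite location below
# are hypothetical placeholders, not values from this pipeline):
if __name__ == '__main__':
    engine = create_engine('sqlite:///stores.db')
    SQLModel.metadata.create_all(engine)
    run('stores.json', 'https://example.com/api/stores', engine)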
|
[
"sqlmodel.Session",
"sqlmodel.Field"
] |
[((363, 400), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (368, 400), False, 'from sqlmodel import Field, SQLModel, Session, create_engine\n'), ((938, 956), 'pydantic.validator', 'validator', (['"""state"""'], {}), "('state')\n", (947, 956), False, 'from pydantic import validator\n'), ((1221, 1242), 'pydantic.validator', 'validator', (['"""latitude"""'], {}), "('latitude')\n", (1230, 1242), False, 'from pydantic import validator\n'), ((1481, 1503), 'pydantic.validator', 'validator', (['"""longitude"""'], {}), "('longitude')\n", (1490, 1503), False, 'from pydantic import validator\n'), ((1731, 1748), 'pydantic.validator', 'validator', (['"""name"""'], {}), "('name')\n", (1740, 1748), False, 'from pydantic import validator\n'), ((2062, 2078), 'os.path.exists', 'exists', (['filepath'], {}), '(filepath)\n', (2068, 2078), False, 'from os.path import exists\n'), ((1001, 1020), 'us.states.lookup', 'us.states.lookup', (['v'], {}), '(v)\n', (1017, 1020), False, 'import us\n'), ((2191, 2208), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (2203, 2208), False, 'import requests\n'), ((2246, 2296), 'logging.info', 'logging.info', (['"""Successfully retrieved stores data"""'], {}), "('Successfully retrieved stores data')\n", (2258, 2296), False, 'import logging\n'), ((2883, 2919), 'logging.warning', 'logging.warning', (['"""No stores to load"""'], {}), "('No stores to load')\n", (2898, 2919), False, 'import logging\n'), ((2138, 2150), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2147, 2150), False, 'import json\n'), ((2356, 2406), 'logging.warning', 'logging.warning', (['f"""Error getting stores data: {e}"""'], {}), "(f'Error getting stores data: {e}')\n", (2371, 2406), False, 'import logging\n'), ((3011, 3026), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (3018, 3026), False, 'from sqlmodel import Field, SQLModel, Session, create_engine\n'), ((3254, 3299), 'logging.warning', 'logging.warning', (['f"""Error loading stores: {e}"""'], {}), "(f'Error loading stores: {e}')\n", (3269, 3299), False, 'import logging\n'), ((2693, 2742), 'logging.warning', 'logging.warning', (['f"""Error transforming store: {e}"""'], {}), "(f'Error transforming store: {e}')\n", (2708, 2742), False, 'import logging\n')]
|
from datetime import date
from typing import Optional, Iterator
from sqlmodel import SQLModel, Field, create_engine, Session # type: ignore[import]
from my_feed.log import logger
from app.settings import settings
# base non-table sql model
class FeedBase(SQLModel): # type: ignore[misc]
model_id: str
ftype: str # feed item type
title: str
score: Optional[float] = Field(default=None)
# more metadata
subtitle: Optional[str] = Field(default=None)
creator: Optional[str] = Field(default=None)
part: Optional[int] = Field(default=None)
subpart: Optional[int] = Field(default=None)
collection: Optional[str] = Field(default=None)
# dates
when: int
release_date: Optional[date] = Field(default=None)
# urls
image_url: Optional[str] = Field(default=None)
url: Optional[str] = Field(default=None)
# feedbase, with an ID/table
class FeedModel(FeedBase, table=True): # type: ignore
id: int = Field(index=True, primary_key=True)
# store JSON as strings, these are only used on the frontend anyways
tags: str = Field(default=r"[]") # List[str]
data: Optional[bytes] = Field(default=None) # Dict[str, Any]
feed_engine = create_engine(
settings.SQLITE_DB_PATH,
echo=settings.SQL_ECHO,
)
def init_db() -> None:
logger.info("Creating tables...")
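    # create_all is idempotent: tables that already exist are skipped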
SQLModel.metadata.create_all(feed_engine)
def get_db() -> Iterator[Session]:
with Session(feed_engine) as session:
yield session
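# Usage sketch (hypothetical caller; `select` would additionally have to be
# imported from sqlmodel):
# init_db()
# with Session(feed_engine) as session:
#     latest = session.exec(select(FeedModel).limit(10)).all()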
|
[
"sqlmodel.create_engine",
"sqlmodel.Session",
"sqlmodel.SQLModel.metadata.create_all",
"sqlmodel.Field"
] |
[((1207, 1269), 'sqlmodel.create_engine', 'create_engine', (['settings.SQLITE_DB_PATH'], {'echo': 'settings.SQL_ECHO'}), '(settings.SQLITE_DB_PATH, echo=settings.SQL_ECHO)\n', (1220, 1269), False, 'from sqlmodel import SQLModel, Field, create_engine, Session\n'), ((388, 407), 'sqlmodel.Field', 'Field', ([], {'default': 'None'}), '(default=None)\n', (393, 407), False, 'from sqlmodel import SQLModel, Field, create_engine, Session\n'), ((459, 478), 'sqlmodel.Field', 'Field', ([], {'default': 'None'}), '(default=None)\n', (464, 478), False, 'from sqlmodel import SQLModel, Field, create_engine, Session\n'), ((508, 527), 'sqlmodel.Field', 'Field', ([], {'default': 'None'}), '(default=None)\n', (513, 527), False, 'from sqlmodel import SQLModel, Field, create_engine, Session\n'), ((554, 573), 'sqlmodel.Field', 'Field', ([], {'default': 'None'}), '(default=None)\n', (559, 573), False, 'from sqlmodel import SQLModel, Field, create_engine, Session\n'), ((603, 622), 'sqlmodel.Field', 'Field', ([], {'default': 'None'}), '(default=None)\n', (608, 622), False, 'from sqlmodel import SQLModel, Field, create_engine, Session\n'), ((655, 674), 'sqlmodel.Field', 'Field', ([], {'default': 'None'}), '(default=None)\n', (660, 674), False, 'from sqlmodel import SQLModel, Field, create_engine, Session\n'), ((737, 756), 'sqlmodel.Field', 'Field', ([], {'default': 'None'}), '(default=None)\n', (742, 756), False, 'from sqlmodel import SQLModel, Field, create_engine, Session\n'), ((800, 819), 'sqlmodel.Field', 'Field', ([], {'default': 'None'}), '(default=None)\n', (805, 819), False, 'from sqlmodel import SQLModel, Field, create_engine, Session\n'), ((845, 864), 'sqlmodel.Field', 'Field', ([], {'default': 'None'}), '(default=None)\n', (850, 864), False, 'from sqlmodel import SQLModel, Field, create_engine, Session\n'), ((965, 1000), 'sqlmodel.Field', 'Field', ([], {'index': '(True)', 'primary_key': '(True)'}), '(index=True, primary_key=True)\n', (970, 1000), False, 'from sqlmodel import SQLModel, Field, create_engine, Session\n'), ((1091, 1110), 'sqlmodel.Field', 'Field', ([], {'default': '"""[]"""'}), "(default='[]')\n", (1096, 1110), False, 'from sqlmodel import SQLModel, Field, create_engine, Session\n'), ((1153, 1172), 'sqlmodel.Field', 'Field', ([], {'default': 'None'}), '(default=None)\n', (1158, 1172), False, 'from sqlmodel import SQLModel, Field, create_engine, Session\n'), ((1310, 1343), 'my_feed.log.logger.info', 'logger.info', (['"""Creating tables..."""'], {}), "('Creating tables...')\n", (1321, 1343), False, 'from my_feed.log import logger\n'), ((1348, 1389), 'sqlmodel.SQLModel.metadata.create_all', 'SQLModel.metadata.create_all', (['feed_engine'], {}), '(feed_engine)\n', (1376, 1389), False, 'from sqlmodel import SQLModel, Field, create_engine, Session\n'), ((1436, 1456), 'sqlmodel.Session', 'Session', (['feed_engine'], {}), '(feed_engine)\n', (1443, 1456), False, 'from sqlmodel import SQLModel, Field, create_engine, Session\n')]
|
import numpy as np
import itertools
import os
import scipy.linalg
from sfepy.discrete import fem
from .algo_core import generalized_courant_fischer, spring_energy_matrix_accelerate_3D
import util.geometry_util as geo_util
import util.meshgen as meshgen
from util.timer import SimpleTimer
from visualization.model_visualizer import visualize_hinges, visualize_3D
import visualization.model_visualizer as vis
from .constraints_3d import select_non_colinear_points, direction_for_relative_disallowed_motions
from .internal_structure import tetrahedron
from .stiffness_matrix import stiffness_matrix_from_mesh
class Model:
"""
Represent an assembly
"""
def __init__(self):
self.beams = []
self.joints = []
def point_matrix(self) -> np.ndarray:
beam_points = np.vstack([b.points for b in self.beams]).reshape(-1, 3)
# joint_points = np.array([j.virtual_points for j in self.joints]).reshape(-1, 3)
return np.vstack((
beam_points,
))
def point_indices(self):
beam_point_count = np.array([b.point_count for b in self.beams])
end_indices = np.cumsum(beam_point_count)
start_indices = end_indices - beam_point_count
return [np.arange(start, end) for start, end in zip(start_indices, end_indices)]
def edge_matrix(self) -> np.ndarray:
edge_indices = []
index_offset = 0
for beam in self.beams:
edge_indices.append(beam.edges + index_offset)
index_offset += beam.point_count
matrix = np.vstack([edges for edges in edge_indices if edges.size > 0])
return matrix
def constraint_matrix(self) -> np.ndarray:
matrix = []
# collect constraints for each joint and stack them
for joint in self.joints:
constraints = joint.linear_constraints(self)
matrix.append(constraints)
numpy_matrix = np.vstack(matrix) if len(matrix) > 0 else np.empty(0)
return numpy_matrix
def constraints_fixing_first_part(self):
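        # pin all coordinates of the first beam: one identity row per fixed DOF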
count = len(self.beams[0].points)
fixed_coordinates = np.zeros((count * 3, self.point_count * 3))
for r, c in enumerate(range(count * 3)):
fixed_coordinates[r, c] = 1
return fixed_coordinates
@property
def point_count(self):
return sum(beam.point_count for beam in self.beams)
def add_beam(self, beam):
self.beams.append(beam)
def add_beams(self, beams):
for beam in beams:
self.add_beam(beam)
def add_joint(self, joint):
self.joints.append(joint)
def add_joints(self, joints):
for joint in joints:
self.add_joint(joint)
def beam_point_index(self, beam):
beam_index = self.beams.index(beam)
return sum(b.point_count for b in self.beams[:beam_index])
def joint_point_indices(self):
indices = []
for joint in self.joints:
offset_part_1 = self.beam_point_index(joint.part1)
offset_part_2 = self.beam_point_index(joint.part2)
indice_on_part_1 = select_non_colinear_points(joint.part1.points, near=joint.pivot_point)[1] + offset_part_1
indice_on_part_2 = select_non_colinear_points(joint.part2.points, near=joint.pivot_point)[1] + offset_part_2
indices.append((indice_on_part_1, indice_on_part_2))
return np.array(indices)
def save_json(self, filename: str, **kwargs):
import json
from util.json_encoder import ModelEncoder
with open(filename, "w") as f:
json.dump(self, f, cls=ModelEncoder, **kwargs)
def visualize(self, arrows=None, show_axis=True, show_hinge=True, arrow_style=None):
defaults = {
"length_coeff": 0.2,
"radius_coeff": 0.2,
}
if arrow_style is not None:
arrow_style = {
**defaults,
**arrow_style,
}
else:
arrow_style = defaults
geometries = []
model_mesh = vis.get_lineset_for_edges(self.point_matrix(), self.edge_matrix())
geometries.append(model_mesh)
if show_hinge:
rotation_axes_pairs = [(j.pivot, j.rotation_axes[0]) for j in self.joints if j.rotation_axes is not None]
if len(rotation_axes_pairs) > 0:
rotation_pivots, rotation_axes = zip(*rotation_axes_pairs)
axes_arrows = vis.get_mesh_for_arrows(
rotation_pivots,
geo_util.normalize(rotation_axes),
length_coeff=0.01, radius_coeff=0.4)
axes_arrows.paint_uniform_color([0.5, 0.2, 0.8])
geometries.append(axes_arrows)
translation_vector_pairs = [(j.pivot, j.translation_vectors[0]) for j in self.joints if j.translation_vectors is not None]
if len(translation_vector_pairs) > 0:
translation_pivots, translation_vector = zip(*translation_vector_pairs)
vector_arrows = vis.get_mesh_for_arrows(translation_pivots, translation_vector, length_coeff=0.01, radius_coeff=0.4)
vector_arrows.paint_uniform_color([0.2, 0.8, 0.5])
geometries.append(vector_arrows)
melded_points = [j.pivot for j in self.joints if j.translation_vectors is None and j.rotation_axes is None]
if len(melded_points) > 0:
point_meshes = vis.get_mesh_for_points(melded_points)
geometries.append(point_meshes)
mesh_frame = vis.o3d.geometry.TriangleMesh.create_coordinate_frame(size=10, origin=[0, 0, 0])
geometries.append(mesh_frame)
if arrows is not None:
points = self.point_matrix()
arrow_mesh = vis.get_mesh_for_arrows(points, arrows.reshape(-1, points.shape[1]), **arrow_style)
model_meshes = vis.get_geometries_3D(self.point_matrix(), edges=self.edge_matrix(), show_axis=False, show_point=False)
geometries.extend([arrow_mesh, *model_meshes])
vis.o3d.visualization.draw_geometries(geometries)
def joint_stiffness_matrix(self):
from functools import reduce
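        # accumulate every joint's contribution into one global-size stiffness matrix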
matrix = reduce(lambda x, y: x + y, [j.joint_stiffness(self) for j in self.joints])
return matrix
def soft_solve(self, num_pairs=-1, extra_constr=None, verbose=False):
points = self.point_matrix()
edges = self.edge_matrix()
part_stiffness = spring_energy_matrix_accelerate_3D(points, edges, abstract_edges=[])
joint_stiffness = self.joint_stiffness_matrix()
K = part_stiffness + joint_stiffness # global stiffness
eigenpairs = geo_util.eigen(K, symmetric=True)
if verbose:
print(self.report())
if num_pairs == -1:
return [(e, v) for e, v in eigenpairs]
else:
return [(e, v) for e, v in eigenpairs[:num_pairs]]
def eigen_solve(self, num_pairs=-1, extra_constr=None, verbose=False):
points = self.point_matrix()
edges = self.edge_matrix()
timer = SimpleTimer()
stiffness = spring_energy_matrix_accelerate_3D(points, edges, abstract_edges=[])
timer.checkpoint("K")
constraints = self.constraint_matrix()
if extra_constr is not None:
constraints = np.vstack((constraints, extra_constr))
K, B = generalized_courant_fischer(stiffness, constraints)
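        # B maps eigenvectors of the reduced (constrained) problem back to full coordinates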
eigenpairs = geo_util.eigen(K, symmetric=True)
timer.checkpoint("eig")
if verbose:
print(self.report())
timer.report()
if num_pairs == -1:
return [(e, B @ v) for e, v in eigenpairs[:]]
else:
return [(e, B @ v) for e, v in eigenpairs[:num_pairs]]
def __str__(self):
return str(self.report())
def report(self) -> dict:
return {
**{
"#parts": len(self.beams),
"#points": self.point_count,
"#joints": len(self.joints),
"#constraints": len(self.constraint_matrix())
},
**vars(self)
}
class Beam:
def __init__(self, points, edges=None, principle_points=None):
if edges is None:
index_range = range(len(points))
edges = np.array(list(itertools.combinations(index_range, 2)))
self._edges = edges
self.points = points
self.principle_points = principle_points
@classmethod
def crystal(cls, p1, p2, crystal_counts):
from solvers.rigidity_solver.internal_structure import get_crystal_vertices
orient = (p2 - p1) / np.linalg.norm(p2 - p1)
crystals = [get_crystal_vertices(c, orient) for c in np.linspace(p1, p2, num=crystal_counts)]
points = np.vstack(crystals)
return Beam(points)
@classmethod
def tetra(cls, p, q, thickness=1, density=0.333333, ori=None):
points, edges = tetrahedron(p, q, thickness=thickness, density=density, ori=ori)
return Beam(points, edges, principle_points=(p, q))
@classmethod
def dense_tetra(cls, p, q, density=0.333333, thickness=1, ori=None):
points, _ = tetrahedron(p, q, density=density, thickness=thickness, ori=ori)
return Beam(points, principle_points=(p, q))
@classmethod
def vertices(cls, points, orient):
orient = orient / np.linalg.norm(orient) * 10
points = np.vstack((points, points + orient))
return Beam(points)
@classmethod
def cube_as_mesh(cls, pivot, u, v, w):
hashes = hash((tuple(pivot), tuple(u), tuple(v), tuple(w)))
soup_filename = f"data/{hashes}.stl"
mesh_filename = f"data/{hashes}.mesh"
        if not os.path.exists(mesh_filename):
            meshgen.cube_surface_mesh(soup_filename, pivot, u, v, w)
            meshgen.tetrahedralize(soup_filename, mesh_filename)
        # from here on the logic is identical to from_mesh_file, so delegate
        return cls.from_mesh_file(mesh_filename)
@classmethod
def from_soup_file(cls, soup_filename: str):
mesh_filename = soup_filename.replace(".obj", ".mesh")
if not os.path.exists(mesh_filename):
meshgen.tetrahedralize(soup_filename, mesh_filename)
beam = cls.from_mesh_file(mesh_filename)
return beam
@classmethod
def from_mesh_file(cls, mesh_filename):
mesh = fem.Mesh.from_file(mesh_filename)
points = mesh.coors
nonzero_x, nonzero_y = mesh.create_conn_graph().nonzero()
edges = np.hstack((nonzero_x.reshape(-1, 1), nonzero_y.reshape(-1, 1)))
beam = Beam(points, edges)
beam.stiffness = stiffness_matrix_from_mesh(mesh_filename)
beam.mesh_filename = mesh_filename
return beam
@property
def edges(self) -> np.ndarray:
return self._edges
@property
def point_count(self):
return len(self.points)
class Joint:
def __init__(self, part1, part2, pivot,
rotation_axes=None, translation_vectors=None,
soft_translation=None, soft_rotation=None,
soft_translation_coeff=None, soft_rotation_coeff=None,
):
self.part1 = part1
self.part2 = part2
self.soft_translation = soft_translation
self.soft_rotation = soft_rotation
self.soft_translation_coeff = soft_translation_coeff
self.soft_rotation_coeff = soft_rotation_coeff
self.pivot = np.array(pivot)
assert self.pivot.shape == (3,), f"received pivot {self.pivot}, shape {self.pivot.shape}"
if rotation_axes is not None:
self.rotation_axes = np.array(rotation_axes).reshape(-1, 3)
assert np.linalg.matrix_rank(self.rotation_axes) == len(self.rotation_axes)
else:
self.rotation_axes = None
if translation_vectors is not None:
self.translation_vectors = np.array(translation_vectors).reshape(-1, 3)
assert self.translation_vectors.shape[1] == 3
assert np.linalg.matrix_rank(self.translation_vectors) == len(self.translation_vectors)
else:
self.translation_vectors = None
if soft_rotation is not None:
self.soft_rotation = np.array(soft_rotation).reshape(-1, 3)
assert np.linalg.matrix_rank(self.soft_rotation) == len(self.soft_rotation)
else:
self.soft_rotation = None
if soft_translation is not None:
self.soft_translation = np.array(soft_translation).reshape(-1, 3)
assert self.soft_translation.shape[1] == 3
assert np.linalg.matrix_rank(self.soft_translation) == len(self.soft_translation)
else:
self.soft_translation = None
def joint_stiffness(self, model: Model) -> np.ndarray:
dim = 3
source, target = self.part1, self.part2 # aliases
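        # three non-colinear points per part -> 2 parts * 3 points * 3 coords = 18 local DOFs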
source_points, source_point_indices = select_non_colinear_points(source.points, num=3, near=self.pivot)
target_points, target_point_indices = select_non_colinear_points(target.points, num=3, near=self.pivot)
source_point_indices += model.beam_point_index(source)
target_point_indices += model.beam_point_index(target)
# (n x 18) matrix, standing for prohibitive motion space
        translations = [v for v in (self.translation_vectors, self.soft_translation) if v is not None]
        rotations = [v for v in (self.rotation_axes, self.soft_rotation) if v is not None]
        # np.vstack raises on an empty list; fall back to None (no allowed motion)
        # for a fully melded joint, matching how linear_constraints passes None
        soft_allowed_translation = np.vstack(translations) if translations else None
        soft_allowed_rotation = np.vstack(rotations) if rotations else None
prohibitive = direction_for_relative_disallowed_motions(
source_points,
target_points,
rotation_pivot=self.pivot,
rotation_axes=soft_allowed_rotation,
translation_vectors=soft_allowed_translation,
)
prohibitive = geo_util.rowwise_normalize(prohibitive)
motion_basis = [prohibitive]
coefficients = [np.ones(prohibitive.shape[0])]
if self.soft_translation is not None:
relative_translation = np.vstack([direction_for_relative_disallowed_motions(source_points, target_points, rotation_pivot=self.pivot,
translation_vectors=scipy.linalg.null_space(translation.reshape(-1, 3)).T,
rotation_axes=np.eye(3))
for translation in self.soft_translation])
            assert relative_translation.shape == (len(self.soft_translation_coeff), 18), f"soft translation rows {relative_translation.shape} don't match coefficients {(len(self.soft_translation_coeff), 18)}"
motion_basis.append(relative_translation)
coefficients.append(self.soft_translation_coeff)
if self.soft_rotation is not None:
relative_rotation = np.vstack([direction_for_relative_disallowed_motions(source_points, target_points, rotation_pivot=self.pivot,
rotation_axes=scipy.linalg.null_space(rotation.reshape(-1, 3)).T,
translation_vectors=np.eye(3))
for rotation in self.soft_rotation])
assert relative_rotation.shape == (len(self.soft_rotation_coeff), 18)
motion_basis.append(relative_rotation)
coefficients.append(self.soft_rotation_coeff)
# cast to numpy array
motion_basis = np.vstack(motion_basis)
coefficients = np.concatenate(coefficients)
# (18 x m) @ (m x m) @ (m x 18) matrix
local_stiffness = motion_basis.T @ np.diag(coefficients) @ motion_basis
assert local_stiffness.shape == (18, 18)
        # scatter the 18x18 local stiffness into a zero matrix shaped like the global stiffness matrix
global_indices = np.concatenate((source_point_indices, target_point_indices))
stiffness_at_global = np.zeros((model.point_count * dim, model.point_count * dim))
for local_row_index, global_row_index in enumerate(global_indices):
for local_col_index, global_col_index in enumerate(global_indices):
l_row_slice = slice(local_row_index * 3, local_row_index * 3 + 3)
l_col_slice = slice(local_col_index * 3, local_col_index * 3 + 3)
g_row_slice = slice(global_row_index * 3, global_row_index * 3 + 3)
g_col_slice = slice(global_col_index * 3, global_col_index * 3 + 3)
stiffness_at_global[g_row_slice, g_col_slice] = local_stiffness[l_row_slice, l_col_slice]
return stiffness_at_global
def linear_constraints(self, model: Model) -> np.ndarray:
dim = 3
constraint_matrices = []
for source, target in [
(self.part1, self.part2),
(self.part2, self.part1)
]:
source_points, source_point_indices = select_non_colinear_points(source.points, num=3, near=self.pivot)
target_points, target_point_indices = select_non_colinear_points(target.points, num=3, near=self.pivot)
source_point_indices += model.beam_point_index(source)
target_point_indices += model.beam_point_index(target)
constraints = direction_for_relative_disallowed_motions(
source_points,
target_points,
rotation_pivot=self.pivot,
rotation_axes=self.rotation_axes,
translation_vectors=self.translation_vectors,
)
            i, j, k = source_point_indices
            zero_constraint = np.zeros((constraints.shape[0], model.point_count * dim))
            # scatter the source columns once; they do not depend on the target point
            zero_constraint[:, i * 3: (i + 1) * 3] = constraints[:, 0: 3]
            zero_constraint[:, j * 3: (j + 1) * 3] = constraints[:, 3: 6]
            zero_constraint[:, k * 3: (k + 1) * 3] = constraints[:, 6: 9]
            for index, l in enumerate(target_point_indices):
                zero_constraint[:, l * 3: (l + 1) * 3] = constraints[:, (index + 3) * 3: (index + 4) * 3]
            constraint_matrices.append(zero_constraint)
matrix = np.vstack(constraint_matrices)
return matrix
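# Minimal end-to-end sketch (hypothetical geometry; the endpoints, pivot, axis,
# and number of eigenpairs are illustrative choices, not values from this module):
# b1 = Beam.tetra(np.array([0., 0., 0.]), np.array([10., 0., 0.]))
# b2 = Beam.tetra(np.array([10., 0., 0.]), np.array([10., 10., 0.]))
# model = Model()
# model.add_beams([b1, b2])
# model.add_joint(Joint(b1, b2, pivot=(10., 0., 0.), rotation_axes=[(0., 0., 1.)]))
# eigenpairs = model.eigen_solve(num_pairs=6)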
|
[
"sfepy.discrete.fem.Mesh.from_file"
] |
[((965, 990), 'numpy.vstack', 'np.vstack', (['(beam_points,)'], {}), '((beam_points,))\n', (974, 990), True, 'import numpy as np\n'), ((1070, 1115), 'numpy.array', 'np.array', (['[b.point_count for b in self.beams]'], {}), '([b.point_count for b in self.beams])\n', (1078, 1115), True, 'import numpy as np\n'), ((1138, 1165), 'numpy.cumsum', 'np.cumsum', (['beam_point_count'], {}), '(beam_point_count)\n', (1147, 1165), True, 'import numpy as np\n'), ((1558, 1620), 'numpy.vstack', 'np.vstack', (['[edges for edges in edge_indices if edges.size > 0]'], {}), '([edges for edges in edge_indices if edges.size > 0])\n', (1567, 1620), True, 'import numpy as np\n'), ((2125, 2168), 'numpy.zeros', 'np.zeros', (['(count * 3, self.point_count * 3)'], {}), '((count * 3, self.point_count * 3))\n', (2133, 2168), True, 'import numpy as np\n'), ((3406, 3423), 'numpy.array', 'np.array', (['indices'], {}), '(indices)\n', (3414, 3423), True, 'import numpy as np\n'), ((6078, 6127), 'visualization.model_visualizer.o3d.visualization.draw_geometries', 'vis.o3d.visualization.draw_geometries', (['geometries'], {}), '(geometries)\n', (6115, 6127), True, 'import visualization.model_visualizer as vis\n'), ((6704, 6737), 'util.geometry_util.eigen', 'geo_util.eigen', (['K'], {'symmetric': '(True)'}), '(K, symmetric=True)\n', (6718, 6737), True, 'import util.geometry_util as geo_util\n'), ((7114, 7127), 'util.timer.SimpleTimer', 'SimpleTimer', ([], {}), '()\n', (7125, 7127), False, 'from util.timer import SimpleTimer\n'), ((7485, 7518), 'util.geometry_util.eigen', 'geo_util.eigen', (['K'], {'symmetric': '(True)'}), '(K, symmetric=True)\n', (7499, 7518), True, 'import util.geometry_util as geo_util\n'), ((8820, 8839), 'numpy.vstack', 'np.vstack', (['crystals'], {}), '(crystals)\n', (8829, 8839), True, 'import numpy as np\n'), ((9459, 9495), 'numpy.vstack', 'np.vstack', (['(points, points + orient)'], {}), '((points, points + orient))\n', (9468, 9495), True, 'import numpy as np\n'), ((9959, 9992), 'sfepy.discrete.fem.Mesh.from_file', 'fem.Mesh.from_file', (['mesh_filename'], {}), '(mesh_filename)\n', (9977, 9992), False, 'from sfepy.discrete import fem\n'), ((10722, 10755), 'sfepy.discrete.fem.Mesh.from_file', 'fem.Mesh.from_file', (['mesh_filename'], {}), '(mesh_filename)\n', (10740, 10755), False, 'from sfepy.discrete import fem\n'), ((11808, 11823), 'numpy.array', 'np.array', (['pivot'], {}), '(pivot)\n', (11816, 11823), True, 'import numpy as np\n'), ((13681, 13794), 'numpy.vstack', 'np.vstack', (['[vectors for vectors in (self.translation_vectors, self.soft_translation) if\n vectors is not None]'], {}), '([vectors for vectors in (self.translation_vectors, self.\n soft_translation) if vectors is not None])\n', (13690, 13794), True, 'import numpy as np\n'), ((13822, 13925), 'numpy.vstack', 'np.vstack', (['[vectors for vectors in (self.rotation_axes, self.soft_rotation) if vectors\n is not None]'], {}), '([vectors for vectors in (self.rotation_axes, self.soft_rotation) if\n vectors is not None])\n', (13831, 13925), True, 'import numpy as np\n'), ((14220, 14259), 'util.geometry_util.rowwise_normalize', 'geo_util.rowwise_normalize', (['prohibitive'], {}), '(prohibitive)\n', (14246, 14259), True, 'import util.geometry_util as geo_util\n'), ((15905, 15928), 'numpy.vstack', 'np.vstack', (['motion_basis'], {}), '(motion_basis)\n', (15914, 15928), True, 'import numpy as np\n'), ((15952, 15980), 'numpy.concatenate', 'np.concatenate', (['coefficients'], {}), '(coefficients)\n', (15966, 15980), True, 'import numpy as np\n'), ((16285, 
16345), 'numpy.concatenate', 'np.concatenate', (['(source_point_indices, target_point_indices)'], {}), '((source_point_indices, target_point_indices))\n', (16299, 16345), True, 'import numpy as np\n'), ((16376, 16436), 'numpy.zeros', 'np.zeros', (['(model.point_count * dim, model.point_count * dim)'], {}), '((model.point_count * dim, model.point_count * dim))\n', (16384, 16436), True, 'import numpy as np\n'), ((18668, 18698), 'numpy.vstack', 'np.vstack', (['constraint_matrices'], {}), '(constraint_matrices)\n', (18677, 18698), True, 'import numpy as np\n'), ((1237, 1258), 'numpy.arange', 'np.arange', (['start', 'end'], {}), '(start, end)\n', (1246, 1258), True, 'import numpy as np\n'), ((1926, 1943), 'numpy.vstack', 'np.vstack', (['matrix'], {}), '(matrix)\n', (1935, 1943), True, 'import numpy as np\n'), ((1968, 1979), 'numpy.empty', 'np.empty', (['(0)'], {}), '(0)\n', (1976, 1979), True, 'import numpy as np\n'), ((3597, 3643), 'json.dump', 'json.dump', (['self', 'f'], {'cls': 'ModelEncoder'}), '(self, f, cls=ModelEncoder, **kwargs)\n', (3606, 3643), False, 'import json\n'), ((5574, 5659), 'visualization.model_visualizer.o3d.geometry.TriangleMesh.create_coordinate_frame', 'vis.o3d.geometry.TriangleMesh.create_coordinate_frame', ([], {'size': '(10)', 'origin': '[0, 0, 0]'}), '(size=10, origin=[0, 0, 0]\n )\n', (5627, 5659), True, 'import visualization.model_visualizer as vis\n'), ((7358, 7396), 'numpy.vstack', 'np.vstack', (['(constraints, extra_constr)'], {}), '((constraints, extra_constr))\n', (7367, 7396), True, 'import numpy as np\n'), ((8677, 8700), 'numpy.linalg.norm', 'np.linalg.norm', (['(p2 - p1)'], {}), '(p2 - p1)\n', (8691, 8700), True, 'import numpy as np\n'), ((8721, 8752), 'solvers.rigidity_solver.internal_structure.get_crystal_vertices', 'get_crystal_vertices', (['c', 'orient'], {}), '(c, orient)\n', (8741, 8752), False, 'from solvers.rigidity_solver.internal_structure import get_crystal_vertices\n'), ((9778, 9807), 'os.path.exists', 'os.path.exists', (['mesh_filename'], {}), '(mesh_filename)\n', (9792, 9807), False, 'import os\n'), ((9821, 9877), 'util.meshgen.cube_surface_mesh', 'meshgen.cube_surface_mesh', (['soup_filename', 'pivot', 'u', 'v', 'w'], {}), '(soup_filename, pivot, u, v, w)\n', (9846, 9877), True, 'import util.meshgen as meshgen\n'), ((9890, 9942), 'util.meshgen.tetrahedralize', 'meshgen.tetrahedralize', (['soup_filename', 'mesh_filename'], {}), '(soup_filename, mesh_filename)\n', (9912, 9942), True, 'import util.meshgen as meshgen\n'), ((10479, 10508), 'os.path.exists', 'os.path.exists', (['mesh_filename'], {}), '(mesh_filename)\n', (10493, 10508), False, 'import os\n'), ((10522, 10574), 'util.meshgen.tetrahedralize', 'meshgen.tetrahedralize', (['soup_filename', 'mesh_filename'], {}), '(soup_filename, mesh_filename)\n', (10544, 10574), True, 'import util.meshgen as meshgen\n'), ((14322, 14351), 'numpy.ones', 'np.ones', (['prohibitive.shape[0]'], {}), '(prohibitive.shape[0])\n', (14329, 14351), True, 'import numpy as np\n'), ((18087, 18144), 'numpy.zeros', 'np.zeros', (['(constraints.shape[0], model.point_count * dim)'], {}), '((constraints.shape[0], model.point_count * dim))\n', (18095, 18144), True, 'import numpy as np\n'), ((803, 844), 'numpy.vstack', 'np.vstack', (['[b.points for b in self.beams]'], {}), '([b.points for b in self.beams])\n', (812, 844), True, 'import numpy as np\n'), ((5053, 5157), 'visualization.model_visualizer.get_mesh_for_arrows', 'vis.get_mesh_for_arrows', (['translation_pivots', 'translation_vector'], {'length_coeff': '(0.01)', 
'radius_coeff': '(0.4)'}), '(translation_pivots, translation_vector,\n length_coeff=0.01, radius_coeff=0.4)\n', (5076, 5157), True, 'import visualization.model_visualizer as vis\n'), ((5461, 5499), 'visualization.model_visualizer.get_mesh_for_points', 'vis.get_mesh_for_points', (['melded_points'], {}), '(melded_points)\n', (5484, 5499), True, 'import visualization.model_visualizer as vis\n'), ((8762, 8801), 'numpy.linspace', 'np.linspace', (['p1', 'p2'], {'num': 'crystal_counts'}), '(p1, p2, num=crystal_counts)\n', (8773, 8801), True, 'import numpy as np\n'), ((9414, 9436), 'numpy.linalg.norm', 'np.linalg.norm', (['orient'], {}), '(orient)\n', (9428, 9436), True, 'import numpy as np\n'), ((12052, 12093), 'numpy.linalg.matrix_rank', 'np.linalg.matrix_rank', (['self.rotation_axes'], {}), '(self.rotation_axes)\n', (12073, 12093), True, 'import numpy as np\n'), ((12379, 12426), 'numpy.linalg.matrix_rank', 'np.linalg.matrix_rank', (['self.translation_vectors'], {}), '(self.translation_vectors)\n', (12400, 12426), True, 'import numpy as np\n'), ((12648, 12689), 'numpy.linalg.matrix_rank', 'np.linalg.matrix_rank', (['self.soft_rotation'], {}), '(self.soft_rotation)\n', (12669, 12689), True, 'import numpy as np\n'), ((12963, 13007), 'numpy.linalg.matrix_rank', 'np.linalg.matrix_rank', (['self.soft_translation'], {}), '(self.soft_translation)\n', (12984, 13007), True, 'import numpy as np\n'), ((16072, 16093), 'numpy.diag', 'np.diag', (['coefficients'], {}), '(coefficients)\n', (16079, 16093), True, 'import numpy as np\n'), ((4543, 4576), 'util.geometry_util.normalize', 'geo_util.normalize', (['rotation_axes'], {}), '(rotation_axes)\n', (4561, 4576), True, 'import util.geometry_util as geo_util\n'), ((8352, 8390), 'itertools.combinations', 'itertools.combinations', (['index_range', '(2)'], {}), '(index_range, 2)\n', (8374, 8390), False, 'import itertools\n'), ((11994, 12017), 'numpy.array', 'np.array', (['rotation_axes'], {}), '(rotation_axes)\n', (12002, 12017), True, 'import numpy as np\n'), ((12257, 12286), 'numpy.array', 'np.array', (['translation_vectors'], {}), '(translation_vectors)\n', (12265, 12286), True, 'import numpy as np\n'), ((12590, 12613), 'numpy.array', 'np.array', (['soft_rotation'], {}), '(soft_rotation)\n', (12598, 12613), True, 'import numpy as np\n'), ((12847, 12873), 'numpy.array', 'np.array', (['soft_translation'], {}), '(soft_translation)\n', (12855, 12873), True, 'import numpy as np\n'), ((14749, 14758), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (14755, 14758), True, 'import numpy as np\n'), ((15569, 15578), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (15575, 15578), True, 'import numpy as np\n')]
|
"""record model
Revision ID: 6c2a16b349b1
Revises: 2bafd0d01ae2
Create Date: 2021-11-20 18:51:45.427996
"""
import sqlalchemy as sa
import sqlmodel
import sqlmodel.sql.sqltypes
from alembic import op
# revision identifiers, used by Alembic.
revision = "6c2a16b349b1"
down_revision = "2bafd0d01ae2"
branch_labels = None
depends_on = None
def upgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"records",
sa.Column(
"created_at",
sa.DateTime(timezone=True),
server_default=sa.text("TIMEZONE('utc', CURRENT_TIMESTAMP)"),
nullable=False,
),
sa.Column(
"updated_at",
sa.DateTime(timezone=True),
server_default=sa.text("TIMEZONE('utc', CURRENT_TIMESTAMP)"),
nullable=False,
),
sa.Column("problem_set_id", sqlmodel.sql.sqltypes.GUID(), nullable=True),
sa.Column("problem_id", sqlmodel.sql.sqltypes.GUID(), nullable=True),
sa.Column("user_id", sqlmodel.sql.sqltypes.GUID(), nullable=True),
sa.Column("id", sqlmodel.sql.sqltypes.GUID(), nullable=False),
sa.Column("status", sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column("score", sa.Integer(), nullable=True),
sa.Column("time_ms", sa.Integer(), nullable=True),
sa.Column("memory_kb", sa.Integer(), nullable=True),
sa.Column("commit_id", sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.ForeignKeyConstraint(["problem_id"], ["problems.id"], ondelete="SET NULL"),
sa.ForeignKeyConstraint(
["problem_set_id"], ["problem_sets.id"], ondelete="SET NULL"
),
sa.ForeignKeyConstraint(["user_id"], ["users.id"], ondelete="SET NULL"),
sa.PrimaryKeyConstraint("id"),
)
op.create_index(
op.f("ix_records_commit_id"), "records", ["commit_id"], unique=False
)
op.create_index(
op.f("ix_records_created_at"), "records", ["created_at"], unique=False
)
op.create_index(op.f("ix_records_id"), "records", ["id"], unique=False)
op.create_index(
op.f("ix_records_memory_kb"), "records", ["memory_kb"], unique=False
)
op.create_index(op.f("ix_records_score"), "records", ["score"], unique=False)
op.create_index(op.f("ix_records_status"), "records", ["status"], unique=False)
op.create_index(op.f("ix_records_time_ms"), "records", ["time_ms"], unique=False)
op.create_index(
op.f("ix_records_updated_at"), "records", ["updated_at"], unique=False
)
# ### end Alembic commands ###
def downgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f("ix_records_updated_at"), table_name="records")
op.drop_index(op.f("ix_records_time_ms"), table_name="records")
op.drop_index(op.f("ix_records_status"), table_name="records")
op.drop_index(op.f("ix_records_score"), table_name="records")
op.drop_index(op.f("ix_records_memory_kb"), table_name="records")
op.drop_index(op.f("ix_records_id"), table_name="records")
op.drop_index(op.f("ix_records_created_at"), table_name="records")
op.drop_index(op.f("ix_records_commit_id"), table_name="records")
op.drop_table("records")
# ### end Alembic commands ###
|
[
"sqlmodel.sql.sqltypes.GUID",
"sqlmodel.sql.sqltypes.AutoString"
] |
[((3269, 3293), 'alembic.op.drop_table', 'op.drop_table', (['"""records"""'], {}), "('records')\n", (3282, 3293), False, 'from alembic import op\n'), ((1521, 1598), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['problem_id']", "['problems.id']"], {'ondelete': '"""SET NULL"""'}), "(['problem_id'], ['problems.id'], ondelete='SET NULL')\n", (1544, 1598), True, 'import sqlalchemy as sa\n'), ((1608, 1698), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['problem_set_id']", "['problem_sets.id']"], {'ondelete': '"""SET NULL"""'}), "(['problem_set_id'], ['problem_sets.id'], ondelete=\n 'SET NULL')\n", (1631, 1698), True, 'import sqlalchemy as sa\n'), ((1725, 1796), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['user_id']", "['users.id']"], {'ondelete': '"""SET NULL"""'}), "(['user_id'], ['users.id'], ondelete='SET NULL')\n", (1748, 1796), True, 'import sqlalchemy as sa\n'), ((1806, 1835), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""'], {}), "('id')\n", (1829, 1835), True, 'import sqlalchemy as sa\n'), ((1872, 1900), 'alembic.op.f', 'op.f', (['"""ix_records_commit_id"""'], {}), "('ix_records_commit_id')\n", (1876, 1900), False, 'from alembic import op\n'), ((1976, 2005), 'alembic.op.f', 'op.f', (['"""ix_records_created_at"""'], {}), "('ix_records_created_at')\n", (1980, 2005), False, 'from alembic import op\n'), ((2073, 2094), 'alembic.op.f', 'op.f', (['"""ix_records_id"""'], {}), "('ix_records_id')\n", (2077, 2094), False, 'from alembic import op\n'), ((2158, 2186), 'alembic.op.f', 'op.f', (['"""ix_records_memory_kb"""'], {}), "('ix_records_memory_kb')\n", (2162, 2186), False, 'from alembic import op\n'), ((2253, 2277), 'alembic.op.f', 'op.f', (['"""ix_records_score"""'], {}), "('ix_records_score')\n", (2257, 2277), False, 'from alembic import op\n'), ((2335, 2360), 'alembic.op.f', 'op.f', (['"""ix_records_status"""'], {}), "('ix_records_status')\n", (2339, 2360), False, 'from alembic import op\n'), ((2419, 2445), 'alembic.op.f', 'op.f', (['"""ix_records_time_ms"""'], {}), "('ix_records_time_ms')\n", (2423, 2445), False, 'from alembic import op\n'), ((2514, 2543), 'alembic.op.f', 'op.f', (['"""ix_records_updated_at"""'], {}), "('ix_records_updated_at')\n", (2518, 2543), False, 'from alembic import op\n'), ((2737, 2766), 'alembic.op.f', 'op.f', (['"""ix_records_updated_at"""'], {}), "('ix_records_updated_at')\n", (2741, 2766), False, 'from alembic import op\n'), ((2808, 2834), 'alembic.op.f', 'op.f', (['"""ix_records_time_ms"""'], {}), "('ix_records_time_ms')\n", (2812, 2834), False, 'from alembic import op\n'), ((2876, 2901), 'alembic.op.f', 'op.f', (['"""ix_records_status"""'], {}), "('ix_records_status')\n", (2880, 2901), False, 'from alembic import op\n'), ((2943, 2967), 'alembic.op.f', 'op.f', (['"""ix_records_score"""'], {}), "('ix_records_score')\n", (2947, 2967), False, 'from alembic import op\n'), ((3009, 3037), 'alembic.op.f', 'op.f', (['"""ix_records_memory_kb"""'], {}), "('ix_records_memory_kb')\n", (3013, 3037), False, 'from alembic import op\n'), ((3079, 3100), 'alembic.op.f', 'op.f', (['"""ix_records_id"""'], {}), "('ix_records_id')\n", (3083, 3100), False, 'from alembic import op\n'), ((3142, 3171), 'alembic.op.f', 'op.f', (['"""ix_records_created_at"""'], {}), "('ix_records_created_at')\n", (3146, 3171), False, 'from alembic import op\n'), ((3213, 3241), 'alembic.op.f', 'op.f', (['"""ix_records_commit_id"""'], {}), "('ix_records_commit_id')\n", (3217, 3241), False, 'from alembic import op\n'), 
((528, 554), 'sqlalchemy.DateTime', 'sa.DateTime', ([], {'timezone': '(True)'}), '(timezone=True)\n', (539, 554), True, 'import sqlalchemy as sa\n'), ((726, 752), 'sqlalchemy.DateTime', 'sa.DateTime', ([], {'timezone': '(True)'}), '(timezone=True)\n', (737, 752), True, 'import sqlalchemy as sa\n'), ((903, 931), 'sqlmodel.sql.sqltypes.GUID', 'sqlmodel.sql.sqltypes.GUID', ([], {}), '()\n', (929, 931), False, 'import sqlmodel\n'), ((981, 1009), 'sqlmodel.sql.sqltypes.GUID', 'sqlmodel.sql.sqltypes.GUID', ([], {}), '()\n', (1007, 1009), False, 'import sqlmodel\n'), ((1056, 1084), 'sqlmodel.sql.sqltypes.GUID', 'sqlmodel.sql.sqltypes.GUID', ([], {}), '()\n', (1082, 1084), False, 'import sqlmodel\n'), ((1126, 1154), 'sqlmodel.sql.sqltypes.GUID', 'sqlmodel.sql.sqltypes.GUID', ([], {}), '()\n', (1152, 1154), False, 'import sqlmodel\n'), ((1201, 1235), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (1233, 1235), False, 'import sqlmodel\n'), ((1280, 1292), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (1290, 1292), True, 'import sqlalchemy as sa\n'), ((1339, 1351), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (1349, 1351), True, 'import sqlalchemy as sa\n'), ((1400, 1412), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (1410, 1412), True, 'import sqlalchemy as sa\n'), ((1461, 1495), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (1493, 1495), False, 'import sqlmodel\n'), ((583, 628), 'sqlalchemy.text', 'sa.text', (['"""TIMEZONE(\'utc\', CURRENT_TIMESTAMP)"""'], {}), '("TIMEZONE(\'utc\', CURRENT_TIMESTAMP)")\n', (590, 628), True, 'import sqlalchemy as sa\n'), ((781, 826), 'sqlalchemy.text', 'sa.text', (['"""TIMEZONE(\'utc\', CURRENT_TIMESTAMP)"""'], {}), '("TIMEZONE(\'utc\', CURRENT_TIMESTAMP)")\n', (788, 826), True, 'import sqlalchemy as sa\n')]
|
#!/usr/bin/env python
"""
Compare various elastic materials w.r.t. uniaxial tension/compression test.
Requires Matplotlib.
"""
from optparse import OptionParser
import sys
sys.path.append('.')
import numpy as nm
def define():
"""Define the problem to solve."""
from sfepy.discrete.fem.meshio import UserMeshIO
from sfepy.mesh.mesh_generators import gen_block_mesh
from sfepy.mechanics.matcoefs import stiffness_from_lame
def mesh_hook(mesh, mode):
"""
Generate the block mesh.
"""
if mode == 'read':
mesh = gen_block_mesh([2, 2, 3], [2, 2, 4], [0, 0, 1.5], name='el3',
verbose=False)
return mesh
elif mode == 'write':
pass
filename_mesh = UserMeshIO(mesh_hook)
options = {
'nls' : 'newton',
'ls' : 'ls',
'ts' : 'ts',
'save_steps' : -1,
}
functions = {
'linear_tension' : (linear_tension,),
'linear_compression' : (linear_compression,),
'empty' : (lambda ts, coor, mode, region, ig: None,),
}
fields = {
'displacement' : ('real', 3, 'Omega', 1),
}
    # Coefficients are chosen so that the tangent stiffness is the same for all
    # materials at zero strain.
    # Young's modulus = 10 kPa, Poisson's ratio = 0.3
materials = {
'solid' : ({
'K' : 8.333, # bulk modulus
'mu_nh' : 3.846, # shear modulus of neoHookean term
'mu_mr' : 1.923, # shear modulus of Mooney-Rivlin term
'kappa' : 1.923, # second modulus of Mooney-Rivlin term
# elasticity for LE term
'D' : stiffness_from_lame(dim=3, lam=5.769, mu=3.846),
},),
'load' : 'empty',
}
variables = {
'u' : ('unknown field', 'displacement', 0),
'v' : ('test field', 'displacement', 'u'),
}
regions = {
'Omega' : 'all',
'Bottom' : ('vertices in (z < 0.1)', 'facet'),
'Top' : ('vertices in (z > 2.9)', 'facet'),
}
ebcs = {
'fixb' : ('Bottom', {'u.all' : 0.0}),
'fixt' : ('Top', {'u.[0,1]' : 0.0}),
}
integrals = {
'i' : 1,
'isurf' : 2,
}
equations = {
'linear' : """dw_lin_elastic.i.Omega(solid.D, v, u)
= dw_surface_ltr.isurf.Top(load.val, v)""",
'neo-Hookean' : """dw_tl_he_neohook.i.Omega(solid.mu_nh, v, u)
+ dw_tl_bulk_penalty.i.Omega(solid.K, v, u)
= dw_surface_ltr.isurf.Top(load.val, v)""",
'Mooney-Rivlin' : """dw_tl_he_neohook.i.Omega(solid.mu_mr, v, u)
+ dw_tl_he_mooney_rivlin.i.Omega(solid.kappa, v, u)
+ dw_tl_bulk_penalty.i.Omega(solid.K, v, u)
= dw_surface_ltr.isurf.Top(load.val, v)""",
}
solvers = {
'ls' : ('ls.scipy_direct', {}),
'newton' : ('nls.newton', {
'i_max' : 5,
'eps_a' : 1e-10,
'eps_r' : 1.0,
}),
'ts' : ('ts.simple', {
't0' : 0,
't1' : 1,
'dt' : None,
'n_step' : 101, # has precedence over dt!
}),
}
return locals()
##
# Pressure tractions.
def linear_tension(ts, coor, mode=None, **kwargs):
if mode == 'qp':
val = nm.tile(0.1 * ts.step, (coor.shape[0], 1, 1))
return {'val' : val}
def linear_compression(ts, coor, mode=None, **kwargs):
if mode == 'qp':
val = nm.tile(-0.1 * ts.step, (coor.shape[0], 1, 1))
return {'val' : val}
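# The two tractions differ only in sign: a positive val loads the Top facet in
# tension, a negative one in compression; both ramp linearly with ts.step.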
def store_top_u(displacements):
"""Function _store() will be called at the end of each loading step. Top
displacements will be stored into `displacements`."""
def _store(problem, ts, state):
top = problem.domain.regions['Top']
top_u = problem.get_variables()['u'].get_state_in_region(top)
displacements.append(nm.mean(top_u[:,-1]))
return _store
def solve_branch(problem, branch_function):
displacements = {}
    for key, eq in problem.conf.equations.items():
problem.set_equations({key : eq})
load = problem.get_materials()['load']
load.set_function(branch_function)
time_solver = problem.get_time_solver()
time_solver.init_time()
out = []
for _ in time_solver(save_results=False, step_hook=store_top_u(out)):
pass
displacements[key] = nm.array(out, dtype=nm.float64)
return displacements
usage = '%prog [options]\n' + __doc__.rstrip()
helps = {
'no_plot' : 'do not show plot window',
}
def main():
from sfepy.base.base import output
from sfepy.base.conf import ProblemConf, get_standard_keywords
from sfepy.discrete import Problem
from sfepy.base.plotutils import plt
parser = OptionParser(usage=usage, version='%prog')
parser.add_option('-n', '--no-plot',
action="store_true", dest='no_plot',
default=False, help=helps['no_plot'])
options, args = parser.parse_args()
required, other = get_standard_keywords()
# Use this file as the input file.
conf = ProblemConf.from_file(__file__, required, other)
# Create problem instance, but do not set equations.
problem = Problem.from_conf(conf, init_equations=False)
    # Solve the problem. The return value is ignored; results are collected by
    # the step_hook.
u_t = solve_branch(problem, linear_tension)
u_c = solve_branch(problem, linear_compression)
# Get pressure load by calling linear_*() for each time step.
ts = problem.get_timestepper()
load_t = nm.array([linear_tension(ts, nm.array([[0.0]]), 'qp')['val']
for aux in ts.iter_from(0)],
dtype=nm.float64).squeeze()
load_c = nm.array([linear_compression(ts, nm.array([[0.0]]), 'qp')['val']
for aux in ts.iter_from(0)],
dtype=nm.float64).squeeze()
# Join the branches.
displacements = {}
for key in u_t.keys():
displacements[key] = nm.r_[u_c[key][::-1], u_t[key]]
load = nm.r_[load_c[::-1], load_t]
if plt is None:
output('matplotlib cannot be imported, printing raw data!')
output(displacements)
output(load)
else:
legend = []
        for key, val in displacements.items():
plt.plot(load, val)
legend.append(key)
plt.legend(legend, loc = 2)
plt.xlabel('tension [kPa]')
plt.ylabel('displacement [mm]')
plt.grid(True)
plt.gcf().savefig('pressure_displacement.png')
if not options.no_plot:
plt.show()
if __name__ == '__main__':
main()
|
[
"sfepy.base.plotutils.plt.ylabel",
"sfepy.mesh.mesh_generators.gen_block_mesh",
"sfepy.base.plotutils.plt.xlabel",
"sfepy.base.plotutils.plt.legend",
"sfepy.base.conf.ProblemConf.from_file",
"sfepy.discrete.fem.meshio.UserMeshIO",
"sfepy.base.plotutils.plt.grid",
"sfepy.base.plotutils.plt.plot",
"sfepy.base.plotutils.plt.gcf",
"sfepy.base.plotutils.plt.show",
"sfepy.base.conf.get_standard_keywords",
"sfepy.base.base.output",
"sfepy.discrete.Problem.from_conf",
"sfepy.mechanics.matcoefs.stiffness_from_lame"
] |
[((173, 193), 'sys.path.append', 'sys.path.append', (['"""."""'], {}), "('.')\n", (188, 193), False, 'import sys\n'), ((779, 800), 'sfepy.discrete.fem.meshio.UserMeshIO', 'UserMeshIO', (['mesh_hook'], {}), '(mesh_hook)\n', (789, 800), False, 'from sfepy.discrete.fem.meshio import UserMeshIO\n'), ((4880, 4922), 'optparse.OptionParser', 'OptionParser', ([], {'usage': 'usage', 'version': '"""%prog"""'}), "(usage=usage, version='%prog')\n", (4892, 4922), False, 'from optparse import OptionParser\n'), ((5146, 5169), 'sfepy.base.conf.get_standard_keywords', 'get_standard_keywords', ([], {}), '()\n', (5167, 5169), False, 'from sfepy.base.conf import ProblemConf, get_standard_keywords\n'), ((5220, 5268), 'sfepy.base.conf.ProblemConf.from_file', 'ProblemConf.from_file', (['__file__', 'required', 'other'], {}), '(__file__, required, other)\n', (5241, 5268), False, 'from sfepy.base.conf import ProblemConf, get_standard_keywords\n'), ((5341, 5386), 'sfepy.discrete.Problem.from_conf', 'Problem.from_conf', (['conf'], {'init_equations': '(False)'}), '(conf, init_equations=False)\n', (5358, 5386), False, 'from sfepy.discrete import Problem\n'), ((3395, 3440), 'numpy.tile', 'nm.tile', (['(0.1 * ts.step)', '(coor.shape[0], 1, 1)'], {}), '(0.1 * ts.step, (coor.shape[0], 1, 1))\n', (3402, 3440), True, 'import numpy as nm\n'), ((3561, 3607), 'numpy.tile', 'nm.tile', (['(-0.1 * ts.step)', '(coor.shape[0], 1, 1)'], {}), '(-0.1 * ts.step, (coor.shape[0], 1, 1))\n', (3568, 3607), True, 'import numpy as nm\n'), ((4506, 4537), 'numpy.array', 'nm.array', (['out'], {'dtype': 'nm.float64'}), '(out, dtype=nm.float64)\n', (4514, 4537), True, 'import numpy as nm\n'), ((6241, 6300), 'sfepy.base.base.output', 'output', (['"""matplotlib cannot be imported, printing raw data!"""'], {}), "('matplotlib cannot be imported, printing raw data!')\n", (6247, 6300), False, 'from sfepy.base.base import output\n'), ((6309, 6330), 'sfepy.base.base.output', 'output', (['displacements'], {}), '(displacements)\n', (6315, 6330), False, 'from sfepy.base.base import output\n'), ((6339, 6351), 'sfepy.base.base.output', 'output', (['load'], {}), '(load)\n', (6345, 6351), False, 'from sfepy.base.base import output\n'), ((6505, 6530), 'sfepy.base.plotutils.plt.legend', 'plt.legend', (['legend'], {'loc': '(2)'}), '(legend, loc=2)\n', (6515, 6530), False, 'from sfepy.base.plotutils import plt\n'), ((6541, 6568), 'sfepy.base.plotutils.plt.xlabel', 'plt.xlabel', (['"""tension [kPa]"""'], {}), "('tension [kPa]')\n", (6551, 6568), False, 'from sfepy.base.plotutils import plt\n'), ((6577, 6608), 'sfepy.base.plotutils.plt.ylabel', 'plt.ylabel', (['"""displacement [mm]"""'], {}), "('displacement [mm]')\n", (6587, 6608), False, 'from sfepy.base.plotutils import plt\n'), ((6617, 6631), 'sfepy.base.plotutils.plt.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (6625, 6631), False, 'from sfepy.base.plotutils import plt\n'), ((575, 651), 'sfepy.mesh.mesh_generators.gen_block_mesh', 'gen_block_mesh', (['[2, 2, 3]', '[2, 2, 4]', '[0, 0, 1.5]'], {'name': '"""el3"""', 'verbose': '(False)'}), "([2, 2, 3], [2, 2, 4], [0, 0, 1.5], name='el3', verbose=False)\n", (589, 651), False, 'from sfepy.mesh.mesh_generators import gen_block_mesh\n'), ((3986, 4007), 'numpy.mean', 'nm.mean', (['top_u[:, -1]'], {}), '(top_u[:, -1])\n', (3993, 4007), True, 'import numpy as nm\n'), ((6445, 6464), 'sfepy.base.plotutils.plt.plot', 'plt.plot', (['load', 'val'], {}), '(load, val)\n', (6453, 6464), False, 'from sfepy.base.plotutils import plt\n'), ((6733, 6743), 
'sfepy.base.plotutils.plt.show', 'plt.show', ([], {}), '()\n', (6741, 6743), False, 'from sfepy.base.plotutils import plt\n'), ((1678, 1725), 'sfepy.mechanics.matcoefs.stiffness_from_lame', 'stiffness_from_lame', ([], {'dim': '(3)', 'lam': '(5.769)', 'mu': '(3.846)'}), '(dim=3, lam=5.769, mu=3.846)\n', (1697, 1725), False, 'from sfepy.mechanics.matcoefs import stiffness_from_lame\n'), ((6641, 6650), 'sfepy.base.plotutils.plt.gcf', 'plt.gcf', ([], {}), '()\n', (6648, 6650), False, 'from sfepy.base.plotutils import plt\n'), ((5721, 5738), 'numpy.array', 'nm.array', (['[[0.0]]'], {}), '([[0.0]])\n', (5729, 5738), True, 'import numpy as nm\n'), ((5901, 5918), 'numpy.array', 'nm.array', (['[[0.0]]'], {}), '([[0.0]])\n', (5909, 5918), True, 'import numpy as nm\n')]
|
"""v1-messages
Revision ID: b01986f67aa3
Revises: <KEY>
Create Date: 2022-06-01 16:00:25.954662
"""
from alembic import op
import sqlalchemy as sa
import sqlmodel
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = "<KEY>"
down_revision = "c9b007919a5d"
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"message",
sa.Column(
"message_id",
postgresql.UUID(as_uuid=True),
server_default=sa.text("gen_random_uuid()"),
nullable=False,
),
sa.Column("tags", postgresql.ARRAY(sa.String()), nullable=True),
sa.Column("sent_time", postgresql.TIMESTAMP(), nullable=True),
sa.Column(
"created_at",
postgresql.TIMESTAMP(),
server_default=sa.text("now()"),
nullable=False,
),
sa.Column(
"updated_at",
postgresql.TIMESTAMP(),
server_default=sa.text("now()"),
nullable=False,
),
sa.Column("tenant_id", sqlmodel.sql.sqltypes.GUID(), nullable=False),
sa.Column("contact_id", sqlmodel.sql.sqltypes.GUID(), nullable=False),
sa.Column("status", sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column("role", sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column("deleted", sa.Boolean(), nullable=False),
sa.Column("content", sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column(
"revocation_comment", sqlmodel.sql.sqltypes.AutoString(), nullable=True
),
sa.Column("state", sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.ForeignKeyConstraint(
["contact_id"],
["contact.contact_id"],
),
sa.ForeignKeyConstraint(
["tenant_id"],
["tenant.id"],
),
sa.PrimaryKeyConstraint("message_id"),
)
op.create_index(
op.f("ix_message_contact_id"), "message", ["contact_id"], unique=False
)
op.create_index(
op.f("ix_message_tenant_id"), "message", ["tenant_id"], unique=False
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f("ix_message_tenant_id"), table_name="message")
op.drop_index(op.f("ix_message_contact_id"), table_name="message")
op.drop_table("message")
# ### end Alembic commands ###
|
[
"sqlmodel.sql.sqltypes.AutoString",
"sqlmodel.sql.sqltypes.GUID"
] |
[((2506, 2530), 'alembic.op.drop_table', 'op.drop_table', (['"""message"""'], {}), "('message')\n", (2519, 2530), False, 'from alembic import op\n'), ((1780, 1843), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['contact_id']", "['contact.contact_id']"], {}), "(['contact_id'], ['contact.contact_id'])\n", (1803, 1843), True, 'import sqlalchemy as sa\n'), ((1888, 1941), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['tenant_id']", "['tenant.id']"], {}), "(['tenant_id'], ['tenant.id'])\n", (1911, 1941), True, 'import sqlalchemy as sa\n'), ((1986, 2023), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""message_id"""'], {}), "('message_id')\n", (2009, 2023), True, 'import sqlalchemy as sa\n'), ((2060, 2089), 'alembic.op.f', 'op.f', (['"""ix_message_contact_id"""'], {}), "('ix_message_contact_id')\n", (2064, 2089), False, 'from alembic import op\n'), ((2166, 2194), 'alembic.op.f', 'op.f', (['"""ix_message_tenant_id"""'], {}), "('ix_message_tenant_id')\n", (2170, 2194), False, 'from alembic import op\n'), ((2379, 2407), 'alembic.op.f', 'op.f', (['"""ix_message_tenant_id"""'], {}), "('ix_message_tenant_id')\n", (2383, 2407), False, 'from alembic import op\n'), ((2449, 2478), 'alembic.op.f', 'op.f', (['"""ix_message_contact_id"""'], {}), "('ix_message_contact_id')\n", (2453, 2478), False, 'from alembic import op\n'), ((519, 548), 'sqlalchemy.dialects.postgresql.UUID', 'postgresql.UUID', ([], {'as_uuid': '(True)'}), '(as_uuid=True)\n', (534, 548), False, 'from sqlalchemy.dialects import postgresql\n'), ((750, 772), 'sqlalchemy.dialects.postgresql.TIMESTAMP', 'postgresql.TIMESTAMP', ([], {}), '()\n', (770, 772), False, 'from sqlalchemy.dialects import postgresql\n'), ((847, 869), 'sqlalchemy.dialects.postgresql.TIMESTAMP', 'postgresql.TIMESTAMP', ([], {}), '()\n', (867, 869), False, 'from sqlalchemy.dialects import postgresql\n'), ((1012, 1034), 'sqlalchemy.dialects.postgresql.TIMESTAMP', 'postgresql.TIMESTAMP', ([], {}), '()\n', (1032, 1034), False, 'from sqlalchemy.dialects import postgresql\n'), ((1151, 1179), 'sqlmodel.sql.sqltypes.GUID', 'sqlmodel.sql.sqltypes.GUID', ([], {}), '()\n', (1177, 1179), False, 'import sqlmodel\n'), ((1230, 1258), 'sqlmodel.sql.sqltypes.GUID', 'sqlmodel.sql.sqltypes.GUID', ([], {}), '()\n', (1256, 1258), False, 'import sqlmodel\n'), ((1305, 1339), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (1337, 1339), False, 'import sqlmodel\n'), ((1384, 1418), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (1416, 1418), False, 'import sqlmodel\n'), ((1466, 1478), 'sqlalchemy.Boolean', 'sa.Boolean', ([], {}), '()\n', (1476, 1478), True, 'import sqlalchemy as sa\n'), ((1526, 1560), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (1558, 1560), False, 'import sqlmodel\n'), ((1631, 1665), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (1663, 1665), False, 'import sqlmodel\n'), ((1719, 1753), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (1751, 1753), False, 'import sqlmodel\n'), ((577, 605), 'sqlalchemy.text', 'sa.text', (['"""gen_random_uuid()"""'], {}), "('gen_random_uuid()')\n", (584, 605), True, 'import sqlalchemy as sa\n'), ((689, 700), 'sqlalchemy.String', 'sa.String', ([], {}), '()\n', (698, 700), True, 'import sqlalchemy as sa\n'), ((898, 914), 'sqlalchemy.text', 'sa.text', (['"""now()"""'], {}), "('now()')\n", (905, 914), True, 'import sqlalchemy as sa\n'), ((1063, 1079), 'sqlalchemy.text', 'sa.text', (['"""now()"""'], {}), "('now()')\n", (1070, 1079), True, 'import sqlalchemy as sa\n')]
|
#!/usr/bin/env python
import sys
sys.path.append('.')
from optparse import OptionParser
from sfepy.base.base import nm, output
from sfepy.fem import Mesh
from sfepy.fem.meshio import io_table, supported_capabilities
usage = """%prog [options] filename_in filename_out
Convert a mesh file from one SfePy-supported format to another.
Examples:
$ ./script/convert_mesh.py meshes/3d/cylinder.mesh new.vtk
$ ./script/convert_mesh.py meshes/3d/cylinder.mesh new.vtk -s2.5
$ ./script/convert_mesh.py meshes/3d/cylinder.mesh new.vtk -s0.5,2,1
"""
help = {
'scale' : 'scale factor [default: %default]',
'format' : 'output mesh format (overrides filename_out extension)',
'list' : 'list supported writable output mesh formats',
}
def output_writable_meshes():
output('Supported writable mesh formats are:')
for key, val in supported_capabilities.iteritems():
if 'w' in val:
output(key)
def main():
parser = OptionParser(usage=usage)
parser.add_option("-s", "--scale", metavar='scale',
action="store", dest="scale",
default=None, help=help['scale'])
parser.add_option("-f", "--format", metavar='format',
action="store", type='string', dest="format",
default=None, help=help['format'])
parser.add_option("-l", "--list", action="store_true",
dest="list", help=help['list'])
(options, args) = parser.parse_args()
if options.list:
output_writable_meshes()
sys.exit(0)
if len(args) != 2:
parser.print_help()
sys.exit(1)
scale = options.scale
if scale is not None:
try:
try:
scale = float(scale)
except ValueError:
scale = [float(ii) for ii in scale.split(',')]
scale = nm.array(scale, dtype=nm.float64, ndmin=1)
except:
output('bad scale! (%s)' % scale)
parser.print_help()
sys.exit(1)
filename_in, filename_out = args
mesh = Mesh.from_file(filename_in)
if scale is not None:
if len(scale) == 1:
tr = nm.eye(mesh.dim, dtype=nm.float64) * scale
elif len(scale) == mesh.dim:
tr = nm.diag(scale)
else:
raise ValueError('bad scale! (%s)' % scale)
mesh.transform_coors(tr)
io = 'auto'
if options.format:
try:
io = io_table[options.format](filename_out)
except KeyError:
output('unknown output mesh format! (%s)' % options.format)
output_writable_meshes()
sys.exit(1)
if 'w' not in supported_capabilities[options.format]:
output('write support not implemented for output mesh format! (%s)'
% options.format)
output_writable_meshes()
sys.exit(1)
output('writing %s...' % filename_out)
mesh.write(filename_out, io=io)
output('...done')
if __name__ == '__main__':
main()
|
[
"sfepy.base.base.output",
"sfepy.base.base.nm.array",
"sfepy.base.base.nm.diag",
"sfepy.fem.meshio.supported_capabilities.iteritems",
"sfepy.base.base.nm.eye",
"sfepy.fem.Mesh.from_file"
] |
[((33, 53), 'sys.path.append', 'sys.path.append', (['"""."""'], {}), "('.')\n", (48, 53), False, 'import sys\n'), ((773, 819), 'sfepy.base.base.output', 'output', (['"""Supported writable mesh formats are:"""'], {}), "('Supported writable mesh formats are:')\n", (779, 819), False, 'from sfepy.base.base import nm, output\n'), ((840, 874), 'sfepy.fem.meshio.supported_capabilities.iteritems', 'supported_capabilities.iteritems', ([], {}), '()\n', (872, 874), False, 'from sfepy.fem.meshio import io_table, supported_capabilities\n'), ((949, 974), 'optparse.OptionParser', 'OptionParser', ([], {'usage': 'usage'}), '(usage=usage)\n', (961, 974), False, 'from optparse import OptionParser\n'), ((2086, 2113), 'sfepy.fem.Mesh.from_file', 'Mesh.from_file', (['filename_in'], {}), '(filename_in)\n', (2100, 2113), False, 'from sfepy.fem import Mesh\n'), ((2927, 2965), 'sfepy.base.base.output', 'output', (["('writing %s...' % filename_out)"], {}), "('writing %s...' % filename_out)\n", (2933, 2965), False, 'from sfepy.base.base import nm, output\n'), ((3006, 3023), 'sfepy.base.base.output', 'output', (['"""...done"""'], {}), "('...done')\n", (3012, 3023), False, 'from sfepy.base.base import nm, output\n'), ((1541, 1552), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (1549, 1552), False, 'import sys\n'), ((1613, 1624), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1621, 1624), False, 'import sys\n'), ((911, 922), 'sfepy.base.base.output', 'output', (['key'], {}), '(key)\n', (917, 922), False, 'from sfepy.base.base import nm, output\n'), ((1863, 1905), 'sfepy.base.base.nm.array', 'nm.array', (['scale'], {'dtype': 'nm.float64', 'ndmin': '(1)'}), '(scale, dtype=nm.float64, ndmin=1)\n', (1871, 1905), False, 'from sfepy.base.base import nm, output\n'), ((2755, 2844), 'sfepy.base.base.output', 'output', (["('write support not implemented for output mesh format! (%s)' % options.format)"], {}), "('write support not implemented for output mesh format! (%s)' %\n options.format)\n", (2761, 2844), False, 'from sfepy.base.base import nm, output\n'), ((2910, 2921), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2918, 2921), False, 'import sys\n'), ((1934, 1967), 'sfepy.base.base.output', 'output', (["('bad scale! (%s)' % scale)"], {}), "('bad scale! (%s)' % scale)\n", (1940, 1967), False, 'from sfepy.base.base import nm, output\n'), ((2012, 2023), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2020, 2023), False, 'import sys\n'), ((2186, 2220), 'sfepy.base.base.nm.eye', 'nm.eye', (['mesh.dim'], {'dtype': 'nm.float64'}), '(mesh.dim, dtype=nm.float64)\n', (2192, 2220), False, 'from sfepy.base.base import nm, output\n'), ((2283, 2297), 'sfepy.base.base.nm.diag', 'nm.diag', (['scale'], {}), '(scale)\n', (2290, 2297), False, 'from sfepy.base.base import nm, output\n'), ((2547, 2606), 'sfepy.base.base.output', 'output', (["('unknown output mesh format! (%s)' % options.format)"], {}), "('unknown output mesh format! (%s)' % options.format)\n", (2553, 2606), False, 'from sfepy.base.base import nm, output\n'), ((2656, 2667), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2664, 2667), False, 'import sys\n')]
|
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
import copy
from typing import Optional, Sequence
import cv2
import megengine.data as data
import megengine.data.transform as T
import numpy as np
from basecore.config import ConfigDict
from loguru import logger
from basecls.utils import registers
from .augment import WARP_PARAMS, TorchAutoAugment, TorchRandAugment
from .const import CV2_INTERP, PIL_INTERP
from .mixup import MixupCutmixCollator
from .rand_erase import RandomErasing
__all__ = [
"build_transform",
"AutoAugment",
"SimpleAugment",
"ColorAugment",
"RandAugment",
"build_mixup",
]
def build_transform(
cfg: ConfigDict, train: bool = True, augments: T.Transform = None
) -> T.Transform:
"""Build function for MegEngine transform.
Args:
cfg: config for building transform.
train: train set or test set. Default: ``True``
augments: augments for building transform.
Returns:
A transform.
"""
if train:
assert augments is not None
bgr_mean = copy.deepcopy(cfg.preprocess.img_mean)
bgr_std = copy.deepcopy(cfg.preprocess.img_std)
if cfg.preprocess.img_color_space == "RGB":
bgr_mean = bgr_mean[::-1]
bgr_std = bgr_std[::-1]
WARP_PARAMS["fillcolor"] = tuple(round(v) for v in bgr_mean[::-1]) # need RGB
WARP_PARAMS["resample"] = PIL_INTERP[cfg.augments.resize.interpolation]
transforms = [
T.RandomResizedCrop(
cfg.preprocess.img_size,
cfg.augments.resize.scale_range,
cfg.augments.resize.ratio_range,
CV2_INTERP[cfg.augments.resize.interpolation],
),
T.RandomHorizontalFlip(),
augments,
RandomErasing(
**cfg.augments.rand_erase.to_dict(),
pad_mean=bgr_mean, # need BGR
pad_std=bgr_std, # need BGR
),
ToColorSpace(cfg.preprocess.img_color_space),
T.ToMode(),
]
else:
assert augments is None
transforms = [
T.Resize(
int(cfg.test.img_size / cfg.test.crop_pct / 2 + 0.5) * 2, # make it even
CV2_INTERP[cfg.augments.resize.interpolation],
),
T.CenterCrop(cfg.test.img_size),
ToColorSpace(cfg.preprocess.img_color_space),
T.ToMode(),
]
return T.Compose(transforms=transforms, order=["image", "image_category"])
class ToColorSpace(T.VisionTransform):
"""Transform to transfer color space.
Args:
color_space: color space, supports ``"BGR"``, ``"RGB"`` and ``"GRAY"``.
"""
def __init__(self, color_space: str, *, order: Sequence = None):
super().__init__(order)
if color_space not in ("BGR", "RGB", "GRAY"):
raise ValueError(f"Color space '{color_space}' not supported")
self.color_space = color_space
def _apply_image(self, image: np.ndarray) -> np.ndarray:
if self.color_space == "BGR":
return image
elif self.color_space == "RGB":
return cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
elif self.color_space == "GRAY":
return cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)[..., np.newaxis]
else:
raise ValueError(f"Color space '{self.color_space}' not supported")
@registers.augments.register()
class SimpleAugment:
"""Simple augmentation."""
@classmethod
def build(cls, cfg: ConfigDict) -> T.Transform:
return T.PseudoTransform()
@registers.augments.register()
class ColorAugment:
"""Color augmentation."""
@classmethod
def build(cls, cfg: ConfigDict) -> T.Transform:
aug_args = cfg.augments.color_aug.to_dict()
lighting_scale = aug_args.pop("lighting")
return T.Compose([T.ColorJitter(**aug_args), T.Lighting(lighting_scale)])
@registers.augments.register()
class AutoAugment:
"""AutoAugment."""
@classmethod
def build(cls, cfg: ConfigDict) -> T.Transform:
return T.TorchTransformCompose([TorchAutoAugment()])
@registers.augments.register()
class RandAugment:
"""Random augmentation."""
@classmethod
def build(cls, cfg: ConfigDict) -> T.Transform:
return T.TorchTransformCompose([TorchRandAugment(**cfg.augments.rand_aug.to_dict())])
def build_mixup(cfg: ConfigDict, train: bool = True) -> Optional[data.Collator]:
"""Build (optionally) Mixup/CutMix augment.
Args:
cfg: config for building Mixup/CutMix collator.
train: train set or test set. Default: ``True``
Returns:
:py:class:`~basecls.data.mixup.MixupCutmixCollator` or ``None``
"""
mixup_cfg = cfg.augments.mixup
if train and (
mixup_cfg.mixup_alpha > 0.0
or mixup_cfg.cutmix_alpha > 0.0
or mixup_cfg.cutmix_minmax is not None
):
mixup_collator = MixupCutmixCollator(**mixup_cfg.to_dict(), num_classes=cfg.num_classes)
logger.info(f"Using mixup with configuration:\n{mixup_cfg}")
else:
mixup_collator = None
return mixup_collator
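# --- Hedged usage sketch (not part of the original module) ---
# How these builders might be wired into a DataLoader; ``my_cfg`` and
# ``my_dataset`` are hypothetical stand-ins for a real ConfigDict and
# dataset, and the name lookup assumes the registry exposes a ``get``:
#
#   augments = registers.augments.get(my_cfg.augments.name).build(my_cfg)
#   transform = build_transform(my_cfg, train=True, augments=augments)
#   collator = build_mixup(my_cfg, train=True)  # may be None when mixup is off
#   dataloader = data.DataLoader(my_dataset, transform=transform, collator=collator)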
|
[
"megengine.data.transform.Lighting",
"megengine.data.transform.PseudoTransform",
"megengine.data.transform.RandomResizedCrop",
"megengine.data.transform.RandomHorizontalFlip",
"megengine.data.transform.ColorJitter",
"megengine.data.transform.Compose",
"megengine.data.transform.ToMode",
"megengine.data.transform.CenterCrop"
] |
[((3450, 3479), 'basecls.utils.registers.augments.register', 'registers.augments.register', ([], {}), '()\n', (3477, 3479), False, 'from basecls.utils import registers\n'), ((3640, 3669), 'basecls.utils.registers.augments.register', 'registers.augments.register', ([], {}), '()\n', (3667, 3669), False, 'from basecls.utils import registers\n'), ((3977, 4006), 'basecls.utils.registers.augments.register', 'registers.augments.register', ([], {}), '()\n', (4004, 4006), False, 'from basecls.utils import registers\n'), ((4183, 4212), 'basecls.utils.registers.augments.register', 'registers.augments.register', ([], {}), '()\n', (4210, 4212), False, 'from basecls.utils import registers\n'), ((2493, 2560), 'megengine.data.transform.Compose', 'T.Compose', ([], {'transforms': 'transforms', 'order': "['image', 'image_category']"}), "(transforms=transforms, order=['image', 'image_category'])\n", (2502, 2560), True, 'import megengine.data.transform as T\n'), ((1089, 1127), 'copy.deepcopy', 'copy.deepcopy', (['cfg.preprocess.img_mean'], {}), '(cfg.preprocess.img_mean)\n', (1102, 1127), False, 'import copy\n'), ((1146, 1183), 'copy.deepcopy', 'copy.deepcopy', (['cfg.preprocess.img_std'], {}), '(cfg.preprocess.img_std)\n', (1159, 1183), False, 'import copy\n'), ((3617, 3636), 'megengine.data.transform.PseudoTransform', 'T.PseudoTransform', ([], {}), '()\n', (3634, 3636), True, 'import megengine.data.transform as T\n'), ((5064, 5127), 'loguru.logger.info', 'logger.info', (['f"""Using mixup with configuration:\n{mixup_cfg}"""'], {}), '(f"""Using mixup with configuration:\n{mixup_cfg}""")\n', (5075, 5127), False, 'from loguru import logger\n'), ((1513, 1680), 'megengine.data.transform.RandomResizedCrop', 'T.RandomResizedCrop', (['cfg.preprocess.img_size', 'cfg.augments.resize.scale_range', 'cfg.augments.resize.ratio_range', 'CV2_INTERP[cfg.augments.resize.interpolation]'], {}), '(cfg.preprocess.img_size, cfg.augments.resize.\n scale_range, cfg.augments.resize.ratio_range, CV2_INTERP[cfg.augments.\n resize.interpolation])\n', (1532, 1680), True, 'import megengine.data.transform as T\n'), ((1763, 1787), 'megengine.data.transform.RandomHorizontalFlip', 'T.RandomHorizontalFlip', ([], {}), '()\n', (1785, 1787), True, 'import megengine.data.transform as T\n'), ((2068, 2078), 'megengine.data.transform.ToMode', 'T.ToMode', ([], {}), '()\n', (2076, 2078), True, 'import megengine.data.transform as T\n'), ((2357, 2388), 'megengine.data.transform.CenterCrop', 'T.CenterCrop', (['cfg.test.img_size'], {}), '(cfg.test.img_size)\n', (2369, 2388), True, 'import megengine.data.transform as T\n'), ((2460, 2470), 'megengine.data.transform.ToMode', 'T.ToMode', ([], {}), '()\n', (2468, 2470), True, 'import megengine.data.transform as T\n'), ((3197, 3235), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (3209, 3235), False, 'import cv2\n'), ((3918, 3943), 'megengine.data.transform.ColorJitter', 'T.ColorJitter', ([], {}), '(**aug_args)\n', (3931, 3943), True, 'import megengine.data.transform as T\n'), ((3945, 3971), 'megengine.data.transform.Lighting', 'T.Lighting', (['lighting_scale'], {}), '(lighting_scale)\n', (3955, 3971), True, 'import megengine.data.transform as T\n'), ((3296, 3335), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (3308, 3335), False, 'import cv2\n')]
|
import uuid
from datetime import datetime
from sqlmodel import Field
from api.db.models.base import BaseModel, BaseTable
class TenantSchemaBase(BaseModel):
tenant_id: uuid.UUID = Field(nullable=False)
wallet_id: uuid.UUID = Field(nullable=False)
    # workflow_id will be null until the tenant kicks it off
workflow_id: uuid.UUID = Field(nullable=True, default=None)
schema_id: str = Field(nullable=True, default=None)
schema_name: str = Field(nullable=True, default=None)
schema_version: str = Field(nullable=True, default=None)
schema_attrs: str = Field(nullable=True, default=None)
schema_txn_id: uuid.UUID = Field(nullable=True, default=None)
schema_state: str = Field(nullable=True, default=None)
cred_def_tag: str = Field(nullable=True, default=None)
cred_def_txn_id: uuid.UUID = Field(nullable=True, default=None)
cred_def_id: str = Field(nullable=True, default=None)
cred_def_state: str = Field(nullable=True, default=None)
cred_revocation: bool = Field(nullable=True, default=None)
cred_revoc_reg_size: int = Field(nullable=True, default=None)
revoc_reg_state: str = Field(nullable=True, default=None)
class TenantSchema(TenantSchemaBase, BaseTable, table=True):
# This is the class that represents the table
pass
class TenantSchemaCreate(TenantSchemaBase):
    # This is the class that represents the interface for creating a tenant
# we must set all the required fields,
# but do not need to set optional (and shouldn't)
pass
class TenantSchemaRead(TenantSchemaBase):
    # This is the class that represents the interface for reading a tenant
    # here we indicate that id, created_at and updated_at must be included
id: uuid.UUID
created_at: datetime
updated_at: datetime
class TenantSchemaUpdate(BaseModel):
# This is our update interface
# This does NOT inherit from TenantSchemaBase,
# so no need to worry about accidentally updating id or other fields
id: uuid.UUID
workflow_id: uuid.UUID = Field(nullable=True, default=None)
schema_id: str = Field(nullable=True, default=None)
schema_txn_id: uuid.UUID = Field(nullable=True, default=None)
schema_state: str = Field(nullable=True, default=None)
cred_def_txn_id: uuid.UUID = Field(nullable=True, default=None)
cred_def_id: str = Field(nullable=True, default=None)
cred_def_state: str = Field(nullable=True, default=None)
revoc_reg_state: str = Field(nullable=True, default=None)
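# --- Hedged usage sketch (not part of the original module) ---
# The Base/Create/Read/Update split above is the usual SQLModel pattern:
# Create carries only client-writable fields, Read adds the server-managed
# columns, and Update lists exactly what a caller may change. A hypothetical
# partial update against a loaded ``db_obj`` row:
#
#   payload = TenantSchemaUpdate(id=db_obj.id, schema_state="completed")
#   for field, value in payload.dict(exclude_unset=True).items():
#       setattr(db_obj, field, value)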
|
[
"sqlmodel.Field"
] |
[((187, 208), 'sqlmodel.Field', 'Field', ([], {'nullable': '(False)'}), '(nullable=False)\n', (192, 208), False, 'from sqlmodel import Field\n'), ((236, 257), 'sqlmodel.Field', 'Field', ([], {'nullable': '(False)'}), '(nullable=False)\n', (241, 257), False, 'from sqlmodel import Field\n'), ((347, 381), 'sqlmodel.Field', 'Field', ([], {'nullable': '(True)', 'default': 'None'}), '(nullable=True, default=None)\n', (352, 381), False, 'from sqlmodel import Field\n'), ((403, 437), 'sqlmodel.Field', 'Field', ([], {'nullable': '(True)', 'default': 'None'}), '(nullable=True, default=None)\n', (408, 437), False, 'from sqlmodel import Field\n'), ((461, 495), 'sqlmodel.Field', 'Field', ([], {'nullable': '(True)', 'default': 'None'}), '(nullable=True, default=None)\n', (466, 495), False, 'from sqlmodel import Field\n'), ((522, 556), 'sqlmodel.Field', 'Field', ([], {'nullable': '(True)', 'default': 'None'}), '(nullable=True, default=None)\n', (527, 556), False, 'from sqlmodel import Field\n'), ((581, 615), 'sqlmodel.Field', 'Field', ([], {'nullable': '(True)', 'default': 'None'}), '(nullable=True, default=None)\n', (586, 615), False, 'from sqlmodel import Field\n'), ((647, 681), 'sqlmodel.Field', 'Field', ([], {'nullable': '(True)', 'default': 'None'}), '(nullable=True, default=None)\n', (652, 681), False, 'from sqlmodel import Field\n'), ((706, 740), 'sqlmodel.Field', 'Field', ([], {'nullable': '(True)', 'default': 'None'}), '(nullable=True, default=None)\n', (711, 740), False, 'from sqlmodel import Field\n'), ((765, 799), 'sqlmodel.Field', 'Field', ([], {'nullable': '(True)', 'default': 'None'}), '(nullable=True, default=None)\n', (770, 799), False, 'from sqlmodel import Field\n'), ((833, 867), 'sqlmodel.Field', 'Field', ([], {'nullable': '(True)', 'default': 'None'}), '(nullable=True, default=None)\n', (838, 867), False, 'from sqlmodel import Field\n'), ((891, 925), 'sqlmodel.Field', 'Field', ([], {'nullable': '(True)', 'default': 'None'}), '(nullable=True, default=None)\n', (896, 925), False, 'from sqlmodel import Field\n'), ((952, 986), 'sqlmodel.Field', 'Field', ([], {'nullable': '(True)', 'default': 'None'}), '(nullable=True, default=None)\n', (957, 986), False, 'from sqlmodel import Field\n'), ((1015, 1049), 'sqlmodel.Field', 'Field', ([], {'nullable': '(True)', 'default': 'None'}), '(nullable=True, default=None)\n', (1020, 1049), False, 'from sqlmodel import Field\n'), ((1081, 1115), 'sqlmodel.Field', 'Field', ([], {'nullable': '(True)', 'default': 'None'}), '(nullable=True, default=None)\n', (1086, 1115), False, 'from sqlmodel import Field\n'), ((1143, 1177), 'sqlmodel.Field', 'Field', ([], {'nullable': '(True)', 'default': 'None'}), '(nullable=True, default=None)\n', (1148, 1177), False, 'from sqlmodel import Field\n'), ((2022, 2056), 'sqlmodel.Field', 'Field', ([], {'nullable': '(True)', 'default': 'None'}), '(nullable=True, default=None)\n', (2027, 2056), False, 'from sqlmodel import Field\n'), ((2078, 2112), 'sqlmodel.Field', 'Field', ([], {'nullable': '(True)', 'default': 'None'}), '(nullable=True, default=None)\n', (2083, 2112), False, 'from sqlmodel import Field\n'), ((2144, 2178), 'sqlmodel.Field', 'Field', ([], {'nullable': '(True)', 'default': 'None'}), '(nullable=True, default=None)\n', (2149, 2178), False, 'from sqlmodel import Field\n'), ((2203, 2237), 'sqlmodel.Field', 'Field', ([], {'nullable': '(True)', 'default': 'None'}), '(nullable=True, default=None)\n', (2208, 2237), False, 'from sqlmodel import Field\n'), ((2271, 2305), 'sqlmodel.Field', 'Field', ([], {'nullable': '(True)', 'default': 'None'}), '(nullable=True, default=None)\n', (2276, 2305), False, 'from sqlmodel import Field\n'), ((2329, 2363), 'sqlmodel.Field', 'Field', ([], {'nullable': '(True)', 'default': 'None'}), '(nullable=True, default=None)\n', (2334, 2363), False, 'from sqlmodel import Field\n'), ((2390, 2424), 'sqlmodel.Field', 'Field', ([], {'nullable': '(True)', 'default': 'None'}), '(nullable=True, default=None)\n', (2395, 2424), False, 'from sqlmodel import Field\n'), ((2452, 2486), 'sqlmodel.Field', 'Field', ([], {'nullable': '(True)', 'default': 'None'}), '(nullable=True, default=None)\n', (2457, 2486), False, 'from sqlmodel import Field\n')]
|
"""Add playlist and item models
Revision ID: 979da9b7aff0
Revises: <PASSWORD>
Create Date: 2021-10-31 13:09:14.064217
"""
from alembic import op
import sqlalchemy as sa
import sqlmodel
# revision identifiers, used by Alembic.
revision = '979da9b7aff0'
down_revision = 'a<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('item',
sa.Column('title', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('artist', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('id', sa.Integer(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_item_artist'), 'item', ['artist'], unique=False)
op.create_index(op.f('ix_item_id'), 'item', ['id'], unique=False)
op.create_index(op.f('ix_item_title'), 'item', ['title'], unique=False)
op.create_table('playlist',
sa.Column('entity_id', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('name', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('spotify', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('amazon', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('apple_music', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('image', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('release_date', sa.DateTime(), nullable=True),
sa.Column('id', sa.Integer(), nullable=True),
sa.Column('category_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['category_id'], ['category.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_playlist_amazon'), 'playlist', ['amazon'], unique=False)
op.create_index(op.f('ix_playlist_apple_music'), 'playlist', ['apple_music'], unique=False)
op.create_index(op.f('ix_playlist_category_id'), 'playlist', ['category_id'], unique=False)
op.create_index(op.f('ix_playlist_entity_id'), 'playlist', ['entity_id'], unique=False)
op.create_index(op.f('ix_playlist_id'), 'playlist', ['id'], unique=False)
op.create_index(op.f('ix_playlist_image'), 'playlist', ['image'], unique=False)
op.create_index(op.f('ix_playlist_name'), 'playlist', ['name'], unique=False)
op.create_index(op.f('ix_playlist_release_date'), 'playlist', ['release_date'], unique=False)
op.create_index(op.f('ix_playlist_spotify'), 'playlist', ['spotify'], unique=False)
op.create_table('playlistitemlink',
sa.Column('playlist_id', sa.Integer(), nullable=True),
sa.Column('item_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['item_id'], ['item.id'], ),
sa.ForeignKeyConstraint(['playlist_id'], ['playlist.id'], ),
sa.PrimaryKeyConstraint('playlist_id', 'item_id')
)
op.create_index(op.f('ix_playlistitemlink_item_id'), 'playlistitemlink', ['item_id'], unique=False)
op.create_index(op.f('ix_playlistitemlink_playlist_id'), 'playlistitemlink', ['playlist_id'], unique=False)
op.drop_index('ix_category_playlists', table_name='category')
op.drop_column('category', 'playlists')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('category', sa.Column('playlists', sa.VARCHAR(), nullable=False))
op.create_index('ix_category_playlists', 'category', ['playlists'], unique=False)
op.drop_index(op.f('ix_playlistitemlink_playlist_id'), table_name='playlistitemlink')
op.drop_index(op.f('ix_playlistitemlink_item_id'), table_name='playlistitemlink')
op.drop_table('playlistitemlink')
op.drop_index(op.f('ix_playlist_spotify'), table_name='playlist')
op.drop_index(op.f('ix_playlist_release_date'), table_name='playlist')
op.drop_index(op.f('ix_playlist_name'), table_name='playlist')
op.drop_index(op.f('ix_playlist_image'), table_name='playlist')
op.drop_index(op.f('ix_playlist_id'), table_name='playlist')
op.drop_index(op.f('ix_playlist_entity_id'), table_name='playlist')
op.drop_index(op.f('ix_playlist_category_id'), table_name='playlist')
op.drop_index(op.f('ix_playlist_apple_music'), table_name='playlist')
op.drop_index(op.f('ix_playlist_amazon'), table_name='playlist')
op.drop_table('playlist')
op.drop_index(op.f('ix_item_title'), table_name='item')
op.drop_index(op.f('ix_item_id'), table_name='item')
op.drop_index(op.f('ix_item_artist'), table_name='item')
op.drop_table('item')
# ### end Alembic commands ###
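# --- Hedged usage note (not part of the original revision) ---
# A revision like this is applied and reverted through the alembic CLI,
# e.g. ``alembic upgrade head`` and ``alembic downgrade -1``; the
# ``<PASSWORD>`` placeholder in down_revision stands in for the real id of
# the previous revision, which alembic needs to resolve the chain.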
|
[
"sqlmodel.sql.sqltypes.AutoString"
] |
[((3030, 3091), 'alembic.op.drop_index', 'op.drop_index', (['"""ix_category_playlists"""'], {'table_name': '"""category"""'}), "('ix_category_playlists', table_name='category')\n", (3043, 3091), False, 'from alembic import op\n'), ((3096, 3135), 'alembic.op.drop_column', 'op.drop_column', (['"""category"""', '"""playlists"""'], {}), "('category', 'playlists')\n", (3110, 3135), False, 'from alembic import op\n'), ((3344, 3430), 'alembic.op.create_index', 'op.create_index', (['"""ix_category_playlists"""', '"""category"""', "['playlists']"], {'unique': '(False)'}), "('ix_category_playlists', 'category', ['playlists'], unique=\n False)\n", (3359, 3430), False, 'from alembic import op\n'), ((3606, 3639), 'alembic.op.drop_table', 'op.drop_table', (['"""playlistitemlink"""'], {}), "('playlistitemlink')\n", (3619, 3639), False, 'from alembic import op\n'), ((4278, 4303), 'alembic.op.drop_table', 'op.drop_table', (['"""playlist"""'], {}), "('playlist')\n", (4291, 4303), False, 'from alembic import op\n'), ((4486, 4507), 'alembic.op.drop_table', 'op.drop_table', (['"""item"""'], {}), "('item')\n", (4499, 4507), False, 'from alembic import op\n'), ((643, 672), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""'], {}), "('id')\n", (666, 672), True, 'import sqlalchemy as sa\n'), ((699, 721), 'alembic.op.f', 'op.f', (['"""ix_item_artist"""'], {}), "('ix_item_artist')\n", (703, 721), False, 'from alembic import op\n'), ((777, 795), 'alembic.op.f', 'op.f', (['"""ix_item_id"""'], {}), "('ix_item_id')\n", (781, 795), False, 'from alembic import op\n'), ((847, 868), 'alembic.op.f', 'op.f', (['"""ix_item_title"""'], {}), "('ix_item_title')\n", (851, 868), False, 'from alembic import op\n'), ((1573, 1630), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['category_id']", "['category.id']"], {}), "(['category_id'], ['category.id'])\n", (1596, 1630), True, 'import sqlalchemy as sa\n'), ((1638, 1667), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""'], {}), "('id')\n", (1661, 1667), True, 'import sqlalchemy as sa\n'), ((1694, 1720), 'alembic.op.f', 'op.f', (['"""ix_playlist_amazon"""'], {}), "('ix_playlist_amazon')\n", (1698, 1720), False, 'from alembic import op\n'), ((1780, 1811), 'alembic.op.f', 'op.f', (['"""ix_playlist_apple_music"""'], {}), "('ix_playlist_apple_music')\n", (1784, 1811), False, 'from alembic import op\n'), ((1876, 1907), 'alembic.op.f', 'op.f', (['"""ix_playlist_category_id"""'], {}), "('ix_playlist_category_id')\n", (1880, 1907), False, 'from alembic import op\n'), ((1972, 2001), 'alembic.op.f', 'op.f', (['"""ix_playlist_entity_id"""'], {}), "('ix_playlist_entity_id')\n", (1976, 2001), False, 'from alembic import op\n'), ((2064, 2086), 'alembic.op.f', 'op.f', (['"""ix_playlist_id"""'], {}), "('ix_playlist_id')\n", (2068, 2086), False, 'from alembic import op\n'), ((2142, 2167), 'alembic.op.f', 'op.f', (['"""ix_playlist_image"""'], {}), "('ix_playlist_image')\n", (2146, 2167), False, 'from alembic import op\n'), ((2226, 2250), 'alembic.op.f', 'op.f', (['"""ix_playlist_name"""'], {}), "('ix_playlist_name')\n", (2230, 2250), False, 'from alembic import op\n'), ((2308, 2340), 'alembic.op.f', 'op.f', (['"""ix_playlist_release_date"""'], {}), "('ix_playlist_release_date')\n", (2312, 2340), False, 'from alembic import op\n'), ((2406, 2433), 'alembic.op.f', 'op.f', (['"""ix_playlist_spotify"""'], {}), "('ix_playlist_spotify')\n", (2410, 2433), False, 'from alembic import op\n'), ((2632, 2681), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['item_id']", "['item.id']"], {}), "(['item_id'], ['item.id'])\n", (2655, 2681), True, 'import sqlalchemy as sa\n'), ((2689, 2746), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['playlist_id']", "['playlist.id']"], {}), "(['playlist_id'], ['playlist.id'])\n", (2712, 2746), True, 'import sqlalchemy as sa\n'), ((2754, 2803), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""playlist_id"""', '"""item_id"""'], {}), "('playlist_id', 'item_id')\n", (2777, 2803), True, 'import sqlalchemy as sa\n'), ((2830, 2865), 'alembic.op.f', 'op.f', (['"""ix_playlistitemlink_item_id"""'], {}), "('ix_playlistitemlink_item_id')\n", (2834, 2865), False, 'from alembic import op\n'), ((2934, 2973), 'alembic.op.f', 'op.f', (['"""ix_playlistitemlink_playlist_id"""'], {}), "('ix_playlistitemlink_playlist_id')\n", (2938, 2973), False, 'from alembic import op\n'), ((3444, 3483), 'alembic.op.f', 'op.f', (['"""ix_playlistitemlink_playlist_id"""'], {}), "('ix_playlistitemlink_playlist_id')\n", (3448, 3483), False, 'from alembic import op\n'), ((3534, 3569), 'alembic.op.f', 'op.f', (['"""ix_playlistitemlink_item_id"""'], {}), "('ix_playlistitemlink_item_id')\n", (3538, 3569), False, 'from alembic import op\n'), ((3658, 3685), 'alembic.op.f', 'op.f', (['"""ix_playlist_spotify"""'], {}), "('ix_playlist_spotify')\n", (3662, 3685), False, 'from alembic import op\n'), ((3728, 3760), 'alembic.op.f', 'op.f', (['"""ix_playlist_release_date"""'], {}), "('ix_playlist_release_date')\n", (3732, 3760), False, 'from alembic import op\n'), ((3803, 3827), 'alembic.op.f', 'op.f', (['"""ix_playlist_name"""'], {}), "('ix_playlist_name')\n", (3807, 3827), False, 'from alembic import op\n'), ((3870, 3895), 'alembic.op.f', 'op.f', (['"""ix_playlist_image"""'], {}), "('ix_playlist_image')\n", (3874, 3895), False, 'from alembic import op\n'), ((3938, 3960), 'alembic.op.f', 'op.f', (['"""ix_playlist_id"""'], {}), "('ix_playlist_id')\n", (3942, 3960), False, 'from alembic import op\n'), ((4003, 4032), 'alembic.op.f', 'op.f', (['"""ix_playlist_entity_id"""'], {}), "('ix_playlist_entity_id')\n", (4007, 4032), False, 'from alembic import op\n'), ((4075, 4106), 'alembic.op.f', 'op.f', (['"""ix_playlist_category_id"""'], {}), "('ix_playlist_category_id')\n", (4079, 4106), False, 'from alembic import op\n'), ((4149, 4180), 'alembic.op.f', 'op.f', (['"""ix_playlist_apple_music"""'], {}), "('ix_playlist_apple_music')\n", (4153, 4180), False, 'from alembic import op\n'), ((4223, 4249), 'alembic.op.f', 'op.f', (['"""ix_playlist_amazon"""'], {}), "('ix_playlist_amazon')\n", (4227, 4249), False, 'from alembic import op\n'), ((4322, 4343), 'alembic.op.f', 'op.f', (['"""ix_item_title"""'], {}), "('ix_item_title')\n", (4326, 4343), False, 'from alembic import op\n'), ((4382, 4400), 'alembic.op.f', 'op.f', (['"""ix_item_id"""'], {}), "('ix_item_id')\n", (4386, 4400), False, 'from alembic import op\n'), ((4439, 4461), 'alembic.op.f', 'op.f', (['"""ix_item_artist"""'], {}), "('ix_item_artist')\n", (4443, 4461), False, 'from alembic import op\n'), ((459, 493), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (491, 493), False, 'import sqlmodel\n'), ((536, 570), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (568, 570), False, 'import sqlmodel\n'), ((609, 621), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (619, 621), True, 'import sqlalchemy as sa\n'), ((962, 996), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (994, 996), False, 'import sqlmodel\n'), ((1037, 1071), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (1069, 1071), False, 'import sqlmodel\n'), ((1115, 1149), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (1147, 1149), False, 'import sqlmodel\n'), ((1191, 1225), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (1223, 1225), False, 'import sqlmodel\n'), ((1272, 1306), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (1304, 1306), False, 'import sqlmodel\n'), ((1347, 1381), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (1379, 1381), False, 'import sqlmodel\n'), ((1429, 1442), 'sqlalchemy.DateTime', 'sa.DateTime', ([], {}), '()\n', (1440, 1442), True, 'import sqlalchemy as sa\n'), ((1480, 1492), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (1490, 1492), True, 'import sqlalchemy as sa\n'), ((1539, 1551), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (1549, 1551), True, 'import sqlalchemy as sa\n'), ((2543, 2555), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (2553, 2555), True, 'import sqlalchemy as sa\n'), ((2598, 2610), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (2608, 2610), True, 'import sqlalchemy as sa\n'), ((3309, 3321), 'sqlalchemy.VARCHAR', 'sa.VARCHAR', ([], {}), '()\n', (3319, 3321), True, 'import sqlalchemy as sa\n')]
|
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import megengine as mge
import megengine.autodiff as ad
import megengine.functional as F
import megengine.module as M
import megengine.optimizer as optim
class Net(M.Module):
def __init__(self):
super().__init__()
self.conv1 = M.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = M.BatchNorm2d(64)
self.avgpool = M.AvgPool2d(kernel_size=5, stride=5, padding=0)
self.fc = M.Linear(64, 10)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = F.relu(x)
x = self.avgpool(x)
x = F.avg_pool2d(x, 22)
x = F.flatten(x, 1)
x = self.fc(x)
return x
def save_grad_value(net):
for param in net.parameters():
param.grad_backup = param.grad.numpy().copy()
def test_clip_grad_norm():
net = Net()
x = mge.tensor(np.random.randn(10, 3, 224, 224))
gm = ad.GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
with gm:
loss = net(x).sum()
gm.backward(loss)
save_grad_value(net)
max_norm = 1.0
original_norm = optim.clip_grad_norm(net.parameters(), max_norm=max_norm, ord=2)
scale = max_norm / original_norm
for param in net.parameters():
np.testing.assert_almost_equal(param.grad.numpy(), param.grad_backup * scale)
opt.step().clear_grad()
def test_clip_grad_value():
net = Net()
x = np.random.randn(10, 3, 224, 224).astype("float32")
gm = ad.GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
with gm:
y = net(mge.tensor(x))
y = y.mean()
gm.backward(y)
save_grad_value(net)
max_val = 5
min_val = -2
optim.clip_grad_value(net.parameters(), lower=min_val, upper=max_val)
for param in net.parameters():
np.testing.assert_almost_equal(
param.grad.numpy(),
np.maximum(np.minimum(param.grad_backup, max_val), min_val),
)
opt.step().clear_grad()
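# --- Hedged numeric sketch (not part of the original tests) ---
# What the two tests assert, restated with plain numpy; this assumes the
# total gradient norm exceeds max_norm so clipping actually rescales:
#
#   g = np.array([3.0, 4.0])                       # L2-norm is 5.0
#   clipped = g * (1.0 / np.linalg.norm(g))        # clip_grad_norm effect
#   assert np.allclose(np.linalg.norm(clipped), 1.0)
#   clipped_v = np.minimum(np.maximum(g, -2), 5)  # clip_grad_value effect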
|
[
"megengine.module.Linear",
"megengine.tensor",
"megengine.module.AvgPool2d",
"megengine.module.BatchNorm2d",
"megengine.functional.avg_pool2d",
"megengine.functional.relu",
"megengine.autodiff.GradManager",
"megengine.module.Conv2d",
"megengine.functional.flatten"
] |
[((623, 686), 'megengine.module.Conv2d', 'M.Conv2d', (['(3)', '(64)'], {'kernel_size': '(7)', 'stride': '(2)', 'padding': '(3)', 'bias': '(False)'}), '(3, 64, kernel_size=7, stride=2, padding=3, bias=False)\n', (631, 686), True, 'import megengine.module as M\n'), ((706, 723), 'megengine.module.BatchNorm2d', 'M.BatchNorm2d', (['(64)'], {}), '(64)\n', (719, 723), True, 'import megengine.module as M\n'), ((747, 794), 'megengine.module.AvgPool2d', 'M.AvgPool2d', ([], {'kernel_size': '(5)', 'stride': '(5)', 'padding': '(0)'}), '(kernel_size=5, stride=5, padding=0)\n', (758, 794), True, 'import megengine.module as M\n'), ((813, 829), 'megengine.module.Linear', 'M.Linear', (['(64)', '(10)'], {}), '(64, 10)\n', (821, 829), True, 'import megengine.module as M\n'), ((919, 928), 'megengine.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (925, 928), True, 'import megengine.functional as F\n'), ((969, 988), 'megengine.functional.avg_pool2d', 'F.avg_pool2d', (['x', '(22)'], {}), '(x, 22)\n', (981, 988), True, 'import megengine.functional as F\n'), ((1001, 1016), 'megengine.functional.flatten', 'F.flatten', (['x', '(1)'], {}), '(x, 1)\n', (1010, 1016), True, 'import megengine.functional as F\n'), ((1238, 1270), 'numpy.random.randn', 'np.random.randn', (['(10)', '(3)', '(224)', '(224)'], {}), '(10, 3, 224, 224)\n', (1253, 1270), True, 'import numpy as np\n'), ((1281, 1297), 'megengine.autodiff.GradManager', 'ad.GradManager', ([], {}), '()\n', (1295, 1297), True, 'import megengine.autodiff as ad\n'), ((1817, 1849), 'numpy.random.randn', 'np.random.randn', (['(10)', '(3)', '(224)', '(224)'], {}), '(10, 3, 224, 224)\n', (1832, 1849), True, 'import numpy as np\n'), ((1877, 1893), 'megengine.autodiff.GradManager', 'ad.GradManager', ([], {}), '()\n', (1891, 1893), True, 'import megengine.autodiff as ad\n'), ((2006, 2019), 'megengine.tensor', 'mge.tensor', (['x'], {}), '(x)\n', (2016, 2019), True, 'import megengine as mge\n'), ((2327, 2365), 'numpy.minimum', 'np.minimum', (['param.grad_backup', 'max_val'], {}), '(param.grad_backup, max_val)\n', (2337, 2365), True, 'import numpy as np\n')]
|
from datetime import datetime
from typing import Optional
from sqlmodel import Field, SQLModel, Relationship
from sqlalchemy import Column
from sqlalchemy.dialects.postgresql import JSON
class ZeroShotInferenceBase(SQLModel):
text: str = Field(nullable=False, index=True)
candidate_labels: list[str] = Field(
nullable=False, index=True, sa_column=Column(JSON)
)
class ZeroShotInference(ZeroShotInferenceBase, table=True):
id: Optional[int] = Field(default=None, nullable=False, primary_key=True)
result: dict[str, float] = Field(nullable=False, sa_column=Column(JSON))
created_at: Optional[datetime]
updated_at: Optional[datetime]
created_by_id: Optional[int] = Field(default=None, foreign_key="user.id")
created_by: "User" = Relationship(
sa_relationship_kwargs={
"lazy": "selectin",
"primaryjoin": "ZeroShotInference.created_by_id == User.id",
}
)
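# --- Hedged usage sketch (not part of the original module) ---
# Creating a row; ``session`` is a hypothetical sqlmodel Session, and the
# JSON-typed columns accept plain Python lists/dicts:
#
#   inference = ZeroShotInference(
#       text="I love this film",
#       candidate_labels=["positive", "negative"],
#       result={"positive": 0.97, "negative": 0.03},
#   )
#   session.add(inference)
#   session.commit()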
|
[
"sqlmodel.Relationship",
"sqlmodel.Field"
] |
[((244, 277), 'sqlmodel.Field', 'Field', ([], {'nullable': '(False)', 'index': '(True)'}), '(nullable=False, index=True)\n', (249, 277), False, 'from sqlmodel import Field, SQLModel, Relationship\n'), ((470, 523), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'nullable': '(False)', 'primary_key': '(True)'}), '(default=None, nullable=False, primary_key=True)\n', (475, 523), False, 'from sqlmodel import Field, SQLModel, Relationship\n'), ((706, 748), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'foreign_key': '"""user.id"""'}), "(default=None, foreign_key='user.id')\n", (711, 748), False, 'from sqlmodel import Field, SQLModel, Relationship\n'), ((774, 896), 'sqlmodel.Relationship', 'Relationship', ([], {'sa_relationship_kwargs': "{'lazy': 'selectin', 'primaryjoin':\n 'ZeroShotInference.created_by_id == User.id'}"}), "(sa_relationship_kwargs={'lazy': 'selectin', 'primaryjoin':\n 'ZeroShotInference.created_by_id == User.id'})\n", (786, 896), False, 'from sqlmodel import Field, SQLModel, Relationship\n'), ((365, 377), 'sqlalchemy.Column', 'Column', (['JSON'], {}), '(JSON)\n', (371, 377), False, 'from sqlalchemy import Column\n'), ((587, 599), 'sqlalchemy.Column', 'Column', (['JSON'], {}), '(JSON)\n', (593, 599), False, 'from sqlalchemy import Column\n')]
|
# -*- coding: utf-8 -*-
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import bisect
import copy
import os
import time
import megengine as mge
import megengine.distributed as dist
from megengine.autodiff import GradManager
from megengine.data import DataLoader, Infinite, RandomSampler
from megengine.data import transform as T
from megengine.optimizer import AdamW, SGD
import math
from megengine.core._imperative_rt.utils import _set_defrag
_set_defrag(True)
from layers.tools.data_mapper import data_mapper
from layers.tools.utils import (
AverageMeter,
DetectionPadCollator,
GroupedRandomSampler,
get_config_info,
import_from_file
)
logger = mge.get_logger(__name__)
logger.setLevel("INFO")
mge.device.set_prealloc_config(1024, 1024, 256 * 1024 * 1024, 4.0)
def make_parser():
parser = argparse.ArgumentParser()
parser.add_argument(
"-f", "--file", default="net.py", type=str, help="net description file"
)
parser.add_argument(
"-df", "--distill_file", default="distill_configs/ICD.py", type=str, help="distill description file"
)
parser.add_argument(
"-tf", "--teacher_file", default="net.py", type=str, help="net description file"
)
parser.add_argument(
"-w", "--weight_file", default=None, type=str, help="weights file",
)
parser.add_argument(
"-tw", "--teacher_weight_file", default=None, type=str, help="weights file",
)
parser.add_argument(
"-n", "--devices", default=1, type=int, help="total number of gpus for training",
)
parser.add_argument(
"-b", "--batch_size", default=2, type=int, help="batch size for training",
)
parser.add_argument(
"-d", "--dataset_dir", default="/data/datasets", type=str,
)
parser.add_argument(
"-l", "--load_head", action='store_true'
)
parser.add_argument(
"-sp", "--save_path", type=str,
)
return parser
def main():
parser = make_parser()
args = parser.parse_args()
# ------------------------ begin training -------------------------- #
logger.info("Device Count = %d", args.devices)
from datetime import datetime
now = datetime.now()
log_dir = "log-of-ICD-{}-{}".format(os.path.basename(args.file).split(".")[0], now.strftime('%H:%M') + ('-Inherent' if args.load_head else ''))
if not os.path.isdir(log_dir):
os.makedirs(log_dir)
args.save_path = log_dir
if args.devices > 1:
trainer = dist.launcher(worker, n_gpus=args.devices)
trainer(args)
else:
worker(args)
def worker(args):
    mge.dtr.eviction_threshold = "6GB" # set the eviction threshold to 6 GB
mge.dtr.enable() # enable the DTR optimization
current_network = import_from_file(args.file)
model = current_network.Net(current_network.Cfg())
model.train()
########### BUILD TEACHER MODEL ############
current_teacher = import_from_file(args.teacher_file)
cfg_tea = current_teacher.Cfg()
cfg_tea.backbone_pretrained = False
model_tea = current_teacher.Net(cfg_tea)
model_tea.train()
    # model_tea.training = True # run in train mode
state_dict_tea = mge.load(args.teacher_weight_file)
if "state_dict" in state_dict_tea:
state_dict_tea = state_dict_tea["state_dict"]
model_tea.load_state_dict(state_dict_tea)
############### LOADED ####################
################## DISTILLER ##############
distiller_cfg = import_from_file(args.distill_file)
distiller = distiller_cfg.Net()
############### END ####################
if dist.get_rank() == 0:
logger.info(get_config_info(model.cfg))
logger.info(repr(model))
params_with_grad = []
for name, param in model.named_parameters():
if "bottom_up.conv1" in name and model.cfg.backbone_freeze_at >= 1:
continue
if "bottom_up.layer1" in name and model.cfg.backbone_freeze_at >= 2:
continue
params_with_grad.append(param)
opt = SGD(
params_with_grad,
lr=model.cfg.basic_lr * args.batch_size * dist.get_world_size(),
momentum=model.cfg.momentum,
weight_decay=model.cfg.weight_decay,
)
opt_d = AdamW(
distiller.parameters(),
lr=1e-4,
weight_decay=1e-4,
)
params_with_grad.extend(distiller.parameters())
gm = GradManager()
if dist.get_world_size() > 1:
gm.attach(
params_with_grad,
callbacks=[dist.make_allreduce_cb("mean", dist.WORLD)]
)
else:
gm.attach(params_with_grad)
if args.weight_file is not None:
weights = mge.load(args.weight_file)
model.backbone.bottom_up.load_state_dict(weights, strict=False)
if args.load_head:
        print('Loading parameters besides the backbone.')
res = model.load_state_dict({k:v for k, v in model_tea.state_dict().items() if 'bottom_up' not in k}, strict=False)
if dist.get_world_size() > 1:
dist.bcast_list_(model.parameters()) # sync parameters
dist.bcast_list_(distiller.parameters()) # sync parameters
dist.bcast_list_(model.buffers()) # sync buffers
dist.bcast_list_(distiller.buffers()) # sync parameters
if dist.get_rank() == 0:
logger.info("Prepare dataset")
train_loader = iter(build_dataloader(args.batch_size, args.dataset_dir, model.cfg))
############## REGISTER ###############
# To get intermediate representations
holder = [None, None]
def register_tea_hooker(m, i, o):
holder[0] = o
return
model_tea.backbone.register_forward_hook(register_tea_hooker)
def register_stu_hooker(m, i, o):
holder[1] = o
return
model.backbone.register_forward_hook(register_stu_hooker)
for epoch in range(model.cfg.max_epoch):
train_one_epoch(model, model_tea, distiller, holder, train_loader, opt, opt_d, gm, epoch, args)
if dist.get_rank() == 0:
save_path = "{}/epoch_{}.pkl".format(
args.save_path, epoch
)
mge.save(
{"epoch": epoch, "state_dict": model.state_dict()}, save_path,
)
logger.info("dump weights to %s", save_path)
def train_one_epoch(model, model_tea, distiller, feat_holder, data_queue, opt, opt_d, gm, epoch, args):
def train_func(image, im_info, gt_boxes, **args):
model_tea(image=image, im_info=im_info, gt_boxes=gt_boxes)
with gm:
loss_dict = model(image=image, im_info=im_info, gt_boxes=gt_boxes)
loss_distill = distiller(feat_holder[0], feat_holder[1], image, gt_boxes, im_info, distill_flag=0 if args['cur_step'] < 1000 else 1)
loss_dict.update(loss_distill)
loss_dict["total_loss"] = loss_dict["total_loss"] + sum(loss_distill.values())
gm.backward(loss_dict["total_loss"])
loss_list = list(loss_dict.values())
mge.optimizer.clip_grad_norm(
distiller.parameters(), 0.01)
opt.step().clear_grad()
opt_d.step().clear_grad()
return loss_list
meter = AverageMeter(record_len=model.cfg.num_losses + distiller.num_losses)
time_meter = AverageMeter(record_len=2)
log_interval = model.cfg.log_interval
tot_step = model.cfg.nr_images_epoch // (args.batch_size * dist.get_world_size())
full_step = tot_step * model.cfg.max_epoch
for step in range(tot_step):
cur_step = tot_step * epoch + step
adjust_learning_rate(opt, epoch, step, model.cfg, args)
adjust_learning_rate_cos(opt_d, cur_step, full_step)
data_tik = time.time()
mini_batch = next(data_queue)
data_tok = time.time()
tik = time.time()
loss_list = train_func(
image=mge.tensor(mini_batch["data"]),
im_info=mge.tensor(mini_batch["im_info"]),
gt_boxes=mge.tensor(mini_batch["gt_boxes"]),
cur_step=cur_step,
full_step=full_step
)
tok = time.time()
time_meter.update([tok - tik, data_tok - data_tik])
if dist.get_rank() == 0:
info_str = "e%d, %d/%d, lr:%f, "
loss_str = ", ".join(
["{}:%f".format(loss) for loss in model.cfg.losses_keys] + ["{}:%f".format(loss) for loss in distiller.loss_keys]
)
time_str = ", train_time:%.3fs, data_time:%.3fs"
log_info_str = info_str + loss_str + time_str
meter.update([loss.numpy() for loss in loss_list])
if step % log_interval == 0:
logger.info(
log_info_str,
epoch,
step,
tot_step,
opt.param_groups[0]["lr"],
*meter.average(),
*time_meter.average()
)
meter.reset()
time_meter.reset()
def adjust_learning_rate_cos(optimizer, cur_iter, total_iter):
base_lr = 1e-4
    # Cosine annealing: decay the lr from base_lr down to 0 over the schedule
lr = 0.5 * base_lr * (1 + math.cos(cur_iter / total_iter * math.pi))
for param_group in optimizer.param_groups:
param_group["lr"] = lr
def adjust_learning_rate(optimizer, epoch, step, cfg, args):
base_lr = (
cfg.basic_lr * args.batch_size * dist.get_world_size() * (
cfg.lr_decay_rate
** bisect.bisect_right(cfg.lr_decay_stages, epoch)
)
)
# Warm up
lr_factor = 1.0
if epoch == 0 and step < cfg.warm_iters:
lr_factor = (step + 1.0) / cfg.warm_iters
for param_group in optimizer.param_groups:
param_group["lr"] = base_lr * lr_factor
def build_dataset(dataset_dir, cfg):
data_cfg = copy.deepcopy(cfg.train_dataset)
data_name = data_cfg.pop("name")
data_cfg["root"] = os.path.join(dataset_dir, data_name, data_cfg["root"])
if "ann_file" in data_cfg:
data_cfg["ann_file"] = os.path.join(dataset_dir, data_name, data_cfg["ann_file"])
data_cfg["order"] = ["image", "boxes", "boxes_category", "info"]
return data_mapper[data_name](**data_cfg)
# pylint: disable=dangerous-default-value
def build_sampler(train_dataset, batch_size, aspect_grouping=[1]):
def _compute_aspect_ratios(dataset):
aspect_ratios = []
for i in range(len(dataset)):
info = dataset.get_img_info(i)
aspect_ratios.append(info["height"] / info["width"])
return aspect_ratios
def _quantize(x, bins):
return list(map(lambda y: bisect.bisect_right(sorted(bins), y), x))
if len(aspect_grouping) == 0:
return Infinite(RandomSampler(train_dataset, batch_size, drop_last=True))
aspect_ratios = _compute_aspect_ratios(train_dataset)
group_ids = _quantize(aspect_ratios, aspect_grouping)
return Infinite(GroupedRandomSampler(train_dataset, batch_size, group_ids))
def build_dataloader(batch_size, dataset_dir, cfg):
train_dataset = build_dataset(dataset_dir, cfg)
train_sampler = build_sampler(train_dataset, batch_size)
train_dataloader = DataLoader(
train_dataset,
sampler=train_sampler,
transform=T.Compose(
transforms=[
T.ShortestEdgeResize(
cfg.train_image_short_size,
cfg.train_image_max_size,
sample_style="choice",
),
T.RandomHorizontalFlip(),
T.ToMode(),
],
order=["image", "boxes", "boxes_category"],
),
collator=DetectionPadCollator(),
num_workers=2,
)
return train_dataloader
if __name__ == "__main__":
main()
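# --- Hedged launch example (not part of the original script) ---
# The flags come from make_parser() above; the script name, file paths and
# GPU count are hypothetical. ``-l`` makes the student inherit the teacher's
# non-backbone parameters:
#
#   python train_icd.py -f net.py -df distill_configs/ICD.py \
#       -tf teacher_net.py -tw teacher_weights.pkl -n 8 -b 2 \
#       -d /data/datasets -l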
|
[
"megengine.data.transform.ToMode",
"megengine.device.set_prealloc_config",
"megengine.distributed.get_rank",
"megengine.distributed.get_world_size",
"megengine.dtr.enable",
"megengine.get_logger",
"megengine.distributed.make_allreduce_cb",
"megengine.data.transform.RandomHorizontalFlip",
"megengine.tensor",
"megengine.data.RandomSampler",
"megengine.data.transform.ShortestEdgeResize",
"megengine.autodiff.GradManager",
"megengine.load",
"megengine.distributed.launcher",
"megengine.core._imperative_rt.utils._set_defrag"
] |
[((767, 784), 'megengine.core._imperative_rt.utils._set_defrag', '_set_defrag', (['(True)'], {}), '(True)\n', (778, 784), False, 'from megengine.core._imperative_rt.utils import _set_defrag\n'), ((992, 1016), 'megengine.get_logger', 'mge.get_logger', (['__name__'], {}), '(__name__)\n', (1006, 1016), True, 'import megengine as mge\n'), ((1041, 1107), 'megengine.device.set_prealloc_config', 'mge.device.set_prealloc_config', (['(1024)', '(1024)', '(256 * 1024 * 1024)', '(4.0)'], {}), '(1024, 1024, 256 * 1024 * 1024, 4.0)\n', (1071, 1107), True, 'import megengine as mge\n'), ((1142, 1167), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1165, 1167), False, 'import argparse\n'), ((2517, 2531), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2529, 2531), False, 'from datetime import datetime\n'), ((3015, 3031), 'megengine.dtr.enable', 'mge.dtr.enable', ([], {}), '()\n', (3029, 3031), True, 'import megengine as mge\n'), ((3103, 3130), 'layers.tools.utils.import_from_file', 'import_from_file', (['args.file'], {}), '(args.file)\n', (3119, 3130), False, 'from layers.tools.utils import AverageMeter, DetectionPadCollator, GroupedRandomSampler, get_config_info, import_from_file\n'), ((3277, 3312), 'layers.tools.utils.import_from_file', 'import_from_file', (['args.teacher_file'], {}), '(args.teacher_file)\n', (3293, 3312), False, 'from layers.tools.utils import AverageMeter, DetectionPadCollator, GroupedRandomSampler, get_config_info, import_from_file\n'), ((3529, 3563), 'megengine.load', 'mge.load', (['args.teacher_weight_file'], {}), '(args.teacher_weight_file)\n', (3537, 3563), True, 'import megengine as mge\n'), ((3821, 3856), 'layers.tools.utils.import_from_file', 'import_from_file', (['args.distill_file'], {}), '(args.distill_file)\n', (3837, 3856), False, 'from layers.tools.utils import AverageMeter, DetectionPadCollator, GroupedRandomSampler, get_config_info, import_from_file\n'), ((4725, 4738), 'megengine.autodiff.GradManager', 'GradManager', ([], {}), '()\n', (4736, 4738), False, 'from megengine.autodiff import GradManager\n'), ((7515, 7583), 'layers.tools.utils.AverageMeter', 'AverageMeter', ([], {'record_len': '(model.cfg.num_losses + distiller.num_losses)'}), '(record_len=model.cfg.num_losses + distiller.num_losses)\n', (7527, 7583), False, 'from layers.tools.utils import AverageMeter, DetectionPadCollator, GroupedRandomSampler, get_config_info, import_from_file\n'), ((7601, 7627), 'layers.tools.utils.AverageMeter', 'AverageMeter', ([], {'record_len': '(2)'}), '(record_len=2)\n', (7613, 7627), False, 'from layers.tools.utils import AverageMeter, DetectionPadCollator, GroupedRandomSampler, get_config_info, import_from_file\n'), ((10114, 10146), 'copy.deepcopy', 'copy.deepcopy', (['cfg.train_dataset'], {}), '(cfg.train_dataset)\n', (10127, 10146), False, 'import copy\n'), ((10208, 10262), 'os.path.join', 'os.path.join', (['dataset_dir', 'data_name', "data_cfg['root']"], {}), "(dataset_dir, data_name, data_cfg['root'])\n", (10220, 10262), False, 'import os\n'), ((2692, 2714), 'os.path.isdir', 'os.path.isdir', (['log_dir'], {}), '(log_dir)\n', (2705, 2714), False, 'import os\n'), ((2724, 2744), 'os.makedirs', 'os.makedirs', (['log_dir'], {}), '(log_dir)\n', (2735, 2744), False, 'import os\n'), ((2819, 2861), 'megengine.distributed.launcher', 'dist.launcher', (['worker'], {'n_gpus': 'args.devices'}), '(worker, n_gpus=args.devices)\n', (2832, 2861), True, 'import megengine.distributed as dist\n'), ((3946, 3961), 'megengine.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (3959, 3961), True, 'import megengine.distributed as dist\n'), ((4746, 4767), 'megengine.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (4765, 4767), True, 'import megengine.distributed as dist\n'), ((5001, 5027), 'megengine.load', 'mge.load', (['args.weight_file'], {}), '(args.weight_file)\n', (5009, 5027), True, 'import megengine as mge\n'), ((5315, 5336), 'megengine.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (5334, 5336), True, 'import megengine.distributed as dist\n'), ((5605, 5620), 'megengine.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (5618, 5620), True, 'import megengine.distributed as dist\n'), ((8029, 8040), 'time.time', 'time.time', ([], {}), '()\n', (8038, 8040), False, 'import time\n'), ((8098, 8109), 'time.time', 'time.time', ([], {}), '()\n', (8107, 8109), False, 'import time\n'), ((8125, 8136), 'time.time', 'time.time', ([], {}), '()\n', (8134, 8136), False, 'import time\n'), ((8419, 8430), 'time.time', 'time.time', ([], {}), '()\n', (8428, 8430), False, 'import time\n'), ((10326, 10384), 'os.path.join', 'os.path.join', (['dataset_dir', 'data_name', "data_cfg['ann_file']"], {}), "(dataset_dir, data_name, data_cfg['ann_file'])\n", (10338, 10384), False, 'import os\n'), ((11215, 11273), 'layers.tools.utils.GroupedRandomSampler', 'GroupedRandomSampler', (['train_dataset', 'batch_size', 'group_ids'], {}), '(train_dataset, batch_size, group_ids)\n', (11235, 11273), False, 'from layers.tools.utils import AverageMeter, DetectionPadCollator, GroupedRandomSampler, get_config_info, import_from_file\n'), ((3988, 4014), 'layers.tools.utils.get_config_info', 'get_config_info', (['model.cfg'], {}), '(model.cfg)\n', (4003, 4014), False, 'from layers.tools.utils import AverageMeter, DetectionPadCollator, GroupedRandomSampler, get_config_info, import_from_file\n'), ((6315, 6330), 'megengine.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (6328, 6330), True, 'import megengine.distributed as dist\n'), ((7733, 7754), 'megengine.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (7752, 7754), True, 'import megengine.distributed as dist\n'), ((8504, 8519), 'megengine.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (8517, 8519), True, 'import megengine.distributed as dist\n'), ((9456, 9497), 'math.cos', 'math.cos', (['(cur_iter / total_iter * math.pi)'], {}), '(cur_iter / total_iter * math.pi)\n', (9464, 9497), False, 'import math\n'), ((9701, 9722), 'megengine.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (9720, 9722), True, 'import megengine.distributed as dist\n'), ((9772, 9819), 'bisect.bisect_right', 'bisect.bisect_right', (['cfg.lr_decay_stages', 'epoch'], {}), '(cfg.lr_decay_stages, epoch)\n', (9791, 9819), False, 'import bisect\n'), ((11020, 11076), 'megengine.data.RandomSampler', 'RandomSampler', (['train_dataset', 'batch_size'], {'drop_last': '(True)'}), '(train_dataset, batch_size, drop_last=True)\n', (11033, 11076), False, 'from megengine.data import DataLoader, Infinite, RandomSampler\n'), ((11948, 11970), 'layers.tools.utils.DetectionPadCollator', 'DetectionPadCollator', ([], {}), '()\n', (11968, 11970), False, 'from layers.tools.utils import AverageMeter, DetectionPadCollator, GroupedRandomSampler, get_config_info, import_from_file\n'), ((4451, 4472), 'megengine.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (4470, 4472), True, 'import megengine.distributed as dist\n'), ((8187, 8217), 'megengine.tensor', 'mge.tensor', (["mini_batch['data']"], {}), "(mini_batch['data'])\n", (8197, 8217), True, 'import megengine as mge\n'), ((8239, 8272), 'megengine.tensor', 'mge.tensor', (["mini_batch['im_info']"], {}), "(mini_batch['im_info'])\n", (8249, 8272), True, 'import megengine as mge\n'), ((8295, 8329), 'megengine.tensor', 'mge.tensor', (["mini_batch['gt_boxes']"], {}), "(mini_batch['gt_boxes'])\n", (8305, 8329), True, 'import megengine as mge\n'), ((2573, 2600), 'os.path.basename', 'os.path.basename', (['args.file'], {}), '(args.file)\n', (2589, 2600), False, 'import os\n'), ((4845, 4887), 'megengine.distributed.make_allreduce_cb', 'dist.make_allreduce_cb', (['"""mean"""', 'dist.WORLD'], {}), "('mean', dist.WORLD)\n", (4867, 4887), True, 'import megengine.distributed as dist\n'), ((11601, 11702), 'megengine.data.transform.ShortestEdgeResize', 'T.ShortestEdgeResize', (['cfg.train_image_short_size', 'cfg.train_image_max_size'], {'sample_style': '"""choice"""'}), "(cfg.train_image_short_size, cfg.train_image_max_size,\n sample_style='choice')\n", (11621, 11702), True, 'from megengine.data import transform as T\n'), ((11795, 11819), 'megengine.data.transform.RandomHorizontalFlip', 'T.RandomHorizontalFlip', ([], {}), '()\n', (11817, 11819), True, 'from megengine.data import transform as T\n'), ((11837, 11847), 'megengine.data.transform.ToMode', 'T.ToMode', ([], {}), '()\n', (11845, 11847), True, 'from megengine.data import transform as T\n')]
|
import os
import numpy as np
import collections.abc
import megengine.module as M
import megengine.functional as F
import megengine as mge
from megengine.data.dataset import Dataset
from megengine.data import DataLoader
import hparams as hp
from megengine.data import Collator
class AsrDataset(Dataset):
def __init__(self, data_set="train"):
"""
Args:
root_dir (string): Directory with all the spectrograms.
"""
self.metas = self.load_metas(hp.dataset_root, data_set)
    def load_metas(self, root, data_set):
metas = []
with open(os.path.join(root, f"{data_set}.txt")) as f:
for line in f.readlines():
info = line.split("|")
metas.append(
{
"mel_path": os.path.join(root, info[0]),
"frames": info[1],
"token_ids_str": info[2],
"speaker": info[3],
}
)
return metas
def __len__(self):
return len(self.metas)
def __getitem__(self, idx):
meta = self.metas[idx]
token_ids = [int(i) for i in meta["token_ids_str"].split(" ")]
text = np.array(token_ids, dtype=np.int32)
mel = np.load(meta["mel_path"])
text_input = text[:-1]
text_output = text[1:]
text_length = text_input.shape[0]
pos_text = np.arange(1, text_length + 1)
pos_mel = np.arange(1, mel.shape[0] + 1)
return {
"text": text,
"text_input": text_input,
"text_output": text_output,
"text_length": text_length,
"mel": mel,
"pos_mel": pos_mel,
"pos_text": pos_text,
}
class AsrCollator(Collator):
def __init__(self, pad_value: float = 0.0):
super().__init__()
self.pad_value = pad_value
def apply(self, batch):
# Puts each data field into a tensor with outer dimension batch size
        # collections.Mapping was removed in Python 3.10; use collections.abc.
        if isinstance(batch[0], collections.abc.Mapping):
text = [d["text"] for d in batch]
text_input = [d["text_input"] for d in batch]
text_output = [d["text_output"] for d in batch]
text_length = [d["text_length"] for d in batch]
mel = [d["mel"] for d in batch]
mel_length = [d["mel"].shape[0] for d in batch]
pos_mel = [d["pos_mel"] for d in batch]
pos_text = [d["pos_text"] for d in batch]
text = [
i
for i, _ in sorted(
zip(text, mel_length), key=lambda x: x[1], reverse=True
)
]
text_input = [
i
for i, _ in sorted(
zip(text_input, mel_length), key=lambda x: x[1], reverse=True
)
]
text_output = [
i
for i, _ in sorted(
zip(text_output, mel_length), key=lambda x: x[1], reverse=True
)
]
text_length = [
i
for i, _ in sorted(
zip(text_length, mel_length), key=lambda x: x[1], reverse=True
)
]
mel = [
i
for i, _ in sorted(
zip(mel, mel_length), key=lambda x: x[1], reverse=True
)
]
pos_text = [
i
for i, _ in sorted(
zip(pos_text, mel_length), key=lambda x: x[1], reverse=True
)
]
pos_mel = [
i
for i, _ in sorted(
zip(pos_mel, mel_length), key=lambda x: x[1], reverse=True
)
]
mel_length = sorted(mel_length, reverse=True)
# PAD sequences with largest length of the batch
text_input = _prepare_data(text_input).astype(np.int32)
text_output = _prepare_data(text_output).astype(np.int32)
mel = _pad_mel(mel)
pos_mel = _prepare_data(pos_mel).astype(np.int32)
pos_text = _prepare_data(pos_text).astype(np.int32)
return (
mge.Tensor(text_input),
mge.Tensor(text_output),
mge.Tensor(mel),
mge.Tensor(pos_text),
mge.Tensor(pos_mel),
mge.Tensor(text_length),
mge.Tensor(mel_length),
)
raise TypeError(
(
"batch must contain tensors, numbers, dicts or lists; found {}".format(
type(batch[0])
)
)
)
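# A minimal wiring sketch for the classes above (assumed usage; the sampler
# choice and batch size are illustrative, not taken from this repository):
#
#     from megengine.data import SequentialSampler
#     dataset = AsrDataset("train")
#     sampler = SequentialSampler(dataset, batch_size=16)
#     loader = DataLoader(dataset, sampler=sampler, collator=AsrCollator())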
def collate_fn_transformer_test(batch):
    # Puts each data field into a tensor with outer dimension batch size.
    # Unlike AsrCollator.apply, this test helper expects a single example dict.
    text = [batch["text"]]
text_input = batch["text_input"]
text_output = batch["text_output"]
text_length = batch["text_length"]
mel = [batch["mel"]]
mel_length = [batch["mel"].shape[1]]
pos_mel = batch["pos_mel"]
pos_text = batch["pos_text"]
text = [
i for i, _ in sorted(zip(text, mel_length), key=lambda x: x[1], reverse=True)
]
text_input = [
i
for i, _ in sorted(
zip(text_input, mel_length), key=lambda x: x[1], reverse=True
)
]
text_output = [
i
for i, _ in sorted(
zip(text_output, mel_length), key=lambda x: x[1], reverse=True
)
]
text_length = [
i
for i, _ in sorted(
zip(text_length, mel_length), key=lambda x: x[1], reverse=True
)
]
mel = [i for i, _ in sorted(zip(mel, mel_length), key=lambda x: x[1], reverse=True)]
pos_text = [
i
for i, _ in sorted(zip(pos_text, mel_length), key=lambda x: x[1], reverse=True)
]
pos_mel = [
i for i, _ in sorted(zip(pos_mel, mel_length), key=lambda x: x[1], reverse=True)
]
mel_length = sorted(mel_length, reverse=True)
# PAD sequences with largest length of the batch
text_input = _prepare_data(text_input).astype(np.int32)
text_output = _prepare_data(text_output).astype(np.int32)
mel = _pad_mel(mel[0])
pos_mel = _prepare_data(pos_mel).astype(np.int32)
pos_text = _prepare_data(pos_text).astype(np.int32)
return (
mge.Tensor(text_input),
mge.Tensor(text_output),
mge.Tensor(mel),
mge.Tensor(pos_text),
mge.Tensor(pos_mel),
mge.Tensor(text_length),
mge.Tensor(mel_length),
)
############################ Utils ###################################
def _pad_data(x, length):
_pad = 0
return np.pad(x, (0, length - x.shape[0]), mode="constant", constant_values=_pad)
def _prepare_data(inputs):
max_len = max((len(x) for x in inputs))
return np.stack([_pad_data(x, max_len) for x in inputs])
def _pad_mel(inputs):
_pad = 0
def _pad_one(x, max_len):
mel_len = x.shape[0]
return np.pad(
x, [[0, max_len - mel_len], [0, 0]], mode="constant", constant_values=_pad
)
max_len = max((x.shape[0] for x in inputs))
return np.stack([_pad_one(x, max_len) for x in inputs])
|
[
"megengine.Tensor"
] |
[((7013, 7087), 'numpy.pad', 'np.pad', (['x', '(0, length - x.shape[0])'], {'mode': '"""constant"""', 'constant_values': '_pad'}), "(x, (0, length - x.shape[0]), mode='constant', constant_values=_pad)\n", (7019, 7087), True, 'import numpy as np\n'), ((1250, 1285), 'numpy.array', 'np.array', (['token_ids'], {'dtype': 'np.int32'}), '(token_ids, dtype=np.int32)\n', (1258, 1285), True, 'import numpy as np\n'), ((1300, 1325), 'numpy.load', 'np.load', (["meta['mel_path']"], {}), "(meta['mel_path'])\n", (1307, 1325), True, 'import numpy as np\n'), ((1449, 1478), 'numpy.arange', 'np.arange', (['(1)', '(text_length + 1)'], {}), '(1, text_length + 1)\n', (1458, 1478), True, 'import numpy as np\n'), ((1497, 1527), 'numpy.arange', 'np.arange', (['(1)', '(mel.shape[0] + 1)'], {}), '(1, mel.shape[0] + 1)\n', (1506, 1527), True, 'import numpy as np\n'), ((6499, 6521), 'megengine.Tensor', 'mge.Tensor', (['text_input'], {}), '(text_input)\n', (6509, 6521), True, 'import megengine as mge\n'), ((6531, 6554), 'megengine.Tensor', 'mge.Tensor', (['text_output'], {}), '(text_output)\n', (6541, 6554), True, 'import megengine as mge\n'), ((6564, 6579), 'megengine.Tensor', 'mge.Tensor', (['mel'], {}), '(mel)\n', (6574, 6579), True, 'import megengine as mge\n'), ((6589, 6609), 'megengine.Tensor', 'mge.Tensor', (['pos_text'], {}), '(pos_text)\n', (6599, 6609), True, 'import megengine as mge\n'), ((6619, 6638), 'megengine.Tensor', 'mge.Tensor', (['pos_mel'], {}), '(pos_mel)\n', (6629, 6638), True, 'import megengine as mge\n'), ((6648, 6671), 'megengine.Tensor', 'mge.Tensor', (['text_length'], {}), '(text_length)\n', (6658, 6671), True, 'import megengine as mge\n'), ((6681, 6703), 'megengine.Tensor', 'mge.Tensor', (['mel_length'], {}), '(mel_length)\n', (6691, 6703), True, 'import megengine as mge\n'), ((7334, 7420), 'numpy.pad', 'np.pad', (['x', '[[0, max_len - mel_len], [0, 0]]'], {'mode': '"""constant"""', 'constant_values': '_pad'}), "(x, [[0, max_len - mel_len], [0, 0]], mode='constant',\n constant_values=_pad)\n", (7340, 7420), True, 'import numpy as np\n'), ((607, 644), 'os.path.join', 'os.path.join', (['root', 'f"""{data_set}.txt"""'], {}), "(root, f'{data_set}.txt')\n", (619, 644), False, 'import os\n'), ((4312, 4334), 'megengine.Tensor', 'mge.Tensor', (['text_input'], {}), '(text_input)\n', (4322, 4334), True, 'import megengine as mge\n'), ((4352, 4375), 'megengine.Tensor', 'mge.Tensor', (['text_output'], {}), '(text_output)\n', (4362, 4375), True, 'import megengine as mge\n'), ((4393, 4408), 'megengine.Tensor', 'mge.Tensor', (['mel'], {}), '(mel)\n', (4403, 4408), True, 'import megengine as mge\n'), ((4426, 4446), 'megengine.Tensor', 'mge.Tensor', (['pos_text'], {}), '(pos_text)\n', (4436, 4446), True, 'import megengine as mge\n'), ((4464, 4483), 'megengine.Tensor', 'mge.Tensor', (['pos_mel'], {}), '(pos_mel)\n', (4474, 4483), True, 'import megengine as mge\n'), ((4501, 4524), 'megengine.Tensor', 'mge.Tensor', (['text_length'], {}), '(text_length)\n', (4511, 4524), True, 'import megengine as mge\n'), ((4542, 4564), 'megengine.Tensor', 'mge.Tensor', (['mel_length'], {}), '(mel_length)\n', (4552, 4564), True, 'import megengine as mge\n'), ((818, 845), 'os.path.join', 'os.path.join', (['root', 'info[0]'], {}), '(root, info[0])\n', (830, 845), False, 'import os\n')]
|
"""
Module for handling state variables.
"""
import numpy as nm
from sfepy.base.base import Struct
class State(Struct):
"""
Class holding/manipulating the state variables and corresponding DOF
vectors.
Manipulating the state class changes the underlying variables, and
hence also the corresponding equations/terms (if any).
Notes
-----
This class allows working with LCBC conditions in time-dependent
problems, as it keeps track of the reduced DOF vector that cannot
be reconstructed from the full DOF vector by using the usual
`variables.strip_state_vector()`.
"""
@staticmethod
def from_variables(variables):
"""
Create a State instance for the given variables.
The DOF vector is created using the DOF data in `variables`.
Parameters
----------
variables : Variables instance
The variables.
"""
parts = variables.get_state_parts()
vec = variables.create_state_vector()
        for key, part in parts.items():
indx = variables.get_indx(key)
vec[indx] = part
return State(variables, vec)
def __init__(self, variables, vec=None, preserve_caches=False):
"""
Create a State instance for the given variables.
Parameters
----------
variables : Variables instance
The variables.
vec : array, optional
The (initial) DOF vector corresponding to the variables.
preserve_caches : bool
If True, do not invalidate evaluate caches of variables.
"""
Struct.__init__(self, variables=variables, vec=vec, r_vec=None)
if self.vec is None:
self.vec = variables.create_state_vector()
self.variables.set_data(self.vec, preserve_caches=preserve_caches)
def copy(self, deep=False, preserve_caches=False):
"""
Copy the state. By default, the new state contains the same
variables, and creates new DOF vectors. If `deep` is True, also
the DOF vectors are copied.
Parameters
----------
deep : bool
If True, make a copy of the DOF vectors.
preserve_caches : bool
If True, do not invalidate evaluate caches of variables.
"""
if deep:
other = State(self.variables, self.vec.copy(), preserve_caches=True)
if self.r_vec is not None:
other.r_vec = self.r_vec.copy()
else:
other = State(self.variables, preserve_caches=True)
return other
def fill(self, value):
"""
Fill the DOF vector with given value.
"""
if self.r_vec is not None:
self.r_vec.fill(value)
self.vec.fill(value)
def init_history(self):
"""
Initialize variables with history.
"""
self.variables.init_history()
def apply_ebc(self, force_values=None):
"""
Apply essential (Dirichlet) boundary conditions to the state.
"""
self.variables.apply_ebc(self.vec, force_values=force_values)
def has_ebc(self):
"""
Test whether the essential (Dirichlet) boundary conditions have
been applied to the DOF vector.
"""
return self.variables.has_ebc(self.vec)
def apply_ic(self, force_values=None):
"""
Apply initial conditions to the state.
"""
if self.r_vec is not None:
raise ValueError('cannot re-apply initial conditions with LCBCs!')
self.variables.apply_ic(self.vec, force_values=force_values)
def get_reduced(self, follow_epbc=False):
"""
Get the reduced DOF vector, with EBC and PBC DOFs removed.
"""
strip = self.variables.strip_state_vector
if self.variables.has_lcbc:
if self.r_vec is None:
r_vec = strip(self.vec, follow_epbc=follow_epbc)
r_vec = self.variables.op_lcbc.T * r_vec
else:
r_vec = self.r_vec
else:
r_vec = strip(self.vec, follow_epbc=follow_epbc)
return r_vec
def set_reduced(self, r_vec, preserve_caches=False):
"""
Set the reduced DOF vector, with EBC and PBC DOFs removed.
Parameters
----------
r_vec : array
The reduced DOF vector corresponding to the variables.
preserve_caches : bool
If True, do not invalidate evaluate caches of variables.
"""
self.vec = self.variables.make_full_vec(r_vec)
if self.variables.has_lcbc:
self.r_vec = r_vec
self.variables.set_data(self.vec, preserve_caches=preserve_caches)
def set_full(self, vec, var_name=None, force=False):
"""
Set the full DOF vector (including EBC and PBC DOFs). If
`var_name` is given, set only the DOF sub-vector corresponding
to the given variable. If `force` is True, setting variables
with LCBC DOFs is allowed.
"""
if var_name is None:
if self.variables.has_lcbc and not force:
raise ValueError('cannot set full DOF vector with LCBCs!')
self.vec = vec
self.variables.set_data(self.vec)
else:
var = self.variables[var_name]
if var.has_lcbc and not force:
raise ValueError('cannot set full DOF vector with LCBCs!')
self.variables.set_state_part(self.vec, vec, var_name)
var.set_data(self.vec, self.variables.get_indx(var_name))
def __call__(self, var_name=None):
"""
Get the full DOF vector (including EBC and PBC DOFs). If
`var_name` is given, return only the DOF vector corresponding to
the given variable.
"""
if var_name is None:
out = self.vec
else:
out = self.variables.get_state_part_view(self.vec, var_name)
return out
def set_parts(self, parts, force=False):
"""
Set parts of the DOF vector corresponding to individual state
variables.
Parameters
----------
parts : dict
The dictionary of the DOF vector parts.
"""
if self.variables.has_lcbc and not force:
raise ValueError('cannot set full DOF vector with LCBCs!')
self.variables.set_data(parts)
        for key, part in parts.items():
indx = self.variables.get_indx(key)
self.vec[indx] = part
def get_parts(self):
"""
Return parts of the DOF vector corresponding to individual state
variables.
Returns
-------
out : dict
The dictionary of the DOF vector parts.
"""
return self.variables.get_state_parts(self.vec)
def create_output_dict(self, fill_value=None, var_info=None,
extend=True, linearization=None):
"""
Transforms state to an output dictionary, that can be
passed as 'out' kwarg to Mesh.write().
Then the dictionary entries are formed by components of the
state vector corresponding to unknown variables according to
kind of linearization given by `linearization`.
Examples
--------
>>> out = state.create_output_dict()
>>> problem.save_state('file.vtk', out=out)
"""
return self.variables.state_to_output(self.vec, fill_value,
var_info, extend,
linearization=linearization)
def get_weighted_norm(self, vec, weights=None, return_weights=False):
"""
Return the weighted norm of DOF vector `vec`.
By default, each component of `vec` is weighted by the 1/norm of the
corresponding state part, or 1 if the norm is zero. Alternatively, the
weights can be provided explicitly using `weights` argument.
Parameters
----------
vec : array
The DOF vector corresponding to the variables.
weights : dict, optional
If given, the weights are used instead of the norms of the state
parts. Keys of the dictionary must be equal to the names of
variables comprising the DOF vector.
return_weights: bool
If True, return also the used weights.
Returns
-------
norm : float
The weighted norm.
weights : dict, optional
If `return_weights` is True, the used weights.
Examples
--------
>>> err = state0.get_weighted_norm(state() - state0())
"""
if weights is None:
parts = self.get_parts()
weights = {}
            for key, part in parts.items():
pnorm = nm.linalg.norm(part)
if pnorm < 10.0 * nm.finfo(nm.float64).eps:
pnorm = 1.0
weights[key] = 1.0 / pnorm
else:
if set(weights.keys()) != self.variables.state:
raise ValueError('weights keys have to be in %s!'
% self.variables.state)
wvec = vec.copy()
        for key in weights:
indx = self.variables.get_indx(key)
wvec[indx] *= weights[key]
norm = nm.linalg.norm(wvec)
if return_weights:
return norm, weights
else:
return norm
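# A reduced/full round-trip sketch based on the docstrings above (the
# construction of `variables` is omitted; names are illustrative):
#
#     state = State.from_variables(variables)
#     state.apply_ebc()
#     r_vec = state.get_reduced()   # EBC/EPBC (and LCBC) DOFs removed
#     # ... update r_vec, e.g. by a linear solver ...
#     state.set_reduced(r_vec)     # rebuilds and stores the full DOF vector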
|
[
"sfepy.base.base.Struct.__init__"
] |
[((1635, 1698), 'sfepy.base.base.Struct.__init__', 'Struct.__init__', (['self'], {'variables': 'variables', 'vec': 'vec', 'r_vec': 'None'}), '(self, variables=variables, vec=vec, r_vec=None)\n', (1650, 1698), False, 'from sfepy.base.base import Struct\n'), ((9457, 9477), 'numpy.linalg.norm', 'nm.linalg.norm', (['wvec'], {}), '(wvec)\n', (9471, 9477), True, 'import numpy as nm\n'), ((8934, 8954), 'numpy.linalg.norm', 'nm.linalg.norm', (['part'], {}), '(part)\n', (8948, 8954), True, 'import numpy as nm\n'), ((8989, 9009), 'numpy.finfo', 'nm.finfo', (['nm.float64'], {}), '(nm.float64)\n', (8997, 9009), True, 'import numpy as nm\n')]
|
import os
from flask import Flask
import requests
from app import config
from app.routes import default
from app.routes import debug
from app.routes import auth
from app.routes import admin
from app.routes import profile
from app.routes import server
from app.pages import create_pages
from sqlmodel import SQLModel
from sqlmodel import create_engine
from app.models.user import Log, User
APP_SETTINGS = os.getenv("APP_SETTINGS", "Testing")
def drop_database(config):
    # Only create_engine and MetaData are needed to reflect and drop all tables.
    from sqlalchemy import create_engine
    from sqlalchemy import MetaData
engine = create_engine(config["DATABASE_URI"])
meta = MetaData()
meta.reflect(bind=engine)
meta.drop_all(engine, checkfirst=False)
def create_app():
app = Flask(__name__, template_folder="app/templates/", static_folder="app/static/")
app.count_requests = 0
app.config.from_object(f"app.config.{APP_SETTINGS}")
app.secret_key = os.urandom(256)
app.url_map.strict_slashes = False
app.register_blueprint(default.bp)
app.register_blueprint(debug.bp, url_prefix="/debug")
app.register_blueprint(auth.bp, url_prefix="/auth")
app.register_blueprint(admin.bp, url_prefix="/admin")
app.register_blueprint(profile.bp, url_prefix="/user")
app.register_blueprint(server.bp, url_prefix="/server")
app.register_error_handler(Exception, default.handle_exception)
return app
app = create_app()
app.pages = create_pages()
with app.app_context():
from app.database import engine
app.engine = engine
if os.getenv("DROP_DATABASE", False):
drop_database(app.config)
SQLModel.metadata.create_all(engine)
if __name__ == "__main__":
app.run(host=app.config.get("HOST"), port=app.config.get("PORT"))
|
[
"sqlmodel.SQLModel.metadata.create_all"
] |
[((417, 453), 'os.getenv', 'os.getenv', (['"""APP_SETTINGS"""', '"""Testing"""'], {}), "('APP_SETTINGS', 'Testing')\n", (426, 453), False, 'import os\n'), ((1587, 1601), 'app.pages.create_pages', 'create_pages', ([], {}), '()\n', (1599, 1601), False, 'from app.pages import create_pages\n'), ((1691, 1724), 'os.getenv', 'os.getenv', (['"""DROP_DATABASE"""', '(False)'], {}), "('DROP_DATABASE', False)\n", (1700, 1724), False, 'import os\n'), ((1757, 1793), 'sqlmodel.SQLModel.metadata.create_all', 'SQLModel.metadata.create_all', (['engine'], {}), '(engine)\n', (1785, 1793), False, 'from sqlmodel import SQLModel\n'), ((735, 772), 'sqlalchemy.create_engine', 'create_engine', (["config['DATABASE_URI']"], {}), "(config['DATABASE_URI'])\n", (748, 772), False, 'from sqlalchemy import create_engine\n'), ((784, 794), 'sqlalchemy.MetaData', 'MetaData', ([], {}), '()\n', (792, 794), False, 'from sqlalchemy import MetaData\n'), ((899, 977), 'flask.Flask', 'Flask', (['__name__'], {'template_folder': '"""app/templates/"""', 'static_folder': '"""app/static/"""'}), "(__name__, template_folder='app/templates/', static_folder='app/static/')\n", (904, 977), False, 'from flask import Flask\n'), ((1084, 1099), 'os.urandom', 'os.urandom', (['(256)'], {}), '(256)\n', (1094, 1099), False, 'import os\n')]
|
from typing import Optional
import pandas as pd
import streamlit as st
import os
from sqlmodel import Field, Session, SQLModel, create_engine, select
class Hero(SQLModel, table=True):
    __table_args__ = {'extend_existing': True}  # Streamlit re-executes the script (and this class definition) on every interaction
id: Optional[int] = Field(default=None, primary_key=True)
name: str
secret_name: str
age: Optional[int] = None
sqlite_file_name = 'database.db'
sqlite_url = f"sqlite:///{sqlite_file_name}"
engine = create_engine(sqlite_url, echo=True)
def create_db_and_tables():
SQLModel.metadata.create_all(engine)
def create_heroes():
hero_1 = Hero(name="Rusty-Man", secret_name="<NAME>", age=36)
hero_2 = Hero(name="Tarantula", secret_name="<NAME>", age=26)
hero_3 = Hero(name="<NAME>", secret_name="<NAME>", age=33)
with Session(engine) as session:
session.add(hero_1)
session.add(hero_2)
session.add(hero_3)
session.commit()
def get_db_size():
with Session(engine) as session:
heroes = session.exec(select(Hero)).all()
return len(heroes)
def select_heros():
with Session(engine) as session:
statement = select(Hero).where(Hero.age <= 35)
results = session.exec(statement)
for hero in results:
st.text(hero)
#st.text(len(results))
def show_table():
with Session(engine) as session:
heroes = session.exec(select(Hero)).all()
st.table(pd.DataFrame([s.dict() for s in heroes[-5:]]))
def delete_db():
with Session(engine) as session:
heroes = session.exec(select(Hero)).all()
for hero in heroes:
session.delete(hero)
session.commit()
st.text("Deleted all rows")
def write_new_row():
    with st.form('new_row'):
        name_input = st.text_input('Name', value="<NAME>")
        secret_name = st.text_input('Secret alias', value="Dr. Jon")
        submitted = st.form_submit_button("Submit")
    if submitted:
        # Persist the hero built from the form inputs.
        hero = Hero(name=name_input, secret_name=secret_name, age=23)
        with Session(engine) as session:
            session.add(hero)
            session.commit()
        st.write('submitted')
# ====================================== main ====================================== #
def main():
create_db_and_tables()
st.title('🦄 SQLModel Demo')
b1, b2, b3, b4= st.columns(4)
#if b1.button('Add Filter'):
# pass
#select_heros() # todo
if b4.button("♻️ Empty db"):
delete_db()
#if b2.button('Create db'):
# create_db_and_tables()
if b3.button('+ Add 3 rows'):
create_heroes()
if st.button("➡️ Insert Row"):
write_new_row()
show_table()
col0, col1, col2 = st.columns(3)
file_size = os.path.getsize('database.db')
col1.metric("💾 database.db", f"{get_db_size()}", "total rows")
col2.metric("filesize", f"{file_size/1000:0.1f}", 'kb')
if __name__ == '__main__':
st.set_page_config(
page_title="SQLModel Demo",
page_icon="🦄",
layout="centered",
initial_sidebar_state="auto")
main()
|
[
"sqlmodel.create_engine",
"sqlmodel.Session",
"sqlmodel.SQLModel.metadata.create_all",
"sqlmodel.Field",
"sqlmodel.select"
] |
[((487, 523), 'sqlmodel.create_engine', 'create_engine', (['sqlite_url'], {'echo': '(True)'}), '(sqlite_url, echo=True)\n', (500, 523), False, 'from sqlmodel import Field, Session, SQLModel, create_engine, select\n'), ((294, 331), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (299, 331), False, 'from sqlmodel import Field, Session, SQLModel, create_engine, select\n'), ((558, 594), 'sqlmodel.SQLModel.metadata.create_all', 'SQLModel.metadata.create_all', (['engine'], {}), '(engine)\n', (586, 594), False, 'from sqlmodel import Field, Session, SQLModel, create_engine, select\n'), ((1703, 1730), 'streamlit.text', 'st.text', (['"""Deleted all rows"""'], {}), "('Deleted all rows')\n", (1710, 1730), True, 'import streamlit as st\n'), ((2419, 2446), 'streamlit.title', 'st.title', (['"""🦄 SQLModel Demo"""'], {}), "('🦄 SQLModel Demo')\n", (2427, 2446), True, 'import streamlit as st\n'), ((2467, 2480), 'streamlit.columns', 'st.columns', (['(4)'], {}), '(4)\n', (2477, 2480), True, 'import streamlit as st\n'), ((2745, 2771), 'streamlit.button', 'st.button', (['"""➡️ Insert Row"""'], {}), "('➡️ Insert Row')\n", (2754, 2771), True, 'import streamlit as st\n'), ((2838, 2851), 'streamlit.columns', 'st.columns', (['(3)'], {}), '(3)\n', (2848, 2851), True, 'import streamlit as st\n'), ((2868, 2898), 'os.path.getsize', 'os.path.getsize', (['"""database.db"""'], {}), "('database.db')\n", (2883, 2898), False, 'import os\n'), ((3059, 3174), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""SQLModel Demo"""', 'page_icon': '"""🦄"""', 'layout': '"""centered"""', 'initial_sidebar_state': '"""auto"""'}), "(page_title='SQLModel Demo', page_icon='🦄', layout=\n 'centered', initial_sidebar_state='auto')\n", (3077, 3174), True, 'import streamlit as st\n'), ((822, 837), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (829, 837), False, 'from sqlmodel import Field, Session, SQLModel, create_engine, select\n'), ((989, 1004), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (996, 1004), False, 'from sqlmodel import Field, Session, SQLModel, create_engine, select\n'), ((1121, 1136), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (1128, 1136), False, 'from sqlmodel import Field, Session, SQLModel, create_engine, select\n'), ((1361, 1376), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (1368, 1376), False, 'from sqlmodel import Field, Session, SQLModel, create_engine, select\n'), ((1531, 1546), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (1538, 1546), False, 'from sqlmodel import Field, Session, SQLModel, create_engine, select\n'), ((1766, 1784), 'streamlit.form', 'st.form', (['"""new_row"""'], {}), "('new_row')\n", (1773, 1784), True, 'import streamlit as st\n'), ((1807, 1844), 'streamlit.text_input', 'st.text_input', (['"""Name"""'], {'value': '"""<NAME>"""'}), "('Name', value='<NAME>')\n", (1820, 1844), True, 'import streamlit as st\n'), ((1867, 1913), 'streamlit.text_input', 'st.text_input', (['"""Secret alias"""'], {'value': '"""Dr. Jon"""'}), "('Secret alias', value='Dr. Jon')\n", (1880, 1913), True, 'import streamlit as st\n'), ((1935, 1966), 'streamlit.form_submit_button', 'st.form_submit_button', (['"""Submit"""'], {}), "('Submit')\n", (1956, 1966), True, 'import streamlit as st\n'), ((1287, 1300), 'streamlit.text', 'st.text', (['hero'], {}), '(hero)\n', (1294, 1300), True, 'import streamlit as st\n'), ((2075, 2096), 'streamlit.write', 'st.write', (['"""submitted"""'], {}), "('submitted')\n", (2083, 2096), True, 'import streamlit as st\n'), ((1169, 1181), 'sqlmodel.select', 'select', (['Hero'], {}), '(Hero)\n', (1175, 1181), False, 'from sqlmodel import Field, Session, SQLModel, create_engine, select\n'), ((2189, 2204), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (2196, 2204), False, 'from sqlmodel import Field, Session, SQLModel, create_engine, select\n'), ((1047, 1059), 'sqlmodel.select', 'select', (['Hero'], {}), '(Hero)\n', (1053, 1059), False, 'from sqlmodel import Field, Session, SQLModel, create_engine, select\n'), ((1419, 1431), 'sqlmodel.select', 'select', (['Hero'], {}), '(Hero)\n', (1425, 1431), False, 'from sqlmodel import Field, Session, SQLModel, create_engine, select\n'), ((1589, 1601), 'sqlmodel.select', 'select', (['Hero'], {}), '(Hero)\n', (1595, 1601), False, 'from sqlmodel import Field, Session, SQLModel, create_engine, select\n')]
|
#! /usr/bin/env python3
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import json
import math
import os
from megengine.utils.module_stats import sizeof_fmt
from megengine.utils.tensorboard import SummaryWriterExtend
def load_single_graph(fpath):
with open(fpath) as fin:
data = json.load(fin)
for t in ["operator", "var"]:
data[t] = {int(i): j for i, j in data[t].items()}
gvars = data["var"]
for oid, i in data["operator"].items():
i["input"] = list(map(int, i["input"]))
out = i["output"] = list(map(int, i["output"]))
for j in out:
gvars[j]["owner_opr"] = oid
for var in data["var"].values():
mp = var.get("mem_plan", None)
if mp:
var["shape"] = "{" + ",".join(map(str, mp["layout"]["shape"])) + "}"
else:
var["shape"] = "<?>"
return data
def comp_graph_plotter(input, writer):
jgraph = load_single_graph(input)
all_oprs = jgraph["operator"]
all_vars = jgraph["var"]
for i in all_oprs:
opr = all_oprs[i]
if opr["type"] == "ImmutableTensor":
continue
inputlist = []
for var in opr["input"]:
inpopr = all_oprs[all_vars[var]["owner_opr"]]
if inpopr["type"] == "ImmutableTensor":
continue
inputlist.append(all_oprs[all_vars[var]["owner_opr"]]["name"])
writer.add_node_raw(opr["name"], opr["type"], inputlist)
writer.add_graph_by_node_raw_list()
def load_mem_info(fpath):
with open(fpath) as fin:
data = json.load(fin)
oprs = data["opr"]
for oid, i in oprs.items():
i["size"] = 0
for oid, i in data["chunk"].items():
i["size"] = int(i["logic_addr_end"]) - int(i["logic_addr_begin"])
data["peak_memory"] = 0
data["weight_memory"] = 0
for oid, i in data["chunk"].items():
if i["type"] == "static_mem":
i["owner_opr"] = oprs[i["time_begin"]]["name"]
life_begin = int(i["time_begin"])
life_end = int(i["time_end"])
if i["overwrite_dest_id"] != "-1":
life_begin = life_begin + 1
if data["peak_memory"] < int(i["logic_addr_end"]):
data["peak_memory"] = int(i["logic_addr_end"])
for j in range(life_begin, life_end):
oprs[str(j)]["size"] = oprs[str(j)]["size"] + i["size"]
elif i["type"] == "weight_mem":
data["weight_memory"] += int(i["logic_addr_end"]) - int(
i["logic_addr_begin"]
)
return data
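# Shape of StaticMemoryInfo.json as inferred from the parsing above (the
# field values are illustrative, not taken from a real dump):
#
#     {
#       "opr":   {"0": {"id": "0", "name": "conv0", ...}, ...},
#       "chunk": {"5": {"type": "static_mem",
#                       "time_begin": "0", "time_end": "2",
#                       "logic_addr_begin": "0", "logic_addr_end": "1024",
#                       "overwrite_dest_id": "-1", "id": "5"}, ...}
#     }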
def peak_mem_regist(input, writer):
jmem = load_mem_info(input)
writer.add_text(
"PEAK_MEMORY_SIZE",
[sizeof_fmt(jmem["peak_memory"]) + "(" + str(jmem["peak_memory"]) + " B)"],
)
writer.add_text(
"WEIGHT_MEMORY_SIZE",
[sizeof_fmt(jmem["weight_memory"]) + "(" + str(jmem["weight_memory"]) + " B)"],
)
all_oprs = jmem["opr"]
all_chunks = jmem["chunk"]
max_size = 0
max_size_oprs = []
# get oprs that reach the max memory
for oid, i in all_oprs.items():
if i["size"] == max_size:
max_size_oprs.append(int(i["id"]))
elif i["size"] > max_size:
max_size = i["size"]
max_size_oprs.clear()
max_size_oprs.append(int(i["id"]))
# get component of chunks
max_size_oprs.sort()
opr2chunks = []
num = len(max_size_oprs)
for i in range(num):
opr2chunks.append([])
for oid, i in all_chunks.items():
if i["type"] == "static_mem":
life_begin = int(i["time_begin"])
life_end = int(i["time_end"])
if i["overwrite_dest_id"] != "-1":
life_begin = life_begin + 1
if max_size_oprs[0] >= life_end or max_size_oprs[-1] < life_begin:
continue
for j in range(num):
if max_size_oprs[j] >= life_end:
break
elif max_size_oprs[j] >= life_begin:
opr2chunks[j].append(i["id"])
peak_num = 0
for i in range(num):
suffix_1 = "PEAK" + str(peak_num)
        # i - 1 >= 0 so that a duplicate of the very first peak is also skipped.
        if i - 1 >= 0 and opr2chunks[i - 1] == opr2chunks[i]:
continue
max_num = 0
opr2chunks[i] = sorted(
opr2chunks[i],
key=lambda chunk_id: all_chunks[chunk_id]["size"],
reverse=True,
)
writer.add_text(
suffix_1 + "/" + "<SUMMARY_INFO>",
["reached_max_opr_name: " + all_oprs[str(max_size_oprs[i])]["name"]],
0,
)
writer.add_text(
suffix_1 + "/" + "<SUMMARY_INFO>",
["max_used_size: " + sizeof_fmt(max_size)],
1,
)
for j in opr2chunks[i]:
suffix_2 = "MAX" + str(max_num)
j_size = sizeof_fmt(all_chunks[j]["size"])
j_percent = round(all_chunks[j]["size"] / max_size * 100, 3)
writer.add_text(
suffix_1 + "/" + suffix_2 + "_OPR",
["percent: " + str(j_percent) + "%"],
0,
)
writer.add_text(
suffix_1 + "/" + suffix_2 + "_OPR", ["memory_size: " + j_size], 1,
)
writer.add_text(
suffix_1 + "/" + suffix_2 + "_OPR",
["owner_opr: " + all_chunks[j]["owner_opr"]],
2,
)
writer.add_node_raw_attributes(
all_chunks[j]["owner_opr"],
{
"memory_" + all_chunks[j]["id"]: j_size,
"memory_percent": str(j_percent) + "%",
"summary_memory_" + str(peak_num): sizeof_fmt(max_size),
},
)
writer.add_node_raw_name_suffix(
all_chunks[j]["owner_opr"], "_" + suffix_1 + "_" + suffix_2
)
max_num += 1
peak_num += 1
writer.add_graph_by_node_raw_list()
def convert(args):
file_process_order = {
"graph.json": comp_graph_plotter,
"StaticMemoryInfo.json": peak_mem_regist,
}
g = os.walk(args.input)
for path, dir_list, file_list in g:
out_path = path.replace(args.input, args.output)
writer = SummaryWriterExtend(out_path)
for key, value in file_process_order.items():
if key in file_list:
value(os.path.join(path, key), writer)
def main():
"""`graph_info_analyze.py` is uesed to convert json dumped by `VisableDataSet`
class to logs which can be read by python `tensorboard`.
Now `get_static_memory_alloc_info()` support this feature,it will dump a dir
which can be convert by `graph_info_analyze.py`.
Examples:
.. code-block:: shell
graph_info_analyze.py -i <input_dir_name> -o <output_dir_name>
tensorboard --logdir <output_dir_name>
"""
parser = argparse.ArgumentParser(
"convert json dumped by c to logs which can be read by python tensorboard",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
    parser.add_argument(
        "-i", "--input", required=True, help="input directory name (C++ tensorboard info)"
    )
    parser.add_argument(
        "-o",
        "--output",
        required=True,
        help="output directory name (python tensorboard info)",
    )
args = parser.parse_args()
convert(args)
if __name__ == "__main__":
main()
|
[
"megengine.utils.module_stats.sizeof_fmt",
"megengine.utils.tensorboard.SummaryWriterExtend"
] |
[((6623, 6642), 'os.walk', 'os.walk', (['args.input'], {}), '(args.input)\n', (6630, 6642), False, 'import os\n'), ((7413, 7577), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""convert json dumped by c to logs which can be read by python tensorboard"""'], {'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), "(\n 'convert json dumped by c to logs which can be read by python tensorboard',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n", (7436, 7577), False, 'import argparse\n'), ((616, 630), 'json.load', 'json.load', (['fin'], {}), '(fin)\n', (625, 630), False, 'import json\n'), ((1952, 1966), 'json.load', 'json.load', (['fin'], {}), '(fin)\n', (1961, 1966), False, 'import json\n'), ((6757, 6786), 'megengine.utils.tensorboard.SummaryWriterExtend', 'SummaryWriterExtend', (['out_path'], {}), '(out_path)\n', (6776, 6786), False, 'from megengine.utils.tensorboard import SummaryWriterExtend\n'), ((5321, 5354), 'megengine.utils.module_stats.sizeof_fmt', 'sizeof_fmt', (["all_chunks[j]['size']"], {}), "(all_chunks[j]['size'])\n", (5331, 5354), False, 'from megengine.utils.module_stats import sizeof_fmt\n'), ((5175, 5195), 'megengine.utils.module_stats.sizeof_fmt', 'sizeof_fmt', (['max_size'], {}), '(max_size)\n', (5185, 5195), False, 'from megengine.utils.module_stats import sizeof_fmt\n'), ((6191, 6211), 'megengine.utils.module_stats.sizeof_fmt', 'sizeof_fmt', (['max_size'], {}), '(max_size)\n', (6201, 6211), False, 'from megengine.utils.module_stats import sizeof_fmt\n'), ((6896, 6919), 'os.path.join', 'os.path.join', (['path', 'key'], {}), '(path, key)\n', (6908, 6919), False, 'import os\n'), ((3178, 3209), 'megengine.utils.module_stats.sizeof_fmt', 'sizeof_fmt', (["jmem['peak_memory']"], {}), "(jmem['peak_memory'])\n", (3188, 3209), False, 'from megengine.utils.module_stats import sizeof_fmt\n'), ((3319, 3352), 'megengine.utils.module_stats.sizeof_fmt', 'sizeof_fmt', (["jmem['weight_memory']"], {}), "(jmem['weight_memory'])\n", (3329, 3352), False, 'from megengine.utils.module_stats import sizeof_fmt\n')]
|
"""
Rewards are Txs with the claim-iscore method, but since this service only listens for
new Txs, this job backfills the value and iscore from the logs service.
"""
import json
from requests import RequestException, get
from sqlmodel import func, select
from icon_governance.config import settings
from icon_governance.log import logger
from icon_governance.models.rewards import Reward
from icon_governance.utils.rpc import convert_hex_int
def get_iscore_value(tx_hash):
"""Get rewards value and Tx from logs service."""
try:
response = get(f"{settings.LOGS_SERVICE_URL}/api/v1/logs?transaction_hash={tx_hash}")
except RequestException as e:
logger.info(f"Exception in iscore - \n{e} - \n{tx_hash}")
return None, None
if response.status_code == 200:
try:
data = json.loads(response.json()[0]["data"])
return convert_hex_int(data[0]) / 1e18, convert_hex_int(data[1]) / 1e18
except Exception as e:
logger.info(f"Exception in iscore - \n{e} - \n{tx_hash}")
return None, None
    else:
        logger.info(f"Could not find Tx hash from logs service {tx_hash}")
        return None, None
def get_rewards(session):
"""
Cron to get all the values and iscores for rewards txs. Works by getting all the
iscore distributions which are picked up by the transactions processor and insert
them into a DB. The values are then inserted with this cron job by querying for
rewards that have no value.
"""
    count = (
        session.execute(select([func.count(Reward.address)]).where(Reward.value == None))
        .scalars()
        .one()
    )
    logger.info(f"Found {count} rewards records without a value.")
    chunk_size = 10
    for i in range(0, int(count / chunk_size) + 1):
rewards = (
session.execute(select(Reward).where(Reward.value == None).limit(chunk_size))
.scalars()
.all()
)
for r in rewards:
# Get value from logs service
iscore, value = get_iscore_value(tx_hash=r.tx_hash)
if iscore is None:
continue
r.value = value
r.iscore = iscore
session.add(r)
try:
session.commit()
except:
session.rollback()
raise
|
[
"sqlmodel.func.count",
"sqlmodel.select"
] |
[((1647, 1703), 'icon_governance.log.logger.info', 'logger.info', (['f"""Found {count} number of rewards records."""'], {}), "(f'Found {count} number of rewards records.')\n", (1658, 1703), False, 'from icon_governance.log import logger\n'), ((558, 632), 'requests.get', 'get', (['f"""{settings.LOGS_SERVICE_URL}/api/v1/logs?transaction_hash={tx_hash}"""'], {}), "(f'{settings.LOGS_SERVICE_URL}/api/v1/logs?transaction_hash={tx_hash}')\n", (561, 632), False, 'from requests import RequestException, get\n'), ((1100, 1166), 'icon_governance.log.logger.info', 'logger.info', (['f"""Could not find Tx hash from logs service {tx_hash}"""'], {}), "(f'Could not find Tx hash from logs service {tx_hash}')\n", (1111, 1166), False, 'from icon_governance.log import logger\n'), ((675, 734), 'icon_governance.log.logger.info', 'logger.info', (['f"""Exception in iscore - \n{e} - \n{tx_hash}"""'], {}), '(f"""Exception in iscore - \n{e} - \n{tx_hash}""")\n', (686, 734), False, 'from icon_governance.log import logger\n'), ((994, 1053), 'icon_governance.log.logger.info', 'logger.info', (['f"""Exception in iscore - \n{e} - \n{tx_hash}"""'], {}), '(f"""Exception in iscore - \n{e} - \n{tx_hash}""")\n', (1005, 1053), False, 'from icon_governance.log import logger\n'), ((886, 910), 'icon_governance.utils.rpc.convert_hex_int', 'convert_hex_int', (['data[0]'], {}), '(data[0])\n', (901, 910), False, 'from icon_governance.utils.rpc import convert_hex_int\n'), ((919, 943), 'icon_governance.utils.rpc.convert_hex_int', 'convert_hex_int', (['data[1]'], {}), '(data[1])\n', (934, 943), False, 'from icon_governance.utils.rpc import convert_hex_int\n'), ((1544, 1570), 'sqlmodel.func.count', 'func.count', (['Reward.address'], {}), '(Reward.address)\n', (1554, 1570), False, 'from sqlmodel import func, select\n'), ((1828, 1842), 'sqlmodel.select', 'select', (['Reward'], {}), '(Reward)\n', (1834, 1842), False, 'from sqlmodel import func, select\n')]
|
from datetime import datetime
from typing import Any, Dict, Generic, List, Optional, Type, TypeVar, Union
from uuid import UUID
from fastapi_pagination.ext.async_sqlmodel import paginate
from fastapi_pagination import Params, Page
from fastapi.encoders import jsonable_encoder
from pydantic import BaseModel
from sqlmodel import SQLModel, select, func
from sqlalchemy.orm import selectinload
from sqlmodel.ext.asyncio.session import AsyncSession
from sqlmodel.sql.expression import Select, SelectOfScalar
ModelType = TypeVar("ModelType", bound=SQLModel)
CreateSchemaType = TypeVar("CreateSchemaType", bound=BaseModel)
UpdateSchemaType = TypeVar("UpdateSchemaType", bound=BaseModel)
SchemaType = TypeVar("SchemaType", bound=BaseModel)
T = TypeVar("T", bound=SQLModel)
class CRUDBase(Generic[ModelType, CreateSchemaType, UpdateSchemaType]):
def __init__(self, model: Type[ModelType]):
"""
CRUD object with default methods to Create, Read, Update, Delete (CRUD).
**Parameters**
* `model`: A SQLAlchemy model class
* `schema`: A Pydantic model (schema) class
"""
self.model = model
async def get(
self, db_session: AsyncSession, *, id: Union[UUID, str]
) -> Optional[ModelType]:
response = await db_session.exec(select(self.model).where(self.model.id == id).options(selectinload('*')))
return response.first()
async def get_by_ids(self, db_session: AsyncSession, list_ids: List[Union[UUID, str]],) -> Optional[List[ModelType]]:
response = await db_session.exec(select(self.model).where(self.model.id.in_(list_ids)))
return response.all()
async def get_count(
self, db_session: AsyncSession
) -> Optional[ModelType]:
response = await db_session.exec(select(func.count()).select_from(select(self.model).subquery()))
return response.one()
async def get_multi(
self, db_session: AsyncSession, *, skip: int = 0, limit: int = 100
) -> List[ModelType]:
response = await db_session.exec(
select(self.model).offset(skip).limit(limit).order_by(self.model.id)
)
return response.all()
async def get_multi_paginated(
self, db_session: AsyncSession, *, params: Optional[Params] = Params(), query: Optional[Union[T, Select[T], SelectOfScalar[T]]] = None
) -> Page[ModelType]:
        if query is None:
query = self.model
return await paginate(db_session, query, params)
async def create(
self, db_session: AsyncSession, *, obj_in: Union[CreateSchemaType, ModelType], created_by_id: Optional[Union[UUID, str]] = None
) -> ModelType:
db_obj = self.model.from_orm(obj_in) # type: ignore
db_obj.created_at = datetime.utcnow()
db_obj.updated_at = datetime.utcnow()
if(created_by_id):
db_obj.created_by_id = created_by_id
db_session.add(db_obj)
await db_session.commit()
await db_session.refresh(db_obj)
return db_obj
async def update(
self,
db_session: AsyncSession,
*,
obj_current: ModelType,
obj_new: Union[UpdateSchemaType, Dict[str, Any], ModelType]
) -> ModelType:
obj_data = jsonable_encoder(obj_current)
if isinstance(obj_new, dict):
update_data = obj_new
else:
            update_data = obj_new.dict(exclude_unset=True)  # tell Pydantic to omit values that were not sent
for field in obj_data:
if field in update_data:
setattr(obj_current, field, update_data[field])
if field == "updated_at":
setattr(obj_current, field, datetime.utcnow())
db_session.add(obj_current)
await db_session.commit()
await db_session.refresh(obj_current)
return obj_current
async def remove(
self, db_session: AsyncSession, *, id: Union[UUID, str]
) -> ModelType:
response = await db_session.exec(select(self.model).where(self.model.id == id))
obj = response.one()
await db_session.delete(obj)
await db_session.commit()
return obj
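# Typical specialization (a sketch; the `Hero*` models are illustrative
# assumptions, not part of this module):
#
#     class CRUDHero(CRUDBase[Hero, HeroCreate, HeroUpdate]):
#         pass
#
#     crud_hero = CRUDHero(Hero)
#     hero = await crud_hero.get(db_session, id=hero_id)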
|
[
"sqlmodel.select",
"sqlmodel.func.count"
] |
[((518, 554), 'typing.TypeVar', 'TypeVar', (['"""ModelType"""'], {'bound': 'SQLModel'}), "('ModelType', bound=SQLModel)\n", (525, 554), False, 'from typing import Any, Dict, Generic, List, Optional, Type, TypeVar, Union\n'), ((574, 618), 'typing.TypeVar', 'TypeVar', (['"""CreateSchemaType"""'], {'bound': 'BaseModel'}), "('CreateSchemaType', bound=BaseModel)\n", (581, 618), False, 'from typing import Any, Dict, Generic, List, Optional, Type, TypeVar, Union\n'), ((638, 682), 'typing.TypeVar', 'TypeVar', (['"""UpdateSchemaType"""'], {'bound': 'BaseModel'}), "('UpdateSchemaType', bound=BaseModel)\n", (645, 682), False, 'from typing import Any, Dict, Generic, List, Optional, Type, TypeVar, Union\n'), ((696, 734), 'typing.TypeVar', 'TypeVar', (['"""SchemaType"""'], {'bound': 'BaseModel'}), "('SchemaType', bound=BaseModel)\n", (703, 734), False, 'from typing import Any, Dict, Generic, List, Optional, Type, TypeVar, Union\n'), ((739, 767), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {'bound': 'SQLModel'}), "('T', bound=SQLModel)\n", (746, 767), False, 'from typing import Any, Dict, Generic, List, Optional, Type, TypeVar, Union\n'), ((2277, 2285), 'fastapi_pagination.Params', 'Params', ([], {}), '()\n', (2283, 2285), False, 'from fastapi_pagination import Params, Page\n'), ((2759, 2776), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (2774, 2776), False, 'from datetime import datetime\n'), ((2805, 2822), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (2820, 2822), False, 'from datetime import datetime\n'), ((3257, 3286), 'fastapi.encoders.jsonable_encoder', 'jsonable_encoder', (['obj_current'], {}), '(obj_current)\n', (3273, 3286), False, 'from fastapi.encoders import jsonable_encoder\n'), ((2454, 2489), 'fastapi_pagination.ext.async_sqlmodel.paginate', 'paginate', (['db_session', 'query', 'params'], {}), '(db_session, query, params)\n', (2462, 2489), False, 'from fastapi_pagination.ext.async_sqlmodel import paginate\n'), ((1349, 1366), 'sqlalchemy.orm.selectinload', 'selectinload', (['"""*"""'], {}), "('*')\n", (1361, 1366), False, 'from sqlalchemy.orm import selectinload\n'), ((3713, 3730), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (3728, 3730), False, 'from datetime import datetime\n'), ((1565, 1583), 'sqlmodel.select', 'select', (['self.model'], {}), '(self.model)\n', (1571, 1583), False, 'from sqlmodel import SQLModel, select, func\n'), ((4024, 4042), 'sqlmodel.select', 'select', (['self.model'], {}), '(self.model)\n', (4030, 4042), False, 'from sqlmodel import SQLModel, select, func\n'), ((1793, 1805), 'sqlmodel.func.count', 'func.count', ([], {}), '()\n', (1803, 1805), False, 'from sqlmodel import SQLModel, select, func\n'), ((1819, 1837), 'sqlmodel.select', 'select', (['self.model'], {}), '(self.model)\n', (1825, 1837), False, 'from sqlmodel import SQLModel, select, func\n'), ((1295, 1313), 'sqlmodel.select', 'select', (['self.model'], {}), '(self.model)\n', (1301, 1313), False, 'from sqlmodel import SQLModel, select, func\n'), ((2062, 2080), 'sqlmodel.select', 'select', (['self.model'], {}), '(self.model)\n', (2068, 2080), False, 'from sqlmodel import SQLModel, select, func\n')]
|
# Copyright 2021 Modelyst LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
from sqlmodel import create_engine
def main():
engine = create_engine('postgresql://michaelstatt@localhost/sqlalchemy')
file_name = Path(__file__).parent / 'test.csv'
test_conn = engine.raw_connection()
try:
with test_conn.cursor() as curs:
with open(file_name) as f:
curs.copy_from(f, 'test', null="None", columns=['name', 'tags'], sep='|')
f.seek(0)
print(f.read())
test_conn.commit()
finally:
test_conn.close()
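# Expected layout of test.csv, inferred from the copy_from call above: two
# '|'-separated columns (name, tags), with the literal string "None" mapping
# to SQL NULL. Illustrative rows only:
#
#     alice|red
#     bob|None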
if __name__ == '__main__':
main()
|
[
"sqlmodel.create_engine"
] |
[((685, 748), 'sqlmodel.create_engine', 'create_engine', (['"""postgresql://michaelstatt@localhost/sqlalchemy"""'], {}), "('postgresql://michaelstatt@localhost/sqlalchemy')\n", (698, 748), False, 'from sqlmodel import create_engine\n'), ((765, 779), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (769, 779), False, 'from pathlib import Path\n')]
|
import numpy as nm
from sfepy.base.base import assert_, Struct
import sfepy.linalg as la
class ContactPlane(Struct):
def __init__(self, anchor, normal, bounds):
Struct.__init__(self, anchor=nm.array(anchor, dtype=nm.float64),
bounds=nm.asarray(bounds, dtype=nm.float64))
self.normal = nm.asarray(normal, dtype=nm.float64)
norm = nm.linalg.norm
self.normal /= norm(self.normal)
e3 = [0.0, 0.0, 1.0]
dd = nm.dot(e3, self.normal)
rot_angle = nm.arccos(dd)
if nm.abs(rot_angle) < 1e-14:
mtx = nm.eye(3, dtype=nm.float64)
bounds2d = self.bounds[:, :2]
else:
rot_axis = nm.cross([0.0, 0.0, 1.0], self.normal)
mtx = la.make_axis_rotation_matrix(rot_axis, rot_angle)
mm = la.insert_strided_axis(mtx, 0, self.bounds.shape[0])
rbounds = la.dot_sequences(mm, self.bounds)
bounds2d = rbounds[:, :2]
assert_(nm.allclose(nm.dot(mtx, self.normal), e3,
rtol=0.0, atol=1e-12))
self.adotn = nm.dot(self.anchor, self.normal)
self.rot_angle = rot_angle
self.mtx = mtx
self.bounds2d = bounds2d
def mask_points(self, points):
mm = la.insert_strided_axis(self.mtx, 0, points.shape[0])
points2d = la.dot_sequences(mm, points)[:, :2]
return la.flag_points_in_polygon2d(self.bounds2d, points2d)
def get_distance(self, points):
dist = la.dot_sequences(points, self.normal) - self.adotn
return dist
class ContactSphere(Struct):
def __init__(self, centre, radius):
self.centre = nm.asarray(centre)
self.radius = radius
def mask_points(self, points, eps):
dist2 = la.norm_l2_along_axis(points - self.centre, squared=True)
radius2 = self.radius**2
mask = dist2 <= ((1 + eps)**2) * radius2
return mask
def get_distance(self, points):
"""
Get the penetration distance and normals of points w.r.t. the sphere
surface.
Returns
-------
d : array
The penetration distance.
normals : array
The normals from the points to the sphere centre.
"""
vecs = self.centre - points
dist = la.norm_l2_along_axis(vecs)
# Prevent zero division.
ii = dist > 1e-8
        # Normalize only where dist > eps; elsewhere keep the raw vector.
        normals = vecs.copy()
        normals[ii] /= dist[ii][:, None]
return self.radius - dist, normals
def _get_derivatives(self, points):
vecs = self.centre - points
dist = la.norm_l2_along_axis(vecs)
# Distance derivative w.r.t. point coordinates.
dd = vecs / dist[:, None]
normals = dd
# Unit normal derivative w.r.t. point coordinates.
dim = points.shape[1]
ee = nm.eye(dim)[None, ...]
nnt = normals[..., None] * normals[..., None, :]
dn = - (ee - nnt) / dist[:, None, None]
return dd, dn
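# Derivation sketch for ContactSphere._get_derivatives: with v = c - x and
# d = ||v||, the penetration distance is r - d and its gradient w.r.t. the
# point is v / d, i.e. the unit normal n; differentiating n = v / d gives
# dn/dx = -(I - n n^T) / d, matching `dd` and `dn` above.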
def plot_polygon(ax, polygon):
from sfepy.postprocess.plot_dofs import _get_axes
dim = polygon.shape[1]
ax = _get_axes(ax, dim)
pp = nm.r_[polygon, polygon[:1]]
px, py = pp[:, 0], pp[:, 1]
if dim == 2:
ax.plot(px, py)
else:
pz = pp[:, 2]
ax.plot(px, py, pz)
return ax
def plot_points(ax, points, marker, **kwargs):
from sfepy.postprocess.plot_dofs import _get_axes
dim = points.shape[1]
ax = _get_axes(ax, dim)
px, py = points[:, 0], points[:, 1]
if dim == 2:
ax.plot(px, py, marker, **kwargs)
else:
pz = points[:, 2]
ax.plot(px, py, pz, marker, **kwargs)
return ax
if __name__ == '__main__':
import matplotlib.pyplot as plt
# Test and draw the plane.
anchor = [1, 1, 1]
normal = [2, -1, 1]
bounds = [[-2, 0, 0],
[2, 1, 0],
[4, 3, 1],
[1, 3, 1],
[2, 2, 1]]
cp = ContactPlane(anchor, normal, bounds)
pps = 2 * nm.random.rand(20, 3)
mask = cp.mask_points(pps)
dist = cp.get_distance(pps)
v1, v2 = la.get_perpendiculars(cp.normal)
ax = plot_polygon(None, cp.bounds)
ax = plot_polygon(ax, nm.r_[cp.anchor[None, :],
cp.anchor[None, :] + cp.normal[None, :]])
ax = plot_polygon(ax, nm.r_[cp.anchor[None, :],
cp.anchor[None, :] + v1])
ax = plot_polygon(ax, nm.r_[cp.anchor[None, :],
cp.anchor[None, :] + v2])
ax = plot_points(ax, cp.anchor[None, :], 'r*')
ax = plot_points(ax, pps[mask], 'bs', ms=10, mec='None')
ax = plot_points(ax, pps[~mask], 'go', ms=10, mec='None')
mask = dist >= 0.0
ax = plot_points(ax, pps[mask], 'r^', mec='None')
ax = plot_points(ax, pps[~mask], 'kv', mec='None')
# Test and draw the sphere.
pps = nm.random.rand(5000, 3)
centre = [0, 0.5, 0.5]
radius = 0.8
cs = ContactSphere(centre, radius)
mask = cs.mask_points(pps, 0.0)
dist = cs.get_distance(pps)
ax = plot_points(None, cs.centre[None, :], 'b*', ms=30)
ax = plot_points(ax, pps[mask], 'kv')
ax = plot_points(ax, pps[~mask], 'r.')
plt.show()
|
[
"sfepy.linalg.make_axis_rotation_matrix",
"sfepy.linalg.get_perpendiculars",
"sfepy.linalg.dot_sequences",
"sfepy.linalg.norm_l2_along_axis",
"sfepy.linalg.insert_strided_axis",
"sfepy.linalg.flag_points_in_polygon2d",
"sfepy.postprocess.plot_dofs._get_axes"
] |
[((3170, 3188), 'sfepy.postprocess.plot_dofs._get_axes', '_get_axes', (['ax', 'dim'], {}), '(ax, dim)\n', (3179, 3188), False, 'from sfepy.postprocess.plot_dofs import _get_axes\n'), ((3514, 3532), 'sfepy.postprocess.plot_dofs._get_axes', '_get_axes', (['ax', 'dim'], {}), '(ax, dim)\n', (3523, 3532), False, 'from sfepy.postprocess.plot_dofs import _get_axes\n'), ((4161, 4193), 'sfepy.linalg.get_perpendiculars', 'la.get_perpendiculars', (['cp.normal'], {}), '(cp.normal)\n', (4182, 4193), True, 'import sfepy.linalg as la\n'), ((4930, 4953), 'numpy.random.rand', 'nm.random.rand', (['(5000)', '(3)'], {}), '(5000, 3)\n', (4944, 4953), True, 'import numpy as nm\n'), ((5257, 5267), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5265, 5267), True, 'import matplotlib.pyplot as plt\n'), ((332, 368), 'numpy.asarray', 'nm.asarray', (['normal'], {'dtype': 'nm.float64'}), '(normal, dtype=nm.float64)\n', (342, 368), True, 'import numpy as nm\n'), ((484, 507), 'numpy.dot', 'nm.dot', (['e3', 'self.normal'], {}), '(e3, self.normal)\n', (490, 507), True, 'import numpy as nm\n'), ((528, 541), 'numpy.arccos', 'nm.arccos', (['dd'], {}), '(dd)\n', (537, 541), True, 'import numpy as nm\n'), ((1111, 1143), 'numpy.dot', 'nm.dot', (['self.anchor', 'self.normal'], {}), '(self.anchor, self.normal)\n', (1117, 1143), True, 'import numpy as nm\n'), ((1285, 1337), 'sfepy.linalg.insert_strided_axis', 'la.insert_strided_axis', (['self.mtx', '(0)', 'points.shape[0]'], {}), '(self.mtx, 0, points.shape[0])\n', (1307, 1337), True, 'import sfepy.linalg as la\n'), ((1409, 1461), 'sfepy.linalg.flag_points_in_polygon2d', 'la.flag_points_in_polygon2d', (['self.bounds2d', 'points2d'], {}), '(self.bounds2d, points2d)\n', (1436, 1461), True, 'import sfepy.linalg as la\n'), ((1679, 1697), 'numpy.asarray', 'nm.asarray', (['centre'], {}), '(centre)\n', (1689, 1697), True, 'import numpy as nm\n'), ((1784, 1841), 'sfepy.linalg.norm_l2_along_axis', 'la.norm_l2_along_axis', (['(points - self.centre)'], {'squared': '(True)'}), '(points - self.centre, squared=True)\n', (1805, 1841), True, 'import sfepy.linalg as la\n'), ((2325, 2352), 'sfepy.linalg.norm_l2_along_axis', 'la.norm_l2_along_axis', (['vecs'], {}), '(vecs)\n', (2346, 2352), True, 'import sfepy.linalg as la\n'), ((2429, 2490), 'numpy.where', 'nm.where', (['ii[:, None]', '(vecs[ii] / dist[ii][:, None])', 'vecs[ii]'], {}), '(ii[:, None], vecs[ii] / dist[ii][:, None], vecs[ii])\n', (2437, 2490), True, 'import numpy as nm\n'), ((2653, 2680), 'sfepy.linalg.norm_l2_along_axis', 'la.norm_l2_along_axis', (['vecs'], {}), '(vecs)\n', (2674, 2680), True, 'import sfepy.linalg as la\n'), ((4061, 4082), 'numpy.random.rand', 'nm.random.rand', (['(20)', '(3)'], {}), '(20, 3)\n', (4075, 4082), True, 'import numpy as nm\n'), ((554, 571), 'numpy.abs', 'nm.abs', (['rot_angle'], {}), '(rot_angle)\n', (560, 571), True, 'import numpy as nm\n'), ((599, 626), 'numpy.eye', 'nm.eye', (['(3)'], {'dtype': 'nm.float64'}), '(3, dtype=nm.float64)\n', (605, 626), True, 'import numpy as nm\n'), ((707, 745), 'numpy.cross', 'nm.cross', (['[0.0, 0.0, 1.0]', 'self.normal'], {}), '([0.0, 0.0, 1.0], self.normal)\n', (715, 745), True, 'import numpy as nm\n'), ((764, 813), 'sfepy.linalg.make_axis_rotation_matrix', 'la.make_axis_rotation_matrix', (['rot_axis', 'rot_angle'], {}), '(rot_axis, rot_angle)\n', (792, 813), True, 'import sfepy.linalg as la\n'), ((832, 884), 'sfepy.linalg.insert_strided_axis', 'la.insert_strided_axis', (['mtx', '(0)', 'self.bounds.shape[0]'], {}), '(mtx, 0, self.bounds.shape[0])\n', (854, 884), True, 'import sfepy.linalg as la\n'), ((907, 940), 'sfepy.linalg.dot_sequences', 'la.dot_sequences', (['mm', 'self.bounds'], {}), '(mm, self.bounds)\n', (923, 940), True, 'import sfepy.linalg as la\n'), ((1357, 1385), 'sfepy.linalg.dot_sequences', 'la.dot_sequences', (['mm', 'points'], {}), '(mm, points)\n', (1373, 1385), True, 'import sfepy.linalg as la\n'), ((1514, 1551), 'sfepy.linalg.dot_sequences', 'la.dot_sequences', (['points', 'self.normal'], {}), '(points, self.normal)\n', (1530, 1551), True, 'import sfepy.linalg as la\n'), ((2896, 2907), 'numpy.eye', 'nm.eye', (['dim'], {}), '(dim)\n', (2902, 2907), True, 'import numpy as nm\n'), ((205, 239), 'numpy.array', 'nm.array', (['anchor'], {'dtype': 'nm.float64'}), '(anchor, dtype=nm.float64)\n', (213, 239), True, 'import numpy as nm\n'), ((272, 308), 'numpy.asarray', 'nm.asarray', (['bounds'], {'dtype': 'nm.float64'}), '(bounds, dtype=nm.float64)\n', (282, 308), True, 'import numpy as nm\n'), ((1008, 1032), 'numpy.dot', 'nm.dot', (['mtx', 'self.normal'], {}), '(mtx, self.normal)\n', (1014, 1032), True, 'import numpy as nm\n')]
|
from fastapi import APIRouter, Depends, HTTPException, Path
from sqlmodel import Session, select
from sqlalchemy.exc import IntegrityError
from typing import List
import datetime as dt
from app.src.common.security import get_current_user
from app.src.common.utils import profiling_api
from app.src.models.app_user import AppUser
from app.src.models.tag import Tag, TagRead, TagCreate, TagUpdate
from app.src.db.engine import get_session
router = APIRouter()
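# Shared dependency for the routes below: fetch the tag (or raise 404/400)
# and bundle it with the caller's username and a request start timestamp so
# each handler can report timings through profiling_api.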
async def get_tag_or_404(
*,
session: Session = Depends(get_session),
tag_id: int = Path(..., ge=1),
current_user: AppUser = Depends(get_current_user),
):
start_time = dt.datetime.now()
try:
db_tag = session.get(Tag, tag_id)
if db_tag:
return {
"db_tag": db_tag,
"username": current_user.username,
"start_time": start_time,
}
else:
raise HTTPException(status_code=404, detail="Tag not found")
except KeyError:
raise HTTPException(status_code=400, detail="Tag not found")
async def get_tag_by_name_or_404(
*,
session: Session = Depends(get_session),
tag_name: str,
current_user: AppUser = Depends(get_current_user),
):
start_time = dt.datetime.now()
try:
        # .first() returns None when no row matches, so the 404 branch below fires
        db_tag = session.exec(select(Tag).where(Tag.name == tag_name)).first()
if db_tag:
return {
"db_tag": db_tag,
"username": current_user.username,
"start_time": start_time,
}
else:
raise HTTPException(status_code=404, detail="Tag not found by name")
except KeyError:
raise HTTPException(status_code=400, detail="Tag not found by name")
@router.get("/", response_model=List[TagRead])
async def read_tags(
*,
session: Session = Depends(get_session),
current_user: AppUser = Depends(get_current_user),
):
"""
Get all the existing tags
"""
start_time = dt.datetime.now()
tags = session.exec(select(Tag)).all()
profiling_api("Tags:get:all", start_time, current_user.username)
return tags
@router.get("/{tag_id}", response_model=TagRead)
async def read_tag(*, tag_id: int, db_tag: Tag = Depends(get_tag_or_404)):
"""
Get the tag by id
"""
profiling_api(
f"Tag:read:by_id:{tag_id}",
db_tag["start_time"],
db_tag["username"],
)
return db_tag["db_tag"]
@router.post("/", response_model=TagRead)
async def create_tags(
*,
session: Session = Depends(get_session),
tag: TagCreate,
current_user: AppUser = Depends(get_current_user),
):
"""
Create a tag
"""
start_time = dt.datetime.now()
try:
db_t = Tag.from_orm(tag)
session.add(db_t)
session.commit()
session.refresh(db_t)
except IntegrityError:
raise HTTPException(
status_code=404, detail="Impossible to create tag with same name"
)
profiling_api("Tag:insert:single", start_time, current_user.username)
return db_t
@router.patch("/{tag_id}", response_model=TagRead)
async def update_tag(
*,
tag_id: int,
session: Session = Depends(get_session),
tag: TagUpdate,
db_tag: Tag = Depends(get_tag_or_404),
):
"""
Modify a tag
"""
    # exclude_unset=True: include only the values
    # that were actually sent by the client
existing_tag = db_tag["db_tag"]
tag_data = tag.dict(exclude_unset=True)
for key, value in tag_data.items():
setattr(existing_tag, key, value)
session.add(existing_tag)
session.commit()
session.refresh(existing_tag)
profiling_api(
f"Tag:update:by_id:{tag_id}",
db_tag["start_time"],
db_tag["username"],
)
return existing_tag
@router.delete("/{tag_id}")
async def delete_tag(
*,
tag_id: int,
session: Session = Depends(get_session),
db_tag: Tag = Depends(get_tag_or_404),
):
"""
    Delete an existing tag by id; the id must be >= 1
"""
existing_tag = db_tag["db_tag"]
session.delete(existing_tag)
session.commit()
profiling_api(
f"Tag:delete:by_id:{tag_id}",
db_tag["start_time"],
db_tag["username"],
)
return {"ok": True}
|
[
"sqlmodel.select"
] |
[((449, 460), 'fastapi.APIRouter', 'APIRouter', ([], {}), '()\n', (458, 460), False, 'from fastapi import APIRouter, Depends, HTTPException, Path\n'), ((519, 539), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (526, 539), False, 'from fastapi import APIRouter, Depends, HTTPException, Path\n'), ((559, 574), 'fastapi.Path', 'Path', (['...'], {'ge': '(1)'}), '(..., ge=1)\n', (563, 574), False, 'from fastapi import APIRouter, Depends, HTTPException, Path\n'), ((604, 629), 'fastapi.Depends', 'Depends', (['get_current_user'], {}), '(get_current_user)\n', (611, 629), False, 'from fastapi import APIRouter, Depends, HTTPException, Path\n'), ((651, 668), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (666, 668), True, 'import datetime as dt\n'), ((1144, 1164), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (1151, 1164), False, 'from fastapi import APIRouter, Depends, HTTPException, Path\n'), ((1213, 1238), 'fastapi.Depends', 'Depends', (['get_current_user'], {}), '(get_current_user)\n', (1220, 1238), False, 'from fastapi import APIRouter, Depends, HTTPException, Path\n'), ((1260, 1277), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (1275, 1277), True, 'import datetime as dt\n'), ((1838, 1858), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (1845, 1858), False, 'from fastapi import APIRouter, Depends, HTTPException, Path\n'), ((1888, 1913), 'fastapi.Depends', 'Depends', (['get_current_user'], {}), '(get_current_user)\n', (1895, 1913), False, 'from fastapi import APIRouter, Depends, HTTPException, Path\n'), ((1981, 1998), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (1996, 1998), True, 'import datetime as dt\n'), ((2046, 2110), 'app.src.common.utils.profiling_api', 'profiling_api', (['"""Tags:get:all"""', 'start_time', 'current_user.username'], {}), "('Tags:get:all', start_time, current_user.username)\n", (2059, 2110), False, 'from app.src.common.utils import profiling_api\n'), ((2227, 2250), 'fastapi.Depends', 'Depends', (['get_tag_or_404'], {}), '(get_tag_or_404)\n', (2234, 2250), False, 'from fastapi import APIRouter, Depends, HTTPException, Path\n'), ((2295, 2383), 'app.src.common.utils.profiling_api', 'profiling_api', (['f"""Tag:read:by_id:{tag_id}"""', "db_tag['start_time']", "db_tag['username']"], {}), "(f'Tag:read:by_id:{tag_id}', db_tag['start_time'], db_tag[\n 'username'])\n", (2308, 2383), False, 'from app.src.common.utils import profiling_api\n'), ((2535, 2555), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (2542, 2555), False, 'from fastapi import APIRouter, Depends, HTTPException, Path\n'), ((2605, 2630), 'fastapi.Depends', 'Depends', (['get_current_user'], {}), '(get_current_user)\n', (2612, 2630), False, 'from fastapi import APIRouter, Depends, HTTPException, Path\n'), ((2685, 2702), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (2700, 2702), True, 'import datetime as dt\n'), ((2974, 3043), 'app.src.common.utils.profiling_api', 'profiling_api', (['"""Tag:insert:single"""', 'start_time', 'current_user.username'], {}), "('Tag:insert:single', start_time, current_user.username)\n", (2987, 3043), False, 'from app.src.common.utils import profiling_api\n'), ((3182, 3202), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (3189, 3202), False, 'from fastapi import APIRouter, Depends, HTTPException, Path\n'), ((3242, 3265), 'fastapi.Depends', 'Depends', (['get_tag_or_404'], {}), '(get_tag_or_404)\n', (3249, 
3265), False, 'from fastapi import APIRouter, Depends, HTTPException, Path\n'), ((3648, 3738), 'app.src.common.utils.profiling_api', 'profiling_api', (['f"""Tag:update:by_id:{tag_id}"""', "db_tag['start_time']", "db_tag['username']"], {}), "(f'Tag:update:by_id:{tag_id}', db_tag['start_time'], db_tag[\n 'username'])\n", (3661, 3738), False, 'from app.src.common.utils import profiling_api\n'), ((3888, 3908), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (3895, 3908), False, 'from fastapi import APIRouter, Depends, HTTPException, Path\n'), ((3928, 3951), 'fastapi.Depends', 'Depends', (['get_tag_or_404'], {}), '(get_tag_or_404)\n', (3935, 3951), False, 'from fastapi import APIRouter, Depends, HTTPException, Path\n'), ((4136, 4226), 'app.src.common.utils.profiling_api', 'profiling_api', (['f"""Tag:delete:by_id:{tag_id}"""', "db_tag['start_time']", "db_tag['username']"], {}), "(f'Tag:delete:by_id:{tag_id}', db_tag['start_time'], db_tag[\n 'username'])\n", (4149, 4226), False, 'from app.src.common.utils import profiling_api\n'), ((2727, 2744), 'app.src.models.tag.Tag.from_orm', 'Tag.from_orm', (['tag'], {}), '(tag)\n', (2739, 2744), False, 'from app.src.models.tag import Tag, TagRead, TagCreate, TagUpdate\n'), ((933, 987), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': '(404)', 'detail': '"""Tag not found"""'}), "(status_code=404, detail='Tag not found')\n", (946, 987), False, 'from fastapi import APIRouter, Depends, HTTPException, Path\n'), ((1023, 1077), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': '(400)', 'detail': '"""Tag not found"""'}), "(status_code=400, detail='Tag not found')\n", (1036, 1077), False, 'from fastapi import APIRouter, Depends, HTTPException, Path\n'), ((1577, 1639), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': '(404)', 'detail': '"""Tag not found by name"""'}), "(status_code=404, detail='Tag not found by name')\n", (1590, 1639), False, 'from fastapi import APIRouter, Depends, HTTPException, Path\n'), ((1675, 1737), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': '(400)', 'detail': '"""Tag not found by name"""'}), "(status_code=400, detail='Tag not found by name')\n", (1688, 1737), False, 'from fastapi import APIRouter, Depends, HTTPException, Path\n'), ((2867, 2952), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': '(404)', 'detail': '"""Impossible to create tag with same name"""'}), "(status_code=404, detail='Impossible to create tag with same name'\n )\n", (2880, 2952), False, 'from fastapi import APIRouter, Depends, HTTPException, Path\n'), ((2023, 2034), 'sqlmodel.select', 'select', (['Tag'], {}), '(Tag)\n', (2029, 2034), False, 'from sqlmodel import Session, select\n'), ((1317, 1328), 'sqlmodel.select', 'select', (['Tag'], {}), '(Tag)\n', (1323, 1328), False, 'from sqlmodel import Session, select\n')]
|
import sys
sys.path.append('.')
import cv2
import megengine as mge
import megengine.functional as F
import numpy as np
from model.RIFE import Model
model = Model()
model.load_model('train_log')
model.eval()
name = ['Beanbags', 'Dimetrodon', 'DogDance', 'Grove2', 'Grove3', 'Hydrangea', 'MiniCooper', 'RubberWhale', 'Urban2', 'Urban3', 'Venus', 'Walking']
IE_list = []
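# For each sequence (the Middlebury 'other' evaluation set): pad the frame
# pair into a 480x640 canvas, synthesize the middle frame, and record the
# mean absolute interpolation error (IE) against the ground-truth frame.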
for i in name:
i0 = cv2.imread('other-data/{}/frame10.png'.format(i)).transpose(2, 0, 1) / 255.
i1 = cv2.imread('other-data/{}/frame11.png'.format(i)).transpose(2, 0, 1) / 255.
gt = cv2.imread('other-gt-interp/{}/frame10i11.png'.format(i))
h, w = i0.shape[1], i0.shape[2]
imgs = F.zeros([1, 6, 480, 640])
ph = (480 - h) // 2
pw = (640 - w) // 2
imgs[:, :3, :h, :w] = F.expand_dims(mge.Tensor(i0), 0).astype("float32")
imgs[:, 3:, :h, :w] = F.expand_dims(mge.Tensor(i1), 0).astype("float32")
I0 = imgs[:, :3]
I2 = imgs[:, 3:]
pred = model.inference(I0, I2)
out = pred[0].detach().numpy().transpose(1, 2, 0)
out = np.round(out[:h, :w] * 255)
IE_list.append(np.abs((out - gt * 1.0)).mean())
print(np.mean(IE_list))
|
[
"megengine.functional.zeros",
"megengine.Tensor"
] |
[((11, 31), 'sys.path.append', 'sys.path.append', (['"""."""'], {}), "('.')\n", (26, 31), False, 'import sys\n'), ((157, 164), 'model.RIFE.Model', 'Model', ([], {}), '()\n', (162, 164), False, 'from model.RIFE import Model\n'), ((670, 695), 'megengine.functional.zeros', 'F.zeros', (['[1, 6, 480, 640]'], {}), '([1, 6, 480, 640])\n', (677, 695), True, 'import megengine.functional as F\n'), ((1039, 1066), 'numpy.round', 'np.round', (['(out[:h, :w] * 255)'], {}), '(out[:h, :w] * 255)\n', (1047, 1066), True, 'import numpy as np\n'), ((1129, 1145), 'numpy.mean', 'np.mean', (['IE_list'], {}), '(IE_list)\n', (1136, 1145), True, 'import numpy as np\n'), ((784, 798), 'megengine.Tensor', 'mge.Tensor', (['i0'], {}), '(i0)\n', (794, 798), True, 'import megengine as mge\n'), ((861, 875), 'megengine.Tensor', 'mge.Tensor', (['i1'], {}), '(i1)\n', (871, 875), True, 'import megengine as mge\n'), ((1086, 1108), 'numpy.abs', 'np.abs', (['(out - gt * 1.0)'], {}), '(out - gt * 1.0)\n', (1092, 1108), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
import megengine as mge
import megengine.module as nn
import megengine.functional as F
def conv3x3(in_chn, out_chn, bias=True):
layer = nn.Conv2d(in_chn, out_chn, kernel_size=3, stride=1, padding=1, bias=bias)
return layer
def conv_down(in_chn, out_chn, bias=False):
layer = nn.Conv2d(in_chn, out_chn, kernel_size=4, stride=2, padding=1, bias=bias)
return layer
class UNetD(nn.Module):
def __init__(self, in_chn, wf=32, depth=5, relu_slope=0.2, subspace_dim=16):
super(UNetD, self).__init__()
self.depth = depth
self.down_path = []
prev_channels = self.get_input_chn(in_chn)
for i in range(depth):
downsample = True if (i+1) < depth else False
self.down_path.append(UNetConvBlock(prev_channels, (2**i)*wf, downsample, relu_slope))
prev_channels = (2**i) * wf
# self.ema = EMAU(prev_channels, prev_channels//8)
self.up_path = []
subnet_repeat_num = 1
for i in reversed(range(depth - 1)):
self.up_path.append(UNetUpBlock(prev_channels, (2**i)*wf, relu_slope, subnet_repeat_num, subspace_dim))
prev_channels = (2**i)*wf
subnet_repeat_num += 1
self.last = conv3x3(prev_channels, in_chn, bias=True)
#self._initialize()
def forward(self, x1):
blocks = []
for i, down in enumerate(self.down_path):
# print(x1.shape)
if (i+1) < self.depth:
x1, x1_up = down(x1)
blocks.append(x1_up)
else:
x1 = down(x1)
# print(x1.shape)
# x1 = self.ema(x1)
for i, up in enumerate(self.up_path):
# print(x1.shape, blocks[-i-1].shape)
x1 = up(x1, blocks[-i-1])
pred = self.last(x1)
return pred
def get_input_chn(self, in_chn):
return in_chn
def _initialize(self):
gain = nn.init.calculate_gain('leaky_relu', 0.20)
for m in self.modules():
if isinstance(m, nn.Conv2d):
print("weight")
nn.init.xavier_uniform_(m.weight)
if m.bias is not None:
print("bias")
nn.init.zeros_(m.bias)
class UNetConvBlock(nn.Module):
def __init__(self, in_size, out_size, downsample, relu_slope):
super(UNetConvBlock, self).__init__()
self.block = nn.Sequential(
nn.Conv2d(in_size, out_size, kernel_size=3, padding=1, bias=True),
nn.LeakyReLU(relu_slope),
nn.Conv2d(out_size, out_size, kernel_size=3, padding=1, bias=True),
nn.LeakyReLU(relu_slope))
self.downsample = downsample
if downsample:
self.downsample = conv_down(out_size, out_size, bias=False)
self.shortcut = nn.Conv2d(in_size, out_size, kernel_size=1, bias=True)
def forward(self, x):
out = self.block(x)
sc = self.shortcut(x)
out = out + sc
if self.downsample:
out_down = self.downsample(out)
return out_down, out
else:
return out
class UNetUpBlock(nn.Module):
def __init__(self, in_size, out_size, relu_slope, subnet_repeat_num, subspace_dim=16):
super(UNetUpBlock, self).__init__()
self.up = nn.ConvTranspose2d(in_size, out_size, kernel_size=2, stride=2, bias=True)
self.conv_block = UNetConvBlock(in_size, out_size, False, relu_slope)
self.num_subspace = subspace_dim
print(self.num_subspace, subnet_repeat_num)
self.subnet = Subspace(in_size, self.num_subspace)
self.skip_m = skip_blocks(out_size, out_size, subnet_repeat_num)
def forward(self, x, bridge):
up = self.up(x)
bridge = self.skip_m(bridge)
out = F.concat([up, bridge], 1)
if self.subnet:
b_, c_, h_, w_ = bridge.shape
sub = self.subnet(out)
V_t = sub.reshape(b_, self.num_subspace, h_*w_)
V_t = V_t / (1e-6 + F.abs(V_t).sum(axis=2, keepdims=True))
V = V_t.transpose(0, 2, 1)
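            # Least-squares projection onto the learned subspace:
            # project_mat = (V^T V)^{-1} V^T, so the skip features become
            # V (V^T V)^{-1} V^T @ bridge -- their component inside the subspace.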
mat = F.matmul(V_t, V)
mat_inv = F.matinv(mat)
project_mat = F.matmul(mat_inv, V_t)
bridge_ = bridge.reshape(b_, c_, h_*w_)
project_feature = F.matmul(project_mat, bridge_.transpose(0, 2, 1))
bridge = F.matmul(V, project_feature).transpose(0, 2, 1).reshape(b_, c_, h_, w_)
out = F.concat([up, bridge], 1)
out = self.conv_block(out)
return out
class Subspace(nn.Module):
def __init__(self, in_size, out_size):
super(Subspace, self).__init__()
self.blocks = []
self.blocks.append(UNetConvBlock(in_size, out_size, False, 0.2))
self.shortcut = nn.Conv2d(in_size, out_size, kernel_size=1, bias=True)
def forward(self, x):
sc = self.shortcut(x)
for i in range(len(self.blocks)):
x = self.blocks[i](x)
return x + sc
class skip_blocks(nn.Module):
def __init__(self, in_size, out_size, repeat_num=1):
super(skip_blocks, self).__init__()
self.blocks = []
self.re_num = repeat_num
mid_c = 128
self.blocks.append(UNetConvBlock(in_size, mid_c, False, 0.2))
for i in range(self.re_num - 2):
self.blocks.append(UNetConvBlock(mid_c, mid_c, False, 0.2))
self.blocks.append(UNetConvBlock(mid_c, out_size, False, 0.2))
self.shortcut = nn.Conv2d(in_size, out_size, kernel_size=1, bias=True)
def forward(self, x):
sc = self.shortcut(x)
for m in self.blocks:
x = m(x)
return x + sc
if __name__ == "__main__":
import numpy as np
a = UNetD(3)
#print(a)
im = mge.tensor(np.random.randn(1, 3, 128, 128).astype(np.float32))
print(a(im))
|
[
"megengine.module.init.calculate_gain",
"megengine.functional.matmul",
"megengine.module.init.zeros_",
"megengine.functional.concat",
"megengine.functional.matinv",
"megengine.module.ConvTranspose2d",
"megengine.module.init.xavier_uniform_",
"megengine.module.Conv2d",
"megengine.functional.abs",
"megengine.module.LeakyReLU"
] |
[((164, 237), 'megengine.module.Conv2d', 'nn.Conv2d', (['in_chn', 'out_chn'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': 'bias'}), '(in_chn, out_chn, kernel_size=3, stride=1, padding=1, bias=bias)\n', (173, 237), True, 'import megengine.module as nn\n'), ((313, 386), 'megengine.module.Conv2d', 'nn.Conv2d', (['in_chn', 'out_chn'], {'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)', 'bias': 'bias'}), '(in_chn, out_chn, kernel_size=4, stride=2, padding=1, bias=bias)\n', (322, 386), True, 'import megengine.module as nn\n'), ((1951, 1992), 'megengine.module.init.calculate_gain', 'nn.init.calculate_gain', (['"""leaky_relu"""', '(0.2)'], {}), "('leaky_relu', 0.2)\n", (1973, 1992), True, 'import megengine.module as nn\n'), ((2843, 2897), 'megengine.module.Conv2d', 'nn.Conv2d', (['in_size', 'out_size'], {'kernel_size': '(1)', 'bias': '(True)'}), '(in_size, out_size, kernel_size=1, bias=True)\n', (2852, 2897), True, 'import megengine.module as nn\n'), ((3334, 3407), 'megengine.module.ConvTranspose2d', 'nn.ConvTranspose2d', (['in_size', 'out_size'], {'kernel_size': '(2)', 'stride': '(2)', 'bias': '(True)'}), '(in_size, out_size, kernel_size=2, stride=2, bias=True)\n', (3352, 3407), True, 'import megengine.module as nn\n'), ((3821, 3846), 'megengine.functional.concat', 'F.concat', (['[up, bridge]', '(1)'], {}), '([up, bridge], 1)\n', (3829, 3846), True, 'import megengine.functional as F\n'), ((4797, 4851), 'megengine.module.Conv2d', 'nn.Conv2d', (['in_size', 'out_size'], {'kernel_size': '(1)', 'bias': '(True)'}), '(in_size, out_size, kernel_size=1, bias=True)\n', (4806, 4851), True, 'import megengine.module as nn\n'), ((5497, 5551), 'megengine.module.Conv2d', 'nn.Conv2d', (['in_size', 'out_size'], {'kernel_size': '(1)', 'bias': '(True)'}), '(in_size, out_size, kernel_size=1, bias=True)\n', (5506, 5551), True, 'import megengine.module as nn\n'), ((2462, 2527), 'megengine.module.Conv2d', 'nn.Conv2d', (['in_size', 'out_size'], {'kernel_size': '(3)', 'padding': '(1)', 'bias': '(True)'}), '(in_size, out_size, kernel_size=3, padding=1, bias=True)\n', (2471, 2527), True, 'import megengine.module as nn\n'), ((2541, 2565), 'megengine.module.LeakyReLU', 'nn.LeakyReLU', (['relu_slope'], {}), '(relu_slope)\n', (2553, 2565), True, 'import megengine.module as nn\n'), ((2579, 2645), 'megengine.module.Conv2d', 'nn.Conv2d', (['out_size', 'out_size'], {'kernel_size': '(3)', 'padding': '(1)', 'bias': '(True)'}), '(out_size, out_size, kernel_size=3, padding=1, bias=True)\n', (2588, 2645), True, 'import megengine.module as nn\n'), ((2659, 2683), 'megengine.module.LeakyReLU', 'nn.LeakyReLU', (['relu_slope'], {}), '(relu_slope)\n', (2671, 2683), True, 'import megengine.module as nn\n'), ((4136, 4152), 'megengine.functional.matmul', 'F.matmul', (['V_t', 'V'], {}), '(V_t, V)\n', (4144, 4152), True, 'import megengine.functional as F\n'), ((4175, 4188), 'megengine.functional.matinv', 'F.matinv', (['mat'], {}), '(mat)\n', (4183, 4188), True, 'import megengine.functional as F\n'), ((4215, 4237), 'megengine.functional.matmul', 'F.matmul', (['mat_inv', 'V_t'], {}), '(mat_inv, V_t)\n', (4223, 4237), True, 'import megengine.functional as F\n'), ((4481, 4506), 'megengine.functional.concat', 'F.concat', (['[up, bridge]', '(1)'], {}), '([up, bridge], 1)\n', (4489, 4506), True, 'import megengine.functional as F\n'), ((2116, 2149), 'megengine.module.init.xavier_uniform_', 'nn.init.xavier_uniform_', (['m.weight'], {}), '(m.weight)\n', (2139, 2149), True, 'import megengine.module as nn\n'), ((5786, 5817), 
'numpy.random.randn', 'np.random.randn', (['(1)', '(3)', '(128)', '(128)'], {}), '(1, 3, 128, 128)\n', (5801, 5817), True, 'import numpy as np\n'), ((2243, 2265), 'megengine.module.init.zeros_', 'nn.init.zeros_', (['m.bias'], {}), '(m.bias)\n', (2257, 2265), True, 'import megengine.module as nn\n'), ((4040, 4050), 'megengine.functional.abs', 'F.abs', (['V_t'], {}), '(V_t)\n', (4045, 4050), True, 'import megengine.functional as F\n'), ((4391, 4419), 'megengine.functional.matmul', 'F.matmul', (['V', 'project_feature'], {}), '(V, project_feature)\n', (4399, 4419), True, 'import megengine.functional as F\n')]
|
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# pylint: disable=import-error,no-name-in-module,no-member
from typing import List, Union
import megengine as mge
from megengine.traced_module import TracedModule
from ..backend.ir_to_caffe.caffe_converter import BackEnd, CaffeConverter
from ..converter_ir.ir_quantizer import IRQuantizer
from ..converter_ir.ir_transform import IRTransform, TransformerRule
from ..frontend.tm_to_ir import TM_FrontEnd
from ..frontend.tm_to_ir.tm_utils import _update_inputs_qparams
def tracedmodule_to_caffe(
traced_module,
prototxt="out.prototxt",
caffemodel="out.caffemodel",
outspec=None,
use_empty_blobs=False,
input_data_type: str = None,
input_scales: Union[float, List[float]] = None,
input_zero_points: Union[int, List[int]] = None,
require_quantize=False,
param_fake_quant=False,
split_conv_relu=False,
quantize_file_path="quant_params.json",
convert_backend: BackEnd = BackEnd.CAFFE,
):
"""
    Convert a TracedModule model to Caffe and save the Caffe model
    to `prototxt` and `caffemodel`.
    :param traced_module: a TracedModule object, or the file path of a
        TracedModule model.
    :type traced_module: Union[str, TracedModule]
    :param prototxt: the filename used for the saved model definition.
    :type prototxt: str
    :param caffemodel: the filename used for the saved model weights.
    :type caffemodel: str
    :param outspec: specify the end points of the model; expects the full
        names of nodes.
    :type outspec: list
"""
if isinstance(traced_module, str):
traced_module = mge.load(traced_module)
assert isinstance(
traced_module, TracedModule
), "Input should be a traced module or a path of traced module."
_update_inputs_qparams(
traced_module, input_data_type, input_scales, input_zero_points
)
irgraph = TM_FrontEnd(traced_module, outspec=outspec).resolve()
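    # IR rewrite passes: strip ops Caffe cannot express (dropout,
    # reshape-related glue, unrelated IR ops) and expand fused conv+relu so
    # each remaining IR op maps onto a Caffe layer.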
transformer_options = [
TransformerRule.REMOVE_DROPOUT,
TransformerRule.REMOVE_RESHAPE_REALTED_OP,
TransformerRule.REMOVE_UNRELATED_IROP,
TransformerRule.ADD_FAKE_HSIGMOID_OUT,
TransformerRule.EXPAND_CONVRELU,
]
if split_conv_relu:
transformer_options += [TransformerRule.REMOVE_RELU]
transformer = IRTransform(transformer_options)
transformed_irgraph = transformer.transform(irgraph)
quantizer = IRQuantizer(
require_quantize=require_quantize, param_fake_quant=param_fake_quant
)
if require_quantize:
quantizer.save_quantize_params(transformed_irgraph)
converter = CaffeConverter(
transformed_irgraph, quantizer, use_empty_blobs, convert_backend
)
converter.convert()
if require_quantize:
quantizer.dump_quant_param(path=quantize_file_path)
assert isinstance(prototxt, str) and isinstance(
caffemodel, str
), "'prototxt' and 'caffemodel' must be string"
converter.dump(prototxt, caffemodel)
|
[
"megengine.load"
] |
[((1858, 1881), 'megengine.load', 'mge.load', (['traced_module'], {}), '(traced_module)\n', (1866, 1881), True, 'import megengine as mge\n')]
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
import megengine.functional as F
import megengine.module as M
from .darknet import Darknet
from .network_blocks import BaseConv, UpSample
class YOLOFPN(M.Module):
"""
YOLOFPN module. Darknet 53 is the default backbone of this model.
"""
def __init__(
self, depth=53, in_features=["dark3", "dark4", "dark5"],
):
super().__init__()
self.backbone = Darknet(depth)
self.in_features = in_features
# out 1
self.out1_cbl = self._make_cbl(512, 256, 1)
self.out1 = self._make_embedding([256, 512], 512 + 256)
# out 2
self.out2_cbl = self._make_cbl(256, 128, 1)
self.out2 = self._make_embedding([128, 256], 256 + 128)
# upsample
self.upsample = UpSample(scale_factor=2, mode="bilinear")
def _make_cbl(self, _in, _out, ks):
return BaseConv(_in, _out, ks, stride=1, act="lrelu")
def _make_embedding(self, filters_list, in_filters):
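        # Five-conv embedding block applied after each concat: alternating
        # 1x1 (channel reduction) and 3x3 (spatial mixing) convolutions.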
m = M.Sequential(
*[
self._make_cbl(in_filters, filters_list[0], 1),
self._make_cbl(filters_list[0], filters_list[1], 3),
self._make_cbl(filters_list[1], filters_list[0], 1),
self._make_cbl(filters_list[0], filters_list[1], 3),
self._make_cbl(filters_list[1], filters_list[0], 1),
]
)
return m
def forward(self, inputs):
"""
Args:
inputs (Tensor): input image.
Returns:
            Tuple[Tensor]: FPN output features.
"""
# backbone
out_features = self.backbone(inputs)
x2, x1, x0 = [out_features[f] for f in self.in_features]
# yolo branch 1
x1_in = self.out1_cbl(x0)
x1_in = self.upsample(x1_in)
x1_in = F.concat([x1_in, x1], 1)
out_dark4 = self.out1(x1_in)
# yolo branch 2
x2_in = self.out2_cbl(out_dark4)
x2_in = self.upsample(x2_in)
x2_in = F.concat([x2_in, x2], 1)
out_dark3 = self.out2(x2_in)
outputs = (out_dark3, out_dark4, x0)
return outputs
|
[
"megengine.functional.concat"
] |
[((1916, 1940), 'megengine.functional.concat', 'F.concat', (['[x1_in, x1]', '(1)'], {}), '([x1_in, x1], 1)\n', (1924, 1940), True, 'import megengine.functional as F\n'), ((2098, 2122), 'megengine.functional.concat', 'F.concat', (['[x2_in, x2]', '(1)'], {}), '([x2_in, x2], 1)\n', (2106, 2122), True, 'import megengine.functional as F\n')]
|
r"""
Piezo-elasticity problem - homogenization of a piezoelectric linear elastic
matrix with embedded metallic electrodes, see [1] for details.
[1] E.Rohan, V.Lukes: Homogenization of the fluid-saturated piezoelectric
porous media. International Journal of Solids and Structures 147, 2018,
pages 110-125. https://doi.org/10.1016/j.ijsolstr.2018.05.017
"""
import numpy as nm
from sfepy import data_dir
from sfepy.base.base import Struct
from sfepy.homogenization.micmac import get_homog_coefs_linear
import os.path as osp
from sfepy.homogenization.recovery import recover_micro_hook_eps
from sfepy.discrete.projections import make_l2_projection_data
def linear_projection(pb, data_qp):
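    # L2-project each component of the quadrature-point data onto the scalar
    # field 'svar' and stack the resulting nodal vectors column-wise.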
svar = pb.create_variables(['svar'])['svar']
aux = []
for ii in range(data_qp.shape[2]):
make_l2_projection_data(svar, data_qp[..., ii, :].copy())
aux.append(svar())
return nm.ascontiguousarray(nm.array(aux).T)
def post_process(out, pb, state, extend=False):
# evaluate macroscopic strain
strain = pb.evaluate('ev_cauchy_strain.i2.Omega(u)', mode='el_avg')
out['e'] = Struct(name='output_data', mode='cell', dofs=None,
var_name='u', data=strain)
# micro recovery
rreg = pb.domain.regions['Recovery']
dim = rreg.dim
state_dict = state.get_parts()
displ = state_dict['u']
strain_qp = pb.evaluate('ev_cauchy_strain.i2.Omega(u)', mode='qp')
nodal_data = {
'u': displ.reshape((displ.shape[0] // dim, dim)), # displacement
'strain': linear_projection(pb, strain_qp), # strain
}
const_data = {
'phi': pb.conf.phi, # el. potentials
}
def_args = {
'eps0': pb.conf.eps0,
'filename_mesh': pb.conf.filename_mesh_micro,
}
pvar = pb.create_variables(['svar'])
recover_micro_hook_eps(pb.conf.filename_micro, rreg,
pvar['svar'], nodal_data, const_data, pb.conf.eps0,
define_args=def_args)
return out
def get_homog_fun(fname):
return lambda ts, coors, mode=None, problem=None, **kwargs:\
get_homog(coors, mode, problem, fname, **kwargs)
def get_homog(coors, mode, pb, micro_filename, **kwargs):
if not (mode == 'qp'):
return
nqp = coors.shape[0]
coefs_filename = osp.join(pb.conf.options.get('output_dir', '.'),
'coefs_piezo.h5')
def_args = {
'eps0': pb.conf.eps0,
'filename_mesh': pb.conf.filename_mesh_micro,
}
coefs = get_homog_coefs_linear(0, 0, None,
micro_filename=micro_filename,
coefs_filename=coefs_filename,
define_args=def_args)
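    # Effective prestress: combine the homogenized electrode coefficients
    # V0/V1 with the prescribed potentials phi, then tile A and Vf once per
    # quadrature point.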
Vf = coefs['V0'] * pb.conf.phi[0] + coefs['V1'] * pb.conf.phi[1]
out = {
'A': nm.tile(coefs['A'], (nqp, 1, 1)),
'Vf': nm.tile(Vf[:, nm.newaxis], (nqp, 1, 1)),
}
return out
def define():
eps0 = 1. / 30 # real size of the reference cell
phi = nm.array([1, -1]) * 1e4 # prescribed el. potential
filename_mesh = data_dir + '/meshes/3d/cube_medium_hexa.mesh'
# define the micro problem - homogenization procedure
filename_micro = data_dir +\
'/examples/multi_physics/piezo_elasticity_micro.py'
filename_mesh_micro = data_dir + '/meshes/3d/piezo_mesh_micro.vtk'
fields = {
'displacement': ('real', 'vector', 'Omega', 1),
'sfield': ('real', 'scalar', 'Omega', 1),
}
variables = {
'u': ('unknown field', 'displacement'),
'v': ('test field', 'displacement', 'u'),
'svar': ('parameter field', 'sfield', 'set-to-none'),
}
# define material - homogenization
functions = {
'get_homog': (get_homog_fun(filename_micro),),
}
materials = {
'hom': 'get_homog',
}
integrals = {
'i2': 2,
}
regions = {
'Omega': 'all',
'Left': ('vertices in (x < -0.4999)', 'facet'),
'Recovery': ('cell 266'),
}
ebcs = {
'fixed_u': ('Left', {'u.all': 0.0}),
}
equations = {
'balance_of_forces': """
dw_lin_elastic.i2.Omega(hom.A, v, u)
=
- dw_lin_prestress.i2.Omega(hom.Vf, v)""",
}
solvers = {
'ls': ('ls.scipy_direct', {}),
'newton': ('nls.newton',
{'i_max': 10,
'eps_a': 1e-3,
'eps_r': 1e-3,
'problem': 'nonlinear',
})
}
options = {
'output_dir': 'output',
'nls': 'newton',
'post_process_hook': 'post_process',
}
return locals()
|
[
"sfepy.base.base.Struct",
"sfepy.homogenization.micmac.get_homog_coefs_linear",
"sfepy.homogenization.recovery.recover_micro_hook_eps"
] |
[((1104, 1181), 'sfepy.base.base.Struct', 'Struct', ([], {'name': '"""output_data"""', 'mode': '"""cell"""', 'dofs': 'None', 'var_name': '"""u"""', 'data': 'strain'}), "(name='output_data', mode='cell', dofs=None, var_name='u', data=strain)\n", (1110, 1181), False, 'from sfepy.base.base import Struct\n'), ((1807, 1937), 'sfepy.homogenization.recovery.recover_micro_hook_eps', 'recover_micro_hook_eps', (['pb.conf.filename_micro', 'rreg', "pvar['svar']", 'nodal_data', 'const_data', 'pb.conf.eps0'], {'define_args': 'def_args'}), "(pb.conf.filename_micro, rreg, pvar['svar'],\n nodal_data, const_data, pb.conf.eps0, define_args=def_args)\n", (1829, 1937), False, 'from sfepy.homogenization.recovery import recover_micro_hook_eps\n'), ((2521, 2643), 'sfepy.homogenization.micmac.get_homog_coefs_linear', 'get_homog_coefs_linear', (['(0)', '(0)', 'None'], {'micro_filename': 'micro_filename', 'coefs_filename': 'coefs_filename', 'define_args': 'def_args'}), '(0, 0, None, micro_filename=micro_filename,\n coefs_filename=coefs_filename, define_args=def_args)\n', (2543, 2643), False, 'from sfepy.homogenization.micmac import get_homog_coefs_linear\n'), ((2841, 2873), 'numpy.tile', 'nm.tile', (["coefs['A']", '(nqp, 1, 1)'], {}), "(coefs['A'], (nqp, 1, 1))\n", (2848, 2873), True, 'import numpy as nm\n'), ((2889, 2928), 'numpy.tile', 'nm.tile', (['Vf[:, nm.newaxis]', '(nqp, 1, 1)'], {}), '(Vf[:, nm.newaxis], (nqp, 1, 1))\n', (2896, 2928), True, 'import numpy as nm\n'), ((3033, 3050), 'numpy.array', 'nm.array', (['[1, -1]'], {}), '([1, -1])\n', (3041, 3050), True, 'import numpy as nm\n'), ((916, 929), 'numpy.array', 'nm.array', (['aux'], {}), '(aux)\n', (924, 929), True, 'import numpy as nm\n')]
|
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import multiprocessing as mp
import platform
import queue
from time import sleep
import pytest
import megengine as mge
import megengine._internal as mgb
import megengine.distributed as dist
_LOCALHOST = "127.0.0.1"
def _assert_q_empty(q):
try:
res = q.get(timeout=1)
except Exception as e:
assert isinstance(e, queue.Empty)
else:
assert False, "queue is not empty"
def _assert_q_val(q, val):
ret = q.get()
assert ret == val
def _init_process_group_wrapper(world_size, rank, dev, backend, q):
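    # Rank 0 creates the group on an OS-assigned port (port=0) and publishes
    # the chosen master port through the queue; the other rank reads it and joins.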
if rank == 0:
dist.init_process_group(_LOCALHOST, 0, world_size, rank, dev, backend)
q.put(dist.get_master_port())
else:
port = q.get()
dist.init_process_group(_LOCALHOST, port, world_size, rank, dev, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.isolated_distributed
def test_create_mm_server():
def worker():
if not mge.is_cuda_available():
return
port = mgb.config.create_mm_server("0.0.0.0", 0)
assert port > 0
res = mgb.config.create_mm_server("0.0.0.0", port)
assert res == -1
p = mp.Process(target=worker)
p.start()
p.join(10)
assert p.exitcode == 0
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.isolated_distributed
def test_init_process_group():
world_size = 2
def worker(rank, backend, q):
if not mge.is_cuda_available():
return
_init_process_group_wrapper(world_size, rank, rank, backend, q)
assert dist.is_distributed() == True
assert dist.get_master_ip() == _LOCALHOST
assert dist.get_master_port() > 0
assert dist.get_world_size() == world_size
assert dist.get_rank() == rank
assert dist.get_backend() == backend
def check(backend):
Q = mp.Queue()
p0 = mp.Process(target=worker, args=(0, backend, Q))
p1 = mp.Process(target=worker, args=(1, backend, Q))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
check("nccl")
check("ucx")
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.isolated_distributed
def test_group_barrier():
world_size = 2
ip = "127.0.0.1"
backend = "nccl"
def worker(rank, q):
if not mge.is_cuda_available():
return
_init_process_group_wrapper(world_size, rank, rank, backend, q)
dist.group_barrier()
if rank == 0:
dist.group_barrier()
q.put(0) # to be observed in rank 1
else:
_assert_q_empty(q) # q.put(0) is not executed in rank 0
dist.group_barrier()
_assert_q_val(q, 0) # q.put(0) executed in rank 0
Q = mp.Queue()
p0 = mp.Process(target=worker, args=(0, Q))
p1 = mp.Process(target=worker, args=(1, Q))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.isolated_distributed
def test_synchronized():
world_size = 2
backend = "nccl"
@dist.synchronized
def func(rank, q):
q.put(rank)
def worker(rank, q):
if not mge.is_cuda_available():
return
_init_process_group_wrapper(world_size, rank, rank, backend, q)
dist.group_barrier()
if rank == 0:
func(0, q) # q.put(0)
q.put(2)
else:
_assert_q_val(q, 0) # func executed in rank 0
_assert_q_empty(q) # q.put(2) is not executed
func(1, q)
_assert_q_val(
q, 1
) # func in rank 1 executed earlier than q.put(2) in rank 0
_assert_q_val(q, 2) # q.put(2) executed in rank 0
Q = mp.Queue()
p0 = mp.Process(target=worker, args=(0, Q))
p1 = mp.Process(target=worker, args=(1, Q))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
|
[
"megengine.distributed.group_barrier",
"megengine.distributed.get_backend",
"megengine.distributed.get_master_ip",
"megengine.distributed.get_master_port",
"megengine.distributed.get_rank",
"megengine.distributed.init_process_group",
"megengine.distributed.is_distributed",
"megengine.distributed.get_world_size",
"megengine.is_cuda_available",
"megengine._internal.config.create_mm_server"
] |
[((1564, 1589), 'multiprocessing.Process', 'mp.Process', ([], {'target': 'worker'}), '(target=worker)\n', (1574, 1589), True, 'import multiprocessing as mp\n'), ((3314, 3324), 'multiprocessing.Queue', 'mp.Queue', ([], {}), '()\n', (3322, 3324), True, 'import multiprocessing as mp\n'), ((3334, 3372), 'multiprocessing.Process', 'mp.Process', ([], {'target': 'worker', 'args': '(0, Q)'}), '(target=worker, args=(0, Q))\n', (3344, 3372), True, 'import multiprocessing as mp\n'), ((3382, 3420), 'multiprocessing.Process', 'mp.Process', ([], {'target': 'worker', 'args': '(1, Q)'}), '(target=worker, args=(1, Q))\n', (3392, 3420), True, 'import multiprocessing as mp\n'), ((4415, 4425), 'multiprocessing.Queue', 'mp.Queue', ([], {}), '()\n', (4423, 4425), True, 'import multiprocessing as mp\n'), ((4435, 4473), 'multiprocessing.Process', 'mp.Process', ([], {'target': 'worker', 'args': '(0, Q)'}), '(target=worker, args=(0, Q))\n', (4445, 4473), True, 'import multiprocessing as mp\n'), ((4483, 4521), 'multiprocessing.Process', 'mp.Process', ([], {'target': 'worker', 'args': '(1, Q)'}), '(target=worker, args=(1, Q))\n', (4493, 4521), True, 'import multiprocessing as mp\n'), ((924, 994), 'megengine.distributed.init_process_group', 'dist.init_process_group', (['_LOCALHOST', '(0)', 'world_size', 'rank', 'dev', 'backend'], {}), '(_LOCALHOST, 0, world_size, rank, dev, backend)\n', (947, 994), True, 'import megengine.distributed as dist\n'), ((1074, 1147), 'megengine.distributed.init_process_group', 'dist.init_process_group', (['_LOCALHOST', 'port', 'world_size', 'rank', 'dev', 'backend'], {}), '(_LOCALHOST, port, world_size, rank, dev, backend)\n', (1097, 1147), True, 'import megengine.distributed as dist\n'), ((1405, 1446), 'megengine._internal.config.create_mm_server', 'mgb.config.create_mm_server', (['"""0.0.0.0"""', '(0)'], {}), "('0.0.0.0', 0)\n", (1432, 1446), True, 'import megengine._internal as mgb\n'), ((1485, 1529), 'megengine._internal.config.create_mm_server', 'mgb.config.create_mm_server', (['"""0.0.0.0"""', 'port'], {}), "('0.0.0.0', port)\n", (1512, 1529), True, 'import megengine._internal as mgb\n'), ((1175, 1192), 'platform.system', 'platform.system', ([], {}), '()\n', (1190, 1192), False, 'import platform\n'), ((2310, 2320), 'multiprocessing.Queue', 'mp.Queue', ([], {}), '()\n', (2318, 2320), True, 'import multiprocessing as mp\n'), ((2334, 2381), 'multiprocessing.Process', 'mp.Process', ([], {'target': 'worker', 'args': '(0, backend, Q)'}), '(target=worker, args=(0, backend, Q))\n', (2344, 2381), True, 'import multiprocessing as mp\n'), ((2395, 2442), 'multiprocessing.Process', 'mp.Process', ([], {'target': 'worker', 'args': '(1, backend, Q)'}), '(target=worker, args=(1, backend, Q))\n', (2405, 2442), True, 'import multiprocessing as mp\n'), ((1676, 1693), 'platform.system', 'platform.system', ([], {}), '()\n', (1691, 1693), False, 'import platform\n'), ((3001, 3021), 'megengine.distributed.group_barrier', 'dist.group_barrier', ([], {}), '()\n', (3019, 3021), True, 'import megengine.distributed as dist\n'), ((2640, 2657), 'platform.system', 'platform.system', ([], {}), '()\n', (2655, 2657), False, 'import platform\n'), ((3968, 3988), 'megengine.distributed.group_barrier', 'dist.group_barrier', ([], {}), '()\n', (3986, 3988), True, 'import megengine.distributed as dist\n'), ((3562, 3579), 'platform.system', 'platform.system', ([], {}), '()\n', (3577, 3579), False, 'import platform\n'), ((1009, 1031), 'megengine.distributed.get_master_port', 'dist.get_master_port', ([], {}), '()\n', (1029, 
1031), True, 'import megengine.distributed as dist\n'), ((1346, 1369), 'megengine.is_cuda_available', 'mge.is_cuda_available', ([], {}), '()\n', (1367, 1369), True, 'import megengine as mge\n'), ((1885, 1908), 'megengine.is_cuda_available', 'mge.is_cuda_available', ([], {}), '()\n', (1906, 1908), True, 'import megengine as mge\n'), ((2016, 2037), 'megengine.distributed.is_distributed', 'dist.is_distributed', ([], {}), '()\n', (2035, 2037), True, 'import megengine.distributed as dist\n'), ((2061, 2081), 'megengine.distributed.get_master_ip', 'dist.get_master_ip', ([], {}), '()\n', (2079, 2081), True, 'import megengine.distributed as dist\n'), ((2111, 2133), 'megengine.distributed.get_master_port', 'dist.get_master_port', ([], {}), '()\n', (2131, 2133), True, 'import megengine.distributed as dist\n'), ((2153, 2174), 'megengine.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (2172, 2174), True, 'import megengine.distributed as dist\n'), ((2204, 2219), 'megengine.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (2217, 2219), True, 'import megengine.distributed as dist\n'), ((2243, 2261), 'megengine.distributed.get_backend', 'dist.get_backend', ([], {}), '()\n', (2259, 2261), True, 'import megengine.distributed as dist\n'), ((2877, 2900), 'megengine.is_cuda_available', 'mge.is_cuda_available', ([], {}), '()\n', (2898, 2900), True, 'import megengine as mge\n'), ((3056, 3076), 'megengine.distributed.group_barrier', 'dist.group_barrier', ([], {}), '()\n', (3074, 3076), True, 'import megengine.distributed as dist\n'), ((3221, 3241), 'megengine.distributed.group_barrier', 'dist.group_barrier', ([], {}), '()\n', (3239, 3241), True, 'import megengine.distributed as dist\n'), ((3844, 3867), 'megengine.is_cuda_available', 'mge.is_cuda_available', ([], {}), '()\n', (3865, 3867), True, 'import megengine as mge\n')]
|
from typing import Optional
from fastapi import FastAPI
from sqlmodel import (
SQLModel,
Field,
create_engine,
select,
Session
)
# Create the database engine
engine = create_engine('sqlite:///database.db')
class Pessoa(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
nome: str
idade: int
# Create the database tables
SQLModel.metadata.create_all(engine)
app = FastAPI()
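# The three read endpoints below demonstrate select() over the whole model,
# over a single column, and over a pair of columns.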
@app.get('/')
def home():
    return {'message': 'It worked'}
@app.get('/pessoa')
def get_pessoa():
query = select(Pessoa)
with Session(engine) as session:
result = session.execute(query).scalars().all()
return result
@app.get('/pessoa-nome')
def get_pessoa_nome():  # distinct name: redefining get_pessoa would shadow the handler above
query = select(Pessoa.nome)
with Session(engine) as session:
result = session.execute(query).scalars().all()
return result
@app.get('/pessoa-nome-idade')
def get_pessoa_nome_idade():
query = select(Pessoa.nome, Pessoa.idade)
with Session(engine) as session:
result = session.execute(query).scalars().all()
return result
|
[
"sqlmodel.SQLModel.metadata.create_all",
"sqlmodel.Session",
"sqlmodel.Field",
"sqlmodel.select",
"sqlmodel.create_engine"
] |
[((183, 221), 'sqlmodel.create_engine', 'create_engine', (['"""sqlite:///database.db"""'], {}), "('sqlite:///database.db')\n", (196, 221), False, 'from sqlmodel import SQLModel, Field, create_engine, select, Session\n'), ((377, 413), 'sqlmodel.SQLModel.metadata.create_all', 'SQLModel.metadata.create_all', (['engine'], {}), '(engine)\n', (405, 413), False, 'from sqlmodel import SQLModel, Field, create_engine, select, Session\n'), ((422, 431), 'fastapi.FastAPI', 'FastAPI', ([], {}), '()\n', (429, 431), False, 'from fastapi import FastAPI\n'), ((284, 321), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (289, 321), False, 'from sqlmodel import SQLModel, Field, create_engine, select, Session\n'), ((546, 560), 'sqlmodel.select', 'select', (['Pessoa'], {}), '(Pessoa)\n', (552, 560), False, 'from sqlmodel import SQLModel, Field, create_engine, select, Session\n'), ((730, 749), 'sqlmodel.select', 'select', (['Pessoa.nome'], {}), '(Pessoa.nome)\n', (736, 749), False, 'from sqlmodel import SQLModel, Field, create_engine, select, Session\n'), ((925, 958), 'sqlmodel.select', 'select', (['Pessoa.nome', 'Pessoa.idade'], {}), '(Pessoa.nome, Pessoa.idade)\n', (931, 958), False, 'from sqlmodel import SQLModel, Field, create_engine, select, Session\n'), ((570, 585), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (577, 585), False, 'from sqlmodel import SQLModel, Field, create_engine, select, Session\n'), ((759, 774), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (766, 774), False, 'from sqlmodel import SQLModel, Field, create_engine, select, Session\n'), ((968, 983), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (975, 983), False, 'from sqlmodel import SQLModel, Field, create_engine, select, Session\n')]
|
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import multiprocessing as mp
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine.core import Parameter, tensor
def _init_process_group_wrapper(world_size, rank, dev, backend, q):
if rank == 0:
dist.init_process_group("localhost", 0, world_size, rank, dev, backend)
q.put(dist.get_master_port())
else:
port = q.get()
dist.init_process_group("localhost", port, world_size, rank, dev, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_reduce_sum():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.reduce_sum(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect)
else:
assert np.allclose(output.numpy(), 0)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = x + y
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, None, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_gather():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.gather(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect)
else:
assert np.allclose(output.numpy(), 0)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = np.concatenate((x, y))
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, None, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_broadcast():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.broadcast(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = x + 1
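        # Rank 1 starts from different data; after broadcast both ranks must
        # hold rank 0's tensor x.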
p0 = mp.Process(target=worker, args=(0, x, backend, x, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, x, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_scatter():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.scatter(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = x + 1
p0 = mp.Process(
target=worker, args=(0, x, backend, x[: shape[0] // 2], port_queue)
)
p1 = mp.Process(
target=worker, args=(1, y, backend, x[shape[0] // 2 :], port_queue)
)
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (100, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_all_to_all():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.all_to_all(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
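        # all_to_all exchanges half-chunks along axis 0: rank 0 ends up with
        # the first halves of x and y, rank 1 with the second halves.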
a = np.concatenate((x[: shape[0] // 2], y[: shape[0] // 2]))
b = np.concatenate((x[shape[0] // 2 :], y[shape[0] // 2 :]))
p0 = mp.Process(target=worker, args=(0, x, backend, a, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, b, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (100, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_all_gather():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.all_gather(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = np.concatenate((x, y))
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, z, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.reduce_scatter_sum(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = x + y
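        # reduce_scatter_sum adds the tensors elementwise, then each rank
        # keeps one contiguous half of the summed result.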
p0 = mp.Process(
target=worker, args=(0, x, backend, z[: shape[0] // 2], port_queue)
)
p1 = mp.Process(
target=worker, args=(1, y, backend, z[shape[0] // 2 :], port_queue)
)
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 4), (8, 10), (88, 44)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_all_reduce_sum():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.all_reduce_sum(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = x + y
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, z, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_all_reduce_max():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.all_reduce_max(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = np.maximum(x, y)
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, z, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_all_reduce_min():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.all_reduce_min(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = np.minimum(x, y)
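        # all_reduce_min yields the elementwise minimum across both ranks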
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, z, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
    platform.system() == "Darwin", reason="GPU mode is not implemented on macOS yet"
)
@pytest.mark.skipif(
    platform.system() == "Windows", reason="GPU mode is not implemented on Windows yet"
)
@pytest.mark.isolated_distributed
def test_bcast_param():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = Parameter(data)
dist.functional.bcast_param(inp)
assert np.allclose(inp.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = x + 1
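        # rank 0 broadcasts its parameter, so both ranks should end up with x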
p0 = mp.Process(target=worker, args=(0, x, backend, x, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, x, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
|
[
"megengine.core.tensor",
"megengine.distributed.functional.all_reduce_min",
"megengine.distributed.get_master_port",
"megengine.distributed.functional.all_gather",
"megengine.distributed.init_process_group",
"megengine.distributed.functional.all_to_all",
"megengine.get_device_count",
"megengine.distributed.functional.bcast_param",
"megengine.distributed.functional.reduce_sum",
"megengine.distributed.functional.gather",
"megengine.distributed.functional.broadcast",
"megengine.distributed.functional.all_reduce_max",
"megengine.distributed.functional.scatter",
"megengine.distributed.functional.all_reduce_sum",
"megengine.core.Parameter",
"megengine.distributed.functional.reduce_scatter_sum"
] |
[((636, 707), 'megengine.distributed.init_process_group', 'dist.init_process_group', (['"""localhost"""', '(0)', 'world_size', 'rank', 'dev', 'backend'], {}), "('localhost', 0, world_size, rank, dev, backend)\n", (659, 707), True, 'import megengine.distributed as dist\n'), ((787, 861), 'megengine.distributed.init_process_group', 'dist.init_process_group', (['"""localhost"""', 'port', 'world_size', 'rank', 'dev', 'backend'], {}), "('localhost', port, world_size, rank, dev, backend)\n", (810, 861), True, 'import megengine.distributed as dist\n'), ((1368, 1380), 'megengine.core.tensor', 'tensor', (['data'], {}), '(data)\n', (1374, 1380), False, 'from megengine.core import Parameter, tensor\n'), ((1398, 1429), 'megengine.distributed.functional.reduce_sum', 'dist.functional.reduce_sum', (['inp'], {}), '(inp)\n', (1424, 1429), True, 'import megengine.distributed as dist\n'), ((1624, 1634), 'multiprocessing.Queue', 'mp.Queue', ([], {}), '()\n', (1632, 1634), True, 'import multiprocessing as mp\n'), ((1772, 1834), 'multiprocessing.Process', 'mp.Process', ([], {'target': 'worker', 'args': '(0, x, backend, z, port_queue)'}), '(target=worker, args=(0, x, backend, z, port_queue))\n', (1782, 1834), True, 'import multiprocessing as mp\n'), ((1848, 1913), 'multiprocessing.Process', 'mp.Process', ([], {'target': 'worker', 'args': '(1, y, backend, None, port_queue)'}), '(target=worker, args=(1, y, backend, None, port_queue))\n', (1858, 1913), True, 'import multiprocessing as mp\n'), ((889, 906), 'platform.system', 'platform.system', ([], {}), '()\n', (904, 906), False, 'import platform\n'), ((989, 1006), 'platform.system', 'platform.system', ([], {}), '()\n', (1004, 1006), False, 'import platform\n'), ((2671, 2683), 'megengine.core.tensor', 'tensor', (['data'], {}), '(data)\n', (2677, 2683), False, 'from megengine.core import Parameter, tensor\n'), ((2701, 2728), 'megengine.distributed.functional.gather', 'dist.functional.gather', (['inp'], {}), '(inp)\n', (2723, 2728), True, 'import megengine.distributed as dist\n'), ((2923, 2933), 'multiprocessing.Queue', 'mp.Queue', ([], {}), '()\n', (2931, 2933), True, 'import multiprocessing as mp\n'), ((3052, 3074), 'numpy.concatenate', 'np.concatenate', (['(x, y)'], {}), '((x, y))\n', (3066, 3074), True, 'import numpy as np\n'), ((3088, 3150), 'multiprocessing.Process', 'mp.Process', ([], {'target': 'worker', 'args': '(0, x, backend, z, port_queue)'}), '(target=worker, args=(0, x, backend, z, port_queue))\n', (3098, 3150), True, 'import multiprocessing as mp\n'), ((3164, 3229), 'multiprocessing.Process', 'mp.Process', ([], {'target': 'worker', 'args': '(1, y, backend, None, port_queue)'}), '(target=worker, args=(1, y, backend, None, port_queue))\n', (3174, 3229), True, 'import multiprocessing as mp\n'), ((2196, 2213), 'platform.system', 'platform.system', ([], {}), '()\n', (2211, 2213), False, 'import platform\n'), ((2296, 2313), 'platform.system', 'platform.system', ([], {}), '()\n', (2311, 2313), False, 'import platform\n'), ((3990, 4002), 'megengine.core.tensor', 'tensor', (['data'], {}), '(data)\n', (3996, 4002), False, 'from megengine.core import Parameter, tensor\n'), ((4020, 4050), 'megengine.distributed.functional.broadcast', 'dist.functional.broadcast', (['inp'], {}), '(inp)\n', (4045, 4050), True, 'import megengine.distributed as dist\n'), ((4155, 4165), 'multiprocessing.Queue', 'mp.Queue', ([], {}), '()\n', (4163, 4165), True, 'import multiprocessing as mp\n'), ((4250, 4312), 'multiprocessing.Process', 'mp.Process', ([], {'target': 'worker', 'args': '(0, x, backend, x, port_queue)'}), '(target=worker, args=(0, x, backend, x, port_queue))\n', (4260, 4312), True, 'import multiprocessing as mp\n'), ((4326, 4388), 'multiprocessing.Process', 'mp.Process', ([], {'target': 'worker', 'args': '(1, y, backend, x, port_queue)'}), '(target=worker, args=(1, y, backend, x, port_queue))\n', (4336, 4388), True, 'import multiprocessing as mp\n'), ((3512, 3529), 'platform.system', 'platform.system', ([], {}), '()\n', (3527, 3529), False, 'import platform\n'), ((3612, 3629), 'platform.system', 'platform.system', ([], {}), '()\n', (3627, 3629), False, 'import platform\n'), ((5147, 5159), 'megengine.core.tensor', 'tensor', (['data'], {}), '(data)\n', (5153, 5159), False, 'from megengine.core import Parameter, tensor\n'), ((5177, 5205), 'megengine.distributed.functional.scatter', 'dist.functional.scatter', (['inp'], {}), '(inp)\n', (5200, 5205), True, 'import megengine.distributed as dist\n'), ((5310, 5320), 'multiprocessing.Queue', 'mp.Queue', ([], {}), '()\n', (5318, 5320), True, 'import multiprocessing as mp\n'), ((5405, 5483), 'multiprocessing.Process', 'mp.Process', ([], {'target': 'worker', 'args': '(0, x, backend, x[:shape[0] // 2], port_queue)'}), '(target=worker, args=(0, x, backend, x[:shape[0] // 2], port_queue))\n', (5415, 5483), True, 'import multiprocessing as mp\n'), ((5520, 5598), 'multiprocessing.Process', 'mp.Process', ([], {'target': 'worker', 'args': '(1, y, backend, x[shape[0] // 2:], port_queue)'}), '(target=worker, args=(1, y, backend, x[shape[0] // 2:], port_queue))\n', (5530, 5598), True, 'import multiprocessing as mp\n'), ((4671, 4688), 'platform.system', 'platform.system', ([], {}), '()\n', (4686, 4688), False, 'import platform\n'), ((4771, 4788), 'platform.system', 'platform.system', ([], {}), '()\n', (4786, 4788), False, 'import platform\n'), ((6384, 6396), 'megengine.core.tensor', 'tensor', (['data'], {}), '(data)\n', (6390, 6396), False, 'from megengine.core import Parameter, tensor\n'), ((6414, 6445), 'megengine.distributed.functional.all_to_all', 'dist.functional.all_to_all', (['inp'], {}), '(inp)\n', (6440, 6445), True, 'import megengine.distributed as dist\n'), ((6550, 6560), 'multiprocessing.Queue', 'mp.Queue', ([], {}), '()\n', (6558, 6560), True, 'import multiprocessing as mp\n'), ((6679, 6733), 'numpy.concatenate', 'np.concatenate', (['(x[:shape[0] // 2], y[:shape[0] // 2])'], {}), '((x[:shape[0] // 2], y[:shape[0] // 2]))\n', (6693, 6733), True, 'import numpy as np\n'), ((6748, 6802), 'numpy.concatenate', 'np.concatenate', (['(x[shape[0] // 2:], y[shape[0] // 2:])'], {}), '((x[shape[0] // 2:], y[shape[0] // 2:]))\n', (6762, 6802), True, 'import numpy as np\n'), ((6818, 6880), 'multiprocessing.Process', 'mp.Process', ([], {'target': 'worker', 'args': '(0, x, backend, a, port_queue)'}), '(target=worker, args=(0, x, backend, a, port_queue))\n', (6828, 6880), True, 'import multiprocessing as mp\n'), ((6894, 6956), 'multiprocessing.Process', 'mp.Process', ([], {'target': 'worker', 'args': '(1, y, backend, b, port_queue)'}), '(target=worker, args=(1, y, backend, b, port_queue))\n', (6904, 6956), True, 'import multiprocessing as mp\n'), ((5905, 5922), 'platform.system', 'platform.system', ([], {}), '()\n', (5920, 5922), False, 'import platform\n'), ((6005, 6022), 'platform.system', 'platform.system', ([], {}), '()\n', (6020, 6022), False, 'import platform\n'), ((7719, 7731), 'megengine.core.tensor', 'tensor', (['data'], {}), '(data)\n', (7725, 7731), False, 'from megengine.core import Parameter, tensor\n'), ((7749, 7780), 'megengine.distributed.functional.all_gather', 'dist.functional.all_gather', (['inp'], {}), '(inp)\n', (7775, 7780), True, 'import megengine.distributed as dist\n'), ((7885, 7895), 'multiprocessing.Queue', 'mp.Queue', ([], {}), '()\n', (7893, 7895), True, 'import multiprocessing as mp\n'), ((8014, 8036), 'numpy.concatenate', 'np.concatenate', (['(x, y)'], {}), '((x, y))\n', (8028, 8036), True, 'import numpy as np\n'), ((8050, 8112), 'multiprocessing.Process', 'mp.Process', ([], {'target': 'worker', 'args': '(0, x, backend, z, port_queue)'}), '(target=worker, args=(0, x, backend, z, port_queue))\n', (8060, 8112), True, 'import multiprocessing as mp\n'), ((8126, 8188), 'multiprocessing.Process', 'mp.Process', ([], {'target': 'worker', 'args': '(1, y, backend, z, port_queue)'}), '(target=worker, args=(1, y, backend, z, port_queue))\n', (8136, 8188), True, 'import multiprocessing as mp\n'), ((7240, 7257), 'platform.system', 'platform.system', ([], {}), '()\n', (7255, 7257), False, 'import platform\n'), ((7340, 7357), 'platform.system', 'platform.system', ([], {}), '()\n', (7355, 7357), False, 'import platform\n'), ((8958, 8970), 'megengine.core.tensor', 'tensor', (['data'], {}), '(data)\n', (8964, 8970), False, 'from megengine.core import Parameter, tensor\n'), ((8988, 9027), 'megengine.distributed.functional.reduce_scatter_sum', 'dist.functional.reduce_scatter_sum', (['inp'], {}), '(inp)\n', (9022, 9027), True, 'import megengine.distributed as dist\n'), ((9132, 9142), 'multiprocessing.Queue', 'mp.Queue', ([], {}), '()\n', (9140, 9142), True, 'import multiprocessing as mp\n'), ((9280, 9358), 'multiprocessing.Process', 'mp.Process', ([], {'target': 'worker', 'args': '(0, x, backend, z[:shape[0] // 2], port_queue)'}), '(target=worker, args=(0, x, backend, z[:shape[0] // 2], port_queue))\n', (9290, 9358), True, 'import multiprocessing as mp\n'), ((9395, 9473), 'multiprocessing.Process', 'mp.Process', ([], {'target': 'worker', 'args': '(1, y, backend, z[shape[0] // 2:], port_queue)'}), '(target=worker, args=(1, y, backend, z[shape[0] // 2:], port_queue))\n', (9405, 9473), True, 'import multiprocessing as mp\n'), ((8471, 8488), 'platform.system', 'platform.system', ([], {}), '()\n', (8486, 8488), False, 'import platform\n'), ((8571, 8588), 'platform.system', 'platform.system', ([], {}), '()\n', (8586, 8588), False, 'import platform\n'), ((10262, 10274), 'megengine.core.tensor', 'tensor', (['data'], {}), '(data)\n', (10268, 10274), False, 'from megengine.core import Parameter, tensor\n'), ((10292, 10327), 'megengine.distributed.functional.all_reduce_sum', 'dist.functional.all_reduce_sum', (['inp'], {}), '(inp)\n', (10322, 10327), True, 'import megengine.distributed as dist\n'), ((10432, 10442), 'multiprocessing.Queue', 'mp.Queue', ([], {}), '()\n', (10440, 10442), True, 'import multiprocessing as mp\n'), ((10580, 10642), 'multiprocessing.Process', 'mp.Process', ([], {'target': 'worker', 'args': '(0, x, backend, z, port_queue)'}), '(target=worker, args=(0, x, backend, z, port_queue))\n', (10590, 10642), True, 'import multiprocessing as mp\n'), ((10656, 10718), 'multiprocessing.Process', 'mp.Process', ([], {'target': 'worker', 'args': '(1, y, backend, z, port_queue)'}), '(target=worker, args=(1, y, backend, z, port_queue))\n', (10666, 10718), True, 'import multiprocessing as mp\n'), ((9779, 9796), 'platform.system', 'platform.system', ([], {}), '()\n', (9794, 9796), False, 'import platform\n'), ((9879, 9896), 'platform.system', 'platform.system', ([], {}), '()\n', (9894, 9896), False, 'import platform\n'), ((11484, 11496), 'megengine.core.tensor', 'tensor', (['data'], {}), '(data)\n', (11490, 11496), False, 'from megengine.core import Parameter, tensor\n'), ((11514, 11549), 'megengine.distributed.functional.all_reduce_max', 'dist.functional.all_reduce_max', (['inp'], {}), '(inp)\n', (11544, 11549), True, 'import megengine.distributed as dist\n'), ((11654, 11664), 'multiprocessing.Queue', 'mp.Queue', ([], {}), '()\n', (11662, 11664), True, 'import multiprocessing as mp\n'), ((11783, 11799), 'numpy.maximum', 'np.maximum', (['x', 'y'], {}), '(x, y)\n', (11793, 11799), True, 'import numpy as np\n'), ((11813, 11875), 'multiprocessing.Process', 'mp.Process', ([], {'target': 'worker', 'args': '(0, x, backend, z, port_queue)'}), '(target=worker, args=(0, x, backend, z, port_queue))\n', (11823, 11875), True, 'import multiprocessing as mp\n'), ((11889, 11951), 'multiprocessing.Process', 'mp.Process', ([], {'target': 'worker', 'args': '(1, y, backend, z, port_queue)'}), '(target=worker, args=(1, y, backend, z, port_queue))\n', (11899, 11951), True, 'import multiprocessing as mp\n'), ((11001, 11018), 'platform.system', 'platform.system', ([], {}), '()\n', (11016, 11018), False, 'import platform\n'), ((11101, 11118), 'platform.system', 'platform.system', ([], {}), '()\n', (11116, 11118), False, 'import platform\n'), ((12717, 12729), 'megengine.core.tensor', 'tensor', (['data'], {}), '(data)\n', (12723, 12729), False, 'from megengine.core import Parameter, tensor\n'), ((12747, 12782), 'megengine.distributed.functional.all_reduce_min', 'dist.functional.all_reduce_min', (['inp'], {}), '(inp)\n', (12777, 12782), True, 'import megengine.distributed as dist\n'), ((12887, 12897), 'multiprocessing.Queue', 'mp.Queue', ([], {}), '()\n', (12895, 12897), True, 'import multiprocessing as mp\n'), ((13016, 13032), 'numpy.minimum', 'np.minimum', (['x', 'y'], {}), '(x, y)\n', (13026, 13032), True, 'import numpy as np\n'), ((13046, 13108), 'multiprocessing.Process', 'mp.Process', ([], {'target': 'worker', 'args': '(0, x, backend, z, port_queue)'}), '(target=worker, args=(0, x, backend, z, port_queue))\n', (13056, 13108), True, 'import multiprocessing as mp\n'), ((13122, 13184), 'multiprocessing.Process', 'mp.Process', ([], {'target': 'worker', 'args': '(1, y, backend, z, port_queue)'}), '(target=worker, args=(1, y, backend, z, port_queue))\n', (13132, 13184), True, 'import multiprocessing as mp\n'), ((12234, 12251), 'platform.system', 'platform.system', ([], {}), '()\n', (12249, 12251), False, 'import platform\n'), ((12334, 12351), 'platform.system', 'platform.system', ([], {}), '()\n', (12349, 12351), False, 'import platform\n'), ((13947, 13962), 'megengine.core.Parameter', 'Parameter', (['data'], {}), '(data)\n', (13956, 13962), False, 'from megengine.core import Parameter, tensor\n'), ((13971, 14003), 'megengine.distributed.functional.bcast_param', 'dist.functional.bcast_param', (['inp'], {}), '(inp)\n', (13998, 14003), True, 'import megengine.distributed as dist\n'), ((14105, 14115), 'multiprocessing.Queue', 'mp.Queue', ([], {}), '()\n', (14113, 14115), True, 'import multiprocessing as mp\n'), ((14200, 14262), 'multiprocessing.Process', 'mp.Process', ([], {'target': 'worker', 'args': '(0, x, backend, x, port_queue)'}), '(target=worker, args=(0, x, backend, x, port_queue))\n', (14210, 14262), True, 'import multiprocessing as mp\n'), ((14276, 14338), 'multiprocessing.Process', 'mp.Process', ([], {'target': 'worker', 'args': '(1, y, backend, x, port_queue)'}), '(target=worker, args=(1, y, backend, x, port_queue))\n', (14286, 14338), True, 'import multiprocessing as mp\n'), ((13467, 13484), 'platform.system', 'platform.system', ([], {}), '()\n', (13482, 13484), False, 'import platform\n'), ((13567, 13584), 'platform.system', 'platform.system', ([], {}), '()\n', (13582, 13584), False, 'import platform\n'), ((722, 744), 'megengine.distributed.get_master_port', 'dist.get_master_port', ([], {}), '()\n', (742, 744), True, 'import megengine.distributed as dist\n'), ((1212, 1239), 'megengine.get_device_count', 'mge.get_device_count', (['"""gpu"""'], {}), "('gpu')\n", (1232, 1239), True, 'import megengine as mge\n'), ((2515, 2542), 'megengine.get_device_count', 'mge.get_device_count', (['"""gpu"""'], {}), "('gpu')\n", (2535, 2542), True, 'import megengine as mge\n'), ((3834, 3861), 'megengine.get_device_count', 'mge.get_device_count', (['"""gpu"""'], {}), "('gpu')\n", (3854, 3861), True, 'import megengine as mge\n'), ((4991, 5018), 'megengine.get_device_count', 'mge.get_device_count', (['"""gpu"""'], {}), "('gpu')\n", (5011, 5018), True, 'import megengine as mge\n'), ((6228, 6255), 'megengine.get_device_count', 'mge.get_device_count', (['"""gpu"""'], {}), "('gpu')\n", (6248, 6255), True, 'import megengine as mge\n'), ((7563, 7590), 'megengine.get_device_count', 'mge.get_device_count', (['"""gpu"""'], {}), "('gpu')\n", (7583, 7590), True, 'import megengine as mge\n'), ((8802, 8829), 'megengine.get_device_count', 'mge.get_device_count', (['"""gpu"""'], {}), "('gpu')\n", (8822, 8829), True, 'import megengine as mge\n'), ((10106, 10133), 'megengine.get_device_count', 'mge.get_device_count', (['"""gpu"""'], {}), "('gpu')\n", (10126, 10133), True, 'import megengine as mge\n'), ((11328, 11355), 'megengine.get_device_count', 'mge.get_device_count', (['"""gpu"""'], {}), "('gpu')\n", (11348, 11355), True, 'import megengine as mge\n'), ((12561, 12588), 'megengine.get_device_count', 'mge.get_device_count', (['"""gpu"""'], {}), "('gpu')\n", (12581, 12588), True, 'import megengine as mge\n'), ((13791, 13818), 'megengine.get_device_count', 'mge.get_device_count', (['"""gpu"""'], {}), "('gpu')\n", (13811, 13818), True, 'import megengine as mge\n'), ((1647, 1669), 'numpy.random.rand', 'np.random.rand', (['*shape'], {}), '(*shape)\n', (1661, 1669), True, 'import numpy as np\n'), ((1700, 1722), 'numpy.random.rand', 'np.random.rand', (['*shape'], {}), '(*shape)\n', (1714, 1722), True, 'import numpy as np\n'), ((2946, 2968), 'numpy.random.rand', 'np.random.rand', (['*shape'], {}), '(*shape)\n', (2960, 2968), True, 'import numpy as np\n'), ((2999, 3021), 'numpy.random.rand', 'np.random.rand', (['*shape'], {}), '(*shape)\n', (3013, 3021), True, 'import numpy as np\n'), ((4178, 4200), 'numpy.random.rand', 'np.random.rand', (['*shape'], {}), '(*shape)\n', (4192, 4200), True, 'import numpy as np\n'), ((5333, 5355), 'numpy.random.rand', 'np.random.rand', (['*shape'], {}), '(*shape)\n', (5347, 5355), True, 'import numpy as np\n'), ((6573, 6595), 'numpy.random.rand', 'np.random.rand', (['*shape'], {}), '(*shape)\n', (6587, 6595), True, 'import numpy as np\n'), ((6626, 6648), 'numpy.random.rand', 'np.random.rand', (['*shape'], {}), '(*shape)\n', (6640, 6648), True, 'import numpy as np\n'), ((7908, 7930), 'numpy.random.rand', 'np.random.rand', (['*shape'], {}), '(*shape)\n', (7922, 7930), True, 'import numpy as np\n'), ((7961, 7983), 'numpy.random.rand', 'np.random.rand', (['*shape'], {}), '(*shape)\n', (7975, 7983), True, 'import numpy as np\n'), ((9155, 9177), 'numpy.random.rand', 'np.random.rand', (['*shape'], {}), '(*shape)\n', (9169, 9177), True, 'import numpy as np\n'), ((9208, 9230), 'numpy.random.rand', 'np.random.rand', (['*shape'], {}), '(*shape)\n', (9222, 9230), True, 'import numpy as np\n'), ((10455, 10477), 'numpy.random.rand', 'np.random.rand', (['*shape'], {}), '(*shape)\n', (10469, 10477), True, 'import numpy as np\n'), ((10508, 10530), 'numpy.random.rand', 'np.random.rand', (['*shape'], {}), '(*shape)\n', (10522, 10530), True, 'import numpy as np\n'), ((11677, 11699), 'numpy.random.rand', 'np.random.rand', (['*shape'], {}), '(*shape)\n', (11691, 11699), True, 'import numpy as np\n'), ((11730, 11752), 'numpy.random.rand', 'np.random.rand', (['*shape'], {}), '(*shape)\n', (11744, 11752), True, 'import numpy as np\n'), ((12910, 12932), 'numpy.random.rand', 'np.random.rand', (['*shape'], {}), '(*shape)\n', (12924, 12932), True, 'import numpy as np\n'), ((12963, 12985), 'numpy.random.rand', 'np.random.rand', (['*shape'], {}), '(*shape)\n', (12977, 12985), True, 'import numpy as np\n'), ((14128, 14150), 'numpy.random.rand', 'np.random.rand', (['*shape'], {}), '(*shape)\n', (14142, 14150), True, 'import numpy as np\n')]
|
from sqlmodel import create_engine
from pyflarum.database.session import FlarumDatabase
from pyflarum.database.flarum.core.users import DB_User
DATABASE = FlarumDatabase(create_engine('sqlite:///tests/database/database.db'))
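# smoke test: create a user, then read the row back by username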
if __name__ == "__main__":
with DATABASE:
DATABASE.create_user(username='test_wfkeoge', password=b'<PASSWORD>', email='<EMAIL>')
saved = DATABASE.generic_filter(DB_User, username='test_wfkeoge').first()
print(saved)
|
[
"sqlmodel.create_engine"
] |
[((173, 226), 'sqlmodel.create_engine', 'create_engine', (['"""sqlite:///tests/database/database.db"""'], {}), "('sqlite:///tests/database/database.db')\n", (186, 226), False, 'from sqlmodel import create_engine\n')]
|
# -*- coding: utf-8 -*-
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
# ---------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2021 Megvii Inc. All rights reserved.
# ----------------------------------------------------------------------
"""Megengine BERT model."""
import copy
import json
import math
import os
import urllib
import urllib.request
from io import open
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.hub as hub
from megengine import Parameter
from megengine.functional.loss import cross_entropy
from megengine.module import Dropout, Embedding, Linear, Module, Sequential
from megengine.module.activation import Softmax
def transpose(inp, a, b):
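    """Swap axes ``a`` and ``b`` of ``inp`` (works for any tensor rank)."""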
cur_shape = list(range(0, inp.ndim))
cur_shape[a], cur_shape[b] = cur_shape[b], cur_shape[a]
return inp.transpose(cur_shape)
def gelu(x):
"""Implementation of the gelu activation function.
For information: OpenAI GPT's gelu is slightly different
(and gives slightly different results):
x * 0.5 * (1.0 + F.tanh((F.sqrt(2 / math.pi) * (x + 0.044715 * (x ** 3)))))
Also see https://arxiv.org/abs/1606.08415
"""
return x * 0.5 * (1.0 + F.tanh(F.sqrt(2 / math.pi) * (x + 0.044715 * (x ** 3))))
ACT2FN = {"gelu": gelu, "relu": F.relu}
class BertConfig:
"""Configuration class to store the configuration of a `BertModel`.
"""
def __init__(
self,
vocab_size_or_config_json_file,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02,
):
"""Constructs BertConfig.
Args:
vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `BertModel`.
hidden_size: Size of the encoder layers and the pooler layer.
num_hidden_layers: Number of hidden layers in the Transformer encoder.
num_attention_heads: Number of attention heads for each attention layer in
the Transformer encoder.
intermediate_size: The size of the "intermediate" (i.e., feed-forward)
layer in the Transformer encoder.
hidden_act: The non-linear activation function (function or string) in the
encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
            hidden_dropout_prob: The dropout probability for all fully connected
layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob: The dropout ratio for the attention
probabilities.
max_position_embeddings: The maximum sequence length that this model might
ever be used with. Typically set this to something large just in case
(e.g., 512 or 1024 or 2048).
type_vocab_size: The vocabulary size of the `token_type_ids` passed into
`BertModel`.
            initializer_range: The stddev of the truncated_normal_initializer for
initializing all weight matrices.
"""
if isinstance(vocab_size_or_config_json_file, str):
with open(vocab_size_or_config_json_file, "r", encoding="utf-8") as reader:
json_config = json.loads(reader.read())
for key, value in json_config.items():
self.__dict__[key] = value
elif isinstance(vocab_size_or_config_json_file, int):
self.vocab_size = vocab_size_or_config_json_file
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
else:
raise ValueError(
"First argument must be either a vocabulary size (int)"
"or the path to a pretrained model config file (str)"
)
@classmethod
def from_dict(cls, json_object):
"""Constructs a `BertConfig` from a Python dictionary of parameters."""
config = BertConfig(vocab_size_or_config_json_file=-1)
for key, value in json_object.items():
config.__dict__[key] = value
return config
@classmethod
def from_json_file(cls, json_file):
"""Constructs a `BertConfig` from a json file of parameters."""
with open(json_file, "r", encoding="utf-8") as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
def __repr__(self):
return str(self.to_json_string())
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
def to_json_file(self, json_file_path):
""" Save this instance to a json file."""
with open(json_file_path, "w", encoding="utf-8") as writer:
writer.write(self.to_json_string())
class BertLayerNorm(Module):
"""Construct a layernorm module in the TF style (epsilon inside the square root).
"""
def __init__(self, hidden_size, eps=1e-12):
super().__init__()
self.weight = Parameter(np.ones(hidden_size).astype(np.float32))
self.bias = Parameter(np.zeros(hidden_size).astype(np.float32))
self.variance_epsilon = eps
def forward(self, x):
u = F.mean(x, len(x.shape) - 1, True)
s = F.mean((x - u) ** 2, len(x.shape) - 1, True)
x = (x - u) / ((s + self.variance_epsilon) ** 0.5)
return self.weight * x + self.bias
class BertEmbeddings(Module):
"""Construct the embeddings from word, position and token_type embeddings.
"""
def __init__(self, config):
super().__init__()
self.word_embeddings = Embedding(config.vocab_size, config.hidden_size)
self.position_embeddings = Embedding(
config.max_position_embeddings, config.hidden_size
)
self.token_type_embeddings = Embedding(
config.type_vocab_size, config.hidden_size
)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name
# and be able to load any TensorFlow checkpoint file
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = Dropout(config.hidden_dropout_prob)
def forward(self, input_ids, token_type_ids=None):
seq_length = input_ids.shape[1]
if token_type_ids is None:
token_type_ids = F.zeros_like(input_ids)
position_ids = F.linspace(0, seq_length - 1, seq_length).astype(np.int32)
position_ids = F.broadcast_to(F.expand_dims(position_ids, 0), input_ids.shape)
words_embeddings = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = words_embeddings + position_embeddings + token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class BertSelfAttention(Module):
def __init__(self, config):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads)
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = Linear(config.hidden_size, self.all_head_size)
self.key = Linear(config.hidden_size, self.all_head_size)
self.value = Linear(config.hidden_size, self.all_head_size)
self.dropout = Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
# using symbolic shapes to make trace happy
x_shape = mge.tensor(x.shape)
new_x_shape = F.concat(
[x_shape[:-1], (self.num_attention_heads, self.attention_head_size)]
)
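        # reshape [batch, seq, hidden] -> [batch, seq, heads, head_size], then
        # move heads forward: the final layout is [batch, heads, seq, head_size]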
x = x.reshape(new_x_shape)
return x.transpose(0, 2, 1, 3)
def forward(self, hidden_states, attention_mask):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = F.matmul(query_layer, transpose(key_layer, -1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
# Apply the attention mask is (precomputed for all layers in BertModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = Softmax(len(attention_scores.shape) - 1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
context_layer = F.matmul(attention_probs, value_layer)
context_layer = context_layer.transpose(0, 2, 1, 3)
# using symbolic shapes to make trace happy
context_shape = mge.tensor(context_layer.shape)
new_context_layer_shape = F.concat([context_shape[:-2], self.all_head_size])
context_layer = context_layer.reshape(new_context_layer_shape)
return context_layer
class BertSelfOutput(Module):
def __init__(self, config):
super().__init__()
self.dense = Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertAttention(Module):
def __init__(self, config):
super().__init__()
self.self = BertSelfAttention(config)
self.output = BertSelfOutput(config)
def forward(self, input_tensor, attention_mask):
self_output = self.self(input_tensor, attention_mask)
attention_output = self.output(self_output, input_tensor)
return attention_output
class BertIntermediate(Module):
def __init__(self, config):
super().__init__()
self.dense = Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class BertOutput(Module):
def __init__(self, config):
super().__init__()
self.dense = Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertLayer(Module):
def __init__(self, config):
super().__init__()
self.attention = BertAttention(config)
self.intermediate = BertIntermediate(config)
self.output = BertOutput(config)
def forward(self, hidden_states, attention_mask):
attention_output = self.attention(hidden_states, attention_mask)
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
class BertEncoder(Module):
def __init__(self, config):
super().__init__()
self.layer = Sequential(
*[BertLayer(config) for _ in range(config.num_hidden_layers)]
)
# self.layer = ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True):
all_encoder_layers = []
for layer_module in self.layer:
hidden_states = layer_module(hidden_states, attention_mask)
if output_all_encoded_layers:
all_encoder_layers.append(hidden_states)
if not output_all_encoded_layers:
all_encoder_layers.append(hidden_states)
return all_encoder_layers
class BertPooler(Module):
def __init__(self, config):
super().__init__()
self.dense = Linear(config.hidden_size, config.hidden_size)
self.activation = F.tanh
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class BertModel(Module):
"""BERT model ("Bidirectional Embedding Representations from a Transformer").
Params:
config: a BertConfig class instance with the configuration to build a new model
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
with the word token indices in the vocabulary
(see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape
[batch_size, sequence_length] with the token types indices selected in [0, 1].
Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length]
with indices selected in [0, 1]. It's a mask to be used if the input sequence length
is smaller than the max input sequence length in the current batch.
It's the mask that we typically use for attention when
a batch has varying length sentences.
`output_all_encoded_layers`: boolean which controls the content of the `encoded_layers`
output as described below. Default: `True`.
Outputs: Tuple of (encoded_layers, pooled_output)
        `encoded_layers`: controlled by `output_all_encoded_layers` argument:
- `output_all_encoded_layers=True`: outputs a list of the full sequences of
encoded-hidden-states at the end of each attention block
(i.e. 12 full sequences for BERT-base, 24 for BERT-large), each
encoded-hidden-state is a torch.FloatTensor of size
[batch_size, sequence_length, hidden_size],
- `output_all_encoded_layers=False`: outputs only the full sequence of
hidden-states corresponding to the last attention block of shape
[batch_size, sequence_length, hidden_size],
`pooled_output`: a torch.FloatTensor of size [batch_size, hidden_size]
which is the output of classifier pretrained on top of the hidden state
associated to the first character of the
input (`CLS`) to train on the Next-Sentence task (see BERT's paper).
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = modeling.BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model = modeling.BertModel(config=config)
all_encoder_layers, pooled_output = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config):
super().__init__()
self.embeddings = BertEmbeddings(config)
self.encoder = BertEncoder(config)
self.pooler = BertPooler(config)
def forward(
self,
input_ids,
token_type_ids=None,
attention_mask=None,
output_all_encoded_layers=True,
):
if attention_mask is None:
attention_mask = F.ones_like(input_ids)
if token_type_ids is None:
token_type_ids = F.zeros_like(input_ids)
# print('input_ids', input_ids.sum())
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is more simple than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
# print('attention_mask', attention_mask.sum())
extended_attention_mask = F.expand_dims(attention_mask, (1, 2))
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.astype(
next(self.parameters()).dtype
) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
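        # e.g. a mask row [1, 1, 0] becomes [0.0, 0.0, -10000.0] at this point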
embedding_output = self.embeddings(input_ids, token_type_ids)
encoded_layers = self.encoder(
embedding_output,
extended_attention_mask,
output_all_encoded_layers=output_all_encoded_layers,
)
sequence_output = encoded_layers[-1]
pooled_output = self.pooler(sequence_output)
if not output_all_encoded_layers:
encoded_layers = encoded_layers[-1]
return encoded_layers, pooled_output
class BertForSequenceClassification(Module):
"""BERT model for classification.
This module is composed of the BERT model with a linear layer on top of
the pooled output.
Params:
`config`: a BertConfig class instance with the configuration to build a new model.
`num_labels`: the number of classes for the classifier. Default = 2.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
with the word token indices in the vocabulary.
Items in the batch should begin with the special "CLS" token.
(see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length]
with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A`
and type 1 corresponds to a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length]
with indices selected in [0, 1]. It's a mask to be used if the input sequence length
is smaller than the max input sequence length in the current batch. It's the mask
that we typically use for attention when a batch has varying length sentences.
`labels`: labels for the classification output: torch.LongTensor of shape [batch_size]
            with indices selected in [0, ..., num_labels - 1].
Outputs:
if `labels` is not `None`:
Outputs the CrossEntropy classification loss of the output with the labels.
if `labels` is `None`:
Outputs the classification logits of shape [batch_size, num_labels].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
num_labels = 2
model = BertForSequenceClassification(config, num_labels)
logits = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config, num_labels, bert=None):
super().__init__()
if bert is None:
self.bert = BertModel(config)
else:
self.bert = bert
self.num_labels = num_labels
self.dropout = Dropout(config.hidden_dropout_prob)
self.classifier = Linear(config.hidden_size, num_labels)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None):
_, pooled_output = self.bert(
input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False
)
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
if labels is not None:
loss = cross_entropy(
logits.reshape(-1, self.num_labels), labels.reshape(-1)
)
return logits, loss
else:
return logits, None
DATA_URL = "https://data.megengine.org.cn/models/weights/bert"
CONFIG_NAME = "bert_config.json"
VOCAB_NAME = "vocab.txt"
MODEL_NAME = {
"wwm_cased_L-24_H-1024_A-16": "wwm_cased_L_24_H_1024_A_16",
"wwm_uncased_L-24_H-1024_A-16": "wwm_uncased_L_24_H_1024_A_16",
"cased_L-12_H-768_A-12": "cased_L_12_H_768_A_12",
"cased_L-24_H-1024_A-16": "cased_L_24_H_1024_A_16",
"uncased_L-12_H-768_A-12": "uncased_L_12_H_768_A_12",
"uncased_L-24_H-1024_A-16": "uncased_L_24_H_1024_A_16",
"chinese_L-12_H-768_A-12": "chinese_L_12_H_768_A_12",
"multi_cased_L-12_H-768_A-12": "multi_cased_L_12_H_768_A_12",
}
def download_file(url, filename):
# urllib.URLopener().retrieve(url, filename)
urllib.request.urlretrieve(url, filename)
def create_hub_bert(model_name, pretrained):
assert model_name in MODEL_NAME, "{} not in the valid models {}".format(
model_name, MODEL_NAME
)
data_dir = "./{}".format(model_name)
if not os.path.exists(data_dir):
os.makedirs(data_dir)
vocab_url = "{}/{}/{}".format(DATA_URL, model_name, VOCAB_NAME)
config_url = "{}/{}/{}".format(DATA_URL, model_name, CONFIG_NAME)
vocab_file = "./{}/{}".format(model_name, VOCAB_NAME)
config_file = "./{}/{}".format(model_name, CONFIG_NAME)
download_file(vocab_url, vocab_file)
download_file(config_url, config_file)
config = BertConfig(config_file)
model = hub.load("megengine/models", MODEL_NAME[model_name], pretrained=pretrained)
return model, config, vocab_file
@hub.pretrained(
"https://data.megengine.org.cn/models/weights/bert/"
"uncased_L-12_H-768_A-12/bert_4f2157f7_uncased_L-12_H-768_A-12.pkl"
)
def uncased_L_12_H_768_A_12():
config_dict = {
"attention_probs_dropout_prob": 0.1,
"hidden_act": "gelu",
"hidden_dropout_prob": 0.1,
"hidden_size": 768,
"initializer_range": 0.02,
"intermediate_size": 3072,
"max_position_embeddings": 512,
"num_attention_heads": 12,
"num_hidden_layers": 12,
"type_vocab_size": 2,
"vocab_size": 30522,
}
config = BertConfig.from_dict(config_dict)
return BertModel(config)
@hub.pretrained(
"https://data.megengine.org.cn/models/weights/bert/"
"cased_L-12_H-768_A-12/bert_b9727c2f_cased_L-12_H-768_A-12.pkl"
)
def cased_L_12_H_768_A_12():
config_dict = {
"attention_probs_dropout_prob": 0.1,
"hidden_act": "gelu",
"hidden_dropout_prob": 0.1,
"hidden_size": 768,
"initializer_range": 0.02,
"intermediate_size": 3072,
"max_position_embeddings": 512,
"num_attention_heads": 12,
"num_hidden_layers": 12,
"type_vocab_size": 2,
"vocab_size": 28996,
}
config = BertConfig.from_dict(config_dict)
return BertModel(config)
@hub.pretrained(
"https://data.megengine.org.cn/models/weights/bert/"
"uncased_L-24_H-1024_A-16/bert_222f5012_uncased_L-24_H-1024_A-16.pkl"
)
def uncased_L_24_H_1024_A_16():
config_dict = {
"attention_probs_dropout_prob": 0.1,
"hidden_act": "gelu",
"hidden_dropout_prob": 0.1,
"hidden_size": 1024,
"initializer_range": 0.02,
"intermediate_size": 4096,
"max_position_embeddings": 512,
"num_attention_heads": 16,
"num_hidden_layers": 24,
"type_vocab_size": 2,
"vocab_size": 30522,
}
config = BertConfig.from_dict(config_dict)
return BertModel(config)
@hub.pretrained(
"https://data.megengine.org.cn/models/weights/bert/"
"cased_L-24_H-1024_A-16/bert_01f2a65f_cased_L-24_H-1024_A-16.pkl"
)
def cased_L_24_H_1024_A_16():
config_dict = {
"attention_probs_dropout_prob": 0.1,
"directionality": "bidi",
"hidden_act": "gelu",
"hidden_dropout_prob": 0.1,
"hidden_size": 1024,
"initializer_range": 0.02,
"intermediate_size": 4096,
"max_position_embeddings": 512,
"num_attention_heads": 16,
"num_hidden_layers": 24,
"pooler_fc_size": 768,
"pooler_num_attention_heads": 12,
"pooler_num_fc_layers": 3,
"pooler_size_per_head": 128,
"pooler_type": "first_token_transform",
"type_vocab_size": 2,
"vocab_size": 28996,
}
config = BertConfig.from_dict(config_dict)
return BertModel(config)
@hub.pretrained(
"https://data.megengine.org.cn/models/weights/bert/"
"chinese_L-12_H-768_A-12/bert_ee91be1a_chinese_L-12_H-768_A-12.pkl"
)
def chinese_L_12_H_768_A_12():
config_dict = {
"attention_probs_dropout_prob": 0.1,
"directionality": "bidi",
"hidden_act": "gelu",
"hidden_dropout_prob": 0.1,
"hidden_size": 768,
"initializer_range": 0.02,
"intermediate_size": 3072,
"max_position_embeddings": 512,
"num_attention_heads": 12,
"num_hidden_layers": 12,
"pooler_fc_size": 768,
"pooler_num_attention_heads": 12,
"pooler_num_fc_layers": 3,
"pooler_size_per_head": 128,
"pooler_type": "first_token_transform",
"type_vocab_size": 2,
"vocab_size": 21128,
}
config = BertConfig.from_dict(config_dict)
return BertModel(config)
@hub.pretrained(
"https://data.megengine.org.cn/models/weights/bert/"
"multi_cased_L-12_H-768_A-12/bert_283ceec5_multi_cased_L-12_H-768_A-12.pkl"
)
def multi_cased_L_12_H_768_A_12():
config_dict = {
"attention_probs_dropout_prob": 0.1,
"directionality": "bidi",
"hidden_act": "gelu",
"hidden_dropout_prob": 0.1,
"hidden_size": 768,
"initializer_range": 0.02,
"intermediate_size": 3072,
"max_position_embeddings": 512,
"num_attention_heads": 12,
"num_hidden_layers": 12,
"pooler_fc_size": 768,
"pooler_num_attention_heads": 12,
"pooler_num_fc_layers": 3,
"pooler_size_per_head": 128,
"pooler_type": "first_token_transform",
"type_vocab_size": 2,
"vocab_size": 119547,
}
config = BertConfig.from_dict(config_dict)
return BertModel(config)
@hub.pretrained(
"https://data.megengine.org.cn/models/weights/bert/"
"wwm_uncased_L-24_H-1024_A-16/bert_e2780a6a_wwm_uncased_L-24_H-1024_A-16.pkl"
)
def wwm_uncased_L_24_H_1024_A_16():
config_dict = {
"attention_probs_dropout_prob": 0.1,
"hidden_act": "gelu",
"hidden_dropout_prob": 0.1,
"hidden_size": 1024,
"initializer_range": 0.02,
"intermediate_size": 4096,
"max_position_embeddings": 512,
"num_attention_heads": 16,
"num_hidden_layers": 24,
"type_vocab_size": 2,
"vocab_size": 30522,
}
config = BertConfig.from_dict(config_dict)
return BertModel(config)
@hub.pretrained(
"https://data.megengine.org.cn/models/weights/bert/"
"wwm_cased_L-24_H-1024_A-16/bert_0a8f1389_wwm_cased_L-24_H-1024_A-16.pkl"
)
def wwm_cased_L_24_H_1024_A_16():
config_dict = {
"attention_probs_dropout_prob": 0.1,
"directionality": "bidi",
"hidden_act": "gelu",
"hidden_dropout_prob": 0.1,
"hidden_size": 1024,
"initializer_range": 0.02,
"intermediate_size": 4096,
"max_position_embeddings": 512,
"num_attention_heads": 16,
"num_hidden_layers": 24,
"pooler_fc_size": 768,
"pooler_num_attention_heads": 12,
"pooler_num_fc_layers": 3,
"pooler_size_per_head": 128,
"pooler_type": "first_token_transform",
"type_vocab_size": 2,
"vocab_size": 28996,
}
config = BertConfig.from_dict(config_dict)
return BertModel(config)
|
[
"megengine.hub.pretrained",
"megengine.module.Embedding",
"megengine.hub.load",
"megengine.tensor",
"megengine.functional.concat",
"megengine.functional.matmul",
"megengine.functional.sqrt",
"megengine.functional.ones_like",
"megengine.functional.zeros_like",
"megengine.functional.linspace",
"megengine.module.Dropout",
"megengine.module.Linear",
"megengine.functional.expand_dims"
] |
[((24900, 25043), 'megengine.hub.pretrained', 'hub.pretrained', (['"""https://data.megengine.org.cn/models/weights/bert/uncased_L-12_H-768_A-12/bert_4f2157f7_uncased_L-12_H-768_A-12.pkl"""'], {}), "(\n 'https://data.megengine.org.cn/models/weights/bert/uncased_L-12_H-768_A-12/bert_4f2157f7_uncased_L-12_H-768_A-12.pkl'\n )\n", (24914, 25043), True, 'import megengine.hub as hub\n'), ((25559, 25698), 'megengine.hub.pretrained', 'hub.pretrained', (['"""https://data.megengine.org.cn/models/weights/bert/cased_L-12_H-768_A-12/bert_b9727c2f_cased_L-12_H-768_A-12.pkl"""'], {}), "(\n 'https://data.megengine.org.cn/models/weights/bert/cased_L-12_H-768_A-12/bert_b9727c2f_cased_L-12_H-768_A-12.pkl'\n )\n", (25573, 25698), True, 'import megengine.hub as hub\n'), ((26212, 26357), 'megengine.hub.pretrained', 'hub.pretrained', (['"""https://data.megengine.org.cn/models/weights/bert/uncased_L-24_H-1024_A-16/bert_222f5012_uncased_L-24_H-1024_A-16.pkl"""'], {}), "(\n 'https://data.megengine.org.cn/models/weights/bert/uncased_L-24_H-1024_A-16/bert_222f5012_uncased_L-24_H-1024_A-16.pkl'\n )\n", (26226, 26357), True, 'import megengine.hub as hub\n'), ((26876, 27017), 'megengine.hub.pretrained', 'hub.pretrained', (['"""https://data.megengine.org.cn/models/weights/bert/cased_L-24_H-1024_A-16/bert_01f2a65f_cased_L-24_H-1024_A-16.pkl"""'], {}), "(\n 'https://data.megengine.org.cn/models/weights/bert/cased_L-24_H-1024_A-16/bert_01f2a65f_cased_L-24_H-1024_A-16.pkl'\n )\n", (26890, 27017), True, 'import megengine.hub as hub\n'), ((27761, 27904), 'megengine.hub.pretrained', 'hub.pretrained', (['"""https://data.megengine.org.cn/models/weights/bert/chinese_L-12_H-768_A-12/bert_ee91be1a_chinese_L-12_H-768_A-12.pkl"""'], {}), "(\n 'https://data.megengine.org.cn/models/weights/bert/chinese_L-12_H-768_A-12/bert_ee91be1a_chinese_L-12_H-768_A-12.pkl'\n )\n", (27775, 27904), True, 'import megengine.hub as hub\n'), ((28647, 28798), 'megengine.hub.pretrained', 'hub.pretrained', (['"""https://data.megengine.org.cn/models/weights/bert/multi_cased_L-12_H-768_A-12/bert_283ceec5_multi_cased_L-12_H-768_A-12.pkl"""'], {}), "(\n 'https://data.megengine.org.cn/models/weights/bert/multi_cased_L-12_H-768_A-12/bert_283ceec5_multi_cased_L-12_H-768_A-12.pkl'\n )\n", (28661, 28798), True, 'import megengine.hub as hub\n'), ((29547, 29700), 'megengine.hub.pretrained', 'hub.pretrained', (['"""https://data.megengine.org.cn/models/weights/bert/wwm_uncased_L-24_H-1024_A-16/bert_e2780a6a_wwm_uncased_L-24_H-1024_A-16.pkl"""'], {}), "(\n 'https://data.megengine.org.cn/models/weights/bert/wwm_uncased_L-24_H-1024_A-16/bert_e2780a6a_wwm_uncased_L-24_H-1024_A-16.pkl'\n )\n", (29561, 29700), True, 'import megengine.hub as hub\n'), ((30222, 30371), 'megengine.hub.pretrained', 'hub.pretrained', (['"""https://data.megengine.org.cn/models/weights/bert/wwm_cased_L-24_H-1024_A-16/bert_0a8f1389_wwm_cased_L-24_H-1024_A-16.pkl"""'], {}), "(\n 'https://data.megengine.org.cn/models/weights/bert/wwm_cased_L-24_H-1024_A-16/bert_0a8f1389_wwm_cased_L-24_H-1024_A-16.pkl'\n )\n", (30236, 30371), True, 'import megengine.hub as hub\n'), ((24078, 24119), 'urllib.request.urlretrieve', 'urllib.request.urlretrieve', (['url', 'filename'], {}), '(url, filename)\n', (24104, 24119), False, 'import urllib\n'), ((24783, 24858), 'megengine.hub.load', 'hub.load', (['"""megengine/models"""', 'MODEL_NAME[model_name]'], {'pretrained': 'pretrained'}), "('megengine/models', MODEL_NAME[model_name], pretrained=pretrained)\n", (24791, 24858), True, 'import megengine.hub as hub\n'), ((5786, 5814), 'copy.deepcopy', 'copy.deepcopy', (['self.__dict__'], {}), '(self.__dict__)\n', (5799, 5814), False, 'import copy\n'), ((7035, 7083), 'megengine.module.Embedding', 'Embedding', (['config.vocab_size', 'config.hidden_size'], {}), '(config.vocab_size, config.hidden_size)\n', (7044, 7083), False, 'from megengine.module import Dropout, Embedding, Linear, Module, Sequential\n'), ((7119, 7180), 'megengine.module.Embedding', 'Embedding', (['config.max_position_embeddings', 'config.hidden_size'], {}), '(config.max_position_embeddings, config.hidden_size)\n', (7128, 7180), False, 'from megengine.module import Dropout, Embedding, Linear, Module, Sequential\n'), ((7240, 7293), 'megengine.module.Embedding', 'Embedding', (['config.type_vocab_size', 'config.hidden_size'], {}), '(config.type_vocab_size, config.hidden_size)\n', (7249, 7293), False, 'from megengine.module import Dropout, Embedding, Linear, Module, Sequential\n'), ((7560, 7595), 'megengine.module.Dropout', 'Dropout', (['config.hidden_dropout_prob'], {}), '(config.hidden_dropout_prob)\n', (7567, 7595), False, 'from megengine.module import Dropout, Embedding, Linear, Module, Sequential\n'), ((8981, 9027), 'megengine.module.Linear', 'Linear', (['config.hidden_size', 'self.all_head_size'], {}), '(config.hidden_size, self.all_head_size)\n', (8987, 9027), False, 'from megengine.module import Dropout, Embedding, Linear, Module, Sequential\n'), ((9047, 9093), 'megengine.module.Linear', 'Linear', (['config.hidden_size', 'self.all_head_size'], {}), '(config.hidden_size, self.all_head_size)\n', (9053, 9093), False, 'from megengine.module import Dropout, Embedding, Linear, Module, Sequential\n'), ((9115, 9161), 'megengine.module.Linear', 'Linear', (['config.hidden_size', 'self.all_head_size'], {}), '(config.hidden_size, self.all_head_size)\n', (9121, 9161), False, 'from megengine.module import Dropout, Embedding, Linear, Module, Sequential\n'), ((9186, 9230), 'megengine.module.Dropout', 'Dropout', (['config.attention_probs_dropout_prob'], {}), '(config.attention_probs_dropout_prob)\n', (9193, 9230), False, 'from megengine.module import Dropout, Embedding, Linear, Module, Sequential\n'), ((9341, 9360), 'megengine.tensor', 'mge.tensor', (['x.shape'], {}), '(x.shape)\n', (9351, 9360), True, 'import megengine as mge\n'), ((9383, 9461), 'megengine.functional.concat', 'F.concat', (['[x_shape[:-1], (self.num_attention_heads, self.attention_head_size)]'], {}), '([x_shape[:-1], (self.num_attention_heads, self.attention_head_size)])\n', (9391, 9461), True, 'import megengine.functional as F\n'), ((10768, 10806), 'megengine.functional.matmul', 'F.matmul', (['attention_probs', 'value_layer'], {}), '(attention_probs, value_layer)\n', (10776, 10806), True, 'import megengine.functional as F\n'), ((10943, 10974), 'megengine.tensor', 'mge.tensor', (['context_layer.shape'], {}), '(context_layer.shape)\n', (10953, 10974), True, 'import megengine as mge\n'), ((11009, 11059), 'megengine.functional.concat', 'F.concat', (['[context_shape[:-2], self.all_head_size]'], {}), '([context_shape[:-2], self.all_head_size])\n', (11017, 11059), True, 'import megengine.functional as F\n'), ((11272, 11318), 'megengine.module.Linear', 'Linear', (['config.hidden_size', 'config.hidden_size'], {}), '(config.hidden_size, config.hidden_size)\n', (11278, 11318), False, 'from megengine.module import Dropout, Embedding, Linear, Module, Sequential\n'), ((11412, 11447), 'megengine.module.Dropout', 'Dropout', (['config.hidden_dropout_prob'], {}), '(config.hidden_dropout_prob)\n', (11419, 11447), False, 'from megengine.module import Dropout, Embedding, Linear, Module, Sequential\n'), ((12210, 12262), 'megengine.module.Linear', 'Linear', (['config.hidden_size', 'config.intermediate_size'], {}), '(config.hidden_size, config.intermediate_size)\n', (12216, 12262), False, 'from megengine.module import Dropout, Embedding, Linear, Module, Sequential\n'), ((12736, 12788), 'megengine.module.Linear', 'Linear', (['config.intermediate_size', 'config.hidden_size'], {}), '(config.intermediate_size, config.hidden_size)\n', (12742, 12788), False, 'from megengine.module import Dropout, Embedding, Linear, Module, Sequential\n'), ((12882, 12917), 'megengine.module.Dropout', 'Dropout', (['config.hidden_dropout_prob'], {}), '(config.hidden_dropout_prob)\n', (12889, 12917), False, 'from megengine.module import Dropout, Embedding, Linear, Module, Sequential\n'), ((14562, 14608), 'megengine.module.Linear', 'Linear', (['config.hidden_size', 'config.hidden_size'], {}), '(config.hidden_size, config.hidden_size)\n', (14568, 14608), False, 'from megengine.module import Dropout, Embedding, Linear, Module, Sequential\n'), ((18962, 18999), 'megengine.functional.expand_dims', 'F.expand_dims', (['attention_mask', '(1, 2)'], {}), '(attention_mask, (1, 2))\n', (18975, 18999), True, 'import megengine.functional as F\n'), ((22709, 22744), 'megengine.module.Dropout', 'Dropout', (['config.hidden_dropout_prob'], {}), '(config.hidden_dropout_prob)\n', (22716, 22744), False, 'from megengine.module import Dropout, Embedding, Linear, Module, Sequential\n'), ((22771, 22809), 'megengine.module.Linear', 'Linear', (['config.hidden_size', 'num_labels'], {}), '(config.hidden_size, num_labels)\n', (22777, 22809), False, 'from megengine.module import Dropout, Embedding, Linear, Module, Sequential\n'), ((24333, 24357), 'os.path.exists', 'os.path.exists', (['data_dir'], {}), '(data_dir)\n', (24347, 24357), False, 'import os\n'), ((24367, 24388), 'os.makedirs', 'os.makedirs', (['data_dir'], {}), '(data_dir)\n', (24378, 24388), False, 'import os\n'), ((5485, 5523), 'io.open', 'open', (['json_file', '"""r"""'], {'encoding': '"""utf-8"""'}), "(json_file, 'r', encoding='utf-8')\n", (5489, 5523), False, 'from io import open\n'), ((5597, 5613), 'json.loads', 'json.loads', (['text'], {}), '(text)\n', (5607, 5613), False, 'import json\n'), ((6108, 6151), 'io.open', 'open', (['json_file_path', '"""w"""'], {'encoding': '"""utf-8"""'}), "(json_file_path, 'w', encoding='utf-8')\n", (6112, 6151), False, 'from io import open\n'), ((7757, 7780), 'megengine.functional.zeros_like', 'F.zeros_like', (['input_ids'], {}), '(input_ids)\n', (7769, 7780), True, 'import megengine.functional as F\n'), ((7902, 7932), 'megengine.functional.expand_dims', 'F.expand_dims', (['position_ids', '(0)'], {}), '(position_ids, 0)\n', (7915, 7932), True, 'import megengine.functional as F\n'), ((10185, 10220), 'math.sqrt', 'math.sqrt', (['self.attention_head_size'], {}), '(self.attention_head_size)\n', (10194, 10220), False, 'import math\n'), ((18332, 18354), 'megengine.functional.ones_like', 'F.ones_like', (['input_ids'], {}), '(input_ids)\n', (18343, 18354), True, 'import megengine.functional as F\n'), ((18419, 18442), 'megengine.functional.zeros_like', 'F.zeros_like', (['input_ids'], {}), '(input_ids)\n', (18431, 18442), True, 'import megengine.functional as F\n'), ((3928, 3987), 'io.open', 'open', (['vocab_size_or_config_json_file', '"""r"""'], {'encoding': '"""utf-8"""'}), "(vocab_size_or_config_json_file, 'r', encoding='utf-8')\n", (3932, 3987), False, 'from io import open\n'), ((7805, 7846), 'megengine.functional.linspace', 'F.linspace', (['(0)', '(seq_length - 1)', 'seq_length'], {}), '(0, seq_length - 1, seq_length)\n', (7815, 7846), True, 'import megengine.functional as F\n'), ((1797, 1816), 'megengine.functional.sqrt', 'F.sqrt', (['(2 / math.pi)'], {}), '(2 / math.pi)\n', (1803, 1816), True, 'import megengine.functional as F\n'), ((6444, 6464), 'numpy.ones', 'np.ones', (['hidden_size'], {}), '(hidden_size)\n', (6451, 6464), True, 'import numpy as np\n'), ((6515, 6536), 'numpy.zeros', 'np.zeros', (['hidden_size'], {}), '(hidden_size)\n', (6523, 6536), True, 'import numpy as np\n')]
|
from sqlmodel import Field
from typing import Optional
from app.models.base_uuid_model import BaseUUIDModel
from uuid import UUID
class LinkGroupUser(BaseUUIDModel, table=True):
group_id: Optional[UUID] = Field(default=None, nullable=False, foreign_key="group.id", primary_key=True)
user_id: Optional[UUID] = Field(default=None, nullable=False, foreign_key="user.id", primary_key=True)
|
[
"sqlmodel.Field"
] |
[((210, 287), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'nullable': '(False)', 'foreign_key': '"""group.id"""', 'primary_key': '(True)'}), "(default=None, nullable=False, foreign_key='group.id', primary_key=True)\n", (215, 287), False, 'from sqlmodel import Field\n'), ((318, 394), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'nullable': '(False)', 'foreign_key': '"""user.id"""', 'primary_key': '(True)'}), "(default=None, nullable=False, foreign_key='user.id', primary_key=True)\n", (323, 394), False, 'from sqlmodel import Field\n')]
|
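The link model above is typically consumed through Relationship(link_model=...); a minimal sketch of the two sides, with hypothetical Group/User classes standing in for the app's real models:

from typing import List
from uuid import UUID, uuid4
from sqlmodel import Field, Relationship, SQLModel

class Group(SQLModel, table=True):
    # default table name "group" matches foreign_key="group.id" above
    id: UUID = Field(default_factory=uuid4, primary_key=True)
    users: List["User"] = Relationship(back_populates="groups", link_model=LinkGroupUser)

class User(SQLModel, table=True):
    # default table name "user" matches foreign_key="user.id" above
    id: UUID = Field(default_factory=uuid4, primary_key=True)
    groups: List[Group] = Relationship(back_populates="users", link_model=LinkGroupUser)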
"""node state add
Revision ID: <KEY>
Revises: ad46aa4e<PASSWORD>
Create Date: 2021-11-15 21:08:52.990959
"""
import sqlalchemy as sa
import sqlmodel
from alembic import op
# revision identifiers, used by Alembic.
revision = "<KEY>"
down_revision = "<KEY>"
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column(
"preps", sa.Column("node_state", sqlmodel.sql.sqltypes.AutoString(), nullable=True)
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column(
"proposals", "id", existing_type=sa.INTEGER(), nullable=False, autoincrement=True
)
op.drop_column("preps", "node_state")
# ### end Alembic commands ###
|
[
"sqlmodel.sql.sqltypes.AutoString"
] |
[((739, 776), 'alembic.op.drop_column', 'op.drop_column', (['"""preps"""', '"""node_state"""'], {}), "('preps', 'node_state')\n", (753, 776), False, 'from alembic import op\n'), ((441, 475), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (473, 475), False, 'import sqlmodel\n'), ((680, 692), 'sqlalchemy.INTEGER', 'sa.INTEGER', ([], {}), '()\n', (690, 692), True, 'import sqlalchemy as sa\n')]
|
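For reference, a revision like this is driven from the Alembic CLI; assuming a configured alembic.ini, the usual commands are:

alembic upgrade head    # runs upgrade(), adding preps.node_state
alembic downgrade -1    # runs downgrade(), dropping the column again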
import numpy as nm
from sfepy.linalg import dot_sequences
from sfepy.terms.terms import Term, terms
class PiezoCouplingTerm(Term):
r"""
Piezoelectric coupling term. Can be evaluated.
:Definition:
.. math::
\int_{\Omega} g_{kij}\ e_{ij}(\ul{v}) \nabla_k p \mbox{ , }
\int_{\Omega} g_{kij}\ e_{ij}(\ul{u}) \nabla_k q
:Arguments 1:
- material : :math:`g_{kij}`
- virtual : :math:`\ul{v}`
- state : :math:`p`
:Arguments 2:
- material : :math:`g_{kij}`
- state : :math:`\ul{u}`
- virtual : :math:`q`
:Arguments 3:
- material : :math:`g_{kij}`
- parameter_v : :math:`\ul{u}`
- parameter_s : :math:`p`
"""
name = 'dw_piezo_coupling'
arg_types = (('material', 'virtual', 'state'),
('material', 'state', 'virtual'),
('material', 'parameter_v', 'parameter_s'))
arg_shapes = {'material' : 'D, S',
'virtual/grad' : ('D', None), 'state/grad' : 1,
'virtual/div' : (1, None), 'state/div' : 'D',
'parameter_v' : 'D', 'parameter_s' : 1}
modes = ('grad', 'div', 'eval')
def get_fargs(self, mat, vvar, svar,
mode=None, term_mode=None, diff_var=None, **kwargs):
if self.mode == 'grad':
qp_var, qp_name = svar, 'grad'
else:
qp_var, qp_name = vvar, 'cauchy_strain'
vvg, _ = self.get_mapping(vvar)
if mode == 'weak':
aux = nm.array([0], ndmin=4, dtype=nm.float64)
if diff_var is None:
# grad or strain according to mode.
val_qp = self.get(qp_var, qp_name)
fmode = 0
else:
val_qp = aux
fmode = 1
if self.mode == 'grad':
strain, grad = aux, val_qp
else:
strain, grad = val_qp, aux
fmode += 2
return strain, grad, mat, vvg, fmode
elif mode == 'eval':
strain = self.get(vvar, 'cauchy_strain')
grad = self.get(svar, 'grad')
return strain, grad, mat, vvg
else:
raise ValueError('unsupported evaluation mode in %s! (%s)'
% (self.name, mode))
def get_eval_shape(self, mat, vvar, svar,
mode=None, term_mode=None, diff_var=None, **kwargs):
n_el, n_qp, dim, n_en, n_c = self.get_data_shape(vvar)
return (n_el, 1, 1, 1), vvar.dtype
def set_arg_types( self ):
self.function = {
'grad' : terms.dw_piezo_coupling,
'div' : terms.dw_piezo_coupling,
'eval' : terms.d_piezo_coupling,
}[self.mode]
class PiezoStressTerm(Term):
r"""
Evaluate piezoelectric stress tensor.
It is given in the usual vector form exploiting symmetry: in 3D it has 6
components with the indices ordered as :math:`[11, 22, 33, 12, 13, 23]`, in
2D it has 3 components with the indices ordered as :math:`[11, 22, 12]`.
Supports 'eval', 'el_avg' and 'qp' evaluation modes.
:Definition:
.. math::
\int_{\Omega} g_{kij} \nabla_k p
:Arguments:
- material : :math:`g_{kij}`
- parameter : :math:`p`
"""
name = 'ev_piezo_stress'
arg_types = ('material', 'parameter')
arg_shapes = {'material' : 'D, S', 'parameter' : '1'}
@staticmethod
def function(out, val_qp, vg, fmode):
if fmode == 2:
out[:] = val_qp
status = 0
else:
status = vg.integrate(out, val_qp, fmode)
return status
def get_fargs(self, mat, parameter,
mode=None, term_mode=None, diff_var=None, **kwargs):
vg, _ = self.get_mapping(parameter)
grad = self.get(parameter, 'grad')
val_qp = dot_sequences(mat, grad, mode='ATB')
fmode = {'eval' : 0, 'el_avg' : 1, 'qp' : 2}.get(mode, 1)
return val_qp, vg, fmode
def get_eval_shape(self, mat, parameter,
mode=None, term_mode=None, diff_var=None, **kwargs):
n_el, n_qp, dim, n_en, n_c = self.get_data_shape(parameter)
if mode != 'qp':
n_qp = 1
return (n_el, n_qp, dim * (dim + 1) / 2, 1), parameter.dtype
|
[
"sfepy.linalg.dot_sequences"
] |
[((3897, 3933), 'sfepy.linalg.dot_sequences', 'dot_sequences', (['mat', 'grad'], {'mode': '"""ATB"""'}), "(mat, grad, mode='ATB')\n", (3910, 3933), False, 'from sfepy.linalg import dot_sequences\n'), ((1536, 1576), 'numpy.array', 'nm.array', (['[0]'], {'ndmin': '(4)', 'dtype': 'nm.float64'}), '([0], ndmin=4, dtype=nm.float64)\n', (1544, 1576), True, 'import numpy as nm\n')]
|
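Terms like dw_piezo_coupling are referenced from a problem-description equations dict; a minimal sketch of a coupled electromechanical system, assuming materials solid/piezo, displacement variables u/v, and potential variables p/q are defined elsewhere in the input file:

equations = {
    'balance_of_forces' :
    """dw_lin_elastic.i.Omega(solid.D, v, u)
     - dw_piezo_coupling.i.Omega(piezo.g, v, p) = 0""",
    'charge_conservation' :
    """dw_piezo_coupling.i.Omega(piezo.g, u, q)
     + dw_diffusion.i.Omega(piezo.K, q, p) = 0""",
}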
from datetime import datetime
from typing import TYPE_CHECKING, Optional
from models.base import BaseModel, BaseTableFields
from sqlmodel import Field, Relationship, Column, String
if TYPE_CHECKING:
from models.user import User
class Session(BaseModel, BaseTableFields, table=True):
__tablename__ = "sessions"
access_token: str = Field(
sa_column=Column(
String,
unique=True,
nullable=False
)
)
ip_address: Optional[str] = Field(
sa_column=Column(
String(100),
nullable=True
)
)
user_agent: Optional[str] = Field(
sa_column=Column(
String(100),
nullable=True
)
)
user_id: int = Field(foreign_key="users.id")
user: "User" = Relationship(back_populates="sessions")
class SessionRead(BaseModel):
id: int
access_token: str
ip_address: Optional[str]
user_agent: Optional[str]
created_at: datetime
updated_at: datetime
is_active: bool
class SessionCreate(BaseModel):
access_token: str
ip_address: Optional[str]
user_agent: Optional[str]
user_id: int
class SessionUpdate(BaseModel):
is_active: Optional[bool] = None
|
[
"sqlmodel.String",
"sqlmodel.Relationship",
"sqlmodel.Field",
"sqlmodel.Column"
] |
[((751, 780), 'sqlmodel.Field', 'Field', ([], {'foreign_key': '"""users.id"""'}), "(foreign_key='users.id')\n", (756, 780), False, 'from sqlmodel import Field, Relationship, Column, String\n'), ((800, 839), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""sessions"""'}), "(back_populates='sessions')\n", (812, 839), False, 'from sqlmodel import Field, Relationship, Column, String\n'), ((371, 414), 'sqlmodel.Column', 'Column', (['String'], {'unique': '(True)', 'nullable': '(False)'}), '(String, unique=True, nullable=False)\n', (377, 414), False, 'from sqlmodel import Field, Relationship, Column, String\n'), ((544, 555), 'sqlmodel.String', 'String', (['(100)'], {}), '(100)\n', (550, 555), False, 'from sqlmodel import Field, Relationship, Column, String\n'), ((676, 687), 'sqlmodel.String', 'String', (['(100)'], {}), '(100)\n', (682, 687), False, 'from sqlmodel import Field, Relationship, Column, String\n')]
|
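The Relationship(back_populates="sessions") above presumes a matching attribute on the User model; a minimal sketch of that counterpart, reusing the base classes imported above:

from typing import List
from sqlmodel import Relationship

class User(BaseModel, BaseTableFields, table=True):
    __tablename__ = "users"
    # mirrors Session.user so changes on either side stay in sync
    sessions: List["Session"] = Relationship(back_populates="user")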
from typing import Optional
from loguru import logger
from sqlmodel import Field, Session, SQLModel, create_engine, or_, select
class Hero(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str
secret_name: str = Field(max_length=30)
age: Optional[int] = None
def test_database_with_sqlmodel():
hero_1 = Hero(name='Deadpond', secret_name='<NAME>')
hero_2 = Hero(name='Spider-Boy', secret_name='<NAME>')
hero_3 = Hero(name='Rusty-Man', secret_name='<NAME>', age=48)
# engine = create_engine('sqlite:///temp.db')
engine = create_engine('sqlite:///:memory:')
SQLModel.metadata.create_all(engine)
with Session(engine) as session:
for hero in [hero_1, hero_2, hero_3]:
session.add(hero)
session.commit()
with Session(engine) as session:
statement = select(Hero).where(Hero.name == 'Spider-Boy')
hero = session.exec(statement).first()
logger.info(hero)
# Or statement
statement = select(Hero).where((Hero.name == 'Spider-Boy') | (Hero.name == 'Rusty-Man'))
heroes = session.exec(statement)
for hero in heroes:
logger.info(hero)
# Or statement, alternative way
statement = select(Hero).where(or_(Hero.name == 'Spider-Boy', Hero.name == 'Rusty-Man'))
heroes = session.exec(statement)
for hero in heroes:
logger.info(hero)
# And statement
statement = select(Hero).where(Hero.name == 'Spider-Boy', Hero.secret_name == '<NAME>')
heroes = session.exec(statement)
for hero in heroes:
logger.info(hero)
# And statement, alternative way
statement = select(Hero).where(Hero.name == 'Spider-Boy').where(Hero.secret_name == '<NAME>')
heroes = session.exec(statement)
for hero in heroes:
logger.info(hero)
if __name__ == '__main__':
test_database_with_sqlmodel()
|
[
"sqlmodel.SQLModel.metadata.create_all",
"sqlmodel.Session",
"sqlmodel.or_",
"sqlmodel.Field",
"sqlmodel.select",
"sqlmodel.create_engine"
] |
[((189, 226), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (194, 226), False, 'from sqlmodel import Field, Session, SQLModel, create_engine, or_, select\n'), ((264, 284), 'sqlmodel.Field', 'Field', ([], {'max_length': '(30)'}), '(max_length=30)\n', (269, 284), False, 'from sqlmodel import Field, Session, SQLModel, create_engine, or_, select\n'), ((598, 633), 'sqlmodel.create_engine', 'create_engine', (['"""sqlite:///:memory:"""'], {}), "('sqlite:///:memory:')\n", (611, 633), False, 'from sqlmodel import Field, Session, SQLModel, create_engine, or_, select\n'), ((639, 675), 'sqlmodel.SQLModel.metadata.create_all', 'SQLModel.metadata.create_all', (['engine'], {}), '(engine)\n', (667, 675), False, 'from sqlmodel import Field, Session, SQLModel, create_engine, or_, select\n'), ((686, 701), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (693, 701), False, 'from sqlmodel import Field, Session, SQLModel, create_engine, or_, select\n'), ((825, 840), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (832, 840), False, 'from sqlmodel import Field, Session, SQLModel, create_engine, or_, select\n'), ((974, 991), 'loguru.logger.info', 'logger.info', (['hero'], {}), '(hero)\n', (985, 991), False, 'from loguru import logger\n'), ((1194, 1211), 'loguru.logger.info', 'logger.info', (['hero'], {}), '(hero)\n', (1205, 1211), False, 'from loguru import logger\n'), ((1292, 1348), 'sqlmodel.or_', 'or_', (["(Hero.name == 'Spider-Boy')", "(Hero.name == 'Rusty-Man')"], {}), "(Hero.name == 'Spider-Boy', Hero.name == 'Rusty-Man')\n", (1295, 1348), False, 'from sqlmodel import Field, Session, SQLModel, create_engine, or_, select\n'), ((1431, 1448), 'loguru.logger.info', 'logger.info', (['hero'], {}), '(hero)\n', (1442, 1448), False, 'from loguru import logger\n'), ((1651, 1668), 'loguru.logger.info', 'logger.info', (['hero'], {}), '(hero)\n', (1662, 1668), False, 'from loguru import logger\n'), ((1894, 1911), 'loguru.logger.info', 'logger.info', (['hero'], {}), '(hero)\n', (1905, 1911), False, 'from loguru import logger\n'), ((873, 885), 'sqlmodel.select', 'select', (['Hero'], {}), '(Hero)\n', (879, 885), False, 'from sqlmodel import Field, Session, SQLModel, create_engine, or_, select\n'), ((1036, 1048), 'sqlmodel.select', 'select', (['Hero'], {}), '(Hero)\n', (1042, 1048), False, 'from sqlmodel import Field, Session, SQLModel, create_engine, or_, select\n'), ((1273, 1285), 'sqlmodel.select', 'select', (['Hero'], {}), '(Hero)\n', (1279, 1285), False, 'from sqlmodel import Field, Session, SQLModel, create_engine, or_, select\n'), ((1494, 1506), 'sqlmodel.select', 'select', (['Hero'], {}), '(Hero)\n', (1500, 1506), False, 'from sqlmodel import Field, Session, SQLModel, create_engine, or_, select\n'), ((1731, 1743), 'sqlmodel.select', 'select', (['Hero'], {}), '(Hero)\n', (1737, 1743), False, 'from sqlmodel import Field, Session, SQLModel, create_engine, or_, select\n')]
|
"""Anime CRUD controller."""
import sqlmodel
from sqlmodel.ext.asyncio import session as aio_session
from app.crud import base
from app.models import anime
class AnimeCRUD(base.BaseCRUD[anime.Anime, anime.AnimeCreate,
anime.AnimeUpdate]):
"""CRUD controller for anime.
It contains Create, Read, Update, and Delete methods.
"""
@classmethod
async def get_by_title(cls, session: aio_session.AsyncSession,
title: str) -> anime.Anime | None:
"""Gets an anime by their title.
Args:
session: The database session.
title: The anime's title.
"""
anime_list = await session.exec(
sqlmodel.select(anime.Anime).where(anime.Anime.title_en == title))
return anime_list.first()
|
[
"sqlmodel.select"
] |
[((723, 751), 'sqlmodel.select', 'sqlmodel.select', (['anime.Anime'], {}), '(anime.Anime)\n', (738, 751), False, 'import sqlmodel\n')]
|
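A hypothetical call site for get_by_title, sketched against an in-memory aiosqlite database (the engine URL, title, and table bootstrap are illustrative):

import asyncio
import sqlmodel
from sqlalchemy.ext.asyncio import create_async_engine
from sqlmodel.ext.asyncio.session import AsyncSession

async def demo() -> None:
    engine = create_async_engine("sqlite+aiosqlite://")
    async with engine.begin() as conn:
        # create the anime table in the throwaway database
        await conn.run_sync(sqlmodel.SQLModel.metadata.create_all)
    async with AsyncSession(engine) as session:
        found = await AnimeCRUD.get_by_title(session, "Cowboy Bebop")
        print(found)  # None: the database is empty

asyncio.run(demo())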
from decimal import Decimal
from unittest.mock import patch
from sqlmodel import create_engine
from ...conftest import get_testing_print_function
expected_calls = [
[
"Hero 1:",
{
"name": "Deadpond",
"age": None,
"id": 1,
"secret_name": "<NAME>",
"money": Decimal("1.100"),
},
],
[
"Hero 2:",
{
"name": "Rusty-Man",
"age": 48,
"id": 3,
"secret_name": "<NAME>",
"money": Decimal("2.200"),
},
],
["Total money: 3.300"],
]
def test_tutorial(clear_sqlmodel):
from docs_src.advanced.decimal import tutorial001 as mod
mod.sqlite_url = "sqlite://"
mod.engine = create_engine(mod.sqlite_url)
calls = []
new_print = get_testing_print_function(calls)
with patch("builtins.print", new=new_print):
mod.main()
assert calls == expected_calls
|
[
"sqlmodel.create_engine"
] |
[((760, 789), 'sqlmodel.create_engine', 'create_engine', (['mod.sqlite_url'], {}), '(mod.sqlite_url)\n', (773, 789), False, 'from sqlmodel import create_engine\n'), ((866, 904), 'unittest.mock.patch', 'patch', (['"""builtins.print"""'], {'new': 'new_print'}), "('builtins.print', new=new_print)\n", (871, 904), False, 'from unittest.mock import patch\n'), ((914, 924), 'docs_src.advanced.decimal.tutorial001.main', 'mod.main', ([], {}), '()\n', (922, 924), True, 'from docs_src.advanced.decimal import tutorial001 as mod\n'), ((339, 355), 'decimal.Decimal', 'Decimal', (['"""1.100"""'], {}), "('1.100')\n", (346, 355), False, 'from decimal import Decimal\n'), ((545, 561), 'decimal.Decimal', 'Decimal', (['"""2.200"""'], {}), "('2.200')\n", (552, 561), False, 'from decimal import Decimal\n')]
|
import megengine as mge
import megengine.functional as F
from megengine.core import Tensor
def softmax_loss(pred, label, ignore_label=-1):
max_pred = F.zero_grad(pred.max(axis=1, keepdims=True))
pred -= max_pred
log_prob = pred - F.log(F.exp(pred).sum(axis=1, keepdims=True))
mask = 1 - F.equal(label, ignore_label)
vlabel = label * mask
loss = -(F.indexing_one_hot(log_prob, vlabel, 1) * mask)
return loss
def smooth_l1_loss(pred, target, beta: float):
abs_x = F.abs(pred - target)
in_mask = abs_x < beta
out_mask = 1 - in_mask
in_loss = 0.5 * abs_x ** 2 / beta
out_loss = abs_x - 0.5 * beta
loss = in_loss * in_mask + out_loss * out_mask
return loss.sum(axis=1)
|
[
"megengine.functional.abs",
"megengine.functional.equal",
"megengine.functional.exp",
"megengine.functional.indexing_one_hot"
] |
[((496, 516), 'megengine.functional.abs', 'F.abs', (['(pred - target)'], {}), '(pred - target)\n', (501, 516), True, 'import megengine.functional as F\n'), ((304, 332), 'megengine.functional.equal', 'F.equal', (['label', 'ignore_label'], {}), '(label, ignore_label)\n', (311, 332), True, 'import megengine.functional as F\n'), ((372, 411), 'megengine.functional.indexing_one_hot', 'F.indexing_one_hot', (['log_prob', 'vlabel', '(1)'], {}), '(log_prob, vlabel, 1)\n', (390, 411), True, 'import megengine.functional as F\n'), ((249, 260), 'megengine.functional.exp', 'F.exp', (['pred'], {}), '(pred)\n', (254, 260), True, 'import megengine.functional as F\n')]
|
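A tiny numeric check of smooth_l1_loss (inputs and beta are made up): with beta = 1.0, |x| = 0.5 lands in the quadratic branch, 0.5 * 0.5**2 / 1.0 = 0.125, while |x| = 2.0 lands in the linear branch, 2.0 - 0.5 = 1.5, so the per-row sum is 1.625.

import numpy as np
import megengine as mge

pred = mge.tensor(np.array([[0.5, 2.0]], dtype=np.float32))
target = mge.tensor(np.zeros((1, 2), dtype=np.float32))
print(smooth_l1_loss(pred, target, beta=1.0))  # approx. [1.625]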
#! /usr/bin/env python3
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import getopt
import os
import runpy
import sys
from megengine.logger import get_logger
from megengine.utils.profiler import Profiler, merge_trace_events
def main():
parser = argparse.ArgumentParser(
prog="megengine.tools.profiler", description="Profiling megengine program"
)
parser.add_argument(
"-m", "--module", action="store_true", help="whether launch program as module"
)
parser.add_argument("-o", "--output", type=str, help="output file location")
parser.add_argument(
"-f",
"--format",
action="append",
type=str,
help="output file format",
choices=Profiler.valid_formats,
)
parser.add_argument(
"--merge_trace_events", action="store_true",
)
parser.add_argument(
"--clean", action="store_true",
)
for opt in Profiler.valid_options:
parser.add_argument("--" + opt, type=int, default=None)
args, extras = parser.parse_known_args(sys.argv[1:])
prof_args = {}
for opt in Profiler.valid_options:
optval = getattr(args, opt, None)
if optval is not None:
prof_args[opt] = optval
if args.output is not None:
prof_args["path"] = args.output
if args.format:
prof_args["formats"] = args.format
    # create the profiler before the optional --clean pass and the final
    # merge_trace_events(), both of which read profiler.directory
    profiler = Profiler(**prof_args)
    if args.clean:
        for file in os.listdir(profiler.directory):
            os.remove(os.path.join(profiler.directory, file))
if len(extras) == 0:
if not args.merge_trace_events:
parser.print_usage()
exit(1)
else:
filename = extras[0]
if not args.module:
if not os.path.exists(filename):
get_logger().fatal("cannot find file {}".format(filename))
exit(1)
filename = os.path.realpath(filename)
# Replace profiler's dir with script's dir in front of module search path.
sys.path[0] = os.path.dirname(filename)
sys.argv[:] = [filename, *extras[1:]]
with profiler:
if args.module:
runpy.run_module(filename)
else:
run_script(filename)
profiler.dump()
if args.merge_trace_events:
merge_trace_events(profiler.directory)
def run_module(modulename):
runpy.run_module(modulename, None, "__main__", True)
def run_script(filename):
runpy.run_path(filename, None, "__main__")
if __name__ == "__main__":
main()
|
[
"megengine.utils.profiler.merge_trace_events",
"megengine.logger.get_logger",
"megengine.utils.profiler.Profiler"
] |
[((575, 679), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'prog': '"""megengine.tools.profiler"""', 'description': '"""Profiling megengine program"""'}), "(prog='megengine.tools.profiler', description=\n 'Profiling megengine program')\n", (598, 679), False, 'import argparse\n'), ((2719, 2771), 'runpy.run_module', 'runpy.run_module', (['modulename', 'None', '"""__main__"""', '(True)'], {}), "(modulename, None, '__main__', True)\n", (2735, 2771), False, 'import runpy\n'), ((2804, 2846), 'runpy.run_path', 'runpy.run_path', (['filename', 'None', '"""__main__"""'], {}), "(filename, None, '__main__')\n", (2818, 2846), False, 'import runpy\n'), ((1730, 1760), 'os.listdir', 'os.listdir', (['profiler.directory'], {}), '(profiler.directory)\n', (1740, 1760), False, 'import os\n'), ((2410, 2431), 'megengine.utils.profiler.Profiler', 'Profiler', ([], {}), '(**prof_args)\n', (2418, 2431), False, 'from megengine.utils.profiler import Profiler, merge_trace_events\n'), ((2646, 2684), 'megengine.utils.profiler.merge_trace_events', 'merge_trace_events', (['profiler.directory'], {}), '(profiler.directory)\n', (2664, 2684), False, 'from megengine.utils.profiler import Profiler, merge_trace_events\n'), ((2177, 2203), 'os.path.realpath', 'os.path.realpath', (['filename'], {}), '(filename)\n', (2193, 2203), False, 'import os\n'), ((2317, 2342), 'os.path.dirname', 'os.path.dirname', (['filename'], {}), '(filename)\n', (2332, 2342), False, 'import os\n'), ((1784, 1822), 'os.path.join', 'os.path.join', (['profiler.directory', 'file'], {}), '(profiler.directory, file)\n', (1796, 1822), False, 'import os\n'), ((2029, 2053), 'os.path.exists', 'os.path.exists', (['filename'], {}), '(filename)\n', (2043, 2053), False, 'import os\n'), ((2499, 2525), 'runpy.run_module', 'runpy.run_module', (['filename'], {}), '(filename)\n', (2515, 2525), False, 'import runpy\n'), ((2071, 2083), 'megengine.logger.get_logger', 'get_logger', ([], {}), '()\n', (2081, 2083), False, 'from megengine.logger import get_logger\n')]
|
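A hedged usage sketch of the tool above (the script name is made up; the flags are the ones parsed in main()):

python -m megengine.tools.profiler -o prof_out train.py   # profile a script
python -m megengine.tools.profiler --merge_trace_events   # merge dumped traces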
import datetime
from typing import Optional
from pydantic import PositiveInt
from sqlalchemy import Integer, ForeignKey
from sqlmodel import Field, Column, DateTime, Relationship, SQLModel
from sqlalchemy import UniqueConstraint
from sb_backend.app.models.base.base_model import TimeStampMixin
from sb_backend.app.models.fields import SeriesCode
from .noseries import NoSeries
class NoSeriesLineBase(SQLModel):
"""«No. Series Line» («Серия Номеров Строка»)"""
starting_date: datetime.datetime = Field(
sa_column=Column(
DateTime(timezone=True),
nullable=False
)
)
starting_no: SeriesCode
ending_no: SeriesCode
# starting_no: str = Field(max_length=20, nullable=False)
# ending_no: str = Field(max_length=20, nullable=True, default='')
last_date_used: datetime.datetime = Field(
sa_column=Column(
DateTime(timezone=True),
nullable=True
)
)
warning_no: str = Field(max_length=20, nullable=True, default='')
last_no_used: str = Field(max_length=20, nullable=True, default='')
increment_by: PositiveInt = Field(default=1)
blocked: bool = False
# series_no_id: int = Field(
# sa_column=Column(
# Integer,
# ForeignKey(NoSeries.id, ondelete="CASCADE"),
# ),
# nullable = False
# )
# ForeignKey(NoSeries.id, ondelete="RESTRICT"),
# series_no_id: int = Field(default=None, foreign_key="no_series.id")
series_no_id: int
series_no: Optional[NoSeries] = Relationship(back_populates = "noserieslines")
class NoSeriesLine(NoSeriesLineBase, TimeStampMixin, table=True):
"""«No. Series Line» («Серия Номеров Строка»)"""
__tablename__ = "no_series_line"
__table_args__ = (UniqueConstraint("series_no_id", "starting_date"),)
id: Optional[int] = Field(default=None, primary_key=True)
series_no_id: int = Field(
sa_column=Column(
Integer,
ForeignKey(NoSeries.id, ondelete="RESTRICT"),
),
nullable = False
)
class NoSeriesLineCreate(NoSeriesLineBase):
"""«No. Series Line» («Серия Номеров Строка»)"""
pass
class NoSeriesLineRead(NoSeriesLineBase):
"""«No. Series Line» («Серия Номеров Строка»)"""
id: int
class NoSeriesLineUpdate(SQLModel):
"""«No. Series Line» («Серия Номеров Строка»)"""
starting_date: datetime.datetime
starting_no: SeriesCode
ending_no: Optional[SeriesCode]
last_date_used: datetime.datetime
warning_no: Optional[SeriesCode]
increment_by: Optional[PositiveInt] = 1
blocked: Optional[bool] = False
series_no_id: PositiveInt
# series_no_id: Optional[int] = None
|
[
"sqlmodel.Relationship",
"sqlmodel.Field",
"sqlmodel.DateTime"
] |
[((967, 1014), 'sqlmodel.Field', 'Field', ([], {'max_length': '(20)', 'nullable': '(True)', 'default': '""""""'}), "(max_length=20, nullable=True, default='')\n", (972, 1014), False, 'from sqlmodel import Field, Column, DateTime, Relationship, SQLModel\n'), ((1039, 1086), 'sqlmodel.Field', 'Field', ([], {'max_length': '(20)', 'nullable': '(True)', 'default': '""""""'}), "(max_length=20, nullable=True, default='')\n", (1044, 1086), False, 'from sqlmodel import Field, Column, DateTime, Relationship, SQLModel\n'), ((1119, 1135), 'sqlmodel.Field', 'Field', ([], {'default': '(1)'}), '(default=1)\n', (1124, 1135), False, 'from sqlmodel import Field, Column, DateTime, Relationship, SQLModel\n'), ((1529, 1573), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""noserieslines"""'}), "(back_populates='noserieslines')\n", (1541, 1573), False, 'from sqlmodel import Field, Column, DateTime, Relationship, SQLModel\n'), ((1831, 1868), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (1836, 1868), False, 'from sqlmodel import Field, Column, DateTime, Relationship, SQLModel\n'), ((1755, 1804), 'sqlalchemy.UniqueConstraint', 'UniqueConstraint', (['"""series_no_id"""', '"""starting_date"""'], {}), "('series_no_id', 'starting_date')\n", (1771, 1804), False, 'from sqlalchemy import UniqueConstraint\n'), ((548, 571), 'sqlmodel.DateTime', 'DateTime', ([], {'timezone': '(True)'}), '(timezone=True)\n', (556, 571), False, 'from sqlmodel import Field, Column, DateTime, Relationship, SQLModel\n'), ((882, 905), 'sqlmodel.DateTime', 'DateTime', ([], {'timezone': '(True)'}), '(timezone=True)\n', (890, 905), False, 'from sqlmodel import Field, Column, DateTime, Relationship, SQLModel\n'), ((1954, 1998), 'sqlalchemy.ForeignKey', 'ForeignKey', (['NoSeries.id'], {'ondelete': '"""RESTRICT"""'}), "(NoSeries.id, ondelete='RESTRICT')\n", (1964, 1998), False, 'from sqlalchemy import Integer, ForeignKey\n')]
|
import os
import numpy as nm
from sfepy.base.testing import TestCommon
class Test(TestCommon):
@staticmethod
def from_conf(conf, options):
test = Test(conf=conf, options=options)
test.join = lambda x: os.path.join(test.options.out_dir, x)
return test
def test_linearization(self):
from sfepy.base.base import Struct
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy import data_dir
geometries = ['2_3', '2_4', '3_4', '3_8']
approx_orders = [1, 2]
funs = [nm.cos, nm.sin, lambda x: x]
ok = True
for geometry in geometries:
name = os.path.join(data_dir,
'meshes/elements/%s_1.mesh' % geometry)
mesh = Mesh.from_file(name)
domain = FEDomain('', mesh)
domain = domain.refine()
domain.mesh.write(self.join('linearizer-%s-0.mesh' % geometry))
omega = domain.create_region('Omega', 'all')
for approx_order in approx_orders:
for dpn in [1, mesh.dim]:
self.report('geometry: %s, approx. order: %d, dpn: %d' %
(geometry, approx_order, dpn))
field = Field.from_args('fu', nm.float64, dpn, omega,
approx_order=approx_order)
cc = field.get_coor()
dofs = nm.zeros((field.n_nod, dpn), dtype=nm.float64)
for ic in range(dpn):
dofs[:, ic] = funs[ic](3 * (cc[:, 0] * cc[:, 1]))
vmesh, vdofs, level = field.linearize(dofs,
min_level=0,
max_level=3,
eps=1e-2)
if approx_order == 1:
_ok = level == 0
else:
_ok = level > 0
self.report('max. refinement level: %d: %s' % (level, _ok))
ok = ok and _ok
rdofs = nm.zeros((vmesh.n_nod, dpn), dtype=nm.float64)
cc = vmesh.coors
for ic in range(dpn):
rdofs[:, ic] = funs[ic](3 * (cc[:, 0] * cc[:, 1]))
_ok = nm.allclose(rdofs, vdofs, rtol=0.0, atol=0.03)
self.report('interpolation: %s' % _ok)
ok = ok and _ok
out = {
'u' : Struct(name='output_data',
mode='vertex', data=vdofs,
var_name='u', dofs=None)
}
name = self.join('linearizer-%s-%d-%d'
% (geometry, approx_order, dpn))
vmesh.write(name + '.mesh')
vmesh.write(name + '.vtk', out=out)
return ok
|
[
"sfepy.base.base.Struct",
"sfepy.discrete.fem.Mesh.from_file",
"sfepy.discrete.fem.Field.from_args",
"sfepy.discrete.fem.FEDomain"
] |
[((228, 265), 'os.path.join', 'os.path.join', (['test.options.out_dir', 'x'], {}), '(test.options.out_dir, x)\n', (240, 265), False, 'import os\n'), ((661, 723), 'os.path.join', 'os.path.join', (['data_dir', "('meshes/elements/%s_1.mesh' % geometry)"], {}), "(data_dir, 'meshes/elements/%s_1.mesh' % geometry)\n", (673, 723), False, 'import os\n'), ((775, 795), 'sfepy.discrete.fem.Mesh.from_file', 'Mesh.from_file', (['name'], {}), '(name)\n', (789, 795), False, 'from sfepy.discrete.fem import Mesh, FEDomain, Field\n'), ((818, 836), 'sfepy.discrete.fem.FEDomain', 'FEDomain', (['""""""', 'mesh'], {}), "('', mesh)\n", (826, 836), False, 'from sfepy.discrete.fem import Mesh, FEDomain, Field\n'), ((1268, 1340), 'sfepy.discrete.fem.Field.from_args', 'Field.from_args', (['"""fu"""', 'nm.float64', 'dpn', 'omega'], {'approx_order': 'approx_order'}), "('fu', nm.float64, dpn, omega, approx_order=approx_order)\n", (1283, 1340), False, 'from sfepy.discrete.fem import Mesh, FEDomain, Field\n'), ((1455, 1501), 'numpy.zeros', 'nm.zeros', (['(field.n_nod, dpn)'], {'dtype': 'nm.float64'}), '((field.n_nod, dpn), dtype=nm.float64)\n', (1463, 1501), True, 'import numpy as nm\n'), ((2191, 2237), 'numpy.zeros', 'nm.zeros', (['(vmesh.n_nod, dpn)'], {'dtype': 'nm.float64'}), '((vmesh.n_nod, dpn), dtype=nm.float64)\n', (2199, 2237), True, 'import numpy as nm\n'), ((2419, 2465), 'numpy.allclose', 'nm.allclose', (['rdofs', 'vdofs'], {'rtol': '(0.0)', 'atol': '(0.03)'}), '(rdofs, vdofs, rtol=0.0, atol=0.03)\n', (2430, 2465), True, 'import numpy as nm\n'), ((2620, 2698), 'sfepy.base.base.Struct', 'Struct', ([], {'name': '"""output_data"""', 'mode': '"""vertex"""', 'data': 'vdofs', 'var_name': '"""u"""', 'dofs': 'None'}), "(name='output_data', mode='vertex', data=vdofs, var_name='u', dofs=None)\n", (2626, 2698), False, 'from sfepy.base.base import Struct\n')]
|
import numpy as nm
from sfepy.base.base import assert_, Struct
from sfepy.terms.terms import terms
from sfepy.terms.terms_hyperelastic_base import HyperElasticBase
class HyperElasticTLBase(HyperElasticBase):
"""
Base class for all hyperelastic terms in TL formulation family.
The subclasses should have the following static method attributes:
- `stress_function()` (the stress)
- `tan_mod_function()` (the tangent modulus)
The common (family) data are cached in the evaluate cache of state
variable.
"""
family_function = staticmethod(terms.dq_finite_strain_tl)
weak_function = staticmethod(terms.dw_he_rtm)
fd_cache_name = 'tl_common'
hyperelastic_mode = 0
def compute_family_data(self, state):
ap, vg = self.get_approximation(state)
vec = self.get_vector(state)
n_el, n_qp, dim, n_en, n_c = self.get_data_shape(state)
sym = dim * (dim + 1) / 2
shapes = {
'mtx_f' : (n_el, n_qp, dim, dim),
'det_f' : (n_el, n_qp, 1, 1),
'sym_c' : (n_el, n_qp, sym, 1),
'tr_c' : (n_el, n_qp, 1, 1),
'in2_c' : (n_el, n_qp, 1, 1),
'sym_inv_c' : (n_el, n_qp, sym, 1),
'green_strain' : (n_el, n_qp, sym, 1),
}
data = Struct(name='tl_family_data')
for key, shape in shapes.iteritems():
setattr(data, key, nm.zeros(shape, dtype=nm.float64))
self.family_function(data.mtx_f,
data.det_f,
data.sym_c,
data.tr_c,
data.in2_c,
data.sym_inv_c,
data.green_strain,
vec, vg, ap.econn)
return data
class NeoHookeanTLTerm(HyperElasticTLBase):
r"""
Hyperelastic neo-Hookean term. Effective stress
:math:`S_{ij} = \mu J^{-\frac{2}{3}}(\delta_{ij} -
\frac{1}{3}C_{kk}C_{ij}^{-1})`.
:Definition:
.. math::
\int_{\Omega} S_{ij}(\ul{u}) \delta E_{ij}(\ul{u};\ul{v})
:Arguments:
- material : :math:`\mu`
- virtual : :math:`\ul{v}`
- state : :math:`\ul{u}`
"""
name = 'dw_tl_he_neohook'
family_data_names = ['det_f', 'tr_c', 'sym_inv_c']
stress_function = staticmethod(terms.dq_tl_he_stress_neohook)
tan_mod_function = staticmethod(terms.dq_tl_he_tan_mod_neohook)
class MooneyRivlinTLTerm(HyperElasticTLBase):
r"""
Hyperelastic Mooney-Rivlin term. Effective stress
:math:`S_{ij} = \kappa J^{-\frac{4}{3}} (C_{kk} \delta_{ij} - C_{ij}
    - \frac{2}{3} I_2 C_{ij}^{-1})`.
:Definition:
.. math::
\int_{\Omega} S_{ij}(\ul{u}) \delta E_{ij}(\ul{u};\ul{v})
:Arguments:
- material : :math:`\kappa`
- virtual : :math:`\ul{v}`
- state : :math:`\ul{u}`
"""
name = 'dw_tl_he_mooney_rivlin'
family_data_names = ['det_f', 'tr_c', 'sym_inv_c', 'sym_c', 'in2_c']
stress_function = staticmethod(terms.dq_tl_he_stress_mooney_rivlin)
tan_mod_function = staticmethod(terms.dq_tl_he_tan_mod_mooney_rivlin)
class BulkPenaltyTLTerm(HyperElasticTLBase):
r"""
Hyperelastic bulk penalty term. Stress
:math:`S_{ij} = K(J-1)\; J C_{ij}^{-1}`.
:Definition:
.. math::
\int_{\Omega} S_{ij}(\ul{u}) \delta E_{ij}(\ul{u};\ul{v})
:Arguments:
- material : :math:`K`
- virtual : :math:`\ul{v}`
- state : :math:`\ul{u}`
"""
name = 'dw_tl_bulk_penalty'
family_data_names = ['det_f', 'sym_inv_c']
stress_function = staticmethod(terms.dq_tl_he_stress_bulk)
tan_mod_function = staticmethod(terms.dq_tl_he_tan_mod_bulk)
class BulkActiveTLTerm(HyperElasticTLBase):
r"""
Hyperelastic bulk active term. Stress :math:`S_{ij} = A J C_{ij}^{-1}`,
where :math:`A` is the activation in :math:`[0, F_{\rm max}]`.
:Definition:
.. math::
\int_{\Omega} S_{ij}(\ul{u}) \delta E_{ij}(\ul{u};\ul{v})
:Arguments:
- material : :math:`A`
- virtual : :math:`\ul{v}`
- state : :math:`\ul{u}`
"""
name = 'dw_tl_bulk_active'
family_data_names = ['det_f', 'sym_inv_c']
stress_function = staticmethod(terms.dq_tl_he_stress_bulk_active)
tan_mod_function = staticmethod(terms.dq_tl_he_tan_mod_bulk_active)
class BulkPressureTLTerm(HyperElasticTLBase):
r"""
Hyperelastic bulk pressure term. Stress
:math:`S_{ij} = -p J C_{ij}^{-1}`.
:Definition:
.. math::
\int_{\Omega} S_{ij}(p) \delta E_{ij}(\ul{u};\ul{v})
:Arguments:
- virtual : :math:`\ul{v}`
- state : :math:`\ul{u}`
- state_p : :math:`p`
"""
name = 'dw_tl_bulk_pressure'
arg_types = ('virtual', 'state', 'state_p')
arg_shapes = {'virtual' : ('D', 'state'), 'state' : 'D', 'state_p' : 1}
family_data_names = ['det_f', 'sym_inv_c']
family_function = staticmethod(terms.dq_finite_strain_tl)
weak_function = staticmethod(terms.dw_he_rtm)
weak_dp_function = staticmethod(terms.dw_tl_volume)
stress_function = staticmethod(terms.dq_tl_stress_bulk_pressure)
tan_mod_u_function = staticmethod(terms.dq_tl_tan_mod_bulk_pressure_u)
def compute_data(self, family_data, mode, **kwargs):
det_f, sym_inv_c = family_data.det_f, family_data.sym_inv_c
p_qp = family_data.p_qp
if mode == 0:
out = nm.empty_like(sym_inv_c)
fun = self.stress_function
elif mode == 1:
shape = list(sym_inv_c.shape)
shape[-1] = shape[-2]
out = nm.empty(shape, dtype=nm.float64)
fun = self.tan_mod_u_function
else:
raise ValueError('bad mode! (%d)' % mode)
fun(out, p_qp, det_f, sym_inv_c)
return out
def get_fargs(self, virtual, state, state_p,
mode=None, term_mode=None, diff_var=None, **kwargs):
vgv, _ = self.get_mapping(state)
fd = self.get_family_data(state, 'tl_common', self.family_data_names)
fd.p_qp = self.get(state_p, 'val')
if mode == 'weak':
if diff_var != state_p.name:
if diff_var is None:
stress = self.compute_data(fd, 0, **kwargs)
self.stress_cache = stress
tan_mod = nm.array([0], ndmin=4, dtype=nm.float64)
fmode = 0
else:
stress = self.stress_cache
if stress is None:
stress = self.compute_data(fd, 0, **kwargs)
tan_mod = self.compute_data(fd, 1, **kwargs)
fmode = 1
fargs = (self.weak_function,
stress, tan_mod, fd.mtx_f, fd.det_f, vgv, fmode, 0)
else:
vgs, _ = self.get_mapping(state_p)
fargs = (self.weak_dp_function,
fd.mtx_f, fd.sym_inv_c, fd.det_f, vgs, vgv, 1, -1)
return fargs
elif mode == 'el_avg':
if term_mode == 'strain':
out_qp = fd.green_strain
elif term_mode == 'stress':
out_qp = self.compute_data(fd, 0, **kwargs)
else:
raise ValueError('unsupported term mode in %s! (%s)'
% (self.name, term_mode))
return self.integrate, out_qp, vgv, 1
else:
raise ValueError('unsupported evaluation mode in %s! (%s)'
% (self.name, mode))
def get_eval_shape(self, virtual, state, state_p,
mode=None, term_mode=None, diff_var=None, **kwargs):
n_el, n_qp, dim, n_en, n_c = self.get_data_shape(state)
sym = dim * (dim + 1) / 2
return (n_el, 1, sym, 1), state.dtype
class VolumeTLTerm(HyperElasticTLBase):
r"""
Volume term (weak form) in the total Lagrangian formulation.
:Definition:
.. math::
\begin{array}{l}
\int_{\Omega} q J(\ul{u}) \\
\mbox{volume mode: vector for } K \from \Ical_h: \int_{T_K}
J(\ul{u}) \\
\mbox{rel\_volume mode: vector for } K \from \Ical_h:
\int_{T_K} J(\ul{u}) / \int_{T_K} 1
\end{array}
:Arguments:
- virtual : :math:`q`
- state : :math:`\ul{u}`
"""
name = 'dw_tl_volume'
arg_types = ('virtual', 'state')
arg_shapes = {'virtual' : (1, None), 'state' : 'D'}
family_data_names = ['mtx_f', 'det_f', 'sym_inv_c']
function = staticmethod(terms.dw_tl_volume)
def get_fargs(self, virtual, state,
mode=None, term_mode=None, diff_var=None, **kwargs):
vgs, _ = self.get_mapping(virtual)
vgv, _ = self.get_mapping(state)
fd = self.get_family_data(state, 'tl_common', self.family_data_names)
if mode == 'weak':
if diff_var is None:
fmode = 0
else:
fmode = 1
elif (mode == 'eval') or (mode == 'el_avg'):
if term_mode == 'volume':
fmode = 2
elif term_mode == 'rel_volume':
fmode = 3
else:
raise ValueError('unsupported term evaluation mode in %s! (%s)'
% (self.name, term_mode))
else:
raise ValueError('unsupported evaluation mode in %s! (%s)'
% (self.name, mode))
return fd.mtx_f, fd.sym_inv_c, fd.det_f, vgs, vgv, 0, fmode
def get_eval_shape(self, virtual, state,
mode=None, term_mode=None, diff_var=None, **kwargs):
n_el, n_qp, dim, n_en, n_c = self.get_data_shape(state)
return (n_el, 1, 1, 1), state.dtype
class DiffusionTLTerm(HyperElasticTLBase):
r"""
Diffusion term in the total Lagrangian formulation with
linearized deformation-dependent permeability
:math:`\ull{K}(\ul{u}) = J \ull{F}^{-1} \ull{k} f(J) \ull{F}^{-T}`,
where :math:`\ul{u}` relates to the previous time step :math:`(n-1)`
and
:math:`f(J) = \max\left(0, \left(1 + \frac{(J - 1)}{N_f}\right)\right)^2`
expresses the dependence on volume compression/expansion.
:Definition:
.. math::
\int_{\Omega} \ull{K}(\ul{u}^{(n-1)}) : \pdiff{q}{\ul{X}}
\pdiff{p}{\ul{X}}
:Arguments:
- material_1 : :math:`\ull{k}`
- material_2 : :math:`N_f`
- virtual : :math:`q`
- state : :math:`p`
- parameter : :math:`\ul{u}^{(n-1)}`
"""
name = 'dw_tl_diffusion'
arg_types = ('material_1', 'material_2', 'virtual', 'state', 'parameter')
arg_shapes = {'material_1' : 'D, D', 'material_2' : '1, 1',
'virtual' : (1, 'state'), 'state' : 1, 'parameter' : 'D'}
family_data_names = ['mtx_f', 'det_f']
function = staticmethod(terms.dw_tl_diffusion)
def get_fargs(self, perm, ref_porosity, virtual, state, parameter,
mode=None, term_mode=None, diff_var=None, **kwargs):
vgv, _ = self.get_mapping(parameter)
fd = self.get_family_data(parameter, 'tl_common',
self.family_data_names)
grad = self.get(state, 'grad')
if mode == 'weak':
if diff_var is None:
fmode = 0
else:
fmode = 1
elif mode == 'el_avg':
if term_mode == 'diffusion_velocity':
fmode = 2
else:
raise ValueError('unsupported term evaluation mode in %s! (%s)'
% (self.name, term_mode))
else:
raise ValueError('unsupported evaluation mode in %s! (%s)'
% (self.name, mode))
return grad, perm, ref_porosity, fd.mtx_f, fd.det_f, vgv, fmode
def get_eval_shape(self, perm, ref_porosity, virtual, state, parameter,
mode=None, term_mode=None, diff_var=None, **kwargs):
n_el, n_qp, dim, n_en, n_c = self.get_data_shape(state)
return (n_el, 1, dim, 1), state.dtype
class HyperElasticSurfaceTLBase(HyperElasticBase):
"""
Base class for all hyperelastic surface terms in TL formulation family.
"""
family_function = staticmethod(terms.dq_tl_finite_strain_surface)
fd_cache_name = 'tl_surface_common'
def compute_family_data(self, state):
ap, sg = self.get_approximation(state)
sd = ap.surface_data[self.region.name]
vec = self.get_vector(state)
n_el, n_qp, dim, n_en, n_c = self.get_data_shape(state)
shapes = {
'mtx_f' : (n_el, n_qp, dim, dim),
'det_f' : (n_el, n_qp, 1, 1),
'inv_f' : (n_el, n_qp, dim, dim),
}
data = Struct(name='tl_surface_family_data')
for key, shape in shapes.iteritems():
setattr(data, key, nm.zeros(shape, dtype=nm.float64))
self.family_function(data.mtx_f,
data.det_f,
data.inv_f,
vec, sg, sd.fis, ap.econn)
return data
class SurfaceFluxTLTerm(HyperElasticSurfaceTLBase):
r"""
Surface flux term in the total Lagrangian formulation, consistent with
:class:`DiffusionTLTerm`.
:Definition:
.. math::
\int_{\Gamma} \ul{\nu} \cdot \ull{K}(\ul{u}^{(n-1)}) \pdiff{p}{\ul{X}}
:Arguments:
- material_1 : :math:`\ull{k}`
- material_2 : :math:`N_f`
- parameter_1 : :math:`p`
- parameter_2 : :math:`\ul{u}^{(n-1)}`
"""
name = 'd_tl_surface_flux'
arg_types = ('material_1', 'material_2', 'parameter_1', 'parameter_2')
arg_shapes = {'material_1' : 'D, D', 'material_2' : '1, 1',
'parameter_1' : 1, 'parameter_2' : 'D'}
family_data_names = ['det_f', 'inv_f']
integration = 'surface_extra'
function = staticmethod(terms.d_tl_surface_flux)
def get_fargs(self, perm, ref_porosity, pressure, displacement,
mode=None, term_mode=None, diff_var=None, **kwargs):
ap, sg = self.get_approximation(displacement)
fd = self.get_family_data(displacement, 'tl_surface_common',
self.family_data_names)
grad = self.get(pressure, 'grad')
fmode = {'eval' : 0, 'el_avg' : 1}.get(mode, 0)
return grad, perm, ref_porosity, fd.inv_f, fd.det_f, sg, fmode
def get_eval_shape(self, perm, ref_porosity, pressure, displacement,
mode=None, term_mode=None, diff_var=None, **kwargs):
n_fa, n_qp, dim, n_en, n_c = self.get_data_shape(displacement)
return (n_fa, 1, 1, 1), pressure.dtype
class SurfaceTractionTLTerm(HyperElasticSurfaceTLBase):
r"""
Surface traction term in the total Lagrangian formulation, expressed
using :math:`\ul{\nu}`, the outward unit normal vector w.r.t. the
undeformed surface, :math:`\ull{F}(\ul{u})`, the deformation gradient,
:math:`J = \det(\ull{F})`, and :math:`\ull{\sigma}` a given traction,
often equal to a given pressure, i.e.
:math:`\ull{\sigma} = \pi \ull{I}`.
:Definition:
.. math::
\int_{\Gamma} \ul{\nu} \cdot \ull{F}^{-1} \cdot \ull{\sigma} \cdot
\ul{v} J
:Arguments:
- material : :math:`\ull{\sigma}`
- virtual : :math:`\ul{v}`
- state : :math:`\ul{u}`
"""
name = 'dw_tl_surface_traction'
arg_types = ('opt_material', 'virtual', 'state')
arg_shapes = [{'opt_material' : 'D, D', 'virtual' : ('D', 'state'),
'state' : 'D'},
{'opt_material' : None}]
family_data_names = ['det_f', 'inv_f']
integration = 'surface_extra'
function = staticmethod(terms.dw_tl_surface_traction)
def get_fargs(self, mat, virtual, state,
mode=None, term_mode=None, diff_var=None, **kwargs):
ap, sg = self.get_approximation(virtual)
sd = ap.surface_data[self.region.name]
bf = ap.get_base(sd.bkey, 0, self.integral)
fd = self.get_family_data(state, 'tl_surface_common',
self.family_data_names)
if mat is None:
eye = nm.eye(sg.dim, dtype=nm.float64)
mat = nm.tile(eye, ((1, sg.n_qp, 1, 1)))
if diff_var is None:
fmode = 0
else:
fmode = 1
return mat, fd.det_f, fd.inv_f, bf, sg, sd.fis, fmode
class VolumeSurfaceTLTerm(HyperElasticSurfaceTLBase):
r"""
Volume of a :math:`D`-dimensional domain, using a surface integral in the
total Lagrangian formulation, expressed using :math:`\ul{\nu}`, the outward
unit normal vector w.r.t. the undeformed surface, :math:`\ull{F}(\ul{u})`,
the deformation gradient, and :math:`J = \det(\ull{F})`. Uses the
approximation of :math:`\ul{u}` for the deformed surface coordinates
:math:`\ul{x}`.
:Definition:
.. math::
1 / D \int_{\Gamma} \ul{\nu} \cdot \ull{F}^{-1} \cdot \ul{x} J
:Arguments:
- parameter : :math:`\ul{u}`
"""
name = 'd_tl_volume_surface'
arg_types = ('parameter',)
arg_shapes = {'parameter' : 'D'}
family_data_names = ['det_f', 'inv_f']
integration = 'surface_extra'
function = staticmethod(terms.d_tl_volume_surface)
def get_fargs(self, parameter,
mode=None, term_mode=None, diff_var=None, **kwargs):
ap, sg = self.get_approximation(parameter)
sd = ap.surface_data[self.region.name]
bf = ap.get_base(sd.bkey, 0, self.integral)
fd = self.get_family_data(parameter, 'tl_surface_common',
self.family_data_names)
asc = nm.ascontiguousarray
coors0 = parameter.field.get_coor()
coors = asc(coors0 + parameter().reshape(coors0.shape))
return coors, fd.det_f, fd.inv_f, bf, sg, asc(sd.econn)
def get_eval_shape(self, parameter,
mode=None, term_mode=None, diff_var=None, **kwargs):
n_el, n_qp, dim, n_en, n_c = self.get_data_shape(parameter)
return (n_el, 1, 1, 1), parameter.dtype
|
[
"sfepy.base.base.Struct"
] |
[((1296, 1325), 'sfepy.base.base.Struct', 'Struct', ([], {'name': '"""tl_family_data"""'}), "(name='tl_family_data')\n", (1302, 1325), False, 'from sfepy.base.base import assert_, Struct\n'), ((12851, 12888), 'sfepy.base.base.Struct', 'Struct', ([], {'name': '"""tl_surface_family_data"""'}), "(name='tl_surface_family_data')\n", (12857, 12888), False, 'from sfepy.base.base import assert_, Struct\n'), ((5459, 5483), 'numpy.empty_like', 'nm.empty_like', (['sym_inv_c'], {}), '(sym_inv_c)\n', (5472, 5483), True, 'import numpy as nm\n'), ((16288, 16320), 'numpy.eye', 'nm.eye', (['sg.dim'], {'dtype': 'nm.float64'}), '(sg.dim, dtype=nm.float64)\n', (16294, 16320), True, 'import numpy as nm\n'), ((16339, 16371), 'numpy.tile', 'nm.tile', (['eye', '(1, sg.n_qp, 1, 1)'], {}), '(eye, (1, sg.n_qp, 1, 1))\n', (16346, 16371), True, 'import numpy as nm\n'), ((1403, 1436), 'numpy.zeros', 'nm.zeros', (['shape'], {'dtype': 'nm.float64'}), '(shape, dtype=nm.float64)\n', (1411, 1436), True, 'import numpy as nm\n'), ((5642, 5675), 'numpy.empty', 'nm.empty', (['shape'], {'dtype': 'nm.float64'}), '(shape, dtype=nm.float64)\n', (5650, 5675), True, 'import numpy as nm\n'), ((12966, 12999), 'numpy.zeros', 'nm.zeros', (['shape'], {'dtype': 'nm.float64'}), '(shape, dtype=nm.float64)\n', (12974, 12999), True, 'import numpy as nm\n'), ((6380, 6420), 'numpy.array', 'nm.array', (['[0]'], {'ndmin': '(4)', 'dtype': 'nm.float64'}), '([0], ndmin=4, dtype=nm.float64)\n', (6388, 6420), True, 'import numpy as nm\n')]
|
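Terms from this family plug into problem-description equations; a minimal sketch of a nearly incompressible neo-Hookean balance equation, assuming a material solid with parameters mu and K and an integral i defined elsewhere:

equations = {
    'balance' :
    """dw_tl_he_neohook.i.Omega(solid.mu, v, u)
     + dw_tl_bulk_penalty.i.Omega(solid.K, v, u) = 0""",
}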
from datetime import datetime
from decimal import Decimal
from typing import Optional
from fastapi import APIRouter
from sqlmodel import Field, SQLModel
router = APIRouter()
class Guardian(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
    email: str
email_verified_at: Optional[datetime] = None
password: str
remember_token: str
first_name_thai: str
last_name_thai: str
first_name_english: str
last_name_english: str
occupation_id: Optional[int] = None
gender: str
is_thai_address: bool
address_house_number: str
address_moo: str
address_soi: str
address_road: str
address_tambon_id: Optional[int] = None
address_amphoe_id: Optional[int] = None
address_province_id: Optional[int] = None
address_other: str
latitude: Decimal
longitude: Decimal
latitude_custom: Decimal
longitude_custom: Decimal
alive: bool
created_at: datetime
updated_at: datetime
created_by: int
updated_by: Optional[int] = None
class GuardianPhone(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
guardian_id: int
number: str
detail: str
receive_sms: bool
created_at: datetime
updated_at: datetime
created_by: int
updated_by: Optional[int] = None
class GuardianIdNumber(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
guardian_id: int
id_type_id: int
number: str
created_at: datetime
updated_at: datetime
created_by: int
updated_by: Optional[int] = None
class GuardianPatientMap(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
guardian_id: int
patient_id: int
created_at: datetime
updated_at: datetime
created_by: int
updated_by: Optional[int] = None
class GuardianNotification(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
guardian_id: int
name: str
detail: str
is_read: bool
created_at: datetime
updated_at: datetime
created_by: int
updated_by: Optional[int] = None
|
[
"sqlmodel.Field"
] |
[((164, 175), 'fastapi.APIRouter', 'APIRouter', ([], {}), '()\n', (173, 175), False, 'from fastapi import APIRouter\n'), ((240, 277), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (245, 277), False, 'from sqlmodel import Field, SQLModel\n'), ((1118, 1155), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (1123, 1155), False, 'from sqlmodel import Field, SQLModel\n'), ((1410, 1447), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (1415, 1447), False, 'from sqlmodel import Field, SQLModel\n'), ((1686, 1723), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (1691, 1723), False, 'from sqlmodel import Field, SQLModel\n'), ((1948, 1985), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (1953, 1985), False, 'from sqlmodel import Field, SQLModel\n')]
|
# 04.08.2009
#!
#! Homogenization: Linear Elasticity
#! =================================
#$ \centerline{Example input file, \today}
#! Homogenization of heterogeneous linear elastic material
import sfepy.discrete.fem.periodic as per
from sfepy.mechanics.matcoefs import stiffness_from_youngpoisson
from sfepy.homogenization.utils import define_box_regions
import sfepy.homogenization.coefs_base as cb
from sfepy import data_dir
from sfepy.base.base import Struct
from sfepy.homogenization.recovery import compute_micro_u, compute_stress_strain_u, compute_mac_stress_part
def recovery_le( pb, corrs, macro ):
out = {}
dim = corrs['corrs_le']['u_00'].shape[1]
mic_u = - compute_micro_u( corrs['corrs_le'], macro['strain'], 'u', dim )
out['u_mic'] = Struct( name = 'output_data',
mode = 'vertex', data = mic_u,
var_name = 'u', dofs = None )
stress_Y, strain_Y = compute_stress_strain_u( pb, 'i', 'Y', 'mat.D', 'u', mic_u )
stress_Y += compute_mac_stress_part( pb, 'i', 'Y', 'mat.D', 'u', macro['strain'] )
strain = macro['strain'] + strain_Y
out['cauchy_strain'] = Struct( name = 'output_data',
mode = 'cell', data = strain,
dofs = None )
out['cauchy_stress'] = Struct( name = 'output_data',
mode = 'cell', data = stress_Y,
dofs = None )
return out
#! Mesh
#! ----
filename_mesh = data_dir + '/meshes/3d/matrix_fiber.mesh'
dim = 3
region_lbn = (0, 0, 0)
region_rtf = (1, 1, 1)
#! Regions
#! -------
#! Regions, edges, ...
regions = {
'Y' : 'all',
'Ym' : 'cells of group 1',
'Yc' : 'cells of group 2',
}
regions.update( define_box_regions( dim, region_lbn, region_rtf ) )
#! Materials
#! ---------
materials = {
'mat' : ({'D' : {'Ym': stiffness_from_youngpoisson(dim, 7.0e9, 0.4),
'Yc': stiffness_from_youngpoisson(dim, 70.0e9, 0.2)}},),
}
#! Fields
#! ------
#! Scalar field for corrector basis functions.
fields = {
'corrector' : ('real', dim, 'Y', 1),
}
#! Variables
#! ---------
#! Unknown and corresponding test variables. Parameter fields
#! used for evaluation of homogenized coefficients.
variables = {
'u' : ('unknown field', 'corrector', 0),
'v' : ('test field', 'corrector', 'u'),
'Pi' : ('parameter field', 'corrector', 'u'),
'Pi1' : ('parameter field', 'corrector', '(set-to-None)'),
'Pi2' : ('parameter field', 'corrector', '(set-to-None)'),
}
#! Functions
functions = {
'match_x_plane' : (per.match_x_plane,),
'match_y_plane' : (per.match_y_plane,),
'match_z_plane' : (per.match_z_plane,),
}
#! Boundary Conditions
#! -------------------
#! Fixed nodes.
ebcs = {
'fixed_u' : ('Corners', {'u.all' : 0.0}),
}
if dim == 3:
epbcs = {
'periodic_x' : (['Left', 'Right'], {'u.all' : 'u.all'}, 'match_x_plane'),
'periodic_y' : (['Near', 'Far'], {'u.all' : 'u.all'}, 'match_y_plane'),
'periodic_z' : (['Top', 'Bottom'], {'u.all' : 'u.all'}, 'match_z_plane'),
}
else:
epbcs = {
'periodic_x' : (['Left', 'Right'], {'u.all' : 'u.all'}, 'match_x_plane'),
'periodic_y' : (['Bottom', 'Top'], {'u.all' : 'u.all'}, 'match_y_plane'),
}
all_periodic = ['periodic_%s' % ii for ii in ['x', 'y', 'z'][:dim] ]
#! Integrals
#! ---------
#! Define the integral type Volume/Surface and quadrature rule.
integrals = {
'i' : 2,
}
#! Options
#! -------
#! Various problem-specific options.
options = {
'coefs' : 'coefs',
'requirements' : 'requirements',
'ls' : 'ls', # linear solver to use
'volume' : { 'variables' : ['u'],
'expression' : 'd_volume.i.Y( u )' },
'output_dir' : 'output',
'coefs_filename' : 'coefs_le',
'recovery_hook' : 'recovery_le',
}
#! Equations
#! ---------
#! Equations for corrector functions.
equation_corrs = {
'balance_of_forces' :
"""dw_lin_elastic.i.Y(mat.D, v, u ) =
- dw_lin_elastic.i.Y(mat.D, v, Pi )"""
}
#! Expressions for homogenized linear elastic coefficients.
expr_coefs = """dw_lin_elastic.i.Y(mat.D, Pi1, Pi2 )"""
#! Coefficients
#! ------------
#! Definition of homogenized acoustic coefficients.
def set_elastic(variables, ir, ic, mode, pis, corrs_rs):
mode2var = {'row' : 'Pi1', 'col' : 'Pi2'}
val = pis.states[ir, ic]['u'] + corrs_rs.states[ir, ic]['u']
variables[mode2var[mode]].set_data(val)
coefs = {
'D' : {
'requires' : ['pis', 'corrs_rs'],
'expression' : expr_coefs,
'set_variables' : set_elastic,
'class' : cb.CoefSymSym,
},
'filenames' : {},
}
requirements = {
'pis' : {
'variables' : ['u'],
'class' : cb.ShapeDimDim,
},
'corrs_rs' : {
'requires' : ['pis'],
'ebcs' : ['fixed_u'],
'epbcs' : all_periodic,
'equations' : equation_corrs,
'set_variables' : [('Pi', 'pis', 'u')],
'class' : cb.CorrDimDim,
'save_name' : 'corrs_le',
'dump_variables' : ['u'],
},
}
#! Solvers
#! -------
#! Define linear and nonlinear solver.
solvers = {
'ls' : ('ls.umfpack', {}),
'newton' : ('nls.newton', {'i_max' : 1,
'eps_a' : 1e-4,
'problem' : 'nonlinear',
})
}
|
[
"sfepy.base.base.Struct",
"sfepy.homogenization.recovery.compute_micro_u",
"sfepy.homogenization.utils.define_box_regions",
"sfepy.homogenization.recovery.compute_mac_stress_part",
"sfepy.homogenization.recovery.compute_stress_strain_u",
"sfepy.mechanics.matcoefs.stiffness_from_youngpoisson"
] |
[((770, 848), 'sfepy.base.base.Struct', 'Struct', ([], {'name': '"""output_data"""', 'mode': '"""vertex"""', 'data': 'mic_u', 'var_name': '"""u"""', 'dofs': 'None'}), "(name='output_data', mode='vertex', data=mic_u, var_name='u', dofs=None)\n", (776, 848), False, 'from sfepy.base.base import Struct\n'), ((941, 999), 'sfepy.homogenization.recovery.compute_stress_strain_u', 'compute_stress_strain_u', (['pb', '"""i"""', '"""Y"""', '"""mat.D"""', '"""u"""', 'mic_u'], {}), "(pb, 'i', 'Y', 'mat.D', 'u', mic_u)\n", (964, 999), False, 'from sfepy.homogenization.recovery import compute_micro_u, compute_stress_strain_u, compute_mac_stress_part\n'), ((1018, 1086), 'sfepy.homogenization.recovery.compute_mac_stress_part', 'compute_mac_stress_part', (['pb', '"""i"""', '"""Y"""', '"""mat.D"""', '"""u"""', "macro['strain']"], {}), "(pb, 'i', 'Y', 'mat.D', 'u', macro['strain'])\n", (1041, 1086), False, 'from sfepy.homogenization.recovery import compute_micro_u, compute_stress_strain_u, compute_mac_stress_part\n'), ((1158, 1221), 'sfepy.base.base.Struct', 'Struct', ([], {'name': '"""output_data"""', 'mode': '"""cell"""', 'data': 'strain', 'dofs': 'None'}), "(name='output_data', mode='cell', data=strain, dofs=None)\n", (1164, 1221), False, 'from sfepy.base.base import Struct\n'), ((1329, 1394), 'sfepy.base.base.Struct', 'Struct', ([], {'name': '"""output_data"""', 'mode': '"""cell"""', 'data': 'stress_Y', 'dofs': 'None'}), "(name='output_data', mode='cell', data=stress_Y, dofs=None)\n", (1335, 1394), False, 'from sfepy.base.base import Struct\n'), ((1773, 1820), 'sfepy.homogenization.utils.define_box_regions', 'define_box_regions', (['dim', 'region_lbn', 'region_rtf'], {}), '(dim, region_lbn, region_rtf)\n', (1791, 1820), False, 'from sfepy.homogenization.utils import define_box_regions\n'), ((686, 747), 'sfepy.homogenization.recovery.compute_micro_u', 'compute_micro_u', (["corrs['corrs_le']", "macro['strain']", '"""u"""', 'dim'], {}), "(corrs['corrs_le'], macro['strain'], 'u', dim)\n", (701, 747), False, 'from sfepy.homogenization.recovery import compute_micro_u, compute_stress_strain_u, compute_mac_stress_part\n'), ((1892, 1943), 'sfepy.mechanics.matcoefs.stiffness_from_youngpoisson', 'stiffness_from_youngpoisson', (['dim', '(7000000000.0)', '(0.4)'], {}), '(dim, 7000000000.0, 0.4)\n', (1919, 1943), False, 'from sfepy.mechanics.matcoefs import stiffness_from_youngpoisson\n'), ((1965, 2017), 'sfepy.mechanics.matcoefs.stiffness_from_youngpoisson', 'stiffness_from_youngpoisson', (['dim', '(70000000000.0)', '(0.2)'], {}), '(dim, 70000000000.0, 0.2)\n', (1992, 2017), False, 'from sfepy.mechanics.matcoefs import stiffness_from_youngpoisson\n')]
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import inspect
import io
import itertools
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
from megengine.utils.naming import AutoNaming
@pytest.mark.parametrize("trace_mode", [False, True])
@pytest.mark.parametrize("return_mode", ["Value", "Tuple", "List", "Dict"])
def test_trace(trace_mode, return_mode):
@trace(symbolic=trace_mode)
def f(x):
if return_mode == "Tuple":
return (-x,)
elif return_mode == "List":
return [-x]
elif return_mode == "Dict":
return {"neg": -x}
else:
return -x
def get_numpy(y):
if return_mode == "Tuple" or return_mode == "List":
return y[0].numpy()
elif return_mode == "Dict":
return y["neg"].numpy()
return y.numpy()
x = tensor([1])
y = get_numpy(f(x))
for i in range(3):
np.testing.assert_equal(get_numpy(f(x)), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
            # will result in a copy of the output in grad
x = F.exp(x)
return x
ys = {False: [], True: []}
for symbolic in [False, True]:
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=symbolic)
def train_func(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = train_func(data).numpy()
ys[symbolic].append(y)
for i in range(3):
np.testing.assert_equal(ys[False][i], ys[True][i])
@pytest.mark.parametrize("trace_mode", [False, True])
def test_exclude_from_trace(trace_mode):
@trace(symbolic=trace_mode)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse(trace_mode):
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f(a, b):
base = 0
c = b - a
_, idx = F.topk(c, 3)
        # internally, biased_idx will be the same as idx, since gopt eliminates the addition of zero
biased_idx = base + idx
return biased_idx
a = tensor(np.ones((7, 2)), dtype=np.int32)
b = tensor(2 * np.ones((7, 2)), dtype=np.float32)
for i in range(3):
y = f(a, b)
y.numpy()
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse_in_grad(trace_mode):
w = Parameter(np.ones([4, 6]), dtype="float32")
gm = GradManager().attach(w)
opt = optim.SGD([w], lr=0.01, momentum=0.9, weight_decay=5e-4)
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f():
with gm:
wm = F.sum(w ** 2, axis=1) ** 0.5
loss = wm.mean()
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = f()
y.numpy()
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
# prevent from remaining scope from exception test
AutoNaming.clear()
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD"])
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(a, b)).values())[0]
np.testing.assert_equal(result[0], y)
def test_capture_dump():
a = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * a
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file)
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(x)).values())[0]
np.testing.assert_equal(result[0], y)
def test_dump_volatile():
p = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * p
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file, optimize_for_inference=False)
file.seek(0)
cg, _, outputs = G.load_graph(file)
(out,) = outputs
assert (
cgtools.get_owner_opr_type(cgtools.get_owner_opr_inputs(out)[1])
== "ImmutableTensor"
)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_trace_profiler(trace_mode):
@trace(symbolic=trace_mode, profiling=True)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
f(x)
    f(x)  # XXX: has to run twice before the profile becomes available
out = f.get_profile()
assert out.get("profiler")
def test_goptions():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
        # directly returning x / x will not trigger gopt,
        # since there is no way to tell that the two x's are the same
y = 2.0 * x
return y / y
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
y = 2.0 * x
return y / y
d = tensor(0.0)
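    # with x == 0.0, y / y evaluates 0.0 / 0.0 == nan, unless gopt rewrites y / y to 1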
assert not np.isfinite(f(d).numpy())
np.testing.assert_equal(g(d).numpy().item(), 1.0)
def test_goptions_log_sum_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x, y):
return log(exp(x) + exp(y))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x, y):
return log(exp(x) + exp(y))
val = 1.0e4
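    # exp(1.0e4) overflows float32, so the unfused log(exp(x) + exp(y)) is not
    # finite; at opt_level=1 the expression is fused into a numerically stable form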
d = tensor(val)
o = tensor(0.0)
assert not np.isfinite(f(d, o).numpy())
np.testing.assert_almost_equal(g(d, o), val)
def test_goptions_log_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
return log(exp(x))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
return log(exp(x))
f(tensor(1.0))
_, out = mkstemp()
f.dump(out, optimize_for_inference=False)
*_, outputs = G.load_graph(out)
oprs_1 = cgtools.get_oprs_seq(outputs)
g(tensor(1.0))
g.dump(out, optimize_for_inference=False)
*_, outputs = G.load_graph(out)
oprs_2 = cgtools.get_oprs_seq(outputs)
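    # at opt_level=1 the log/exp pair cancels out, so g's graph has two oprs fewer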
assert len(oprs_1) - len(oprs_2) == 2
def test_optimize_for_inference():
@trace(symbolic=True, capture_as_const=True)
def f(x):
return exp(x)
_, out = mkstemp()
f(tensor(5.0))
f.dump(out, enable_io16xc32=True)
res = G.load_graph(out)
computing_input = res.output_vars_list[0].owner.inputs[0]
assert computing_input.dtype == np.float16
def test_optimize_for_inference_broadcast():
a = tensor(np.ones(1, dtype=np.float32))
@trace(capture_as_const=True, symbolic_shape=True)
def f():
return a._broadcast(tensor([1, 10], dtype=np.int32))
f()
f.dump(io.BytesIO())
def test_trace_cvt_bool():
x = tensor([0], dtype=np.int32)
@trace(symbolic=True)
def f(x):
a = x.shape
b = a[0]
assert isscalar(b)
return b == 0
for i in range(3):
np.testing.assert_equal(f(x).numpy(), False)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_trace_reshape(trace_mode):
x1 = tensor(np.random.randn(2, 10, 10))
x2 = tensor(np.random.randn(4, 10, 10))
x3 = tensor(np.random.randn(8, 10, 10))
@trace(symbolic=trace_mode, capture_as_const=True)
def f(x):
y = x.reshape(x.shape[0], 100)
return y
f(x1)
f(x2)
f(x3)
def test_trace_topk():
x = tensor([5, 2, 7, 1, 0, 3, 2])
@trace(symbolic=True)
def f(x):
y = F.topk(x, 3)
np.testing.assert_equal(y[0].shape.numpy(), np.array([3,]))
return y
for i in range(3):
f(x)
def test_trace_warp_perspective():
inp_shape = (1, 1, 4, 4)
x = tensor(np.arange(16, dtype=np.float32).reshape(inp_shape))
M_shape = (1, 3, 3)
M = tensor(
np.array(
[[1.0, 0.0, 1.0], [0.0, 1.0, 1.0], [0.0, 0.0, 1.0]], dtype=np.float32
).reshape(M_shape)
)
@trace(symbolic=True)
def f(x, M):
out = F.vision.warp_perspective(x, M, (2, 2))
np.testing.assert_equal(out.shape.numpy(), np.array([1, 1, 2, 2]))
return out
for i in range(3):
f(x, M)
def test_raise_on_trace():
step_count = 0
catch_count = 0
bad_step = 10
class CatchMe(Exception):
pass
a = tensor([1, 2, 3, 4])
b = tensor([5, 6, 7, 8])
c = tensor([9, 0, 1, 2])
@trace
def add_abc(a, b, c):
ps = a + b
result = ps + c
if step_count == bad_step:
raise CatchMe("catch me")
return result
for i in range(100):
try:
d = add_abc(a, b, c)
except CatchMe as e:
catch_count += 1
else:
np.testing.assert_equal(d.numpy(), (a + b + c).numpy())
step_count += 1
assert catch_count == 1
@pytest.mark.parametrize("trace_mode", [False, True])
def test_trace_broadcast(trace_mode):
x1 = tensor(np.random.randn(3, 1, 1))
x2 = tensor(np.random.randn(1, 4, 1))
x3 = tensor(np.random.randn(1, 1, 5))
@trace(symbolic=trace_mode, capture_as_const=True)
def f(x):
y = F.broadcast_to(x, (3, 4, 5))
return y
f(x1)
f(x2)
f(x3)
def test_trace_nms():
def make_inputs(n):
boxes = np.zeros((n, 4))
boxes[:, :2] = np.random.rand(n, 2) * 100
boxes[:, 2:] = np.random.rand(n, 2) * 100 + 100
scores = np.random.rand(n)
return tensor(boxes), tensor(scores)
@trace(symbolic=False)
def f(boxes, scores):
# with tracing, max_output must be specified
results = F.vision.nms(boxes, scores=scores, iou_thresh=0.5, max_output=20)
        # without tracing, max_output can be inferred inside nms
with exclude_from_trace():
_ = F.vision.nms(boxes, scores=scores, iou_thresh=0.5)
return results
f(*make_inputs(10))
f(*make_inputs(20))
f(*make_inputs(30))
def test_trace_valid_broadcast():
x1 = tensor(np.random.randn(1, 1))
x2 = tensor(np.random.randn(1, 2))
shape = (tensor([2]), tensor([2]))
@trace(symbolic=False)
def f(x, shape):
y = F.broadcast_to(x, shape)
return y
f(x1, shape)
f(x2, shape)
def test_clip():
x = tensor(np.random.randn(10, 10))
@trace(symbolic=True)
def f(x, lower, upper):
y = F.clip(x, lower, upper)
return y
for i in range(3):
f(x, tensor([0]), tensor([1]))
# test returning noncontiguous tensor from trace
def test_slice():
@trace
def f(x):
return x[:, 1::2]
x = F.arange(8).reshape(2, 4)
f(x)
y = f(x)
np.testing.assert_array_equal(y.numpy(), x.numpy()[:, 1::2])
y + y
@pytest.mark.parametrize("shape_mode", [False, True])
def test_random(shape_mode):
def run_test(op):
@trace(symbolic=True, symbolic_shape=shape_mode)
def f():
out = op(size=[10, 10])
out_shape = out.shape
assert out_shape is not None
if not isinstance(out_shape, tuple):
assert out.shape.numpy() is not None
return out
for _ in range(3):
f()
run_test(uniform)
run_test(normal)
@pytest.mark.parametrize("shape_mode", [False, True])
def test_trace_advance_indexing(shape_mode):
funcs = [
lambda x, i: x[i],
# lambda x, i, j: x[i, j], # FIXME
lambda x, i, j: x[i, :, j, ...],
# lambda x, start, end: x[start:end], # FIXME
lambda x, start, end: x[:, 0, start:end, ..., 1],
lambda x, vec: x[vec],
lambda x, vec: x[vec, ..., 0, 1:3],
lambda x, vec: x[vec, vec[0], vec[1]],
# lambda x, i, start, end, vec: x[i, ..., :, vec, start:end], # FIXME
lambda x, mask: x[mask],
]
inputs = {
"x": np.random.randn(5, 5, 5, 5, 5).astype("float32"),
"i": 0,
"j": 2,
"start": 1,
"end": 3,
"vec": [1, 2, 3],
"mask": np.random.randn(5, 5, 5, 5, 5) >= 0,
}
for f in funcs:
sig = inspect.signature(f)
        param_names = list(sig.parameters.keys())
params = {}
params_np = {}
f_traced = trace(f, symbolic=False, symbolic_shape=shape_mode)
for name in param_names:
params[name] = tensor(inputs[name])
params_np[name] = inputs[name]
expected = f(**params_np)
result_imperative = f(**params)
np.testing.assert_equal(expected, result_imperative.numpy())
for _ in range(3):
result_trace = f_traced(**params)
np.testing.assert_equal(expected, result_trace.numpy())
|
[
"megengine.functional.arange",
"megengine.utils.comp_graph_tools.GraphInference",
"megengine.utils.naming.AutoNaming.clear",
"megengine.optimizer.SGD",
"megengine.autodiff.GradManager",
"megengine.jit.trace",
"megengine.functional.clip",
"megengine.functional.sum",
"megengine.core.tensor.utils.isscalar",
"megengine.functional.vision.warp_perspective",
"megengine.utils.comp_graph_tools.get_owner_opr_inputs",
"megengine.functional.vision.nms",
"megengine.tensor",
"megengine.core.tensor.megbrain_graph.load_graph",
"megengine.functional.topk",
"megengine.functional.exp",
"megengine.functional.broadcast_to",
"megengine.jit.exclude_from_trace",
"megengine.Parameter",
"megengine.utils.comp_graph_tools.get_oprs_seq"
] |
[((1162, 1214), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""trace_mode"""', '[False, True]'], {}), "('trace_mode', [False, True])\n", (1185, 1214), False, 'import pytest\n'), ((1216, 1290), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""return_mode"""', "['Value', 'Tuple', 'List', 'Dict']"], {}), "('return_mode', ['Value', 'Tuple', 'List', 'Dict'])\n", (1239, 1290), False, 'import pytest\n'), ((2940, 2992), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""trace_mode"""', '[False, True]'], {}), "('trace_mode', [False, True])\n", (2963, 2992), False, 'import pytest\n'), ((3329, 3381), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""trace_mode"""', '[False, True]'], {}), "('trace_mode', [False, True])\n", (3352, 3381), False, 'import pytest\n'), ((3889, 3941), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""trace_mode"""', '[False, True]'], {}), "('trace_mode', [False, True])\n", (3912, 3941), False, 'import pytest\n'), ((6628, 6680), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""trace_mode"""', '[False, True]'], {}), "('trace_mode', [False, True])\n", (6651, 6680), False, 'import pytest\n'), ((9311, 9363), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""trace_mode"""', '[False, True]'], {}), "('trace_mode', [False, True])\n", (9334, 9363), False, 'import pytest\n'), ((11140, 11192), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""trace_mode"""', '[False, True]'], {}), "('trace_mode', [False, True])\n", (11163, 11192), False, 'import pytest\n'), ((13015, 13067), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""shape_mode"""', '[False, True]'], {}), "('shape_mode', [False, True])\n", (13038, 13067), False, 'import pytest\n'), ((13520, 13572), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""shape_mode"""', '[False, True]'], {}), "('shape_mode', [False, True])\n", (13543, 13572), False, 'import pytest\n'), ((1337, 1363), 'megengine.jit.trace', 'trace', ([], {'symbolic': 'trace_mode'}), '(symbolic=trace_mode)\n', (1342, 1363), False, 'from megengine.jit import exclude_from_trace, trace\n'), ((1822, 1833), 'megengine.tensor', 'tensor', (['[1]'], {}), '([1])\n', (1828, 1833), False, 'from megengine import Parameter, tensor\n'), ((3039, 3065), 'megengine.jit.trace', 'trace', ([], {'symbolic': 'trace_mode'}), '(symbolic=trace_mode)\n', (3044, 3065), False, 'from megengine.jit import exclude_from_trace, trace\n'), ((3216, 3227), 'megengine.tensor', 'tensor', (['[1]'], {}), '([1])\n', (3222, 3227), False, 'from megengine import Parameter, tensor\n'), ((3463, 3502), 'megengine.jit.trace', 'trace', ([], {'symbolic': 'trace_mode', 'opt_level': '(2)'}), '(symbolic=trace_mode, opt_level=2)\n', (3468, 3502), False, 'from megengine.jit import exclude_from_trace, trace\n'), ((4082, 4140), 'megengine.optimizer.SGD', 'optim.SGD', (['[w]'], {'lr': '(0.01)', 'momentum': '(0.9)', 'weight_decay': '(0.0005)'}), '([w], lr=0.01, momentum=0.9, weight_decay=0.0005)\n', (4091, 4140), True, 'import megengine.optimizer as optim\n'), ((4185, 4224), 'megengine.jit.trace', 'trace', ([], {'symbolic': 'trace_mode', 'opt_level': '(2)'}), '(symbolic=trace_mode, opt_level=2)\n', (4190, 4224), False, 'from megengine.jit import exclude_from_trace, trace\n'), ((5000, 5043), 'megengine.jit.trace', 'trace', ([], {'symbolic': '(True)', 'capture_as_const': '(True)'}), '(symbolic=True, capture_as_const=True)\n', (5005, 5043), False, 'from megengine.jit import exclude_from_trace, trace\n'), ((5142, 5160), 
'megengine.utils.naming.AutoNaming.clear', 'AutoNaming.clear', ([], {}), '()\n', (5158, 5160), False, 'from megengine.utils.naming import AutoNaming\n'), ((5169, 5180), 'megengine.tensor', 'tensor', (['[2]'], {}), '([2])\n', (5175, 5180), False, 'from megengine import Parameter, tensor\n'), ((5189, 5200), 'megengine.tensor', 'tensor', (['[4]'], {}), '([4])\n', (5195, 5200), False, 'from megengine import Parameter, tensor\n'), ((5313, 5325), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (5323, 5325), False, 'import io\n'), ((5392, 5453), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['dump_info.inputs', "['arg_0', 'arg_1']"], {}), "(dump_info.inputs, ['arg_0', 'arg_1'])\n", (5415, 5453), True, 'import numpy as np\n'), ((5458, 5509), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['dump_info.outputs', "['ADD']"], {}), "(dump_info.outputs, ['ADD'])\n", (5481, 5509), True, 'import numpy as np\n'), ((5542, 5570), 'megengine.utils.comp_graph_tools.GraphInference', 'cgtools.GraphInference', (['file'], {}), '(file)\n', (5564, 5570), True, 'import megengine.utils.comp_graph_tools as cgtools\n'), ((5627, 5664), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['result[0]', 'y'], {}), '(result[0], y)\n', (5650, 5664), True, 'import numpy as np\n'), ((5700, 5711), 'megengine.tensor', 'tensor', (['[2]'], {}), '([2])\n', (5706, 5711), False, 'from megengine import Parameter, tensor\n'), ((5718, 5761), 'megengine.jit.trace', 'trace', ([], {'symbolic': '(True)', 'capture_as_const': '(True)'}), '(symbolic=True, capture_as_const=True)\n', (5723, 5761), False, 'from megengine.jit import exclude_from_trace, trace\n'), ((5806, 5817), 'megengine.tensor', 'tensor', (['[3]'], {}), '([3])\n', (5812, 5817), False, 'from megengine import Parameter, tensor\n'), ((5924, 5936), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (5934, 5936), False, 'import io\n'), ((5986, 6014), 'megengine.utils.comp_graph_tools.GraphInference', 'cgtools.GraphInference', (['file'], {}), '(file)\n', (6008, 6014), True, 'import megengine.utils.comp_graph_tools as cgtools\n'), ((6068, 6105), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['result[0]', 'y'], {}), '(result[0], y)\n', (6091, 6105), True, 'import numpy as np\n'), ((6142, 6153), 'megengine.tensor', 'tensor', (['[2]'], {}), '([2])\n', (6148, 6153), False, 'from megengine import Parameter, tensor\n'), ((6160, 6203), 'megengine.jit.trace', 'trace', ([], {'symbolic': '(True)', 'capture_as_const': '(True)'}), '(symbolic=True, capture_as_const=True)\n', (6165, 6203), False, 'from megengine.jit import exclude_from_trace, trace\n'), ((6248, 6259), 'megengine.tensor', 'tensor', (['[3]'], {}), '([3])\n', (6254, 6259), False, 'from megengine import Parameter, tensor\n'), ((6366, 6378), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (6376, 6378), False, 'import io\n'), ((6464, 6482), 'megengine.core.tensor.megbrain_graph.load_graph', 'G.load_graph', (['file'], {}), '(file)\n', (6476, 6482), True, 'import megengine.core.tensor.megbrain_graph as G\n'), ((6723, 6765), 'megengine.jit.trace', 'trace', ([], {'symbolic': 'trace_mode', 'profiling': '(True)'}), '(symbolic=trace_mode, profiling=True)\n', (6728, 6765), False, 'from megengine.jit import exclude_from_trace, trace\n'), ((6807, 6818), 'megengine.tensor', 'tensor', (['[1]'], {}), '([1])\n', (6813, 6818), False, 'from megengine import Parameter, tensor\n'), ((6970, 7026), 'megengine.jit.trace', 'trace', ([], {'symbolic': '(True)', 'opt_level': '(0)', 'capture_as_const': '(True)'}), '(symbolic=True, 
opt_level=0, capture_as_const=True)\n', (6975, 7026), False, 'from megengine.jit import exclude_from_trace, trace\n'), ((7204, 7260), 'megengine.jit.trace', 'trace', ([], {'symbolic': '(True)', 'opt_level': '(1)', 'capture_as_const': '(True)'}), '(symbolic=True, opt_level=1, capture_as_const=True)\n', (7209, 7260), False, 'from megengine.jit import exclude_from_trace, trace\n'), ((7325, 7336), 'megengine.tensor', 'tensor', (['(0.0)'], {}), '(0.0)\n', (7331, 7336), False, 'from megengine import Parameter, tensor\n'), ((7472, 7528), 'megengine.jit.trace', 'trace', ([], {'symbolic': '(True)', 'opt_level': '(0)', 'capture_as_const': '(True)'}), '(symbolic=True, opt_level=0, capture_as_const=True)\n', (7477, 7528), False, 'from megengine.jit import exclude_from_trace, trace\n'), ((7588, 7644), 'megengine.jit.trace', 'trace', ([], {'symbolic': '(True)', 'opt_level': '(1)', 'capture_as_const': '(True)'}), '(symbolic=True, opt_level=1, capture_as_const=True)\n', (7593, 7644), False, 'from megengine.jit import exclude_from_trace, trace\n'), ((7723, 7734), 'megengine.tensor', 'tensor', (['val'], {}), '(val)\n', (7729, 7734), False, 'from megengine import Parameter, tensor\n'), ((7743, 7754), 'megengine.tensor', 'tensor', (['(0.0)'], {}), '(0.0)\n', (7749, 7754), False, 'from megengine import Parameter, tensor\n'), ((7884, 7940), 'megengine.jit.trace', 'trace', ([], {'symbolic': '(True)', 'opt_level': '(0)', 'capture_as_const': '(True)'}), '(symbolic=True, opt_level=0, capture_as_const=True)\n', (7889, 7940), False, 'from megengine.jit import exclude_from_trace, trace\n'), ((7988, 8044), 'megengine.jit.trace', 'trace', ([], {'symbolic': '(True)', 'opt_level': '(1)', 'capture_as_const': '(True)'}), '(symbolic=True, opt_level=1, capture_as_const=True)\n', (7993, 8044), False, 'from megengine.jit import exclude_from_trace, trace\n'), ((8119, 8128), 'tempfile.mkstemp', 'mkstemp', ([], {}), '()\n', (8126, 8128), False, 'from tempfile import mkstemp\n'), ((8193, 8210), 'megengine.core.tensor.megbrain_graph.load_graph', 'G.load_graph', (['out'], {}), '(out)\n', (8205, 8210), True, 'import megengine.core.tensor.megbrain_graph as G\n'), ((8224, 8253), 'megengine.utils.comp_graph_tools.get_oprs_seq', 'cgtools.get_oprs_seq', (['outputs'], {}), '(outputs)\n', (8244, 8253), True, 'import megengine.utils.comp_graph_tools as cgtools\n'), ((8338, 8355), 'megengine.core.tensor.megbrain_graph.load_graph', 'G.load_graph', (['out'], {}), '(out)\n', (8350, 8355), True, 'import megengine.core.tensor.megbrain_graph as G\n'), ((8369, 8398), 'megengine.utils.comp_graph_tools.get_oprs_seq', 'cgtools.get_oprs_seq', (['outputs'], {}), '(outputs)\n', (8389, 8398), True, 'import megengine.utils.comp_graph_tools as cgtools\n'), ((8484, 8527), 'megengine.jit.trace', 'trace', ([], {'symbolic': '(True)', 'capture_as_const': '(True)'}), '(symbolic=True, capture_as_const=True)\n', (8489, 8527), False, 'from megengine.jit import exclude_from_trace, trace\n'), ((8578, 8587), 'tempfile.mkstemp', 'mkstemp', ([], {}), '()\n', (8585, 8587), False, 'from tempfile import mkstemp\n'), ((8656, 8673), 'megengine.core.tensor.megbrain_graph.load_graph', 'G.load_graph', (['out'], {}), '(out)\n', (8668, 8673), True, 'import megengine.core.tensor.megbrain_graph as G\n'), ((8881, 8930), 'megengine.jit.trace', 'trace', ([], {'capture_as_const': '(True)', 'symbolic_shape': '(True)'}), '(capture_as_const=True, symbolic_shape=True)\n', (8886, 8930), False, 'from megengine.jit import exclude_from_trace, trace\n'), ((9076, 9103), 'megengine.tensor', 
'tensor', (['[0]'], {'dtype': 'np.int32'}), '([0], dtype=np.int32)\n', (9082, 9103), False, 'from megengine import Parameter, tensor\n'), ((9110, 9130), 'megengine.jit.trace', 'trace', ([], {'symbolic': '(True)'}), '(symbolic=True)\n', (9115, 9130), False, 'from megengine.jit import exclude_from_trace, trace\n'), ((9538, 9587), 'megengine.jit.trace', 'trace', ([], {'symbolic': 'trace_mode', 'capture_as_const': '(True)'}), '(symbolic=trace_mode, capture_as_const=True)\n', (9543, 9587), False, 'from megengine.jit import exclude_from_trace, trace\n'), ((9722, 9751), 'megengine.tensor', 'tensor', (['[5, 2, 7, 1, 0, 3, 2]'], {}), '([5, 2, 7, 1, 0, 3, 2])\n', (9728, 9751), False, 'from megengine import Parameter, tensor\n'), ((9758, 9778), 'megengine.jit.trace', 'trace', ([], {'symbolic': '(True)'}), '(symbolic=True)\n', (9763, 9778), False, 'from megengine.jit import exclude_from_trace, trace\n'), ((10252, 10272), 'megengine.jit.trace', 'trace', ([], {'symbolic': '(True)'}), '(symbolic=True)\n', (10257, 10272), False, 'from megengine.jit import exclude_from_trace, trace\n'), ((10617, 10637), 'megengine.tensor', 'tensor', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (10623, 10637), False, 'from megengine import Parameter, tensor\n'), ((10646, 10666), 'megengine.tensor', 'tensor', (['[5, 6, 7, 8]'], {}), '([5, 6, 7, 8])\n', (10652, 10666), False, 'from megengine import Parameter, tensor\n'), ((10675, 10695), 'megengine.tensor', 'tensor', (['[9, 0, 1, 2]'], {}), '([9, 0, 1, 2])\n', (10681, 10695), False, 'from megengine import Parameter, tensor\n'), ((11363, 11412), 'megengine.jit.trace', 'trace', ([], {'symbolic': 'trace_mode', 'capture_as_const': '(True)'}), '(symbolic=trace_mode, capture_as_const=True)\n', (11368, 11412), False, 'from megengine.jit import exclude_from_trace, trace\n'), ((11791, 11812), 'megengine.jit.trace', 'trace', ([], {'symbolic': '(False)'}), '(symbolic=False)\n', (11796, 11812), False, 'from megengine.jit import exclude_from_trace, trace\n'), ((12398, 12419), 'megengine.jit.trace', 'trace', ([], {'symbolic': '(False)'}), '(symbolic=False)\n', (12403, 12419), False, 'from megengine.jit import exclude_from_trace, trace\n'), ((12595, 12615), 'megengine.jit.trace', 'trace', ([], {'symbolic': '(True)'}), '(symbolic=True)\n', (12600, 12615), False, 'from megengine.jit import exclude_from_trace, trace\n'), ((2549, 2573), 'megengine.jit.trace', 'trace', ([], {'symbolic': 'symbolic'}), '(symbolic=symbolic)\n', (2554, 2573), False, 'from megengine.jit import exclude_from_trace, trace\n'), ((2886, 2936), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['ys[False][i]', 'ys[True][i]'], {}), '(ys[False][i], ys[True][i])\n', (2909, 2936), True, 'import numpy as np\n'), ((3572, 3584), 'megengine.functional.topk', 'F.topk', (['c', '(3)'], {}), '(c, 3)\n', (3578, 3584), True, 'import megengine.functional as F\n'), ((3737, 3752), 'numpy.ones', 'np.ones', (['(7, 2)'], {}), '((7, 2))\n', (3744, 3752), True, 'import numpy as np\n'), ((4004, 4019), 'numpy.ones', 'np.ones', (['[4, 6]'], {}), '([4, 6])\n', (4011, 4019), True, 'import numpy as np\n'), ((4580, 4604), 'megengine.jit.trace', 'trace', ([], {'symbolic': 'symbolic'}), '(symbolic=symbolic)\n', (4585, 4604), False, 'from megengine.jit import exclude_from_trace, trace\n'), ((4767, 4778), 'megengine.tensor', 'tensor', (['[1]'], {}), '([1])\n', (4773, 4778), False, 'from megengine import Parameter, tensor\n'), ((8093, 8104), 'megengine.tensor', 'tensor', (['(1.0)'], {}), '(1.0)\n', (8099, 8104), False, 'from megengine import 
Parameter, tensor\n'), ((8261, 8272), 'megengine.tensor', 'tensor', (['(1.0)'], {}), '(1.0)\n', (8267, 8272), False, 'from megengine import Parameter, tensor\n'), ((8557, 8563), 'megengine.functional.exp', 'exp', (['x'], {}), '(x)\n', (8560, 8563), False, 'from megengine.functional import exp, log\n'), ((8594, 8605), 'megengine.tensor', 'tensor', (['(5.0)'], {}), '(5.0)\n', (8600, 8605), False, 'from megengine import Parameter, tensor\n'), ((8845, 8873), 'numpy.ones', 'np.ones', (['(1)'], {'dtype': 'np.float32'}), '(1, dtype=np.float32)\n', (8852, 8873), True, 'import numpy as np\n'), ((9025, 9037), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (9035, 9037), False, 'import io\n'), ((9197, 9208), 'megengine.core.tensor.utils.isscalar', 'isscalar', (['b'], {}), '(b)\n', (9205, 9208), False, 'from megengine.core.tensor.utils import isscalar\n'), ((9416, 9442), 'numpy.random.randn', 'np.random.randn', (['(2)', '(10)', '(10)'], {}), '(2, 10, 10)\n', (9431, 9442), True, 'import numpy as np\n'), ((9460, 9486), 'numpy.random.randn', 'np.random.randn', (['(4)', '(10)', '(10)'], {}), '(4, 10, 10)\n', (9475, 9486), True, 'import numpy as np\n'), ((9504, 9530), 'numpy.random.randn', 'np.random.randn', (['(8)', '(10)', '(10)'], {}), '(8, 10, 10)\n', (9519, 9530), True, 'import numpy as np\n'), ((9805, 9817), 'megengine.functional.topk', 'F.topk', (['x', '(3)'], {}), '(x, 3)\n', (9811, 9817), True, 'import megengine.functional as F\n'), ((10304, 10343), 'megengine.functional.vision.warp_perspective', 'F.vision.warp_perspective', (['x', 'M', '(2, 2)'], {}), '(x, M, (2, 2))\n', (10329, 10343), True, 'import megengine.functional as F\n'), ((11247, 11271), 'numpy.random.randn', 'np.random.randn', (['(3)', '(1)', '(1)'], {}), '(3, 1, 1)\n', (11262, 11271), True, 'import numpy as np\n'), ((11289, 11313), 'numpy.random.randn', 'np.random.randn', (['(1)', '(4)', '(1)'], {}), '(1, 4, 1)\n', (11304, 11313), True, 'import numpy as np\n'), ((11331, 11355), 'numpy.random.randn', 'np.random.randn', (['(1)', '(1)', '(5)'], {}), '(1, 1, 5)\n', (11346, 11355), True, 'import numpy as np\n'), ((11439, 11467), 'megengine.functional.broadcast_to', 'F.broadcast_to', (['x', '(3, 4, 5)'], {}), '(x, (3, 4, 5))\n', (11453, 11467), True, 'import megengine.functional as F\n'), ((11580, 11596), 'numpy.zeros', 'np.zeros', (['(n, 4)'], {}), '((n, 4))\n', (11588, 11596), True, 'import numpy as np\n'), ((11721, 11738), 'numpy.random.rand', 'np.random.rand', (['n'], {}), '(n)\n', (11735, 11738), True, 'import numpy as np\n'), ((11910, 11975), 'megengine.functional.vision.nms', 'F.vision.nms', (['boxes'], {'scores': 'scores', 'iou_thresh': '(0.5)', 'max_output': '(20)'}), '(boxes, scores=scores, iou_thresh=0.5, max_output=20)\n', (11922, 11975), True, 'import megengine.functional as F\n'), ((12291, 12312), 'numpy.random.randn', 'np.random.randn', (['(1)', '(1)'], {}), '(1, 1)\n', (12306, 12312), True, 'import numpy as np\n'), ((12330, 12351), 'numpy.random.randn', 'np.random.randn', (['(1)', '(2)'], {}), '(1, 2)\n', (12345, 12351), True, 'import numpy as np\n'), ((12366, 12377), 'megengine.tensor', 'tensor', (['[2]'], {}), '([2])\n', (12372, 12377), False, 'from megengine import Parameter, tensor\n'), ((12379, 12390), 'megengine.tensor', 'tensor', (['[2]'], {}), '([2])\n', (12385, 12390), False, 'from megengine import Parameter, tensor\n'), ((12453, 12477), 'megengine.functional.broadcast_to', 'F.broadcast_to', (['x', 'shape'], {}), '(x, shape)\n', (12467, 12477), True, 'import megengine.functional as F\n'), ((12564, 12587), 
'numpy.random.randn', 'np.random.randn', (['(10)', '(10)'], {}), '(10, 10)\n', (12579, 12587), True, 'import numpy as np\n'), ((12656, 12679), 'megengine.functional.clip', 'F.clip', (['x', 'lower', 'upper'], {}), '(x, lower, upper)\n', (12662, 12679), True, 'import megengine.functional as F\n'), ((13128, 13175), 'megengine.jit.trace', 'trace', ([], {'symbolic': '(True)', 'symbolic_shape': 'shape_mode'}), '(symbolic=True, symbolic_shape=shape_mode)\n', (13133, 13175), False, 'from megengine.jit import exclude_from_trace, trace\n'), ((14365, 14385), 'inspect.signature', 'inspect.signature', (['f'], {}), '(f)\n', (14382, 14385), False, 'import inspect\n'), ((14499, 14550), 'megengine.jit.trace', 'trace', (['f'], {'symbolic': '(False)', 'symbolic_shape': 'shape_mode'}), '(f, symbolic=False, symbolic_shape=shape_mode)\n', (14504, 14550), False, 'from megengine.jit import exclude_from_trace, trace\n'), ((2072, 2106), 'megengine.Parameter', 'Parameter', (['[1.0]'], {'dtype': 'np.float32'}), '([1.0], dtype=np.float32)\n', (2081, 2106), False, 'from megengine import Parameter, tensor\n'), ((2237, 2245), 'megengine.functional.exp', 'F.exp', (['x'], {}), '(x)\n', (2242, 2245), True, 'import megengine.functional as F\n'), ((3108, 3128), 'megengine.jit.exclude_from_trace', 'exclude_from_trace', ([], {}), '()\n', (3126, 3128), False, 'from megengine.jit import exclude_from_trace, trace\n'), ((3789, 3804), 'numpy.ones', 'np.ones', (['(7, 2)'], {}), '((7, 2))\n', (3796, 3804), True, 'import numpy as np\n'), ((4048, 4061), 'megengine.autodiff.GradManager', 'GradManager', ([], {}), '()\n', (4059, 4061), False, 'from megengine.autodiff import GradManager\n'), ((4944, 4975), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['z', 'buf'], {}), '(z, buf)\n', (4967, 4975), True, 'import numpy as np\n'), ((7974, 7980), 'megengine.functional.exp', 'exp', (['x'], {}), '(x)\n', (7977, 7980), False, 'from megengine.functional import exp, log\n'), ((8078, 8084), 'megengine.functional.exp', 'exp', (['x'], {}), '(x)\n', (8081, 8084), False, 'from megengine.functional import exp, log\n'), ((8972, 9003), 'megengine.tensor', 'tensor', (['[1, 10]'], {'dtype': 'np.int32'}), '([1, 10], dtype=np.int32)\n', (8978, 9003), False, 'from megengine import Parameter, tensor\n'), ((9870, 9883), 'numpy.array', 'np.array', (['[3]'], {}), '([3])\n', (9878, 9883), True, 'import numpy as np\n'), ((10395, 10417), 'numpy.array', 'np.array', (['[1, 1, 2, 2]'], {}), '([1, 1, 2, 2])\n', (10403, 10417), True, 'import numpy as np\n'), ((11620, 11640), 'numpy.random.rand', 'np.random.rand', (['n', '(2)'], {}), '(n, 2)\n', (11634, 11640), True, 'import numpy as np\n'), ((11755, 11768), 'megengine.tensor', 'tensor', (['boxes'], {}), '(boxes)\n', (11761, 11768), False, 'from megengine import Parameter, tensor\n'), ((11770, 11784), 'megengine.tensor', 'tensor', (['scores'], {}), '(scores)\n', (11776, 11784), False, 'from megengine import Parameter, tensor\n'), ((12054, 12074), 'megengine.jit.exclude_from_trace', 'exclude_from_trace', ([], {}), '()\n', (12072, 12074), False, 'from megengine.jit import exclude_from_trace, trace\n'), ((12092, 12142), 'megengine.functional.vision.nms', 'F.vision.nms', (['boxes'], {'scores': 'scores', 'iou_thresh': '(0.5)'}), '(boxes, scores=scores, iou_thresh=0.5)\n', (12104, 12142), True, 'import megengine.functional as F\n'), ((12734, 12745), 'megengine.tensor', 'tensor', (['[0]'], {}), '([0])\n', (12740, 12745), False, 'from megengine import Parameter, tensor\n'), ((12747, 12758), 'megengine.tensor', 'tensor', 
(['[1]'], {}), '([1])\n', (12753, 12758), False, 'from megengine import Parameter, tensor\n'), ((12889, 12900), 'megengine.functional.arange', 'F.arange', (['(8)'], {}), '(8)\n', (12897, 12900), True, 'import megengine.functional as F\n'), ((14288, 14318), 'numpy.random.randn', 'np.random.randn', (['(5)', '(5)', '(5)', '(5)', '(5)'], {}), '(5, 5, 5, 5, 5)\n', (14303, 14318), True, 'import numpy as np\n'), ((14611, 14631), 'megengine.tensor', 'tensor', (['inputs[name]'], {}), '(inputs[name])\n', (14617, 14631), False, 'from megengine import Parameter, tensor\n'), ((2371, 2384), 'megengine.autodiff.GradManager', 'GradManager', ([], {}), '()\n', (2382, 2384), False, 'from megengine.autodiff import GradManager\n'), ((4272, 4293), 'megengine.functional.sum', 'F.sum', (['(w ** 2)'], {'axis': '(1)'}), '(w ** 2, axis=1)\n', (4277, 4293), True, 'import megengine.functional as F\n'), ((6552, 6585), 'megengine.utils.comp_graph_tools.get_owner_opr_inputs', 'cgtools.get_owner_opr_inputs', (['out'], {}), '(out)\n', (6580, 6585), True, 'import megengine.utils.comp_graph_tools as cgtools\n'), ((7565, 7571), 'megengine.functional.exp', 'exp', (['x'], {}), '(x)\n', (7568, 7571), False, 'from megengine.functional import exp, log\n'), ((7574, 7580), 'megengine.functional.exp', 'exp', (['y'], {}), '(y)\n', (7577, 7580), False, 'from megengine.functional import exp, log\n'), ((7681, 7687), 'megengine.functional.exp', 'exp', (['x'], {}), '(x)\n', (7684, 7687), False, 'from megengine.functional import exp, log\n'), ((7690, 7696), 'megengine.functional.exp', 'exp', (['y'], {}), '(y)\n', (7693, 7696), False, 'from megengine.functional import exp, log\n'), ((10021, 10052), 'numpy.arange', 'np.arange', (['(16)'], {'dtype': 'np.float32'}), '(16, dtype=np.float32)\n', (10030, 10052), True, 'import numpy as np\n'), ((10121, 10200), 'numpy.array', 'np.array', (['[[1.0, 0.0, 1.0], [0.0, 1.0, 1.0], [0.0, 0.0, 1.0]]'], {'dtype': 'np.float32'}), '([[1.0, 0.0, 1.0], [0.0, 1.0, 1.0], [0.0, 0.0, 1.0]], dtype=np.float32)\n', (10129, 10200), True, 'import numpy as np\n'), ((11670, 11690), 'numpy.random.rand', 'np.random.rand', (['n', '(2)'], {}), '(n, 2)\n', (11684, 11690), True, 'import numpy as np\n'), ((14126, 14156), 'numpy.random.randn', 'np.random.randn', (['(5)', '(5)', '(5)', '(5)', '(5)'], {}), '(5, 5, 5, 5, 5)\n', (14141, 14156), True, 'import numpy as np\n'), ((2494, 2506), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (2503, 2506), True, 'import numpy as np\n')]
|
import numpy as np
import megengine as mge
import megengine.module as M
import megengine.functional as F
from .utils.utils import bilinear_sampler, coords_grid
class AGCL:
"""
Implementation of Adaptive Group Correlation Layer (AGCL).
"""
def __init__(self, fmap1, fmap2, att=None):
self.fmap1 = fmap1
self.fmap2 = fmap2
self.att = att
self.coords = coords_grid(fmap1.shape[0], fmap1.shape[2], fmap1.shape[3]).to(
fmap1.device
)
def __call__(self, flow, extra_offset, small_patch=False, iter_mode=False):
if iter_mode:
corr = self.corr_iter(self.fmap1, self.fmap2, flow, small_patch)
else:
corr = self.corr_att_offset(
self.fmap1, self.fmap2, flow, extra_offset, small_patch
)
return corr
def get_correlation(self, left_feature, right_feature, psize=(3, 3), dilate=(1, 1)):
N, C, H, W = left_feature.shape
di_y, di_x = dilate[0], dilate[1]
pady, padx = psize[0] // 2 * di_y, psize[1] // 2 * di_x
        right_pad = F.pad(right_feature, pad_width=(
            (0, 0), (0, 0), (pady, pady), (padx, padx)), mode="replicate")
right_slid = F.sliding_window(
right_pad, kernel_size=(H, W), stride=(di_y, di_x))
right_slid = right_slid.reshape(N, C, -1, H, W)
right_slid = F.transpose(right_slid, (0, 2, 1, 3, 4))
right_slid = right_slid.reshape(-1, C, H, W)
corr_mean = F.mean(left_feature * right_slid, axis=1, keepdims=True)
corr_final = corr_mean.reshape(1, -1, H, W)
return corr_final
def corr_iter(self, left_feature, right_feature, flow, small_patch):
coords = self.coords + flow
coords = F.transpose(coords, (0, 2, 3, 1))
right_feature = bilinear_sampler(right_feature, coords)
if small_patch:
psize_list = [(3, 3), (3, 3), (3, 3), (3, 3)]
dilate_list = [(1, 1), (1, 1), (1, 1), (1, 1)]
else:
psize_list = [(1, 9), (1, 9), (1, 9), (1, 9)]
dilate_list = [(1, 1), (1, 1), (1, 1), (1, 1)]
N, C, H, W = left_feature.shape
lefts = F.split(left_feature, 4, axis=1)
rights = F.split(right_feature, 4, axis=1)
corrs = []
for i in range(len(psize_list)):
corr = self.get_correlation(
lefts[i], rights[i], psize_list[i], dilate_list[i]
)
corrs.append(corr)
final_corr = F.concat(corrs, axis=1)
return final_corr
def corr_att_offset(
self, left_feature, right_feature, flow, extra_offset, small_patch
):
N, C, H, W = left_feature.shape
if self.att is not None:
left_feature = F.reshape(
F.transpose(left_feature, (0, 2, 3, 1)), (N, H * W, C)
) # 'n c h w -> n (h w) c'
right_feature = F.reshape(
F.transpose(right_feature, (0, 2, 3, 1)), (N, H * W, C)
) # 'n c h w -> n (h w) c'
left_feature, right_feature = self.att(left_feature, right_feature)
# 'n (h w) c -> n c h w'
left_feature, right_feature = [
F.transpose(F.reshape(x, (N, H, W, C)), (0, 3, 1, 2))
for x in [left_feature, right_feature]
]
lefts = F.split(left_feature, 4, axis=1)
rights = F.split(right_feature, 4, axis=1)
C = C // 4
if small_patch:
psize_list = [(3, 3), (3, 3), (3, 3), (3, 3)]
dilate_list = [(1, 1), (1, 1), (1, 1), (1, 1)]
else:
psize_list = [(1, 9), (1, 9), (1, 9), (1, 9)]
dilate_list = [(1, 1), (1, 1), (1, 1), (1, 1)]
search_num = 9
extra_offset = F.transpose(
F.reshape(extra_offset, (N, search_num, 2, H, W)), (0, 1, 3, 4, 2)
) # [N, search_num, 1, 1, 2]
corrs = []
for i in range(len(psize_list)):
left_feature, right_feature = lefts[i], rights[i]
psize, dilate = psize_list[i], dilate_list[i]
psizey, psizex = psize[0], psize[1]
dilatey, dilatex = dilate[0], dilate[1]
ry = psizey // 2 * dilatey
rx = psizex // 2 * dilatex
x_grid, y_grid = np.meshgrid(
np.arange(-rx, rx + 1, dilatex), np.arange(-ry, ry + 1, dilatey)
)
y_grid, x_grid = mge.tensor(y_grid, device=self.fmap1.device), mge.tensor(
x_grid, device=self.fmap1.device
)
offsets = F.transpose(
F.reshape(F.stack((x_grid, y_grid)), (2, -1)), (1, 0)
) # [search_num, 2]
offsets = F.expand_dims(offsets, (0, 2, 3))
offsets = offsets + extra_offset
coords = self.coords + flow # [N, 2, H, W]
coords = F.transpose(coords, (0, 2, 3, 1)) # [N, H, W, 2]
coords = F.expand_dims(coords, 1) + offsets
coords = F.reshape(coords, (N, -1, W, 2)) # [N, search_num*H, W, 2]
right_feature = bilinear_sampler(
right_feature, coords
) # [N, C, search_num*H, W]
right_feature = F.reshape(
right_feature, (N, C, -1, H, W)
) # [N, C, search_num, H, W]
left_feature = F.expand_dims(left_feature, 2)
corr = F.mean(left_feature * right_feature, axis=1)
corrs.append(corr)
final_corr = F.concat(corrs, axis=1)
return final_corr
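# A minimal usage sketch (hypothetical shapes; assumes N x C x H x W feature
# maps with C divisible by 4 so the group split works):
#
#   fmap1 = mge.tensor(np.random.randn(1, 32, 16, 32).astype("float32"))
#   fmap2 = mge.tensor(np.random.randn(1, 32, 16, 32).astype("float32"))
#   flow = mge.tensor(np.zeros((1, 2, 16, 32), dtype="float32"))
#   corr = AGCL(fmap1, fmap2)(flow, extra_offset=None, iter_mode=True)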
|
[
"megengine.functional.split",
"megengine.functional.pad",
"megengine.tensor",
"megengine.functional.expand_dims",
"megengine.functional.transpose",
"megengine.functional.mean",
"megengine.functional.concat",
"megengine.functional.reshape",
"megengine.functional.sliding_window",
"megengine.functional.stack"
] |
[((1102, 1202), 'megengine.functional.pad', 'F.pad', (['right_feature'], {'pad_witdth': '((0, 0), (0, 0), (pady, pady), (padx, padx))', 'mode': '"""replicate"""'}), "(right_feature, pad_witdth=((0, 0), (0, 0), (pady, pady), (padx, padx)\n ), mode='replicate')\n", (1107, 1202), True, 'import megengine.functional as F\n'), ((1233, 1301), 'megengine.functional.sliding_window', 'F.sliding_window', (['right_pad'], {'kernel_size': '(H, W)', 'stride': '(di_y, di_x)'}), '(right_pad, kernel_size=(H, W), stride=(di_y, di_x))\n', (1249, 1301), True, 'import megengine.functional as F\n'), ((1392, 1432), 'megengine.functional.transpose', 'F.transpose', (['right_slid', '(0, 2, 1, 3, 4)'], {}), '(right_slid, (0, 2, 1, 3, 4))\n', (1403, 1432), True, 'import megengine.functional as F\n'), ((1507, 1563), 'megengine.functional.mean', 'F.mean', (['(left_feature * right_slid)'], {'axis': '(1)', 'keepdims': '(True)'}), '(left_feature * right_slid, axis=1, keepdims=True)\n', (1513, 1563), True, 'import megengine.functional as F\n'), ((1771, 1804), 'megengine.functional.transpose', 'F.transpose', (['coords', '(0, 2, 3, 1)'], {}), '(coords, (0, 2, 3, 1))\n', (1782, 1804), True, 'import megengine.functional as F\n'), ((2199, 2231), 'megengine.functional.split', 'F.split', (['left_feature', '(4)'], {'axis': '(1)'}), '(left_feature, 4, axis=1)\n', (2206, 2231), True, 'import megengine.functional as F\n'), ((2249, 2282), 'megengine.functional.split', 'F.split', (['right_feature', '(4)'], {'axis': '(1)'}), '(right_feature, 4, axis=1)\n', (2256, 2282), True, 'import megengine.functional as F\n'), ((2519, 2542), 'megengine.functional.concat', 'F.concat', (['corrs'], {'axis': '(1)'}), '(corrs, axis=1)\n', (2527, 2542), True, 'import megengine.functional as F\n'), ((3370, 3402), 'megengine.functional.split', 'F.split', (['left_feature', '(4)'], {'axis': '(1)'}), '(left_feature, 4, axis=1)\n', (3377, 3402), True, 'import megengine.functional as F\n'), ((3420, 3453), 'megengine.functional.split', 'F.split', (['right_feature', '(4)'], {'axis': '(1)'}), '(right_feature, 4, axis=1)\n', (3427, 3453), True, 'import megengine.functional as F\n'), ((5509, 5532), 'megengine.functional.concat', 'F.concat', (['corrs'], {'axis': '(1)'}), '(corrs, axis=1)\n', (5517, 5532), True, 'import megengine.functional as F\n'), ((3819, 3868), 'megengine.functional.reshape', 'F.reshape', (['extra_offset', '(N, search_num, 2, H, W)'], {}), '(extra_offset, (N, search_num, 2, H, W))\n', (3828, 3868), True, 'import megengine.functional as F\n'), ((4733, 4766), 'megengine.functional.expand_dims', 'F.expand_dims', (['offsets', '(0, 2, 3)'], {}), '(offsets, (0, 2, 3))\n', (4746, 4766), True, 'import megengine.functional as F\n'), ((4890, 4923), 'megengine.functional.transpose', 'F.transpose', (['coords', '(0, 2, 3, 1)'], {}), '(coords, (0, 2, 3, 1))\n', (4901, 4923), True, 'import megengine.functional as F\n'), ((5017, 5049), 'megengine.functional.reshape', 'F.reshape', (['coords', '(N, -1, W, 2)'], {}), '(coords, (N, -1, W, 2))\n', (5026, 5049), True, 'import megengine.functional as F\n'), ((5231, 5273), 'megengine.functional.reshape', 'F.reshape', (['right_feature', '(N, C, -1, H, W)'], {}), '(right_feature, (N, C, -1, H, W))\n', (5240, 5273), True, 'import megengine.functional as F\n'), ((5360, 5390), 'megengine.functional.expand_dims', 'F.expand_dims', (['left_feature', '(2)'], {}), '(left_feature, 2)\n', (5373, 5390), True, 'import megengine.functional as F\n'), ((5410, 5454), 'megengine.functional.mean', 'F.mean', (['(left_feature * 
right_feature)'], {'axis': '(1)'}), '(left_feature * right_feature, axis=1)\n', (5416, 5454), True, 'import megengine.functional as F\n'), ((2807, 2846), 'megengine.functional.transpose', 'F.transpose', (['left_feature', '(0, 2, 3, 1)'], {}), '(left_feature, (0, 2, 3, 1))\n', (2818, 2846), True, 'import megengine.functional as F\n'), ((2957, 2997), 'megengine.functional.transpose', 'F.transpose', (['right_feature', '(0, 2, 3, 1)'], {}), '(right_feature, (0, 2, 3, 1))\n', (2968, 2997), True, 'import megengine.functional as F\n'), ((4344, 4375), 'numpy.arange', 'np.arange', (['(-rx)', '(rx + 1)', 'dilatex'], {}), '(-rx, rx + 1, dilatex)\n', (4353, 4375), True, 'import numpy as np\n'), ((4377, 4408), 'numpy.arange', 'np.arange', (['(-ry)', '(ry + 1)', 'dilatey'], {}), '(-ry, ry + 1, dilatey)\n', (4386, 4408), True, 'import numpy as np\n'), ((4452, 4496), 'megengine.tensor', 'mge.tensor', (['y_grid'], {'device': 'self.fmap1.device'}), '(y_grid, device=self.fmap1.device)\n', (4462, 4496), True, 'import megengine as mge\n'), ((4498, 4542), 'megengine.tensor', 'mge.tensor', (['x_grid'], {'device': 'self.fmap1.device'}), '(x_grid, device=self.fmap1.device)\n', (4508, 4542), True, 'import megengine as mge\n'), ((4961, 4985), 'megengine.functional.expand_dims', 'F.expand_dims', (['coords', '(1)'], {}), '(coords, 1)\n', (4974, 4985), True, 'import megengine.functional as F\n'), ((3242, 3268), 'megengine.functional.reshape', 'F.reshape', (['x', '(N, H, W, C)'], {}), '(x, (N, H, W, C))\n', (3251, 3268), True, 'import megengine.functional as F\n'), ((4634, 4659), 'megengine.functional.stack', 'F.stack', (['(x_grid, y_grid)'], {}), '((x_grid, y_grid))\n', (4641, 4659), True, 'import megengine.functional as F\n')]
|
# Vibroacoustics
#
# E.Rohan, V.Lukeš
# Homogenization of the vibro–acoustic transmission on periodically
# perforated elastic plates with arrays of resonators.
# https://arxiv.org/abs/2104.01367 (arXiv:2104.01367v1)
import os.path as op
import numpy as nm
from acoustics_macro_utils import get_homogmat
from sfepy.homogenization.utils import define_box_regions
from sfepy.discrete.fem import Mesh
from sfepy.discrete.fem.periodic import match_y_plane, match_x_plane
wdir = op.dirname(__file__)
def get_regions(filename_mesh):
mesh = Mesh.from_file(filename_mesh)
bbox = nm.array(mesh.get_bounding_box())
region_lb, region_rt = bbox
return define_box_regions(2, region_lb, region_rt)
def get_homogmat_plate(coors, mode, pb):
if mode != 'qp':
return
mconf = pb.conf.mconf
c = mconf.sound_speed
wave_num = mconf.wave_num
rho0 = mconf.rho0
c2 = c**2
w = wave_num * c
w2 = w**2
pb.ofn_trunk = mconf.ofn_trunk + '_plate'
out_ac = get_homogmat(coors, mode, pb, mconf.coefs_filename, omega=w)
nqp = coors.shape[0]
out = {}
out['A'] = out_ac['A']
out['w2F'] = out_ac['F'] * w2
out['wB'] = out_ac['B'] * w
vol_Imp = 0.5 * (out_ac['Vol_Imp']['volume_Im'] +
out_ac['Vol_Imp']['volume_Ip'])
zeta = out_ac['volumes']['volume_Y'] / vol_Imp
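    # zeta: volume of the cell Y relative to the averaged Im/Ip volume;
    # it scales the coefficient w2Kr below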
out['w2Kr'] = nm.tile(zeta / c2, (nqp, 1, 1)) * w2
out['w'] = nm.ones((nqp, 1, 1), dtype=nm.float64) * w
out_vc = get_homogmat(coors, mode, pb, mconf.coefs_filename_plate)
bar_h = out_ac['h']
h = mconf.eps0 * bar_h
out['E'] = bar_h / rho0 * out_vc['Cm']
out['h2E'] = h**2 / 12. * out['E']
out['wH'] = bar_h * out_vc['Hm'] * w
out['w2K'] = out['w2Kr'] + rho0 * out_vc['Km'] * bar_h * w2
out['S'] = bar_h / rho0 * out_vc['Gm']
sfdim = out_vc['Gm'].shape[2]
out['w2C3'] = out_ac['C'][:, sfdim:, :] * w2
out['wD'] = nm.ascontiguousarray(out_ac['D'][:, :sfdim, :sfdim]) * w
out['w2M'] = (out_ac['tM'] + out_ac['M'][:, :sfdim, :sfdim]) * w2
out['w2N'] = out_ac['M'][:, sfdim:, sfdim:] * w2
out['w2h2L'] = h**2 / 12. * out_ac['tM'] * w2
print('### material-plate: wave number = ', wave_num)
return out
def define(**kwargs):
mconf = kwargs['master_problem'].conf
filename_mesh = mconf.filename_mesh_plate
regions = {
'Gamma0_1': 'all',
}
regions.update(get_regions(filename_mesh))
functions = {
'get_homogmat':
(lambda ts, coors, mode=None, problem=None, **kwargs:
get_homogmat_plate(coors, mode, problem),),
'match_y_plane': (match_y_plane,),
}
materials = {
'ac': 'get_homogmat',
}
fields = {
'tvelocity0': ('complex', 'scalar', 'Gamma0_1', 1),
'pressure0': ('complex', 'scalar', 'Gamma0_1', 1),
'deflection': ('complex', 'scalar', 'Gamma0_1', 2),
'displacement': ('complex', 'vector', 'Gamma0_1', 1),
'rotation': ('complex', 'vector', 'Gamma0_1', 1),
}
integrals = {
'i': 4,
}
variables = {
'sp0': ('unknown field', 'pressure0', 0),
'sq0': ('test field', 'pressure0', 'sp0'),
'dp0': ('unknown field', 'pressure0', 1),
'dq0': ('test field', 'pressure0', 'dp0'),
'g01': ('unknown field', 'tvelocity0', 2),
'f01': ('test field', 'tvelocity0', 'g01'),
'g02': ('unknown field', 'tvelocity0', 3),
'f02': ('test field', 'tvelocity0', 'g02'),
'u': ('unknown field', 'displacement', 4),
'v': ('test field', 'displacement', 'u'),
'w': ('unknown field', 'deflection', 5),
'z': ('test field', 'deflection', 'w'),
'theta': ('unknown field', 'rotation', 6),
'nu': ('test field', 'rotation', 'theta'),
}
ebcs = {
'fixed_l': ('Left', {'w.0': 0.0, 'u.all': 0.0, 'theta.all': 0.0}),
'fixed_r': ('Right', {'w.0': 0.0, 'u.all': 0.0, 'theta.all': 0.0}),
}
epbcs = {
# 'per_g01': (['Bottom', 'Top'], {'g01.0': 'g01.0'},
# 'match_y_plane'),
# 'per_g02': (['Bottom', 'Top'], {'g02.0': 'g02.0'},
# 'match_y_plane'),
# 'per_dp0': (['Bottom', 'Top'], {'dp0.0': 'dp0.0'},
# 'match_y_plane'),
# 'per_sp0': (['Bottom', 'Top'], {'sp0.0': 'sp0.0'},
# 'match_y_plane'),
'per_w': (['Bottom', 'Top'], {'w.0': 'w.0'},
'match_y_plane'),
'per_u': (['Bottom', 'Top'], {'u.all': 'u.all'},
'match_y_plane'),
'per_theta': (['Bottom', 'Top'], {'theta.all': 'theta.all'},
'match_y_plane'),
}
equations = {
# p^0 = 0.5 * (P^+ + P^-)
# eq. (79)_1
'eq_g01': """
0.5 * dw_diffusion.i.Gamma0_1(ac.A, f01, sp0)
- 0.5 * dw_volume_dot.i.Gamma0_1(ac.w2K, f01, sp0)
+ %s * dw_v_dot_grad_s.i.Gamma0_1(ac.wD, u, f01)
+ %s * dw_biot.i.Gamma0_1(ac.wH, u, f01)
+ %s * dw_volume_dot.i.Gamma0_1(ac.w, f01, g01)
- %s * dw_volume_dot.i.Gamma0_1(ac.w, f01, g02)
= 0""" % (1j, 1j, 1j / mconf.eps0, 1j / mconf.eps0),
# eq. (80)_1
'eq_g02': """
+ 0.5 * dw_volume_dot.i.Gamma0_1(ac.w2F, f02, g01)
+ 0.5 * dw_volume_dot.i.Gamma0_1(ac.w2F, f02, g02)
- dw_volume_dot.i.Gamma0_1(ac.w2C3, f02, w)
- %s * dw_volume_dot.i.Gamma0_1(ac.w, f02, dp0)
= 0""" % (1j / mconf.eps0,),
# p^0 = 0.5 * (P^+ + P^-)
# eq. (79)_2
'eq_v': """
- %s * dw_v_dot_grad_s.i.Gamma0_1(ac.wD, v, sp0)
- %s * dw_biot.i.Gamma0_1(ac.wH, v, sp0)
+ dw_lin_elastic.i.Gamma0_1(ac.E, v, u)
- dw_volume_dot.i.Gamma0_1(ac.w2M, v, u)
= 0""" % (1j * 0.5, 1j * 0.5),
# eq. (80)_2
'eq_z': """
- dw_volume_dot.i.Gamma0_1(ac.w2N, z, w)
+ dw_diffusion.i.Gamma0_1(ac.S, z, w)
- dw_v_dot_grad_s.i.Gamma0_1(ac.S, theta, z)
+ 0.5 * dw_volume_dot.i.Gamma0_1(ac.w2C3, z, g01)
+ 0.5 * dw_volume_dot.i.Gamma0_1(ac.w2C3, z, g02)
= 0""",
# eq. (80)_2
'eq_nu': """
- dw_volume_dot.i.Gamma0_1(ac.w2h2L, nu, theta)
+ dw_lin_elastic.i.Gamma0_1(ac.h2E, nu, theta)
+ dw_volume_dot.i.Gamma0_1(ac.S, nu, theta)
- dw_v_dot_grad_s.i.Gamma0_1(ac.S, nu, w)
= 0""",
}
options = {
'output_dir': op.join(wdir, 'results'),
'output_format': 'h5',
}
solvers = {
'ls': ('ls.scipy_direct', {}),
'newton': ('nls.newton', {'i_max': 1,
'eps_a': 1e-6,
'eps_r': 1e-6,
'problem': 'nonlinear', })
}
return locals()
|
[
"sfepy.discrete.fem.Mesh.from_file",
"sfepy.homogenization.utils.define_box_regions"
] |
[((476, 496), 'os.path.dirname', 'op.dirname', (['__file__'], {}), '(__file__)\n', (486, 496), True, 'import os.path as op\n'), ((542, 571), 'sfepy.discrete.fem.Mesh.from_file', 'Mesh.from_file', (['filename_mesh'], {}), '(filename_mesh)\n', (556, 571), False, 'from sfepy.discrete.fem import Mesh\n'), ((661, 704), 'sfepy.homogenization.utils.define_box_regions', 'define_box_regions', (['(2)', 'region_lb', 'region_rt'], {}), '(2, region_lb, region_rt)\n', (679, 704), False, 'from sfepy.homogenization.utils import define_box_regions\n'), ((1000, 1060), 'acoustics_macro_utils.get_homogmat', 'get_homogmat', (['coors', 'mode', 'pb', 'mconf.coefs_filename'], {'omega': 'w'}), '(coors, mode, pb, mconf.coefs_filename, omega=w)\n', (1012, 1060), False, 'from acoustics_macro_utils import get_homogmat\n'), ((1481, 1538), 'acoustics_macro_utils.get_homogmat', 'get_homogmat', (['coors', 'mode', 'pb', 'mconf.coefs_filename_plate'], {}), '(coors, mode, pb, mconf.coefs_filename_plate)\n', (1493, 1538), False, 'from acoustics_macro_utils import get_homogmat\n'), ((1372, 1403), 'numpy.tile', 'nm.tile', (['(zeta / c2)', '(nqp, 1, 1)'], {}), '(zeta / c2, (nqp, 1, 1))\n', (1379, 1403), True, 'import numpy as nm\n'), ((1424, 1462), 'numpy.ones', 'nm.ones', (['(nqp, 1, 1)'], {'dtype': 'nm.float64'}), '((nqp, 1, 1), dtype=nm.float64)\n', (1431, 1462), True, 'import numpy as nm\n'), ((1922, 1974), 'numpy.ascontiguousarray', 'nm.ascontiguousarray', (["out_ac['D'][:, :sfdim, :sfdim]"], {}), "(out_ac['D'][:, :sfdim, :sfdim])\n", (1942, 1974), True, 'import numpy as nm\n'), ((6668, 6692), 'os.path.join', 'op.join', (['wdir', '"""results"""'], {}), "(wdir, 'results')\n", (6675, 6692), True, 'import os.path as op\n')]
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import itertools
import numpy as np
import megengine as mge
import megengine.autodiff as ad
import megengine.functional as F
from megengine import Tensor
from megengine.core._imperative_rt.core2 import (
_set_drop_flag,
_set_swap_flag,
get_option,
set_option,
)
from megengine.module import Linear, Module
from megengine.optimizer import SGD
batch_size = 64
data_shape = (batch_size, 2)
label_shape = (batch_size,)
def minibatch_generator():
while True:
inp_data = np.zeros((batch_size, 2))
label = np.zeros(batch_size, dtype=np.int32)
for i in range(batch_size):
# [x0, x1], sampled from U[-1, 1]
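            # label is 1 when x0 and x1 have the same sign, 0 otherwise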
inp_data[i, :] = np.random.rand(2) * 2 - 1
label[i] = 0 if np.prod(inp_data[i]) < 0 else 1
yield inp_data.astype(np.float32), label.astype(np.int32)
def calculate_precision(data: np.ndarray, pred: np.ndarray) -> float:
""" Calculate precision for given data and prediction.
:type data: [[x, y], ...]
:param data: Input data
:type pred: [[x_pred, y_pred], ...]
:param pred: Network output data
"""
correct = 0
assert len(data) == len(pred)
for inp_data, pred_output in zip(data, pred):
label = 0 if np.prod(inp_data) < 0 else 1
pred_label = np.argmax(pred_output)
if pred_label == label:
correct += 1
return float(correct) / len(data)
class XORNet(Module):
def __init__(self):
self.mid_layers = 14
self.num_class = 2
super().__init__()
self.fc0 = Linear(self.num_class, self.mid_layers, bias=True)
self.fc1 = Linear(self.mid_layers, self.mid_layers, bias=True)
self.fc2 = Linear(self.mid_layers, self.num_class, bias=True)
def forward(self, x):
y = self.fc0(x)
x._swap_out()
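        # _swap_out() offloads the tensor to host memory; it is swapped back
        # automatically the next time it is used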
x = F.tanh(y)
y = self.fc1(x)
x = F.tanh(y)
x = self.fc2(x)
y = (x + x) / 2 # in order to test drop()
y._drop()
return y
def test_training_converge_with_swap_and_drop():
_set_swap_flag(True)
_set_drop_flag(True)
old_buffer_length = get_option("buffer_length")
set_option("buffer_length", 0)
net = XORNet()
opt = SGD(net.parameters(), lr=0.01, momentum=0.9, weight_decay=5e-4)
gm = ad.GradManager().attach(net.parameters())
def train(data, label):
with gm:
pred = net(data)
loss = F.nn.cross_entropy(pred, label)
gm.backward(loss)
return loss
def infer(data):
return net(data)
train_dataset = minibatch_generator()
losses = []
for data, label in itertools.islice(train_dataset, 2000):
data = Tensor(data, dtype=np.float32)
label = Tensor(label, dtype=np.int32)
opt.clear_grad()
loss = train(data, label)
opt.step()
losses.append(loss.numpy())
    assert np.mean(losses[-100:]) < 0.1, "Final training loss must be low enough"
ngrid = 10
x = np.linspace(-1.0, 1.0, ngrid)
xx, yy = np.meshgrid(x, x)
xx = xx.reshape((ngrid * ngrid, 1))
yy = yy.reshape((ngrid * ngrid, 1))
data = mge.tensor(np.concatenate((xx, yy), axis=1).astype(np.float32))
pred = infer(Tensor(data)).numpy()
precision = calculate_precision(data.numpy(), pred)
    assert precision == 1.0, "Test precision must be high enough, got {}".format(
precision
)
_set_swap_flag(False)
_set_drop_flag(False)
set_option("buffer_length", old_buffer_length)
|
[
"megengine.core._imperative_rt.core2._set_swap_flag",
"megengine.core._imperative_rt.core2.set_option",
"megengine.Tensor",
"megengine.module.Linear",
"megengine.functional.nn.cross_entropy",
"megengine.core._imperative_rt.core2._set_drop_flag",
"megengine.autodiff.GradManager",
"megengine.functional.tanh",
"megengine.core._imperative_rt.core2.get_option"
] |
[((2431, 2451), 'megengine.core._imperative_rt.core2._set_swap_flag', '_set_swap_flag', (['(True)'], {}), '(True)\n', (2445, 2451), False, 'from megengine.core._imperative_rt.core2 import _set_drop_flag, _set_swap_flag, get_option, set_option\n'), ((2456, 2476), 'megengine.core._imperative_rt.core2._set_drop_flag', '_set_drop_flag', (['(True)'], {}), '(True)\n', (2470, 2476), False, 'from megengine.core._imperative_rt.core2 import _set_drop_flag, _set_swap_flag, get_option, set_option\n'), ((2501, 2528), 'megengine.core._imperative_rt.core2.get_option', 'get_option', (['"""buffer_length"""'], {}), "('buffer_length')\n", (2511, 2528), False, 'from megengine.core._imperative_rt.core2 import _set_drop_flag, _set_swap_flag, get_option, set_option\n'), ((2533, 2563), 'megengine.core._imperative_rt.core2.set_option', 'set_option', (['"""buffer_length"""', '(0)'], {}), "('buffer_length', 0)\n", (2543, 2563), False, 'from megengine.core._imperative_rt.core2 import _set_drop_flag, _set_swap_flag, get_option, set_option\n'), ((3014, 3051), 'itertools.islice', 'itertools.islice', (['train_dataset', '(2000)'], {}), '(train_dataset, 2000)\n', (3030, 3051), False, 'import itertools\n'), ((3366, 3395), 'numpy.linspace', 'np.linspace', (['(-1.0)', '(1.0)', 'ngrid'], {}), '(-1.0, 1.0, ngrid)\n', (3377, 3395), True, 'import numpy as np\n'), ((3409, 3426), 'numpy.meshgrid', 'np.meshgrid', (['x', 'x'], {}), '(x, x)\n', (3420, 3426), True, 'import numpy as np\n'), ((3789, 3810), 'megengine.core._imperative_rt.core2._set_swap_flag', '_set_swap_flag', (['(False)'], {}), '(False)\n', (3803, 3810), False, 'from megengine.core._imperative_rt.core2 import _set_drop_flag, _set_swap_flag, get_option, set_option\n'), ((3815, 3836), 'megengine.core._imperative_rt.core2._set_drop_flag', '_set_drop_flag', (['(False)'], {}), '(False)\n', (3829, 3836), False, 'from megengine.core._imperative_rt.core2 import _set_drop_flag, _set_swap_flag, get_option, set_option\n'), ((3841, 3887), 'megengine.core._imperative_rt.core2.set_option', 'set_option', (['"""buffer_length"""', 'old_buffer_length'], {}), "('buffer_length', old_buffer_length)\n", (3851, 3887), False, 'from megengine.core._imperative_rt.core2 import _set_drop_flag, _set_swap_flag, get_option, set_option\n'), ((875, 900), 'numpy.zeros', 'np.zeros', (['(batch_size, 2)'], {}), '((batch_size, 2))\n', (883, 900), True, 'import numpy as np\n'), ((917, 953), 'numpy.zeros', 'np.zeros', (['batch_size'], {'dtype': 'np.int32'}), '(batch_size, dtype=np.int32)\n', (925, 953), True, 'import numpy as np\n'), ((1663, 1685), 'numpy.argmax', 'np.argmax', (['pred_output'], {}), '(pred_output)\n', (1672, 1685), True, 'import numpy as np\n'), ((1932, 1982), 'megengine.module.Linear', 'Linear', (['self.num_class', 'self.mid_layers'], {'bias': '(True)'}), '(self.num_class, self.mid_layers, bias=True)\n', (1938, 1982), False, 'from megengine.module import Linear, Module\n'), ((2002, 2053), 'megengine.module.Linear', 'Linear', (['self.mid_layers', 'self.mid_layers'], {'bias': '(True)'}), '(self.mid_layers, self.mid_layers, bias=True)\n', (2008, 2053), False, 'from megengine.module import Linear, Module\n'), ((2074, 2124), 'megengine.module.Linear', 'Linear', (['self.mid_layers', 'self.num_class'], {'bias': '(True)'}), '(self.mid_layers, self.num_class, bias=True)\n', (2080, 2124), False, 'from megengine.module import Linear, Module\n'), ((2210, 2219), 'megengine.functional.tanh', 'F.tanh', (['y'], {}), '(y)\n', (2216, 2219), True, 'import megengine.functional as F\n'), ((2256, 2265), 
'megengine.functional.tanh', 'F.tanh', (['y'], {}), '(y)\n', (2262, 2265), True, 'import megengine.functional as F\n'), ((3068, 3098), 'megengine.Tensor', 'Tensor', (['data'], {'dtype': 'np.float32'}), '(data, dtype=np.float32)\n', (3074, 3098), False, 'from megengine import Tensor\n'), ((3115, 3144), 'megengine.Tensor', 'Tensor', (['label'], {'dtype': 'np.int32'}), '(label, dtype=np.int32)\n', (3121, 3144), False, 'from megengine import Tensor\n'), ((3271, 3293), 'numpy.mean', 'np.mean', (['losses[-100:]'], {}), '(losses[-100:])\n', (3278, 3293), True, 'import numpy as np\n'), ((2666, 2682), 'megengine.autodiff.GradManager', 'ad.GradManager', ([], {}), '()\n', (2680, 2682), True, 'import megengine.autodiff as ad\n'), ((2802, 2833), 'megengine.functional.nn.cross_entropy', 'F.nn.cross_entropy', (['pred', 'label'], {}), '(pred, label)\n', (2820, 2833), True, 'import megengine.functional as F\n'), ((1613, 1630), 'numpy.prod', 'np.prod', (['inp_data'], {}), '(inp_data)\n', (1620, 1630), True, 'import numpy as np\n'), ((3529, 3561), 'numpy.concatenate', 'np.concatenate', (['(xx, yy)'], {'axis': '(1)'}), '((xx, yy), axis=1)\n', (3543, 3561), True, 'import numpy as np\n'), ((3600, 3612), 'megengine.Tensor', 'Tensor', (['data'], {}), '(data)\n', (3606, 3612), False, 'from megengine import Tensor\n'), ((1065, 1082), 'numpy.random.rand', 'np.random.rand', (['(2)'], {}), '(2)\n', (1079, 1082), True, 'import numpy as np\n'), ((1119, 1139), 'numpy.prod', 'np.prod', (['inp_data[i]'], {}), '(inp_data[i])\n', (1126, 1139), True, 'import numpy as np\n')]
|
import io
import pickle
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
import megengine.utils.comp_graph_tools as cgtools
from megengine.core._trace_option import set_symbolic_shape
from megengine.jit import trace
from megengine.traced_module import trace_module
set_symbolic_shape(True)
class Main(M.Module):
def forward(self, x):
return x["data"]
class PreProcess(M.Module):
def __init__(self):
super().__init__()
self.A = F.zeros((1,))
self.I = F.ones((1,))
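        # A and I are 1-element constants, broadcast in forward() to assemble
        # the per-sample linear systems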
self.bb_out = mge.tensor(
np.array([[[0, 0], [160, 0], [160, 48], [0, 48]]], dtype="float32")
)
def forward(self, data, quad):
"""
data: (1, 3, 48, 160)
quad: (1, 4, 2)
"""
N = quad.shape[0]
dst = F.repeat(self.bb_out, N, axis=0).reshape(-1, 4, 2)
I = F.broadcast_to(self.I, quad.shape)
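        # assemble the standard 8x8 (DLT-style) system A @ h = B; solving it gives
        # the 8 coefficients of the perspective transform mapping each quad onto dst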
A = F.broadcast_to(self.A, (N, 8, 8))
A[:, 0:4, 0:2] = quad
A[:, 4:8, 5:6] = I[:, :, 0:1]
A[:, 0:4, 6:8] = -quad * dst[:, :, 0:1]
A[:, 4:8, 3:5] = quad
A[:, 0:4, 2:3] = I[:, :, 0:1]
A[:, 4:8, 6:8] = -quad * dst[:, :, 1:2]
B = dst.transpose(0, 2, 1).reshape(-1, 8, 1)
M = F.concat([F.matmul(F.matinv(A), B)[:, :, 0], I[:, 0:1, 0]], axis=1).reshape(
-1, 3, 3
)
new_data = F.warp_perspective(data, M, (48, 160)) # (N, 3, 48, 160)
return {"data": new_data}
class Net(M.Module):
def __init__(self, traced_module):
super().__init__()
self.pre_process = PreProcess()
self.traced_module = traced_module
def forward(self, data, quad):
x = self.pre_process(data, quad)
x = self.traced_module(x)
return x
def test_preprocess():
batch_size = 2
module = Main()
data = mge.tensor(
np.random.randint(0, 256, size=(batch_size, 3, 48, 160)), dtype=np.float32
)
traced_module = trace_module(module, {"data": data})
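    # round-trip the traced module through pickle to verify it (de)serializes intact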
obj = pickle.dumps(traced_module)
traced_module = pickle.loads(obj)
module = Net(traced_module)
module.eval()
quad = mge.tensor(np.random.normal(size=(batch_size, 4, 2)), dtype=np.float32)
expect = module(data, quad)
traced_module = trace_module(module, data, quad)
actual = traced_module(data, quad)
for i, j in zip(expect, actual):
np.testing.assert_array_equal(i, j)
func = trace(traced_module, capture_as_const=True)
actual = func(data, quad)
for i, j in zip(expect, actual):
np.testing.assert_array_equal(i, j)
model = io.BytesIO()
func.dump(model, arg_names=("data", "quad"))
model.seek(0)
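    # reload the dumped graph and run inference on raw numpy inputs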
infer_cg = cgtools.GraphInference(model)
actual = list(
infer_cg.run(inp_dict={"data": data.numpy(), "quad": quad.numpy()}).values()
)[0]
np.testing.assert_allclose(expect, actual)
|
[
"megengine.jit.trace",
"megengine.functional.repeat",
"megengine.functional.zeros",
"megengine.functional.broadcast_to",
"megengine.functional.ones",
"megengine.functional.matinv",
"megengine.functional.warp_perspective",
"megengine.core._trace_option.set_symbolic_shape",
"megengine.traced_module.trace_module",
"megengine.utils.comp_graph_tools.GraphInference"
] |
[((324, 348), 'megengine.core._trace_option.set_symbolic_shape', 'set_symbolic_shape', (['(True)'], {}), '(True)\n', (342, 348), False, 'from megengine.core._trace_option import set_symbolic_shape\n'), ((2000, 2036), 'megengine.traced_module.trace_module', 'trace_module', (['module', "{'data': data}"], {}), "(module, {'data': data})\n", (2012, 2036), False, 'from megengine.traced_module import trace_module\n'), ((2047, 2074), 'pickle.dumps', 'pickle.dumps', (['traced_module'], {}), '(traced_module)\n', (2059, 2074), False, 'import pickle\n'), ((2095, 2112), 'pickle.loads', 'pickle.loads', (['obj'], {}), '(obj)\n', (2107, 2112), False, 'import pickle\n'), ((2298, 2330), 'megengine.traced_module.trace_module', 'trace_module', (['module', 'data', 'quad'], {}), '(module, data, quad)\n', (2310, 2330), False, 'from megengine.traced_module import trace_module\n'), ((2462, 2505), 'megengine.jit.trace', 'trace', (['traced_module'], {'capture_as_const': '(True)'}), '(traced_module, capture_as_const=True)\n', (2467, 2505), False, 'from megengine.jit import trace\n'), ((2629, 2641), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (2639, 2641), False, 'import io\n'), ((2724, 2753), 'megengine.utils.comp_graph_tools.GraphInference', 'cgtools.GraphInference', (['model'], {}), '(model)\n', (2746, 2753), True, 'import megengine.utils.comp_graph_tools as cgtools\n'), ((2871, 2913), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['expect', 'actual'], {}), '(expect, actual)\n', (2897, 2913), True, 'import numpy as np\n'), ((522, 535), 'megengine.functional.zeros', 'F.zeros', (['(1,)'], {}), '((1,))\n', (529, 535), True, 'import megengine.functional as F\n'), ((553, 565), 'megengine.functional.ones', 'F.ones', (['(1,)'], {}), '((1,))\n', (559, 565), True, 'import megengine.functional as F\n'), ((907, 941), 'megengine.functional.broadcast_to', 'F.broadcast_to', (['self.I', 'quad.shape'], {}), '(self.I, quad.shape)\n', (921, 941), True, 'import megengine.functional as F\n'), ((954, 987), 'megengine.functional.broadcast_to', 'F.broadcast_to', (['self.A', '(N, 8, 8)'], {}), '(self.A, (N, 8, 8))\n', (968, 987), True, 'import megengine.functional as F\n'), ((1412, 1450), 'megengine.functional.warp_perspective', 'F.warp_perspective', (['data', 'M', '(48, 160)'], {}), '(data, M, (48, 160))\n', (1430, 1450), True, 'import megengine.functional as F\n'), ((1899, 1955), 'numpy.random.randint', 'np.random.randint', (['(0)', '(256)'], {'size': '(batch_size, 3, 48, 160)'}), '(0, 256, size=(batch_size, 3, 48, 160))\n', (1916, 1955), True, 'import numpy as np\n'), ((2185, 2226), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(batch_size, 4, 2)'}), '(size=(batch_size, 4, 2))\n', (2201, 2226), True, 'import numpy as np\n'), ((2415, 2450), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['i', 'j'], {}), '(i, j)\n', (2444, 2450), True, 'import numpy as np\n'), ((2581, 2616), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['i', 'j'], {}), '(i, j)\n', (2610, 2616), True, 'import numpy as np\n'), ((612, 679), 'numpy.array', 'np.array', (['[[[0, 0], [160, 0], [160, 48], [0, 48]]]'], {'dtype': '"""float32"""'}), "([[[0, 0], [160, 0], [160, 48], [0, 48]]], dtype='float32')\n", (620, 679), True, 'import numpy as np\n'), ((844, 876), 'megengine.functional.repeat', 'F.repeat', (['self.bb_out', 'N'], {'axis': '(0)'}), '(self.bb_out, N, axis=0)\n', (852, 876), True, 'import megengine.functional as F\n'), ((1304, 1315), 'megengine.functional.matinv', 'F.matinv', (['A'], 
{}), '(A)\n', (1312, 1315), True, 'import megengine.functional as F\n')]
|
from sqlmodel import create_engine, Session
from sqlmodel.main import SQLModel
from core.config import settings
engine = create_engine(settings.SQLALCHEMY_DATABASE_URI, pool_pre_ping=True)
def init_db():
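    # create all tables registered on SQLModel.metadata (existing tables are skipped)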
SQLModel.metadata.create_all(engine)
def get_session():
with Session(engine) as session:
yield session
|
[
"sqlmodel.main.SQLModel.metadata.create_all",
"sqlmodel.Session",
"sqlmodel.create_engine"
] |
[((123, 190), 'sqlmodel.create_engine', 'create_engine', (['settings.SQLALCHEMY_DATABASE_URI'], {'pool_pre_ping': '(True)'}), '(settings.SQLALCHEMY_DATABASE_URI, pool_pre_ping=True)\n', (136, 190), False, 'from sqlmodel import create_engine, Session\n'), ((212, 248), 'sqlmodel.main.SQLModel.metadata.create_all', 'SQLModel.metadata.create_all', (['engine'], {}), '(engine)\n', (240, 248), False, 'from sqlmodel.main import SQLModel\n'), ((279, 294), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (286, 294), False, 'from sqlmodel import create_engine, Session\n')]
|
from urllib.parse import urlparse
from datetime import datetime
import dramatiq
from dramatiq.brokers.redis import RedisBroker
from sqlmodel import Session
from app.db import engine
from app.models import Document, DocumentInput
from app.predict import CategoryPrediction
from app.settings import settings
redis_parameters = urlparse(settings.redis_url)
redis_broker = RedisBroker(
host=redis_parameters.hostname,
port=redis_parameters.port,
username=redis_parameters.username,
password=redis_parameters.password,
    # Heroku Redis with TLS uses self-signed certs, so we need to tinker a bit
ssl=redis_parameters.scheme == "rediss",
ssl_cert_reqs=None,
)
dramatiq.set_broker(redis_broker)
category_prediction = CategoryPrediction()
@dramatiq.actor
def ingest_document(document_json: str):
document = DocumentInput.parse_raw(document_json)
with Session(engine) as session:
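        # upsert: insert the document if it is new, otherwise apply a partial update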
document_db = session.get(Document, document.id)
if document_db is None:
document_db = Document(**document.dict())
else:
document_dict = document.dict(exclude_unset=True)
for key, value in document_dict.items():
setattr(document_db, key, value)
document_db.category = category_prediction.predict(document_db.content)
document_db.updated_at = datetime.utcnow()
session.add(document_db)
session.commit()
|
[
"sqlmodel.Session"
] |
[((328, 356), 'urllib.parse.urlparse', 'urlparse', (['settings.redis_url'], {}), '(settings.redis_url)\n', (336, 356), False, 'from urllib.parse import urlparse\n'), ((372, 584), 'dramatiq.brokers.redis.RedisBroker', 'RedisBroker', ([], {'host': 'redis_parameters.hostname', 'port': 'redis_parameters.port', 'username': 'redis_parameters.username', 'password': 'redis_parameters.password', 'ssl': "(redis_parameters.scheme == 'rediss')", 'ssl_cert_reqs': 'None'}), "(host=redis_parameters.hostname, port=redis_parameters.port,\n username=redis_parameters.username, password=redis_parameters.password,\n ssl=redis_parameters.scheme == 'rediss', ssl_cert_reqs=None)\n", (383, 584), False, 'from dramatiq.brokers.redis import RedisBroker\n'), ((682, 715), 'dramatiq.set_broker', 'dramatiq.set_broker', (['redis_broker'], {}), '(redis_broker)\n', (701, 715), False, 'import dramatiq\n'), ((740, 760), 'app.predict.CategoryPrediction', 'CategoryPrediction', ([], {}), '()\n', (758, 760), False, 'from app.predict import CategoryPrediction\n'), ((835, 873), 'app.models.DocumentInput.parse_raw', 'DocumentInput.parse_raw', (['document_json'], {}), '(document_json)\n', (858, 873), False, 'from app.models import Document, DocumentInput\n'), ((883, 898), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (890, 898), False, 'from sqlmodel import Session\n'), ((1346, 1363), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (1361, 1363), False, 'from datetime import datetime\n')]
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine import tensor
from megengine.distributed.functional import (
all_gather,
all_to_all,
gather,
reduce_scatter_sum,
scatter,
)
from megengine.jit import trace
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (99, 77), (2, 2, 2, 2)], ids=str)
@pytest.mark.parametrize("symbolic", [False, True], ids=str)
@pytest.mark.parametrize("axis", [0, 1], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather(shape, symbolic, axis):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
def func():
output = all_gather(inp, axis=axis)
return output
func = trace(symbolic=symbolic)(func)
output = func()
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype("float32")
y = np.random.random_sample(shape).astype("float32")
z = np.concatenate((x, y), axis=axis)
data = (x, y)
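    # each rank gathers both tensors, so both ranks expect the full concatenation z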
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize(
"shape,symbolic", [((2, 4, 6, 8), False), ((2, 4, 6, 8), True)], ids=str
)
@pytest.mark.parametrize("axis", [1, 0, 2, 3], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum(shape, symbolic, axis):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
def func():
output = reduce_scatter_sum(inp, axis=axis)
return output
func = trace(symbolic=symbolic)(func)
output = func()
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype("float32")
y = np.random.random_sample(shape).astype("float32")
z = x + y
data = (x, y)
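    # expected outputs: the elementwise sum is split along `axis` and each rank keeps one chunk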
z = np.split(z, 2, axis=axis)
z = np.concatenate(z, axis=0)
expect = (z[: z.shape[0] // 2], z[z.shape[0] // 2 :])
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize(
"shape,symbolic", [((2, 4, 6, 8), True), ((2, 4, 6, 8), False)], ids=str
)
@pytest.mark.parametrize("axis", [1, 0, 2, 3], ids=str)
@pytest.mark.isolated_distributed
def test_scatter(shape, symbolic, axis):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
def func():
output = scatter(inp, axis=axis)
return output
func = trace(symbolic=symbolic)(func)
output = func()
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype("float32")
y = x + 1
data = (x, y)
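    # only rank 0's tensor is scattered: x is split along `axis` and each rank receives one chunk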
_x = np.split(x, 2, axis=axis)
_x = np.concatenate(_x, axis=0)
expect = (_x[: _x.shape[0] // 2], _x[_x.shape[0] // 2 :])
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 4, 6, 8)], ids=str)
@pytest.mark.parametrize("symbolic", [False, True], ids=str)
@pytest.mark.parametrize(
"split_axis,concat_axis", [(0, 1), (1, 0), (2, 0), (0, 2), (2, 3)], ids=str
)
@pytest.mark.isolated_distributed
def test_all_to_all(shape, symbolic, split_axis, concat_axis):
@dist.launcher(n_gpus=2)
def worker(data):
rank = dist.get_rank()
inp = tensor(data[rank])
def func():
all_to_all_output = all_to_all(
inp, split_axis=split_axis, concat_axis=concat_axis
)
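            # gathering the all_to_all output along split_axis should reproduce
            # the result of gathering the raw inputs along concat_axis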
gather_C = gather(inp, axis=concat_axis)
gather_B = gather(all_to_all_output, axis=split_axis)
if rank == 0:
return gather_B, gather_C
return all_to_all_output
func = trace(symbolic=symbolic)(func)
ret = func()
if rank == 0:
assert np.allclose(ret[0], ret[1])
x = np.random.random_sample(shape).astype("float32")
y = np.random.random_sample(shape).astype("float32")
data = (x, y)
worker(data)
|
[
"megengine.tensor",
"megengine.distributed.functional.reduce_scatter_sum",
"megengine.distributed.get_rank",
"megengine.distributed.functional.all_gather",
"megengine.distributed.functional.scatter",
"megengine.distributed.functional.all_to_all",
"megengine.jit.trace",
"megengine.distributed.launcher",
"megengine.distributed.functional.gather"
] |
[((666, 693), 'pytest.mark.require_ngpu', 'pytest.mark.require_ngpu', (['(2)'], {}), '(2)\n', (690, 693), False, 'import pytest\n'), ((695, 783), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""shape"""', '[(2, 3), (8, 10), (99, 77), (2, 2, 2, 2)]'], {'ids': 'str'}), "('shape', [(2, 3), (8, 10), (99, 77), (2, 2, 2, 2)],\n ids=str)\n", (718, 783), False, 'import pytest\n'), ((781, 840), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""symbolic"""', '[False, True]'], {'ids': 'str'}), "('symbolic', [False, True], ids=str)\n", (804, 840), False, 'import pytest\n'), ((842, 890), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""axis"""', '[0, 1]'], {'ids': 'str'}), "('axis', [0, 1], ids=str)\n", (865, 890), False, 'import pytest\n'), ((1538, 1565), 'pytest.mark.require_ngpu', 'pytest.mark.require_ngpu', (['(2)'], {}), '(2)\n', (1562, 1565), False, 'import pytest\n'), ((1567, 1668), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""shape,symbolic"""', '[((2, 4, 6, 8), False), ((2, 4, 6, 8), True)]'], {'ids': 'str'}), "('shape,symbolic', [((2, 4, 6, 8), False), ((2, 4, 6,\n 8), True)], ids=str)\n", (1590, 1668), False, 'import pytest\n'), ((1672, 1726), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""axis"""', '[1, 0, 2, 3]'], {'ids': 'str'}), "('axis', [1, 0, 2, 3], ids=str)\n", (1695, 1726), False, 'import pytest\n'), ((2468, 2495), 'pytest.mark.require_ngpu', 'pytest.mark.require_ngpu', (['(2)'], {}), '(2)\n', (2492, 2495), False, 'import pytest\n'), ((2497, 2598), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""shape,symbolic"""', '[((2, 4, 6, 8), True), ((2, 4, 6, 8), False)]'], {'ids': 'str'}), "('shape,symbolic', [((2, 4, 6, 8), True), ((2, 4, 6,\n 8), False)], ids=str)\n", (2520, 2598), False, 'import pytest\n'), ((2602, 2656), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""axis"""', '[1, 0, 2, 3]'], {'ids': 'str'}), "('axis', [1, 0, 2, 3], ids=str)\n", (2625, 2656), False, 'import pytest\n'), ((3326, 3353), 'pytest.mark.require_ngpu', 'pytest.mark.require_ngpu', (['(2)'], {}), '(2)\n', (3350, 3353), False, 'import pytest\n'), ((3355, 3412), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""shape"""', '[(2, 4, 6, 8)]'], {'ids': 'str'}), "('shape', [(2, 4, 6, 8)], ids=str)\n", (3378, 3412), False, 'import pytest\n'), ((3414, 3473), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""symbolic"""', '[False, True]'], {'ids': 'str'}), "('symbolic', [False, True], ids=str)\n", (3437, 3473), False, 'import pytest\n'), ((3475, 3579), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""split_axis,concat_axis"""', '[(0, 1), (1, 0), (2, 0), (0, 2), (2, 3)]'], {'ids': 'str'}), "('split_axis,concat_axis', [(0, 1), (1, 0), (2, 0),\n (0, 2), (2, 3)], ids=str)\n", (3498, 3579), False, 'import pytest\n'), ((974, 997), 'megengine.distributed.launcher', 'dist.launcher', ([], {'n_gpus': '(2)'}), '(n_gpus=2)\n', (987, 997), True, 'import megengine.distributed as dist\n'), ((1438, 1471), 'numpy.concatenate', 'np.concatenate', (['(x, y)'], {'axis': 'axis'}), '((x, y), axis=axis)\n', (1452, 1471), True, 'import numpy as np\n'), ((1818, 1841), 'megengine.distributed.launcher', 'dist.launcher', ([], {'n_gpus': '(2)'}), '(n_gpus=2)\n', (1831, 1841), True, 'import megengine.distributed as dist\n'), ((2322, 2347), 'numpy.split', 'np.split', (['z', '(2)'], {'axis': 'axis'}), '(z, 2, axis=axis)\n', (2330, 2347), True, 'import numpy as np\n'), ((2356, 2381), 'numpy.concatenate', 'np.concatenate', (['z'], {'axis': 
'(0)'}), '(z, axis=0)\n', (2370, 2381), True, 'import numpy as np\n'), ((2737, 2760), 'megengine.distributed.launcher', 'dist.launcher', ([], {'n_gpus': '(2)'}), '(n_gpus=2)\n', (2750, 2760), True, 'import megengine.distributed as dist\n'), ((3174, 3199), 'numpy.split', 'np.split', (['x', '(2)'], {'axis': 'axis'}), '(x, 2, axis=axis)\n', (3182, 3199), True, 'import numpy as np\n'), ((3209, 3235), 'numpy.concatenate', 'np.concatenate', (['_x'], {'axis': '(0)'}), '(_x, axis=0)\n', (3223, 3235), True, 'import numpy as np\n'), ((3684, 3707), 'megengine.distributed.launcher', 'dist.launcher', ([], {'n_gpus': '(2)'}), '(n_gpus=2)\n', (3697, 3707), True, 'import megengine.distributed as dist\n'), ((1043, 1058), 'megengine.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (1056, 1058), True, 'import megengine.distributed as dist\n'), ((1073, 1091), 'megengine.tensor', 'tensor', (['data[rank]'], {}), '(data[rank])\n', (1079, 1091), False, 'from megengine import tensor\n'), ((1887, 1902), 'megengine.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (1900, 1902), True, 'import megengine.distributed as dist\n'), ((1917, 1935), 'megengine.tensor', 'tensor', (['data[rank]'], {}), '(data[rank])\n', (1923, 1935), False, 'from megengine import tensor\n'), ((2806, 2821), 'megengine.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (2819, 2821), True, 'import megengine.distributed as dist\n'), ((2836, 2854), 'megengine.tensor', 'tensor', (['data[rank]'], {}), '(data[rank])\n', (2842, 2854), False, 'from megengine import tensor\n'), ((3745, 3760), 'megengine.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (3758, 3760), True, 'import megengine.distributed as dist\n'), ((3775, 3793), 'megengine.tensor', 'tensor', (['data[rank]'], {}), '(data[rank])\n', (3781, 3793), False, 'from megengine import tensor\n'), ((1134, 1160), 'megengine.distributed.functional.all_gather', 'all_gather', (['inp'], {'axis': 'axis'}), '(inp, axis=axis)\n', (1144, 1160), False, 'from megengine.distributed.functional import all_gather, all_to_all, gather, reduce_scatter_sum, scatter\n'), ((1203, 1227), 'megengine.jit.trace', 'trace', ([], {'symbolic': 'symbolic'}), '(symbolic=symbolic)\n', (1208, 1227), False, 'from megengine.jit import trace\n'), ((1324, 1354), 'numpy.random.random_sample', 'np.random.random_sample', (['shape'], {}), '(shape)\n', (1347, 1354), True, 'import numpy as np\n'), ((1381, 1411), 'numpy.random.random_sample', 'np.random.random_sample', (['shape'], {}), '(shape)\n', (1404, 1411), True, 'import numpy as np\n'), ((1978, 2012), 'megengine.distributed.functional.reduce_scatter_sum', 'reduce_scatter_sum', (['inp'], {'axis': 'axis'}), '(inp, axis=axis)\n', (1996, 2012), False, 'from megengine.distributed.functional import all_gather, all_to_all, gather, reduce_scatter_sum, scatter\n'), ((2055, 2079), 'megengine.jit.trace', 'trace', ([], {'symbolic': 'symbolic'}), '(symbolic=symbolic)\n', (2060, 2079), False, 'from megengine.jit import trace\n'), ((2176, 2206), 'numpy.random.random_sample', 'np.random.random_sample', (['shape'], {}), '(shape)\n', (2199, 2206), True, 'import numpy as np\n'), ((2233, 2263), 'numpy.random.random_sample', 'np.random.random_sample', (['shape'], {}), '(shape)\n', (2256, 2263), True, 'import numpy as np\n'), ((2897, 2920), 'megengine.distributed.functional.scatter', 'scatter', (['inp'], {'axis': 'axis'}), '(inp, axis=axis)\n', (2904, 2920), False, 'from megengine.distributed.functional import all_gather, all_to_all, gather, reduce_scatter_sum, scatter\n'), 
((2963, 2987), 'megengine.jit.trace', 'trace', ([], {'symbolic': 'symbolic'}), '(symbolic=symbolic)\n', (2968, 2987), False, 'from megengine.jit import trace\n'), ((3084, 3114), 'numpy.random.random_sample', 'np.random.random_sample', (['shape'], {}), '(shape)\n', (3107, 3114), True, 'import numpy as np\n'), ((3847, 3910), 'megengine.distributed.functional.all_to_all', 'all_to_all', (['inp'], {'split_axis': 'split_axis', 'concat_axis': 'concat_axis'}), '(inp, split_axis=split_axis, concat_axis=concat_axis)\n', (3857, 3910), False, 'from megengine.distributed.functional import all_gather, all_to_all, gather, reduce_scatter_sum, scatter\n'), ((3964, 3993), 'megengine.distributed.functional.gather', 'gather', (['inp'], {'axis': 'concat_axis'}), '(inp, axis=concat_axis)\n', (3970, 3993), False, 'from megengine.distributed.functional import all_gather, all_to_all, gather, reduce_scatter_sum, scatter\n'), ((4017, 4059), 'megengine.distributed.functional.gather', 'gather', (['all_to_all_output'], {'axis': 'split_axis'}), '(all_to_all_output, axis=split_axis)\n', (4023, 4059), False, 'from megengine.distributed.functional import all_gather, all_to_all, gather, reduce_scatter_sum, scatter\n'), ((4181, 4205), 'megengine.jit.trace', 'trace', ([], {'symbolic': 'symbolic'}), '(symbolic=symbolic)\n', (4186, 4205), False, 'from megengine.jit import trace\n'), ((4274, 4301), 'numpy.allclose', 'np.allclose', (['ret[0]', 'ret[1]'], {}), '(ret[0], ret[1])\n', (4285, 4301), True, 'import numpy as np\n'), ((4311, 4341), 'numpy.random.random_sample', 'np.random.random_sample', (['shape'], {}), '(shape)\n', (4334, 4341), True, 'import numpy as np\n'), ((4368, 4398), 'numpy.random.random_sample', 'np.random.random_sample', (['shape'], {}), '(shape)\n', (4391, 4398), True, 'import numpy as np\n')]
|
from create_db import Student
from sqlmodel import Session, create_engine
student_1 = Student(id=1, first_name="Misal", last_name="Gupta", email="<EMAIL>")
student_2 = Student(id=2, first_name="Vivek", last_name="Kumar", email="<EMAIL>")
student_3 = Student(id=3, first_name="Himesh", last_name="Mahto", email="<EMAIL>")
sqlite_url = "sqlite:///school.db"
engine = create_engine(sqlite_url, echo=True)
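# echo=True makes the engine log every emitted SQL statement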
session = Session(engine)
session.add(student_1)
session.add(student_2)
session.add(student_3)
session.commit()
session.close()
|
[
"sqlmodel.Session",
"sqlmodel.create_engine"
] |
[((87, 156), 'create_db.Student', 'Student', ([], {'id': '(1)', 'first_name': '"""Misal"""', 'last_name': '"""Gupta"""', 'email': '"""<EMAIL>"""'}), "(id=1, first_name='Misal', last_name='Gupta', email='<EMAIL>')\n", (94, 156), False, 'from create_db import Student\n'), ((169, 238), 'create_db.Student', 'Student', ([], {'id': '(2)', 'first_name': '"""Vivek"""', 'last_name': '"""Kumar"""', 'email': '"""<EMAIL>"""'}), "(id=2, first_name='Vivek', last_name='Kumar', email='<EMAIL>')\n", (176, 238), False, 'from create_db import Student\n'), ((251, 321), 'create_db.Student', 'Student', ([], {'id': '(3)', 'first_name': '"""Himesh"""', 'last_name': '"""Mahto"""', 'email': '"""<EMAIL>"""'}), "(id=3, first_name='Himesh', last_name='Mahto', email='<EMAIL>')\n", (258, 321), False, 'from create_db import Student\n'), ((367, 403), 'sqlmodel.create_engine', 'create_engine', (['sqlite_url'], {'echo': '(True)'}), '(sqlite_url, echo=True)\n', (380, 403), False, 'from sqlmodel import Session, create_engine\n'), ((414, 429), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (421, 429), False, 'from sqlmodel import Session, create_engine\n')]
|
import asyncio
import os
from decimal import Decimal
from typing import Optional
from pydantic import condecimal
from sqlalchemy.ext.asyncio import AsyncSession, create_async_engine
from sqlmodel import Field, SQLModel, select
class Restaurant(SQLModel, table=True):
id: int = Field(default=None, primary_key=True)
name: str = Field(index=True)
address: str
currency: str
class MenuItem(SQLModel, table=True):
id: int = Field(default=None, primary_key=True)
name: str
price: condecimal(decimal_places=2)
restaurant_id: Optional[int] = Field(default=None, foreign_key="restaurant.id")
async def main() -> None:
db_url = os.environ.get("RESTAURANT_DB_URL", "sqlite+aiosqlite:///my_db")
db_engine = create_async_engine(db_url)
async with db_engine.begin() as conn:
await conn.run_sync(SQLModel.metadata.create_all)
async with AsyncSession(db_engine, expire_on_commit=False) as session:
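        # expire_on_commit=False keeps attribute values (e.g. generated ids) accessible after commit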
# Writing
restaurant = Restaurant(
name="Second best Pizza in town", address="Foo street 1", currency="EUR"
)
session.add(restaurant)
await session.commit()
pizza1 = MenuItem(name="Margherita", price=10.50, restaurant_id=restaurant.id)
pizza2 = MenuItem(name="2xPineapple", price=16.80, restaurant_id=restaurant.id)
session.add_all((pizza1, pizza2))
await session.commit()
# Reading
query = (
select(MenuItem)
.join(Restaurant)
.where(Restaurant.name == "Second best Pizza in town")
)
result = await session.execute(query)
menu_items = result.scalars().all()
assert len(menu_items) == 2
assert menu_items[0] == MenuItem(
id=1, name="Margherita", price=Decimal("10.50"), restaurant_id=restaurant.id
)
if __name__ == "__main__":
asyncio.run(main())
|
[
"sqlmodel.select",
"sqlmodel.Field"
] |
[((284, 321), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (289, 321), False, 'from sqlmodel import Field, SQLModel, select\n'), ((338, 355), 'sqlmodel.Field', 'Field', ([], {'index': '(True)'}), '(index=True)\n', (343, 355), False, 'from sqlmodel import Field, SQLModel, select\n'), ((445, 482), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (450, 482), False, 'from sqlmodel import Field, SQLModel, select\n'), ((508, 536), 'pydantic.condecimal', 'condecimal', ([], {'decimal_places': '(2)'}), '(decimal_places=2)\n', (518, 536), False, 'from pydantic import condecimal\n'), ((573, 621), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'foreign_key': '"""restaurant.id"""'}), "(default=None, foreign_key='restaurant.id')\n", (578, 621), False, 'from sqlmodel import Field, SQLModel, select\n'), ((663, 727), 'os.environ.get', 'os.environ.get', (['"""RESTAURANT_DB_URL"""', '"""sqlite+aiosqlite:///my_db"""'], {}), "('RESTAURANT_DB_URL', 'sqlite+aiosqlite:///my_db')\n", (677, 727), False, 'import os\n'), ((744, 771), 'sqlalchemy.ext.asyncio.create_async_engine', 'create_async_engine', (['db_url'], {}), '(db_url)\n', (763, 771), False, 'from sqlalchemy.ext.asyncio import AsyncSession, create_async_engine\n'), ((888, 935), 'sqlalchemy.ext.asyncio.AsyncSession', 'AsyncSession', (['db_engine'], {'expire_on_commit': '(False)'}), '(db_engine, expire_on_commit=False)\n', (900, 935), False, 'from sqlalchemy.ext.asyncio import AsyncSession, create_async_engine\n'), ((1791, 1807), 'decimal.Decimal', 'Decimal', (['"""10.50"""'], {}), "('10.50')\n", (1798, 1807), False, 'from decimal import Decimal\n'), ((1455, 1471), 'sqlmodel.select', 'select', (['MenuItem'], {}), '(MenuItem)\n', (1461, 1471), False, 'from sqlmodel import Field, SQLModel, select\n')]
|
# Copyright (c) Megvii, Inc. and its affiliates.
"""do the evaluation work with single gpu
"""
import argparse
import os
import megengine as mge
import megengine.data as data
import megengine.data.transform as T
import megengine.functional as F
import numpy as np
from tqdm.auto import tqdm
from recognition.datasets import get_eval_dataset
from recognition.models import FaceRecognitionModel
from recognition.tools.utils import load_config_from_path
logger = mge.get_logger(__name__)
def get_inference_func(configs):
"""load checkpoint and construct inference function
Args:
configs (dict): configuration, required fields include:
base_dir: base directory of experiment outputs
evaluate_epoch: model of evaluate_epoch to evaluate
Raises:
FileNotFoundError: model of given epoch is not found
Returns:
inference_func (function): inference function mapping image to embedding
"""
model = FaceRecognitionModel(configs)
evaluate_epoch = configs["evaluate_epoch"]
checkpoint_path = os.path.join(configs["base_dir"], f"epoch-{evaluate_epoch}-checkpoint.pkl")
if os.path.exists(checkpoint_path):
checkpoint_data = mge.load(checkpoint_path)
model.load_state_dict(checkpoint_data["state_dict"], strict=False)
else:
raise FileNotFoundError(f"{checkpoint_path} not found!!!")
def inference_func(images):
model.eval()
# classic test-time mirror augment
embedding_origin = model.forward_embedding_only(images)
embedding_mirror = model.forward_embedding_only(images[:, :, :, ::-1])
embedding = embedding_origin + embedding_mirror
embedding = F.normalize(embedding, axis=1)
return embedding
return inference_func
def extract_feature_and_clean_noise(configs, inference_func):
"""extract feature and clean noise. the noise cleaning algorithm is proposed in
`"ArcFace: Additive Angular Margin Loss for Deep Face Recognition" <https://arxiv.org/pdf/1801.07698.pdf>`_
please refer to https://github.com/deepinsight/insightface/blob/master/Evaluation/Megaface/remove_noises.py for
    more details. this implementation does basically the same thing as the above, but with much higher speed
Args:
configs (dict): configuration, required fields include:
batch_size: inference batch size
feature_dim: model output feature dimension
base_dir: base directory of experiment outputs
dataset_dir: directory of dataset root
inference_func (function): constructed inference function
Returns:
facescrub_feature (np.array): noise-cleaned feature of facescrub (shape: n * (feature_dim + 1))
facescrub_label (np.array): label of facescrub (shape: n)
megaface_feature (np.array): noise-cleaned feature of megaface (shape: m * (feature_dim + 1))
"""
def prepare_dataset(name):
"""prepare dataset
Args:
name (str): name of the dataset, should be one of {facescrub, megaface}
Returns:
dataset (data.Dataset): required dataset
queue (data.DataLoader): corresponding dataloader
"""
preprocess = T.Compose([T.Normalize(mean=127.5, std=128), T.ToMode("CHW")])
dataset = get_eval_dataset(name, dataset_dir=configs["dataset_dir"])
sampler = data.SequentialSampler(dataset, batch_size=configs["batch_size"])
queue = data.DataLoader(dataset, sampler=sampler, transform=preprocess)
return dataset, queue
def extract_vanilla_feature(n, data_queue):
"""extract features without any postprocessing
Args:
n (int): size of dataset
data_queue (data.DataLoader): dataloader to extract feature
Returns:
feature_store (np.array): extracted feature (shape: n * feature_dim)
label (np.array): label of this instance, -1 if unknown (shape: n)
is_noise (np.array): whether this instance is a noise (shape: n)
"""
feature_store = np.zeros((n, configs["feature_dim"]), dtype="float32")
label_store = np.zeros(n, dtype="int32")
is_noise_store = np.zeros(n, dtype="bool")
for images, indice, labels, is_noise in tqdm(data_queue):
images = mge.tensor(images, dtype="float32")
embedding = inference_func(images)
embedding = embedding.numpy()
feature_store[indice] = embedding
label_store[indice] = labels
is_noise_store[indice] = is_noise
return feature_store, label_store, is_noise_store
# prepare facescrub dataset
logger.info("preparing facescrub dataset...")
facescrub_dataset, facescrub_queue = prepare_dataset("facescrub")
# extract facescrub feature
logger.info("extracting facescrub...")
facescrub_feature_store, facescrub_label, facescrub_is_noise = extract_vanilla_feature(
n=len(facescrub_dataset), data_queue=facescrub_queue
)
# prepare megaface dataset
logger.info("preparing megaface dataset...")
megaface_dataset, megaface_queue = prepare_dataset("megaface")
# extract feature for megaface
logger.info("extracting megaface...")
megaface_feature_store, _, megaface_is_noise = extract_vanilla_feature(
n=len(megaface_dataset), data_queue=megaface_queue
)
# parse facescrub noise, replace noisy feature with class center of same person
facescrub_feature_center = np.zeros((facescrub_dataset.num_class, configs["feature_dim"]), dtype="float32")
for i in range(facescrub_dataset.num_class):
mask = (facescrub_label == i) & (~facescrub_is_noise)
center = facescrub_feature_store[mask].sum(axis=0)
center = center / np.linalg.norm(center)
facescrub_feature_center[i] = center
for index in np.where(facescrub_is_noise)[0]:
center = facescrub_feature_center[facescrub_label[index]]
disturb = np.random.uniform(-1e-5, 1e-5, (configs["feature_dim"],))
        feat = center + disturb  # add a minor disturbance to avoid identical features
feat = feat / np.linalg.norm(feat)
facescrub_feature_store[index] = feat
# extend feature by 1 dimension
    # the extended feature is infinitely large (100) if and only if the instance is megaface noise, 0 otherwise
    # so, the distance between a probe and a noisy distractor becomes infinitely large, while other distances remain unchanged
facescrub_feature_extend = np.zeros((len(facescrub_dataset), 1), dtype="float32")
facescrub_feature = np.concatenate([facescrub_feature_store, facescrub_feature_extend], axis=1)
megaface_feature_extend = megaface_is_noise.astype("float32").reshape(-1, 1) * 100
megaface_feature = np.concatenate([megaface_feature_store, megaface_feature_extend], axis=1)
# write to file system
facescrub_feature_path = os.path.join(configs["base_dir"], "facescrub.npy")
np.save(facescrub_feature_path, facescrub_feature)
facescrub_label_path = os.path.join(configs["base_dir"], "facescrub_label.npy")
np.save(facescrub_label_path, facescrub_label)
megaface_feature_path = os.path.join(configs["base_dir"], "megaface.npy")
np.save(megaface_feature_path, megaface_feature)
return facescrub_feature, facescrub_label, megaface_feature
def calculate_score(configs, facescrub, labels, megaface):
"""calculate megaface identification top1 score. this evaluation implement strictly follows the description of
`"The MegaFace Benchmark: 1 Million Faces for Recognition at Scale" <https://arxiv.org/pdf/1512.00596.pdf>`_
    this implementation outputs exactly the same results as the official dev-sdk, but with much higher speed
Args:
configs (dict): configuration
facescrub (np.array): feature of facescrub
labels (np.array): label of facescrub
megaface (np.array): feature of megaface
Returns:
megaface_score (float): top1 score of megaface
"""
facescrub = mge.tensor(facescrub, dtype="float32")
megaface = mge.tensor(megaface, dtype="float32")
# note: (x - y) ** 2 = x ** 2 + y ** 2 - 2 * x * y
# facescrub_score[i][j] = l2-dist(facescrub[i], facescrub[j])
facescrub_score = (
(facescrub ** 2).sum(axis=-1, keepdims=True)
+ (facescrub ** 2).sum(axis=-1, keepdims=True).transpose(1, 0)
- 2 * F.matmul(facescrub, facescrub.transpose(1, 0))
)
facescrub_score = facescrub_score.numpy()
def get_score_min_megaface(x):
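        # minimum squared L2 distance from probe feature x to all megaface distractors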
distr_score = (x ** 2).sum(axis=-1) + (megaface ** 2).sum(axis=-1) - 2 * (x * megaface).sum(axis=-1)
return distr_score.min()
up, down = 0, 0
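    # up: genuine probe pairs closer than every distractor; down: all genuine pairs (top-1 = up / down)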
for probe_i in tqdm(range(len(facescrub))):
distr_score_min = get_score_min_megaface(facescrub[probe_i]).numpy()
mask = (labels == labels[probe_i]) & (np.arange(len(facescrub)) != probe_i)
for probe_j in np.where(mask)[0]:
probe_score = facescrub_score[probe_i][probe_j]
up += probe_score < distr_score_min
down += 1
megaface_score = up / down * 100
return megaface_score
def main(args):
configs = load_config_from_path(args.config_file)
configs["evaluate_epoch"] = args.epoch if args.epoch is not None else configs["num_epoch"]
# write log to worklog.txt
os.makedirs(configs["base_dir"], exist_ok=True)
worklog_path = os.path.join(configs["base_dir"], "worklog.txt")
mge.set_log_file(worklog_path)
inference_func = get_inference_func(configs)
facescrub_feature, facescrub_label, megaface_feature = extract_feature_and_clean_noise(configs, inference_func)
megaface_score = calculate_score(configs, facescrub_feature, facescrub_label, megaface_feature)
logger.info("Epoch: %d", configs["evaluate_epoch"])
logger.info("MegaFace Top1: %.2f", megaface_score)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-f", "--config-file", help="path to experiment configuration", required=True)
parser.add_argument(
"-e", "--epoch", help="model of num epoch to evaluate (default: num_epoch)", default=None, type=int
)
args = parser.parse_args()
main(args)
|
[
"megengine.functional.normalize",
"megengine.data.DataLoader",
"megengine.load",
"megengine.tensor",
"megengine.data.SequentialSampler",
"megengine.get_logger",
"megengine.set_log_file",
"megengine.data.transform.Normalize",
"megengine.data.transform.ToMode"
] |
[((464, 488), 'megengine.get_logger', 'mge.get_logger', (['__name__'], {}), '(__name__)\n', (478, 488), True, 'import megengine as mge\n'), ((967, 996), 'recognition.models.FaceRecognitionModel', 'FaceRecognitionModel', (['configs'], {}), '(configs)\n', (987, 996), False, 'from recognition.models import FaceRecognitionModel\n'), ((1066, 1141), 'os.path.join', 'os.path.join', (["configs['base_dir']", 'f"""epoch-{evaluate_epoch}-checkpoint.pkl"""'], {}), "(configs['base_dir'], f'epoch-{evaluate_epoch}-checkpoint.pkl')\n", (1078, 1141), False, 'import os\n'), ((1149, 1180), 'os.path.exists', 'os.path.exists', (['checkpoint_path'], {}), '(checkpoint_path)\n', (1163, 1180), False, 'import os\n'), ((5515, 5600), 'numpy.zeros', 'np.zeros', (["(facescrub_dataset.num_class, configs['feature_dim'])"], {'dtype': '"""float32"""'}), "((facescrub_dataset.num_class, configs['feature_dim']), dtype='float32'\n )\n", (5523, 5600), True, 'import numpy as np\n'), ((6581, 6656), 'numpy.concatenate', 'np.concatenate', (['[facescrub_feature_store, facescrub_feature_extend]'], {'axis': '(1)'}), '([facescrub_feature_store, facescrub_feature_extend], axis=1)\n', (6595, 6656), True, 'import numpy as np\n'), ((6767, 6840), 'numpy.concatenate', 'np.concatenate', (['[megaface_feature_store, megaface_feature_extend]'], {'axis': '(1)'}), '([megaface_feature_store, megaface_feature_extend], axis=1)\n', (6781, 6840), True, 'import numpy as np\n'), ((6898, 6948), 'os.path.join', 'os.path.join', (["configs['base_dir']", '"""facescrub.npy"""'], {}), "(configs['base_dir'], 'facescrub.npy')\n", (6910, 6948), False, 'import os\n'), ((6953, 7003), 'numpy.save', 'np.save', (['facescrub_feature_path', 'facescrub_feature'], {}), '(facescrub_feature_path, facescrub_feature)\n', (6960, 7003), True, 'import numpy as np\n'), ((7031, 7087), 'os.path.join', 'os.path.join', (["configs['base_dir']", '"""facescrub_label.npy"""'], {}), "(configs['base_dir'], 'facescrub_label.npy')\n", (7043, 7087), False, 'import os\n'), ((7092, 7138), 'numpy.save', 'np.save', (['facescrub_label_path', 'facescrub_label'], {}), '(facescrub_label_path, facescrub_label)\n', (7099, 7138), True, 'import numpy as np\n'), ((7167, 7216), 'os.path.join', 'os.path.join', (["configs['base_dir']", '"""megaface.npy"""'], {}), "(configs['base_dir'], 'megaface.npy')\n", (7179, 7216), False, 'import os\n'), ((7221, 7269), 'numpy.save', 'np.save', (['megaface_feature_path', 'megaface_feature'], {}), '(megaface_feature_path, megaface_feature)\n', (7228, 7269), True, 'import numpy as np\n'), ((8020, 8058), 'megengine.tensor', 'mge.tensor', (['facescrub'], {'dtype': '"""float32"""'}), "(facescrub, dtype='float32')\n", (8030, 8058), True, 'import megengine as mge\n'), ((8074, 8111), 'megengine.tensor', 'mge.tensor', (['megaface'], {'dtype': '"""float32"""'}), "(megaface, dtype='float32')\n", (8084, 8111), True, 'import megengine as mge\n'), ((9171, 9210), 'recognition.tools.utils.load_config_from_path', 'load_config_from_path', (['args.config_file'], {}), '(args.config_file)\n', (9192, 9210), False, 'from recognition.tools.utils import load_config_from_path\n'), ((9343, 9390), 'os.makedirs', 'os.makedirs', (["configs['base_dir']"], {'exist_ok': '(True)'}), "(configs['base_dir'], exist_ok=True)\n", (9354, 9390), False, 'import os\n'), ((9410, 9458), 'os.path.join', 'os.path.join', (["configs['base_dir']", '"""worklog.txt"""'], {}), "(configs['base_dir'], 'worklog.txt')\n", (9422, 9458), False, 'import os\n'), ((9463, 9493), 'megengine.set_log_file', 'mge.set_log_file', 
(['worklog_path'], {}), '(worklog_path)\n', (9479, 9493), True, 'import megengine as mge\n'), ((9914, 9939), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (9937, 9939), False, 'import argparse\n'), ((1208, 1233), 'megengine.load', 'mge.load', (['checkpoint_path'], {}), '(checkpoint_path)\n', (1216, 1233), True, 'import megengine as mge\n'), ((1702, 1732), 'megengine.functional.normalize', 'F.normalize', (['embedding'], {'axis': '(1)'}), '(embedding, axis=1)\n', (1713, 1732), True, 'import megengine.functional as F\n'), ((3313, 3371), 'recognition.datasets.get_eval_dataset', 'get_eval_dataset', (['name'], {'dataset_dir': "configs['dataset_dir']"}), "(name, dataset_dir=configs['dataset_dir'])\n", (3329, 3371), False, 'from recognition.datasets import get_eval_dataset\n'), ((3390, 3455), 'megengine.data.SequentialSampler', 'data.SequentialSampler', (['dataset'], {'batch_size': "configs['batch_size']"}), "(dataset, batch_size=configs['batch_size'])\n", (3412, 3455), True, 'import megengine.data as data\n'), ((3472, 3535), 'megengine.data.DataLoader', 'data.DataLoader', (['dataset'], {'sampler': 'sampler', 'transform': 'preprocess'}), '(dataset, sampler=sampler, transform=preprocess)\n', (3487, 3535), True, 'import megengine.data as data\n'), ((4085, 4139), 'numpy.zeros', 'np.zeros', (["(n, configs['feature_dim'])"], {'dtype': '"""float32"""'}), "((n, configs['feature_dim']), dtype='float32')\n", (4093, 4139), True, 'import numpy as np\n'), ((4162, 4188), 'numpy.zeros', 'np.zeros', (['n'], {'dtype': '"""int32"""'}), "(n, dtype='int32')\n", (4170, 4188), True, 'import numpy as np\n'), ((4214, 4239), 'numpy.zeros', 'np.zeros', (['n'], {'dtype': '"""bool"""'}), "(n, dtype='bool')\n", (4222, 4239), True, 'import numpy as np\n'), ((4288, 4304), 'tqdm.auto.tqdm', 'tqdm', (['data_queue'], {}), '(data_queue)\n', (4292, 4304), False, 'from tqdm.auto import tqdm\n'), ((5877, 5905), 'numpy.where', 'np.where', (['facescrub_is_noise'], {}), '(facescrub_is_noise)\n', (5885, 5905), True, 'import numpy as np\n'), ((5994, 6053), 'numpy.random.uniform', 'np.random.uniform', (['(-1e-05)', '(1e-05)', "(configs['feature_dim'],)"], {}), "(-1e-05, 1e-05, (configs['feature_dim'],))\n", (6011, 6053), True, 'import numpy as np\n'), ((4327, 4362), 'megengine.tensor', 'mge.tensor', (['images'], {'dtype': '"""float32"""'}), "(images, dtype='float32')\n", (4337, 4362), True, 'import megengine as mge\n'), ((5792, 5814), 'numpy.linalg.norm', 'np.linalg.norm', (['center'], {}), '(center)\n', (5806, 5814), True, 'import numpy as np\n'), ((6153, 6173), 'numpy.linalg.norm', 'np.linalg.norm', (['feat'], {}), '(feat)\n', (6167, 6173), True, 'import numpy as np\n'), ((8926, 8940), 'numpy.where', 'np.where', (['mask'], {}), '(mask)\n', (8934, 8940), True, 'import numpy as np\n'), ((3243, 3275), 'megengine.data.transform.Normalize', 'T.Normalize', ([], {'mean': '(127.5)', 'std': '(128)'}), '(mean=127.5, std=128)\n', (3254, 3275), True, 'import megengine.data.transform as T\n'), ((3277, 3292), 'megengine.data.transform.ToMode', 'T.ToMode', (['"""CHW"""'], {}), "('CHW')\n", (3285, 3292), True, 'import megengine.data.transform as T\n')]
|
from typing import Optional, List
import sqlalchemy
from sqlmodel import SQLModel, Field, Relationship
from datetime import date, datetime
# #############################################################################
# Links
class ListingFacilityLink(SQLModel, table=True):
listing_id: int = Field(
foreign_key="listings.id", primary_key=True
)
facility_id: int = Field(
foreign_key="facilities.id", primary_key=True
)
# #############################################################################
class SongBase(SQLModel):
id: Optional[int]
name: str
artist: str
year: Optional[int] = None
class Song(SongBase, table=True):
id: int = Field(primary_key=True)
    # pass the callable datetime.now (via default_factory / onupdate) so timestamps
    # are generated per row, not fixed once at class-definition time
    created_at: datetime = Field(default_factory=datetime.now)
    updated_at: datetime = Field(default_factory=datetime.now,
                                 sa_column_kwargs={'onupdate': datetime.now})
class SongRead(SongBase):
id: int
created_at: datetime
updated_at: datetime
class SongUpdate(SQLModel):
name: Optional[str] = None
artist: Optional[str] = None
year: Optional[int] = None
class SongCreate(SongBase):
pass
class Increment(SQLModel, table=True):
id: int = Field(primary_key=True)
# #############################################################################
class ListingBase(SQLModel):
id: int = Field(primary_key=True)
is_active: bool
title: Optional[str] = None
description: Optional[str] = None
url: str
source: str
source_id: str
source_code: Optional[str] = None
address: str
short_postal_code: Optional[str] = None
property_type: Optional[str] = None
postal_code: Optional[str] = None
ber_code: Optional[str] = None
views: Optional[int] = None
bedrooms: Optional[int] = None
bathrooms: Optional[int] = None
price: Optional[int] = None
rating_auto: Optional[int] = None
rating_user: Optional[int] = None
telegram_sent_at: Optional[datetime] = None
images_count: Optional[int] = 0
latitude: Optional[float] = None
longitude: Optional[float] = None
notes: Optional[str] = None
publish_date: Optional[datetime] = None
last_updated: Optional[datetime] = None
class Listing(ListingBase, table=True):
__tablename__ = 'listings'
id: int = Field(primary_key=True)
    created_at: datetime = Field(default_factory=datetime.now)
    updated_at: datetime = Field(default_factory=datetime.now,
                                 sa_column_kwargs={'onupdate': datetime.now})
images: List["Image"] = Relationship(back_populates="listing",
# sa_relationship_kwargs={'lazy': 'joined'}
)
facilities: List["Facility"] = Relationship(link_model=ListingFacilityLink)
places_nearby: List["PlaceNearby"] = Relationship(
back_populates="listing",)
routes: List["Route"] = Relationship(back_populates="listing",)
class ListingRead(ListingBase):
id: str
created_at: datetime
updated_at: datetime
class ListingCreate(ListingBase):
pass
class ListingUpdate(ListingBase):
id: Optional[str]
is_active: Optional[bool]
url: Optional[str]
source: Optional[str]
source_id: Optional[str]
address: Optional[str]
# #############################################################################
class FacilityBase(SQLModel):
id: Optional[int]
name: str
category: Optional[str] = None
notes: Optional[str] = None
class Facility(FacilityBase, table=True):
__tablename__ = 'facilities'
id: int = Field(primary_key=True)
    created_at: datetime = Field(default_factory=datetime.now)
    updated_at: datetime = Field(default_factory=datetime.now,
                                 sa_column_kwargs={'onupdate': datetime.now})
class FacilityRead(FacilityBase):
id: int
created_at: datetime
updated_at: datetime
class FacilityCreate(FacilityBase):
pass
# #############################################################################
class ImageBase(SQLModel):
id: Optional[int]
url: str
url_600: Optional[str]
size_x: Optional[float]
size_y: Optional[float]
listing_id: Optional[int] = Field(default=None, foreign_key="listings.id")
class Image(ImageBase, table=True):
__tablename__ = 'images'
id: int = Field(primary_key=True)
    created_at: datetime = Field(default_factory=datetime.now)
    updated_at: datetime = Field(default_factory=datetime.now,
                                 sa_column_kwargs={'onupdate': datetime.now})
listing: Optional[Listing] = Relationship(back_populates="images",
# sa_relationship_kwargs={'lazy': 'selectin'}
)
class ImageRead(ImageBase):
id: int
created_at: datetime
updated_at: datetime
class ImageCreate(ImageBase):
pass
# #############################################################################
class PlaceNearbyBase(SQLModel):
id: Optional[int]
latitude: Optional[float] = None
longitude: Optional[float] = None
query: Optional[str] = None
name: str
address: str
distance: int
website: Optional[str] = None
website_domain: Optional[str] = None
chain_name: Optional[str] = None
listing_id: Optional[int] = Field(default=None, foreign_key="listings.id")
class PlaceNearby(PlaceNearbyBase, table=True):
__tablename__ = 'places_nearby'
id: Optional[int] = Field(primary_key=True)
    created_at: datetime = Field(default_factory=datetime.now)
    updated_at: datetime = Field(default_factory=datetime.now,
                                 sa_column_kwargs={'onupdate': datetime.now})
    listing: Optional[Listing] = Relationship(back_populates="places_nearby")
class PlaceNearbyRead(PlaceNearbyBase):
id: int
created_at: datetime
updated_at: datetime
class PlaceNearbyCreate(PlaceNearbyBase):
pass
# #############################################################################
class InterestPointBase(SQLModel):
id: Optional[int]
name: str
is_active: bool
latitude: Optional[float] = None
longitude: Optional[float] = None
class InterestPoint(InterestPointBase, table=True):
__tablename__ = 'interest_points'
id: Optional[int] = Field(primary_key=True)
    created_at: datetime = Field(default_factory=datetime.now)
    updated_at: datetime = Field(default_factory=datetime.now,
                                 sa_column_kwargs={'onupdate': datetime.now})
class InterestPointRead(InterestPointBase):
id: int
created_at: datetime
updated_at: datetime
class InterestPointCreate(InterestPointBase):
pass
# #############################################################################
class RouteBase(SQLModel):
id: Optional[int]
waking_distance: Optional[int] = 0
total_distance: Optional[int] = 0
total_time: Optional[int] = 0
public_transport_count: Optional[int] = 0
listing_id: Optional[int] = Field(default=None, foreign_key="listings.id")
interest_point_id: Optional[int] = Field(
default=None, foreign_key="interest_points.id")
class Route(RouteBase, table=True):
__tablename__ = 'routes'
id: int = Field(primary_key=True)
    created_at: datetime = Field(default_factory=datetime.now)
    updated_at: datetime = Field(
        default_factory=datetime.now,
        sa_column_kwargs={'onupdate': datetime.now})
listing: Optional[Listing] = Relationship(back_populates="routes",)
interest_point: Optional[InterestPoint] = Relationship()
class RouteRead(RouteBase):
id: int
created_at: datetime
updated_at: datetime
class RouteCreate(RouteBase):
id: Optional[int] = None
# #############################################################################
# #############################################################################
class ImageReadWithListings(ImageRead):
listing: Optional[Listing] = None
class ListingReadWithRelations(ListingRead):
images: List["ImageRead"] = []
facilities: List["Facility"] = []
places_nearby: List["PlaceNearby"] = []
routes: List["Route"] = []
class ListingCreateWithRelations(ListingCreate):
images: List["ImageCreate"] = []
facilities: List["Facility"] = []
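# #############################################################################
# Usage sketch (added illustration, not part of the original models).
# A minimal, hedged example of wiring these tables up with SQLModel: create
# the schema on a throwaway in-memory SQLite engine and persist one Facility.
# The engine URL and the field values below are assumptions for illustration.
from sqlmodel import Session, create_engine

if __name__ == "__main__":
    engine = create_engine("sqlite://")        # in-memory database
    SQLModel.metadata.create_all(engine)      # CREATE TABLE for table=True models
    with Session(engine) as session:
        session.add(Facility(id=1, name="Pool", category="leisure"))
        session.commit()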
|
[
"sqlmodel.Relationship",
"sqlmodel.Field"
] |
[((300, 350), 'sqlmodel.Field', 'Field', ([], {'foreign_key': '"""listings.id"""', 'primary_key': '(True)'}), "(foreign_key='listings.id', primary_key=True)\n", (305, 350), False, 'from sqlmodel import SQLModel, Field, Relationship\n'), ((388, 440), 'sqlmodel.Field', 'Field', ([], {'foreign_key': '"""facilities.id"""', 'primary_key': '(True)'}), "(foreign_key='facilities.id', primary_key=True)\n", (393, 440), False, 'from sqlmodel import SQLModel, Field, Relationship\n'), ((698, 721), 'sqlmodel.Field', 'Field', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (703, 721), False, 'from sqlmodel import SQLModel, Field, Relationship\n'), ((1225, 1248), 'sqlmodel.Field', 'Field', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (1230, 1248), False, 'from sqlmodel import SQLModel, Field, Relationship\n'), ((1374, 1397), 'sqlmodel.Field', 'Field', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (1379, 1397), False, 'from sqlmodel import SQLModel, Field, Relationship\n'), ((2327, 2350), 'sqlmodel.Field', 'Field', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (2332, 2350), False, 'from sqlmodel import SQLModel, Field, Relationship\n'), ((2573, 2611), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""listing"""'}), "(back_populates='listing')\n", (2585, 2611), False, 'from sqlmodel import SQLModel, Field, Relationship\n'), ((2705, 2749), 'sqlmodel.Relationship', 'Relationship', ([], {'link_model': 'ListingFacilityLink'}), '(link_model=ListingFacilityLink)\n', (2717, 2749), False, 'from sqlmodel import SQLModel, Field, Relationship\n'), ((2791, 2829), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""listing"""'}), "(back_populates='listing')\n", (2803, 2829), False, 'from sqlmodel import SQLModel, Field, Relationship\n'), ((2868, 2906), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""listing"""'}), "(back_populates='listing')\n", (2880, 2906), False, 'from sqlmodel import SQLModel, Field, Relationship\n'), ((3547, 3570), 'sqlmodel.Field', 'Field', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (3552, 3570), False, 'from sqlmodel import SQLModel, Field, Relationship\n'), ((4171, 4217), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'foreign_key': '"""listings.id"""'}), "(default=None, foreign_key='listings.id')\n", (4176, 4217), False, 'from sqlmodel import SQLModel, Field, Relationship\n'), ((4299, 4322), 'sqlmodel.Field', 'Field', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (4304, 4322), False, 'from sqlmodel import SQLModel, Field, Relationship\n'), ((4550, 4587), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""images"""'}), "(back_populates='images')\n", (4562, 4587), False, 'from sqlmodel import SQLModel, Field, Relationship\n'), ((5301, 5347), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'foreign_key': '"""listings.id"""'}), "(default=None, foreign_key='listings.id')\n", (5306, 5347), False, 'from sqlmodel import SQLModel, Field, Relationship\n'), ((5458, 5481), 'sqlmodel.Field', 'Field', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (5463, 5481), False, 'from sqlmodel import SQLModel, Field, Relationship\n'), ((5709, 5753), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""places_nearby"""'}), "(back_populates='places_nearby')\n", (5721, 5753), False, 'from sqlmodel import SQLModel, Field, Relationship\n'), ((6277, 6300), 'sqlmodel.Field', 'Field', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (6282, 
6300), False, 'from sqlmodel import SQLModel, Field, Relationship\n'), ((6986, 7032), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'foreign_key': '"""listings.id"""'}), "(default=None, foreign_key='listings.id')\n", (6991, 7032), False, 'from sqlmodel import SQLModel, Field, Relationship\n'), ((7072, 7125), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'foreign_key': '"""interest_points.id"""'}), "(default=None, foreign_key='interest_points.id')\n", (7077, 7125), False, 'from sqlmodel import SQLModel, Field, Relationship\n'), ((7216, 7239), 'sqlmodel.Field', 'Field', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (7221, 7239), False, 'from sqlmodel import SQLModel, Field, Relationship\n'), ((7452, 7489), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""routes"""'}), "(back_populates='routes')\n", (7464, 7489), False, 'from sqlmodel import SQLModel, Field, Relationship\n'), ((7537, 7551), 'sqlmodel.Relationship', 'Relationship', ([], {}), '()\n', (7549, 7551), False, 'from sqlmodel import SQLModel, Field, Relationship\n'), ((763, 777), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (775, 777), False, 'from datetime import date, datetime\n'), ((820, 834), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (832, 834), False, 'from datetime import date, datetime\n'), ((2392, 2406), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2404, 2406), False, 'from datetime import date, datetime\n'), ((2449, 2463), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2461, 2463), False, 'from datetime import date, datetime\n'), ((3612, 3626), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3624, 3626), False, 'from datetime import date, datetime\n'), ((3669, 3683), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3681, 3683), False, 'from datetime import date, datetime\n'), ((4364, 4378), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4376, 4378), False, 'from datetime import date, datetime\n'), ((4421, 4435), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4433, 4435), False, 'from datetime import date, datetime\n'), ((5523, 5537), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5535, 5537), False, 'from datetime import date, datetime\n'), ((5580, 5594), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5592, 5594), False, 'from datetime import date, datetime\n'), ((6342, 6356), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (6354, 6356), False, 'from datetime import date, datetime\n'), ((6399, 6413), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (6411, 6413), False, 'from datetime import date, datetime\n'), ((7281, 7295), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (7293, 7295), False, 'from datetime import date, datetime\n'), ((7347, 7361), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (7359, 7361), False, 'from datetime import date, datetime\n'), ((899, 913), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (911, 913), False, 'from datetime import date, datetime\n'), ((2528, 2542), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2540, 2542), False, 'from datetime import date, datetime\n'), ((3748, 3762), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3760, 3762), False, 'from datetime import date, datetime\n'), ((4500, 4514), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4512, 4514), False, 'from datetime import date, datetime\n'), ((5659, 5673), 
'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5671, 5673), False, 'from datetime import date, datetime\n'), ((6478, 6492), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (6490, 6492), False, 'from datetime import date, datetime\n'), ((7401, 7415), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (7413, 7415), False, 'from datetime import date, datetime\n')]
|
import typing as t
from sqlmodel import SQLModel, Field, Relationship
from datetime import datetime
from .users import DB_User
class DB_AccessToken(SQLModel, table=True):
__tablename__ = 'access_tokens'
id: t.Optional[int] = Field(default=None, primary_key=True)
"""The ID of the access token. This is handled by the database."""
token: str = Field(max_length=40, sa_column_kwargs={'unique': True})
"""The unique access token."""
user_id: t.Optional[int] = Field(default=None, foreign_key="users.id")
user: DB_User = Relationship(back_populates="access_tokens")
"""To what user does this token belong to."""
last_activity_at: datetime
"""When was the access token last active."""
created_at: datetime
"""When was this token created at?"""
type: str = Field(max_length=100)
"""The type of the access token (example: `'session_remember'`)"""
title: t.Optional[str] = Field(max_length=150)
"""The title of the access token."""
last_ip_address: t.Optional[str] = Field(max_length=45)
"""The last IP address associated with this access token."""
last_user_agent: t.Optional[str] = Field(max_length=255)
"""The last browser's user agent that used this token."""
class DB_APIKey(SQLModel, table=True):
__tablename__ = 'api_keys'
id: t.Optional[int] = Field(default=None, primary_key=True)
"""The ID of the API key. This is handled by the database."""
key: str = Field(max_length=100)
"""The unique API key."""
allowedips: t.Optional[str] = Field(max_length=255)
"""The IP addresses that are allowed to use this API key."""
scopes: t.Optional[str] = Field(max_length=255)
"""The scopes that this API key has access to."""
user_id: t.Optional[int] = Field(default=None, foreign_key="users.id")
user: DB_User = Relationship(back_populates="api_keys")
"""As what user to perform actions when using this API key."""
created_at: datetime = Field(default=datetime.utcnow())
"""When was this API key created at?"""
last_activity_at: t.Optional[datetime]
"""When was the API key last active?"""
|
[
"sqlmodel.Relationship",
"sqlmodel.Field"
] |
[((238, 275), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (243, 275), False, 'from sqlmodel import SQLModel, Field, Relationship\n'), ((365, 420), 'sqlmodel.Field', 'Field', ([], {'max_length': '(40)', 'sa_column_kwargs': "{'unique': True}"}), "(max_length=40, sa_column_kwargs={'unique': True})\n", (370, 420), False, 'from sqlmodel import SQLModel, Field, Relationship\n'), ((487, 530), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'foreign_key': '"""users.id"""'}), "(default=None, foreign_key='users.id')\n", (492, 530), False, 'from sqlmodel import SQLModel, Field, Relationship\n'), ((551, 595), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""access_tokens"""'}), "(back_populates='access_tokens')\n", (563, 595), False, 'from sqlmodel import SQLModel, Field, Relationship\n'), ((811, 832), 'sqlmodel.Field', 'Field', ([], {'max_length': '(100)'}), '(max_length=100)\n', (816, 832), False, 'from sqlmodel import SQLModel, Field, Relationship\n'), ((934, 955), 'sqlmodel.Field', 'Field', ([], {'max_length': '(150)'}), '(max_length=150)\n', (939, 955), False, 'from sqlmodel import SQLModel, Field, Relationship\n'), ((1036, 1056), 'sqlmodel.Field', 'Field', ([], {'max_length': '(45)'}), '(max_length=45)\n', (1041, 1056), False, 'from sqlmodel import SQLModel, Field, Relationship\n'), ((1161, 1182), 'sqlmodel.Field', 'Field', ([], {'max_length': '(255)'}), '(max_length=255)\n', (1166, 1182), False, 'from sqlmodel import SQLModel, Field, Relationship\n'), ((1344, 1381), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (1349, 1381), False, 'from sqlmodel import SQLModel, Field, Relationship\n'), ((1464, 1485), 'sqlmodel.Field', 'Field', ([], {'max_length': '(100)'}), '(max_length=100)\n', (1469, 1485), False, 'from sqlmodel import SQLModel, Field, Relationship\n'), ((1551, 1572), 'sqlmodel.Field', 'Field', ([], {'max_length': '(255)'}), '(max_length=255)\n', (1556, 1572), False, 'from sqlmodel import SQLModel, Field, Relationship\n'), ((1668, 1689), 'sqlmodel.Field', 'Field', ([], {'max_length': '(255)'}), '(max_length=255)\n', (1673, 1689), False, 'from sqlmodel import SQLModel, Field, Relationship\n'), ((1775, 1818), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'foreign_key': '"""users.id"""'}), "(default=None, foreign_key='users.id')\n", (1780, 1818), False, 'from sqlmodel import SQLModel, Field, Relationship\n'), ((1839, 1878), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""api_keys"""'}), "(back_populates='api_keys')\n", (1851, 1878), False, 'from sqlmodel import SQLModel, Field, Relationship\n'), ((1988, 2005), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (2003, 2005), False, 'from datetime import datetime\n')]
|
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
"""VGG Series
VGG: `"Very Deep Convolutional Networks for Large-Scale Image Recognition"
<https://arxiv.org/abs/1409.1556>`_
"""
from typing import Any, Mapping, Optional, Sequence
import megengine as mge
import megengine.hub as hub
import megengine.module as M
from basecls.layers import activation, build_head, conv2d, init_weights, norm2d
from basecls.utils import recursive_update, registers
__all__ = ["VGGStage", "VGG"]
class VGGStage(M.Module):
"""VGG stage (sequence of blocks w/ the same output shape)."""
def __init__(self, w_in: int, w_out: int, depth: int, norm_name: str, act_name: str):
super().__init__()
self.depth = depth
for i in range(depth):
block = M.Sequential(
conv2d(w_in, w_out, 3), norm2d(norm_name, w_out), activation(act_name)
)
setattr(self, f"b{i + 1}", block)
w_in = w_out
self.max_pool = M.MaxPool2d(kernel_size=2, stride=2)
def __len__(self):
return self.depth
def forward(self, x: mge.Tensor) -> mge.Tensor:
for i in range(self.depth):
block = getattr(self, f"b{i + 1}")
x = block(x)
x = self.max_pool(x)
return x
@registers.models.register()
class VGG(M.Module):
"""VGG model.
Args:
depths: depth for each stage (number of blocks in the stage).
widths: width for each stage (width of each block in the stage).
norm_name: normalization function. Default: ``None``
act_name: activation function. Default: ``"relu"``
head: head args. Default: ``None``
"""
def __init__(
self,
depths: Sequence[int],
widths: Sequence[int],
        norm_name: Optional[str] = None,
        act_name: str = "relu",
        head: Optional[Mapping[str, Any]] = None,
):
super().__init__()
self.depths = depths
model_args = [depths, widths]
prev_w = 3
for i, (d, w) in enumerate(zip(*model_args)):
stage = VGGStage(prev_w, w, d, norm_name, act_name)
setattr(self, f"s{i + 1}", stage)
prev_w = w
self.head = build_head(prev_w, head, None, act_name)
self.apply(init_weights)
def forward(self, x: mge.Tensor) -> mge.Tensor:
for i in range(len(self.depths)):
stage = getattr(self, f"s{i + 1}")
x = stage(x)
if getattr(self, "head", None) is not None:
x = self.head(x)
return x
def _build_vgg(**kwargs):
model_args = dict(head=dict(name="VGGHead", dropout_prob=0.5))
recursive_update(model_args, kwargs)
return VGG(**model_args)
@registers.models.register()
@hub.pretrained("https://data.megengine.org.cn/research/basecls/models/vgg/vgg11/vgg11.pkl")
def vgg11(**kwargs):
model_args = dict(depths=[1, 1, 2, 2, 2], widths=[64, 128, 256, 512, 512])
recursive_update(model_args, kwargs)
return _build_vgg(**model_args)
@registers.models.register()
@hub.pretrained("https://data.megengine.org.cn/research/basecls/models/vgg/vgg11_bn/vgg11_bn.pkl")
def vgg11_bn(**kwargs):
model_args = dict(norm_name="BN")
recursive_update(model_args, kwargs)
return vgg11(**model_args)
@registers.models.register()
@hub.pretrained("https://data.megengine.org.cn/research/basecls/models/vgg/vgg13/vgg13.pkl")
def vgg13(**kwargs):
model_args = dict(depths=[2, 2, 2, 2, 2], widths=[64, 128, 256, 512, 512])
recursive_update(model_args, kwargs)
return _build_vgg(**model_args)
@registers.models.register()
@hub.pretrained("https://data.megengine.org.cn/research/basecls/models/vgg/vgg13_bn/vgg13_bn.pkl")
def vgg13_bn(**kwargs):
model_args = dict(norm_name="BN")
recursive_update(model_args, kwargs)
return vgg13(**model_args)
@registers.models.register()
@hub.pretrained("https://data.megengine.org.cn/research/basecls/models/vgg/vgg16/vgg16.pkl")
def vgg16(**kwargs):
model_args = dict(depths=[2, 2, 3, 3, 3], widths=[64, 128, 256, 512, 512])
recursive_update(model_args, kwargs)
return _build_vgg(**model_args)
@registers.models.register()
@hub.pretrained("https://data.megengine.org.cn/research/basecls/models/vgg/vgg16_bn/vgg16_bn.pkl")
def vgg16_bn(**kwargs):
model_args = dict(norm_name="BN")
recursive_update(model_args, kwargs)
return vgg16(**model_args)
@registers.models.register()
@hub.pretrained("https://data.megengine.org.cn/research/basecls/models/vgg/vgg19/vgg19.pkl")
def vgg19(**kwargs):
model_args = dict(depths=[2, 2, 4, 4, 4], widths=[64, 128, 256, 512, 512])
recursive_update(model_args, kwargs)
return _build_vgg(**model_args)
@registers.models.register()
@hub.pretrained("https://data.megengine.org.cn/research/basecls/models/vgg/vgg19_bn/vgg19_bn.pkl")
def vgg19_bn(**kwargs):
model_args = dict(norm_name="BN")
recursive_update(model_args, kwargs)
return vgg19(**model_args)
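# Usage sketch (added illustration, not part of the original module).
# A hypothetical headless forward pass: the backbone is built directly with
# head=None so the final feature map is returned. The input resolution and the
# assumption that build_head(..., None, ...) yields no head are illustrative.
import numpy as np

if __name__ == "__main__":
    backbone = VGG(depths=[2, 2, 3, 3, 3], widths=[64, 128, 256, 512, 512])
    backbone.eval()
    x = mge.tensor(np.random.rand(2, 3, 224, 224).astype("float32"))
    feats = backbone(x)   # roughly (2, 512, 7, 7) after five 2x2 poolings
    print(feats.shape)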
|
[
"megengine.module.MaxPool2d",
"megengine.hub.pretrained"
] |
[((1300, 1327), 'basecls.utils.registers.models.register', 'registers.models.register', ([], {}), '()\n', (1325, 1327), False, 'from basecls.utils import recursive_update, registers\n'), ((2727, 2754), 'basecls.utils.registers.models.register', 'registers.models.register', ([], {}), '()\n', (2752, 2754), False, 'from basecls.utils import recursive_update, registers\n'), ((2756, 2857), 'megengine.hub.pretrained', 'hub.pretrained', (['"""https://data.megengine.org.cn/research/basecls/models/vgg/vgg11/vgg11.pkl"""'], {}), "(\n 'https://data.megengine.org.cn/research/basecls/models/vgg/vgg11/vgg11.pkl'\n )\n", (2770, 2857), True, 'import megengine.hub as hub\n'), ((3028, 3055), 'basecls.utils.registers.models.register', 'registers.models.register', ([], {}), '()\n', (3053, 3055), False, 'from basecls.utils import recursive_update, registers\n'), ((3057, 3164), 'megengine.hub.pretrained', 'hub.pretrained', (['"""https://data.megengine.org.cn/research/basecls/models/vgg/vgg11_bn/vgg11_bn.pkl"""'], {}), "(\n 'https://data.megengine.org.cn/research/basecls/models/vgg/vgg11_bn/vgg11_bn.pkl'\n )\n", (3071, 3164), True, 'import megengine.hub as hub\n'), ((3292, 3319), 'basecls.utils.registers.models.register', 'registers.models.register', ([], {}), '()\n', (3317, 3319), False, 'from basecls.utils import recursive_update, registers\n'), ((3321, 3422), 'megengine.hub.pretrained', 'hub.pretrained', (['"""https://data.megengine.org.cn/research/basecls/models/vgg/vgg13/vgg13.pkl"""'], {}), "(\n 'https://data.megengine.org.cn/research/basecls/models/vgg/vgg13/vgg13.pkl'\n )\n", (3335, 3422), True, 'import megengine.hub as hub\n'), ((3593, 3620), 'basecls.utils.registers.models.register', 'registers.models.register', ([], {}), '()\n', (3618, 3620), False, 'from basecls.utils import recursive_update, registers\n'), ((3622, 3729), 'megengine.hub.pretrained', 'hub.pretrained', (['"""https://data.megengine.org.cn/research/basecls/models/vgg/vgg13_bn/vgg13_bn.pkl"""'], {}), "(\n 'https://data.megengine.org.cn/research/basecls/models/vgg/vgg13_bn/vgg13_bn.pkl'\n )\n", (3636, 3729), True, 'import megengine.hub as hub\n'), ((3857, 3884), 'basecls.utils.registers.models.register', 'registers.models.register', ([], {}), '()\n', (3882, 3884), False, 'from basecls.utils import recursive_update, registers\n'), ((3886, 3987), 'megengine.hub.pretrained', 'hub.pretrained', (['"""https://data.megengine.org.cn/research/basecls/models/vgg/vgg16/vgg16.pkl"""'], {}), "(\n 'https://data.megengine.org.cn/research/basecls/models/vgg/vgg16/vgg16.pkl'\n )\n", (3900, 3987), True, 'import megengine.hub as hub\n'), ((4158, 4185), 'basecls.utils.registers.models.register', 'registers.models.register', ([], {}), '()\n', (4183, 4185), False, 'from basecls.utils import recursive_update, registers\n'), ((4187, 4294), 'megengine.hub.pretrained', 'hub.pretrained', (['"""https://data.megengine.org.cn/research/basecls/models/vgg/vgg16_bn/vgg16_bn.pkl"""'], {}), "(\n 'https://data.megengine.org.cn/research/basecls/models/vgg/vgg16_bn/vgg16_bn.pkl'\n )\n", (4201, 4294), True, 'import megengine.hub as hub\n'), ((4422, 4449), 'basecls.utils.registers.models.register', 'registers.models.register', ([], {}), '()\n', (4447, 4449), False, 'from basecls.utils import recursive_update, registers\n'), ((4451, 4552), 'megengine.hub.pretrained', 'hub.pretrained', (['"""https://data.megengine.org.cn/research/basecls/models/vgg/vgg19/vgg19.pkl"""'], {}), "(\n 'https://data.megengine.org.cn/research/basecls/models/vgg/vgg19/vgg19.pkl'\n )\n", (4465, 4552), 
True, 'import megengine.hub as hub\n'), ((4723, 4750), 'basecls.utils.registers.models.register', 'registers.models.register', ([], {}), '()\n', (4748, 4750), False, 'from basecls.utils import recursive_update, registers\n'), ((4752, 4859), 'megengine.hub.pretrained', 'hub.pretrained', (['"""https://data.megengine.org.cn/research/basecls/models/vgg/vgg19_bn/vgg19_bn.pkl"""'], {}), "(\n 'https://data.megengine.org.cn/research/basecls/models/vgg/vgg19_bn/vgg19_bn.pkl'\n )\n", (4766, 4859), True, 'import megengine.hub as hub\n'), ((2658, 2694), 'basecls.utils.recursive_update', 'recursive_update', (['model_args', 'kwargs'], {}), '(model_args, kwargs)\n', (2674, 2694), False, 'from basecls.utils import recursive_update, registers\n'), ((2952, 2988), 'basecls.utils.recursive_update', 'recursive_update', (['model_args', 'kwargs'], {}), '(model_args, kwargs)\n', (2968, 2988), False, 'from basecls.utils import recursive_update, registers\n'), ((3221, 3257), 'basecls.utils.recursive_update', 'recursive_update', (['model_args', 'kwargs'], {}), '(model_args, kwargs)\n', (3237, 3257), False, 'from basecls.utils import recursive_update, registers\n'), ((3517, 3553), 'basecls.utils.recursive_update', 'recursive_update', (['model_args', 'kwargs'], {}), '(model_args, kwargs)\n', (3533, 3553), False, 'from basecls.utils import recursive_update, registers\n'), ((3786, 3822), 'basecls.utils.recursive_update', 'recursive_update', (['model_args', 'kwargs'], {}), '(model_args, kwargs)\n', (3802, 3822), False, 'from basecls.utils import recursive_update, registers\n'), ((4082, 4118), 'basecls.utils.recursive_update', 'recursive_update', (['model_args', 'kwargs'], {}), '(model_args, kwargs)\n', (4098, 4118), False, 'from basecls.utils import recursive_update, registers\n'), ((4351, 4387), 'basecls.utils.recursive_update', 'recursive_update', (['model_args', 'kwargs'], {}), '(model_args, kwargs)\n', (4367, 4387), False, 'from basecls.utils import recursive_update, registers\n'), ((4647, 4683), 'basecls.utils.recursive_update', 'recursive_update', (['model_args', 'kwargs'], {}), '(model_args, kwargs)\n', (4663, 4683), False, 'from basecls.utils import recursive_update, registers\n'), ((4916, 4952), 'basecls.utils.recursive_update', 'recursive_update', (['model_args', 'kwargs'], {}), '(model_args, kwargs)\n', (4932, 4952), False, 'from basecls.utils import recursive_update, registers\n'), ((1003, 1039), 'megengine.module.MaxPool2d', 'M.MaxPool2d', ([], {'kernel_size': '(2)', 'stride': '(2)'}), '(kernel_size=2, stride=2)\n', (1014, 1039), True, 'import megengine.module as M\n'), ((2219, 2259), 'basecls.layers.build_head', 'build_head', (['prev_w', 'head', 'None', 'act_name'], {}), '(prev_w, head, None, act_name)\n', (2229, 2259), False, 'from basecls.layers import activation, build_head, conv2d, init_weights, norm2d\n'), ((823, 845), 'basecls.layers.conv2d', 'conv2d', (['w_in', 'w_out', '(3)'], {}), '(w_in, w_out, 3)\n', (829, 845), False, 'from basecls.layers import activation, build_head, conv2d, init_weights, norm2d\n'), ((847, 871), 'basecls.layers.norm2d', 'norm2d', (['norm_name', 'w_out'], {}), '(norm_name, w_out)\n', (853, 871), False, 'from basecls.layers import activation, build_head, conv2d, init_weights, norm2d\n'), ((873, 893), 'basecls.layers.activation', 'activation', (['act_name'], {}), '(act_name)\n', (883, 893), False, 'from basecls.layers import activation, build_head, conv2d, init_weights, norm2d\n')]
|
from __future__ import print_function
from __future__ import absolute_import
import numpy as nm
import sys
from six.moves import range
sys.path.append('.')
from sfepy.base.base import output, assert_
from sfepy.base.ioutils import ensure_path
from sfepy.linalg import cycle
from sfepy.discrete.fem.mesh import Mesh
from sfepy.mesh.mesh_tools import elems_q2t
def get_tensor_product_conn(shape):
"""
Generate vertex connectivity for cells of a tensor-product mesh of the
given shape.
Parameters
----------
shape : array of 2 or 3 ints
Shape (counts of nodes in x, y, z) of the mesh.
Returns
-------
conn : array
The vertex connectivity array.
desc : str
The cell kind.
"""
shape = nm.asarray(shape)
dim = len(shape)
assert_(1 <= dim <= 3)
n_nod = nm.prod(shape)
n_el = nm.prod(shape - 1)
grid = nm.arange(n_nod, dtype=nm.int32)
grid.shape = shape
if dim == 1:
conn = nm.zeros((n_el, 2), dtype=nm.int32)
conn[:, 0] = grid[:-1]
conn[:, 1] = grid[1:]
desc = '1_2'
elif dim == 2:
conn = nm.zeros((n_el, 4), dtype=nm.int32)
conn[:, 0] = grid[:-1, :-1].flat
conn[:, 1] = grid[1:, :-1].flat
conn[:, 2] = grid[1:, 1:].flat
conn[:, 3] = grid[:-1, 1:].flat
desc = '2_4'
else:
conn = nm.zeros((n_el, 8), dtype=nm.int32)
conn[:, 0] = grid[:-1, :-1, :-1].flat
conn[:, 1] = grid[1:, :-1, :-1].flat
conn[:, 2] = grid[1:, 1:, :-1].flat
conn[:, 3] = grid[:-1, 1:, :-1].flat
conn[:, 4] = grid[:-1, :-1, 1:].flat
conn[:, 5] = grid[1:, :-1, 1:].flat
conn[:, 6] = grid[1:, 1:, 1:].flat
conn[:, 7] = grid[:-1, 1:, 1:].flat
desc = '3_8'
return conn, desc
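# Worked example (added note): for shape = (3, 3) the 9 vertices are numbered
# row-major as
#   0 1 2
#   3 4 5
#   6 7 8
# and the four '2_4' (quad) cells come out as
#   get_tensor_product_conn([3, 3])[0] == [[0, 3, 4, 1],
#                                          [1, 4, 5, 2],
#                                          [3, 6, 7, 4],
#                                          [4, 7, 8, 5]]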
def gen_block_mesh(dims, shape, centre, mat_id=0, name='block',
coors=None, verbose=True):
"""
Generate a 2D or 3D block mesh. The dimension is determined by the
    length of the shape argument.
Parameters
----------
dims : array of 2 or 3 floats
Dimensions of the block.
shape : array of 2 or 3 ints
Shape (counts of nodes in x, y, z) of the block mesh.
centre : array of 2 or 3 floats
Centre of the block.
mat_id : int, optional
The material id of all elements.
name : string
Mesh name.
verbose : bool
If True, show progress of the mesh generation.
Returns
-------
mesh : Mesh instance
"""
dims = nm.asarray(dims, dtype=nm.float64)
shape = nm.asarray(shape, dtype=nm.int32)
centre = nm.asarray(centre, dtype=nm.float64)
dim = shape.shape[0]
centre = centre[:dim]
dims = dims[:dim]
n_nod = nm.prod(shape)
output('generating %d vertices...' % n_nod, verbose=verbose)
x0 = centre - 0.5 * dims
dd = dims / (shape - 1)
ngrid = nm.mgrid[[slice(ii) for ii in shape]]
ngrid.shape = (dim, n_nod)
coors = x0 + ngrid.T * dd
output('...done', verbose=verbose)
n_el = nm.prod(shape - 1)
output('generating %d cells...' % n_el, verbose=verbose)
mat_ids = nm.empty((n_el,), dtype=nm.int32)
mat_ids.fill(mat_id)
conn, desc = get_tensor_product_conn(shape)
output('...done', verbose=verbose)
mesh = Mesh.from_data(name, coors, None, [conn], [mat_ids], [desc])
return mesh
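# Usage sketch (added illustration; the argument values are arbitrary):
# a unit square centred at the origin, meshed with 11 x 11 vertices,
# i.e. 10 x 10 quad cells.
#
#   mesh = gen_block_mesh([1.0, 1.0], [11, 11], [0.0, 0.0], name='square')
#   mesh.write('square.mesh', io='auto')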
def gen_cylinder_mesh(dims, shape, centre, axis='x', force_hollow=False,
is_open=False, open_angle=0.0, non_uniform=False,
name='cylinder', verbose=True):
"""
Generate a cylindrical mesh along an axis. Its cross-section can be
ellipsoidal.
Parameters
----------
dims : array of 5 floats
Dimensions of the cylinder: inner surface semi-axes a1, b1, outer
surface semi-axes a2, b2, length.
shape : array of 3 ints
Shape (counts of nodes in radial, circumferential and longitudinal
directions) of the cylinder mesh.
centre : array of 3 floats
Centre of the cylinder.
    axis : one of 'x', 'y', 'z'
The axis of the cylinder.
force_hollow : boolean
Force hollow mesh even if inner radii a1 = b1 = 0.
is_open : boolean
Generate an open cylinder segment.
open_angle : float
Opening angle in radians.
non_uniform : boolean
If True, space the mesh nodes in radial direction so that the element
volumes are (approximately) the same, making thus the elements towards
the outer surface thinner.
name : string
Mesh name.
verbose : bool
If True, show progress of the mesh generation.
Returns
-------
mesh : Mesh instance
"""
dims = nm.asarray(dims, dtype=nm.float64)
shape = nm.asarray(shape, dtype=nm.int32)
centre = nm.asarray(centre, dtype=nm.float64)
a1, b1, a2, b2, length = dims
nr, nfi, nl = shape
origin = centre - nm.array([0.5 * length, 0.0, 0.0])
dfi = 2.0 * (nm.pi - open_angle) / nfi
if is_open:
nnfi = nfi + 1
else:
nnfi = nfi
is_hollow = force_hollow or not (max(abs(a1), abs(b1)) < 1e-15)
if is_hollow:
mr = 0
else:
mr = (nnfi - 1) * nl
grid = nm.zeros((nr, nnfi, nl), dtype=nm.int32)
n_nod = nr * nnfi * nl - mr
coors = nm.zeros((n_nod, 3), dtype=nm.float64)
angles = nm.linspace(open_angle, open_angle+(nfi)*dfi, nfi+1)
xs = nm.linspace(0.0, length, nl)
if non_uniform:
ras = nm.zeros((nr,), dtype=nm.float64)
rbs = nm.zeros_like(ras)
advol = (a2**2 - a1**2) / (nr - 1)
bdvol = (b2**2 - b1**2) / (nr - 1)
ras[0], rbs[0] = a1, b1
for ii in range(1, nr):
ras[ii] = nm.sqrt(advol + ras[ii-1]**2)
rbs[ii] = nm.sqrt(bdvol + rbs[ii-1]**2)
else:
ras = nm.linspace(a1, a2, nr)
rbs = nm.linspace(b1, b2, nr)
# This is 3D only...
output('generating %d vertices...' % n_nod, verbose=verbose)
ii = 0
for ix in range(nr):
a, b = ras[ix], rbs[ix]
for iy, fi in enumerate(angles[:nnfi]):
for iz, x in enumerate(xs):
grid[ix,iy,iz] = ii
coors[ii] = origin + [x, a * nm.cos(fi), b * nm.sin(fi)]
ii += 1
if not is_hollow and (ix == 0):
if iy > 0:
grid[ix,iy,iz] = grid[ix,0,iz]
ii -= 1
assert_(ii == n_nod)
output('...done', verbose=verbose)
n_el = (nr - 1) * nfi * (nl - 1)
conn = nm.zeros((n_el, 8), dtype=nm.int32)
output('generating %d cells...' % n_el, verbose=verbose)
ii = 0
for (ix, iy, iz) in cycle([nr-1, nnfi, nl-1]):
if iy < (nnfi - 1):
conn[ii,:] = [grid[ix ,iy ,iz ], grid[ix+1,iy ,iz ],
grid[ix+1,iy+1,iz ], grid[ix ,iy+1,iz ],
grid[ix ,iy ,iz+1], grid[ix+1,iy ,iz+1],
grid[ix+1,iy+1,iz+1], grid[ix ,iy+1,iz+1]]
ii += 1
elif not is_open:
conn[ii,:] = [grid[ix ,iy ,iz ], grid[ix+1,iy ,iz ],
grid[ix+1,0,iz ], grid[ix ,0,iz ],
grid[ix ,iy ,iz+1], grid[ix+1,iy ,iz+1],
grid[ix+1,0,iz+1], grid[ix ,0,iz+1]]
ii += 1
mat_id = nm.zeros((n_el,), dtype = nm.int32)
desc = '3_8'
assert_(n_nod == (conn.max() + 1))
output('...done', verbose=verbose)
if axis == 'z':
coors = coors[:,[1,2,0]]
elif axis == 'y':
coors = coors[:,[2,0,1]]
mesh = Mesh.from_data(name, coors, None, [conn], [mat_id], [desc])
return mesh
def _spread_along_axis(axis, coors, tangents, grading_fun):
"""
Spread the coordinates along the given axis using the grading function, and
the tangents in the other two directions.
"""
oo = list(set([0, 1, 2]).difference([axis]))
c0, c1, c2 = coors[:, axis], coors[:, oo[0]], coors[:, oo[1]]
out = nm.empty_like(coors)
mi, ma = c0.min(), c0.max()
nc0 = (c0 - mi) / (ma - mi)
out[:, axis] = oc0 = grading_fun(nc0) * (ma - mi) + mi
nc = oc0 - oc0.min()
mi, ma = c1.min(), c1.max()
n1 = 2 * (c1 - mi) / (ma - mi) - 1
out[:, oo[0]] = c1 + n1 * nc * tangents[oo[0]]
mi, ma = c2.min(), c2.max()
n2 = 2 * (c2 - mi) / (ma - mi) - 1
out[:, oo[1]] = c2 + n2 * nc * tangents[oo[1]]
return out
def _get_extension_side(side, grading_fun, mat_id,
b_dims, b_shape, e_dims, e_shape, centre):
"""
Get a mesh extending the given side of a block mesh.
"""
# Pure extension dimensions.
pe_dims = 0.5 * (e_dims - b_dims)
coff = 0.5 * (b_dims + pe_dims)
cc = centre + coff * nm.eye(3)[side]
if side == 0: # x axis.
dims = [pe_dims[0], b_dims[1], b_dims[2]]
shape = [e_shape, b_shape[1], b_shape[2]]
tangents = [0, pe_dims[1] / pe_dims[0], pe_dims[2] / pe_dims[0]]
elif side == 1: # y axis.
dims = [b_dims[0], pe_dims[1], b_dims[2]]
shape = [b_shape[0], e_shape, b_shape[2]]
tangents = [pe_dims[0] / pe_dims[1], 0, pe_dims[2] / pe_dims[1]]
elif side == 2: # z axis.
dims = [b_dims[0], b_dims[1], pe_dims[2]]
shape = [b_shape[0], b_shape[1], e_shape]
tangents = [pe_dims[0] / pe_dims[2], pe_dims[1] / pe_dims[2], 0]
e_mesh = gen_block_mesh(dims, shape, cc, mat_id=mat_id, verbose=False)
e_mesh.coors[:] = _spread_along_axis(side, e_mesh.coors, tangents,
grading_fun)
return e_mesh, shape
def gen_extended_block_mesh(b_dims, b_shape, e_dims, e_shape, centre,
grading_fun=None, name=None):
"""
Generate a 3D mesh with a central block and (coarse) extending side meshes.
The resulting mesh is again a block. Each of the components has a different
material id.
Parameters
----------
b_dims : array of 3 floats
The dimensions of the central block.
b_shape : array of 3 ints
The shape (counts of nodes in x, y, z) of the central block mesh.
e_dims : array of 3 floats
The dimensions of the complete block (central block + extensions).
e_shape : int
The count of nodes of extending blocks in the direction from the
central block.
centre : array of 3 floats
The centre of the mesh.
grading_fun : callable, optional
A function of :math:`x \in [0, 1]` that can be used to shift nodes in
the extension axis directions to allow smooth grading of element sizes
from the centre. The default function is :math:`x**p` with :math:`p`
determined so that the element sizes next to the central block have the
size of the shortest edge of the central block.
name : string, optional
The mesh name.
Returns
-------
mesh : Mesh instance
"""
b_dims = nm.asarray(b_dims, dtype=nm.float64)
b_shape = nm.asarray(b_shape, dtype=nm.int32)
e_dims = nm.asarray(e_dims, dtype=nm.float64)
centre = nm.asarray(centre, dtype=nm.float64)
# Pure extension dimensions.
pe_dims = 0.5 * (e_dims - b_dims)
# Central block element sizes.
dd = (b_dims / (b_shape - 1))
# The "first x" going to grading_fun.
nc = 1.0 / (e_shape - 1)
# Grading power and function.
power = nm.log(dd.min() / pe_dims.min()) / nm.log(nc)
grading_fun = (lambda x: x**power) if grading_fun is None else grading_fun
# Central block mesh.
b_mesh = gen_block_mesh(b_dims, b_shape, centre, mat_id=0, verbose=False)
# 'x' extension.
e_mesh, xs = _get_extension_side(0, grading_fun, 10,
b_dims, b_shape, e_dims, e_shape, centre)
mesh = b_mesh + e_mesh
# Mirror by 'x'.
e_mesh.coors[:, 0] = (2 * centre[0]) - e_mesh.coors[:, 0]
e_mesh.cmesh.cell_groups.fill(11)
mesh = mesh + e_mesh
# 'y' extension.
e_mesh, ys = _get_extension_side(1, grading_fun, 20,
b_dims, b_shape, e_dims, e_shape, centre)
mesh = mesh + e_mesh
# Mirror by 'y'.
e_mesh.coors[:, 1] = (2 * centre[1]) - e_mesh.coors[:, 1]
e_mesh.cmesh.cell_groups.fill(21)
mesh = mesh + e_mesh
# 'z' extension.
e_mesh, zs = _get_extension_side(2, grading_fun, 30,
b_dims, b_shape, e_dims, e_shape, centre)
mesh = mesh + e_mesh
# Mirror by 'z'.
e_mesh.coors[:, 2] = (2 * centre[2]) - e_mesh.coors[:, 2]
e_mesh.cmesh.cell_groups.fill(31)
mesh = mesh + e_mesh
if name is not None:
mesh.name = name
# Verify merging by checking the number of nodes.
n_nod = (nm.prod(nm.maximum(b_shape - 2, 0)) + 2 * nm.prod(xs)
+ 2 * (max(ys[0] - 2, 0) * ys[1] * ys[2])
+ 2 * (max(zs[0] - 2, 0) * max(zs[1] - 2, 0) * zs[2]))
if n_nod != mesh.n_nod:
raise ValueError('Merge of meshes failed! (%d == %d)'
% (n_nod, mesh.n_nod))
return mesh
def tiled_mesh1d(conn, coors, ngrps, idim, n_rep, bb, eps=1e-6, ndmap=False):
from sfepy.discrete.fem.periodic import match_grid_plane
s1 = nm.nonzero(coors[:,idim] < (bb[0] + eps))[0]
s2 = nm.nonzero(coors[:,idim] > (bb[1] - eps))[0]
if s1.shape != s2.shape:
raise ValueError('incompatible shapes: %s == %s'\
% (s1.shape, s2.shape))
(nnod0, dim) = coors.shape
nnod = nnod0 * n_rep - s1.shape[0] * (n_rep - 1)
(nel0, nnel) = conn.shape
nel = nel0 * n_rep
dd = nm.zeros((dim,), dtype=nm.float64)
dd[idim] = bb[1] - bb[0]
m1, m2 = match_grid_plane(coors[s1], coors[s2], idim)
oconn = nm.zeros((nel, nnel), dtype=nm.int32)
ocoors = nm.zeros((nnod, dim), dtype=nm.float64)
ongrps = nm.zeros((nnod,), dtype=nm.int32)
if type(ndmap) is bool:
ret_ndmap = ndmap
else:
ret_ndmap= True
ndmap_out = nm.zeros((nnod,), dtype=nm.int32)
el_off = 0
nd_off = 0
for ii in range(n_rep):
if ii == 0:
oconn[0:nel0,:] = conn
ocoors[0:nnod0,:] = coors
ongrps[0:nnod0] = ngrps.squeeze()
nd_off += nnod0
mapto = s2[m2]
mask = nm.ones((nnod0,), dtype=nm.int32)
mask[s1] = 0
remap0 = nm.cumsum(mask) - 1
nnod0r = nnod0 - s1.shape[0]
cidx = nm.where(mask)
if ret_ndmap:
ndmap_out[0:nnod0] = nm.arange(nnod0)
else:
remap = remap0 + nd_off
remap[s1[m1]] = mapto
mapto = remap[s2[m2]]
ocoors[nd_off:(nd_off + nnod0r),:] =\
(coors[cidx,:] + ii * dd)
ongrps[nd_off:(nd_off + nnod0r)] = ngrps[cidx].squeeze()
oconn[el_off:(el_off + nel0),:] = remap[conn]
if ret_ndmap:
ndmap_out[nd_off:(nd_off + nnod0r)] = cidx[0]
nd_off += nnod0r
el_off += nel0
if ret_ndmap:
if ndmap is not None:
max_nd_ref = nm.max(ndmap)
idxs = nm.where(ndmap_out > max_nd_ref)
ndmap_out[idxs] = ndmap[ndmap_out[idxs]]
return oconn, ocoors, ongrps, ndmap_out
else:
return oconn, ocoors, ongrps
def gen_tiled_mesh(mesh, grid=None, scale=1.0, eps=1e-6, ret_ndmap=False):
"""
Generate a new mesh by repeating a given periodic element
along each axis.
Parameters
----------
mesh : Mesh instance
The input periodic FE mesh.
grid : array
Number of repetition along each axis.
scale : float, optional
Scaling factor.
eps : float, optional
Tolerance for boundary detection.
ret_ndmap : bool, optional
If True, return global node map.
Returns
-------
mesh_out : Mesh instance
FE mesh.
ndmap : array
Maps: actual node id --> node id in the reference cell.
"""
bbox = mesh.get_bounding_box()
if grid is None:
iscale = max(int(1.0 / scale), 1)
grid = [iscale] * mesh.dim
conn = mesh.get_conn(mesh.descs[0])
mat_ids = mesh.cmesh.cell_groups
coors = mesh.coors
ngrps = mesh.cmesh.vertex_groups
nrep = nm.prod(grid)
ndmap = None
output('repeating %s ...' % grid)
nblk = 1
for ii, gr in enumerate(grid):
if ret_ndmap:
(conn, coors,
ngrps, ndmap0) = tiled_mesh1d(conn, coors, ngrps,
ii, gr, bbox.transpose()[ii],
eps=eps, ndmap=ndmap)
ndmap = ndmap0
else:
conn, coors, ngrps = tiled_mesh1d(conn, coors, ngrps,
ii, gr, bbox.transpose()[ii],
eps=eps)
nblk *= gr
output('...done')
mat_ids = nm.tile(mat_ids, (nrep,))
mesh_out = Mesh.from_data('tiled mesh', coors * scale, ngrps,
[conn], [mat_ids], [mesh.descs[0]])
if ret_ndmap:
return mesh_out, ndmap
else:
return mesh_out
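# Usage sketch (added illustration; values are arbitrary and assume the input
# cell mesh is periodic, which a block mesh satisfies):
#
#   cell = gen_block_mesh([1.0, 1.0], [3, 3], [0.5, 0.5], verbose=False)
#   big = gen_tiled_mesh(cell, grid=[3, 3])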
def gen_misc_mesh(mesh_dir, force_create, kind, args, suffix='.mesh',
verbose=False):
"""
Create sphere or cube mesh according to `kind` in the given
directory if it does not exist and return path to it.
"""
import os
from sfepy import data_dir
defdir = os.path.join(data_dir, 'meshes')
if mesh_dir is None:
mesh_dir = defdir
def retype(args, types, defaults):
args=list(args)
args.extend(defaults[len(args):len(defaults)])
        return tuple(cast(value) for cast, value in zip(types, args))
if kind == 'sphere':
default = [5, 41, args[0]]
args = retype(args, [float, int, float], default)
mesh_pattern = os.path.join(mesh_dir, 'sphere-%.2f-%.2f-%i')
else:
assert_(kind == 'cube')
args = retype(args,
(int, float, int, float, int, float),
(args[0], args[1], args[0], args[1], args[0], args[1]))
mesh_pattern = os.path.join(mesh_dir, 'cube-%i_%.2f-%i_%.2f-%i_%.2f')
if verbose:
output(args)
filename = mesh_pattern % args
if not force_create:
if os.path.exists(filename): return filename
if os.path.exists(filename + '.mesh') : return filename + '.mesh'
if os.path.exists(filename + '.vtk'): return filename + '.vtk'
if kind == 'cube':
filename = filename + suffix
ensure_path(filename)
output('creating new cube mesh')
output('(%i nodes in %.2f) x (%i nodes in %.2f) x (%i nodes in %.2f)'
% args)
output('to file %s...' % filename)
mesh = gen_block_mesh(args[1::2], args[0::2],
(0.0, 0.0, 0.0), name=filename)
mesh.write(filename, io='auto')
output('...done')
else:
import subprocess, shutil, tempfile
filename = filename + '.mesh'
ensure_path(filename)
output('creating new sphere mesh (%i nodes, r=%.2f) and gradation %d'
% args)
output('to file %s...' % filename)
f = open(os.path.join(defdir, 'quantum', 'sphere.geo'))
tmp_dir = tempfile.mkdtemp()
tmpfile = os.path.join(tmp_dir, 'sphere.geo.temp')
ff = open(tmpfile, "w")
ff.write("""
R = %i.0;
n = %i.0;
dens = %f;
""" % args)
ff.write(f.read())
f.close()
ff.close()
subprocess.call(['gmsh', '-3', tmpfile, '-format', 'mesh',
'-o', filename])
shutil.rmtree(tmp_dir)
output('...done')
return filename
def gen_mesh_from_string(mesh_name, mesh_dir):
import re
result = re.match('^\\s*([a-zA-Z]+)[:\\(]([^\\):]*)[:\\)](\\*)?\\s*$',
mesh_name)
if result is None:
return mesh_name
else:
args = re.split(',', result.group(2))
kind = result.group(1)
return gen_misc_mesh(mesh_dir, result.group(3)=='*', kind, args)
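# Added note: the accepted pattern is '<kind>(<comma-args>)' or
# '<kind>:<comma-args>:', with a trailing '*' forcing regeneration, e.g.
#   gen_mesh_from_string('cube(11,1.0)', None)
#   gen_mesh_from_string('sphere:5,41,0.5:*', None)
# Anything that does not match the pattern is returned unchanged, as a
# file name.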
def gen_mesh_from_geom(geo, a=None, verbose=False, refine=False):
"""
Runs mesh generator - tetgen for 3D or triangle for 2D meshes.
Parameters
----------
geo : geometry
geometry description
a : int, optional
a maximum area/volume constraint
verbose : bool, optional
detailed information
refine : bool, optional
refines mesh
Returns
-------
mesh : Mesh instance
triangular or tetrahedral mesh
"""
import os.path as op
import pexpect
import tempfile
import shutil
tmp_dir = tempfile.mkdtemp()
polyfilename = op.join(tmp_dir, 'meshgen.poly')
# write geometry to poly file
geo.to_poly_file(polyfilename)
meshgen_call = {2: ('triangle', ''), 3: ('tetgen', 'BFENk')}
params = "-ACp"
params += "q" if refine else ''
params += "V" if verbose else "Q"
params += meshgen_call[geo.dim][1]
if a is not None:
params += "a%f" % (a)
params += " %s" % (polyfilename)
cmd = "%s %s" % (meshgen_call[geo.dim][0], params)
if verbose: print("Generating mesh using", cmd)
p=pexpect.run(cmd, timeout=None)
bname, ext = op.splitext(polyfilename)
if geo.dim == 2:
mesh = Mesh.from_file(bname + '.1.node')
if geo.dim == 3:
mesh = Mesh.from_file(bname + '.1.vtk')
shutil.rmtree(tmp_dir)
return mesh
def gen_mesh_from_voxels(voxels, dims, etype='q'):
"""
Generate FE mesh from voxels (volumetric data).
Parameters
----------
voxels : array
Voxel matrix, 1=material.
dims : array
Size of one voxel.
etype : integer, optional
'q' - quadrilateral or hexahedral elements
't' - triangular or tetrahedral elements
Returns
-------
mesh : Mesh instance
Finite element mesh.
"""
dims = nm.array(dims).squeeze()
dim = len(dims)
nddims = nm.array(voxels.shape) + 2
nodemtx = nm.zeros(nddims, dtype=nm.int32)
if dim == 2:
#iy, ix = nm.where(voxels.transpose())
iy, ix = nm.where(voxels)
nel = ix.shape[0]
if etype == 'q':
nodemtx[ix,iy] += 1
nodemtx[ix + 1,iy] += 1
nodemtx[ix + 1,iy + 1] += 1
nodemtx[ix,iy + 1] += 1
elif etype == 't':
nodemtx[ix,iy] += 2
nodemtx[ix + 1,iy] += 1
nodemtx[ix + 1,iy + 1] += 2
nodemtx[ix,iy + 1] += 1
nel *= 2
elif dim == 3:
#iy, ix, iz = nm.where(voxels.transpose(1, 0, 2))
iy, ix, iz = nm.where(voxels)
nel = ix.shape[0]
if etype == 'q':
nodemtx[ix,iy,iz] += 1
nodemtx[ix + 1,iy,iz] += 1
nodemtx[ix + 1,iy + 1,iz] += 1
nodemtx[ix,iy + 1,iz] += 1
nodemtx[ix,iy,iz + 1] += 1
nodemtx[ix + 1,iy,iz + 1] += 1
nodemtx[ix + 1,iy + 1,iz + 1] += 1
nodemtx[ix,iy + 1,iz + 1] += 1
elif etype == 't':
nodemtx[ix,iy,iz] += 6
nodemtx[ix + 1,iy,iz] += 2
nodemtx[ix + 1,iy + 1,iz] += 2
nodemtx[ix,iy + 1,iz] += 2
nodemtx[ix,iy,iz + 1] += 2
nodemtx[ix + 1,iy,iz + 1] += 2
nodemtx[ix + 1,iy + 1,iz + 1] += 6
nodemtx[ix,iy + 1,iz + 1] += 2
nel *= 6
else:
msg = 'incorrect voxel dimension! (%d)' % dim
raise ValueError(msg)
ndidx = nm.where(nodemtx)
coors = nm.array(ndidx).transpose() * dims
nnod = coors.shape[0]
nodeid = -nm.ones(nddims, dtype=nm.int32)
nodeid[ndidx] = nm.arange(nnod)
# generate elements
if dim == 2:
elems = nm.array([nodeid[ix,iy],
nodeid[ix + 1,iy],
nodeid[ix + 1,iy + 1],
nodeid[ix,iy + 1]]).transpose()
elif dim == 3:
elems = nm.array([nodeid[ix,iy,iz],
nodeid[ix + 1,iy,iz],
nodeid[ix + 1,iy + 1,iz],
nodeid[ix,iy + 1,iz],
nodeid[ix,iy,iz + 1],
nodeid[ix + 1,iy,iz + 1],
nodeid[ix + 1,iy + 1,iz + 1],
nodeid[ix,iy + 1,iz + 1]]).transpose()
if etype == 't':
elems = elems_q2t(elems)
eid = etype + str(dim)
eltab = {'q2': 4, 'q3': 8, 't2': 3, 't3': 4}
mesh = Mesh.from_data('voxel_data',
coors, nm.ones((nnod,), dtype=nm.int32),
[nm.ascontiguousarray(elems)],
[nm.ones((nel,), dtype=nm.int32)],
['%d_%d' % (dim, eltab[eid])])
return mesh
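# Usage sketch (added illustration): meshing a filled 2x2 voxel block with
# unit voxels yields a 3x3 grid of nodes and four quad cells (or eight
# triangles with etype='t'). The values are arbitrary.
#
#   voxels = nm.ones((2, 2), dtype=bool)
#   mesh = gen_mesh_from_voxels(voxels, [1.0, 1.0], etype='q')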
def main():
mesh = gen_block_mesh(nm.array((1.0, 2.0, 3.0)),
nm.array((10,10,10)), nm.array((1.0, 2.0, 3.0)),
name='')
mesh.write('0.mesh', io = 'auto')
mesh = gen_cylinder_mesh(nm.array((1.0, 1.0, 2.0, 2.0, 3)),
nm.array((10,10,10)), nm.array((1.0, 2.0, 3.0)),
is_open=False, open_angle = 0.0,
name='')
mesh.write('1.mesh', io = 'auto')
mesh = gen_cylinder_mesh(nm.array((1.0, 1.0, 2.0, 2.0, 3)),
nm.array((10,10,10)), nm.array((1.0, 2.0, 3.0)),
is_open=True, open_angle = 0.0,
name='')
mesh.write('2.mesh', io = 'auto')
mesh = gen_cylinder_mesh(nm.array((1.0, 1.0, 2.0, 2.0, 3)),
nm.array((10,10,10)), nm.array((1.0, 2.0, 3.0)),
is_open=True, open_angle = 0.5,
name='')
mesh.write('3.mesh', io = 'auto')
mesh = gen_cylinder_mesh(nm.array((0.0, 0.0, 2.0, 2.0, 3)),
nm.array((10,10,10)), nm.array((1.0, 2.0, 3.0)),
is_open=False, open_angle = 0.0,
name='')
mesh.write('4.mesh', io = 'auto')
mesh = gen_cylinder_mesh(nm.array((0.0, 0.0, 1.0, 2.0, 3)),
nm.array((10,10,10)), nm.array((1.0, 2.0, 3.0)),
is_open=True, open_angle = 0.5,
name='')
mesh.write('5.mesh', io = 'auto')
mesh = gen_cylinder_mesh(nm.array((0.0, 0.0, 1.0, 2.0, 3)),
nm.array((10,10,10)), nm.array((1.0, 2.0, 3.0)),
is_open=True, open_angle = 0.5, non_uniform=True,
name='')
mesh.write('6.mesh', io = 'auto')
mesh = gen_cylinder_mesh(nm.array((0.5, 0.5, 1.0, 2.0, 3)),
nm.array((10,10,10)), nm.array((1.0, 2.0, 3.0)),
is_open=True, open_angle = 0.5, non_uniform=True,
name='')
mesh.write('7.mesh', io = 'auto')
if __name__ == '__main__':
main()
|
[
"sfepy.linalg.cycle",
"sfepy.discrete.fem.mesh.Mesh.from_data",
"sfepy.base.ioutils.ensure_path",
"sfepy.discrete.fem.mesh.Mesh.from_file",
"sfepy.discrete.fem.periodic.match_grid_plane",
"sfepy.base.base.output",
"sfepy.base.base.assert_",
"sfepy.mesh.mesh_tools.elems_q2t"
] |
[((135, 155), 'sys.path.append', 'sys.path.append', (['"""."""'], {}), "('.')\n", (150, 155), False, 'import sys\n'), ((756, 773), 'numpy.asarray', 'nm.asarray', (['shape'], {}), '(shape)\n', (766, 773), True, 'import numpy as nm\n'), ((799, 821), 'sfepy.base.base.assert_', 'assert_', (['(1 <= dim <= 3)'], {}), '(1 <= dim <= 3)\n', (806, 821), False, 'from sfepy.base.base import output, assert_\n'), ((835, 849), 'numpy.prod', 'nm.prod', (['shape'], {}), '(shape)\n', (842, 849), True, 'import numpy as nm\n'), ((861, 879), 'numpy.prod', 'nm.prod', (['(shape - 1)'], {}), '(shape - 1)\n', (868, 879), True, 'import numpy as nm\n'), ((892, 924), 'numpy.arange', 'nm.arange', (['n_nod'], {'dtype': 'nm.int32'}), '(n_nod, dtype=nm.int32)\n', (901, 924), True, 'import numpy as nm\n'), ((2543, 2577), 'numpy.asarray', 'nm.asarray', (['dims'], {'dtype': 'nm.float64'}), '(dims, dtype=nm.float64)\n', (2553, 2577), True, 'import numpy as nm\n'), ((2590, 2623), 'numpy.asarray', 'nm.asarray', (['shape'], {'dtype': 'nm.int32'}), '(shape, dtype=nm.int32)\n', (2600, 2623), True, 'import numpy as nm\n'), ((2637, 2673), 'numpy.asarray', 'nm.asarray', (['centre'], {'dtype': 'nm.float64'}), '(centre, dtype=nm.float64)\n', (2647, 2673), True, 'import numpy as nm\n'), ((2762, 2776), 'numpy.prod', 'nm.prod', (['shape'], {}), '(shape)\n', (2769, 2776), True, 'import numpy as nm\n'), ((2781, 2841), 'sfepy.base.base.output', 'output', (["('generating %d vertices...' % n_nod)"], {'verbose': 'verbose'}), "('generating %d vertices...' % n_nod, verbose=verbose)\n", (2787, 2841), False, 'from sfepy.base.base import output, assert_\n'), ((3017, 3051), 'sfepy.base.base.output', 'output', (['"""...done"""'], {'verbose': 'verbose'}), "('...done', verbose=verbose)\n", (3023, 3051), False, 'from sfepy.base.base import output, assert_\n'), ((3064, 3082), 'numpy.prod', 'nm.prod', (['(shape - 1)'], {}), '(shape - 1)\n', (3071, 3082), True, 'import numpy as nm\n'), ((3087, 3143), 'sfepy.base.base.output', 'output', (["('generating %d cells...' % n_el)"], {'verbose': 'verbose'}), "('generating %d cells...' 
% n_el, verbose=verbose)\n", (3093, 3143), False, 'from sfepy.base.base import output, assert_\n'), ((3159, 3192), 'numpy.empty', 'nm.empty', (['(n_el,)'], {'dtype': 'nm.int32'}), '((n_el,), dtype=nm.int32)\n', (3167, 3192), True, 'import numpy as nm\n'), ((3271, 3305), 'sfepy.base.base.output', 'output', (['"""...done"""'], {'verbose': 'verbose'}), "('...done', verbose=verbose)\n", (3277, 3305), False, 'from sfepy.base.base import output, assert_\n'), ((3318, 3378), 'sfepy.discrete.fem.mesh.Mesh.from_data', 'Mesh.from_data', (['name', 'coors', 'None', '[conn]', '[mat_ids]', '[desc]'], {}), '(name, coors, None, [conn], [mat_ids], [desc])\n', (3332, 3378), False, 'from sfepy.discrete.fem.mesh import Mesh\n'), ((4747, 4781), 'numpy.asarray', 'nm.asarray', (['dims'], {'dtype': 'nm.float64'}), '(dims, dtype=nm.float64)\n', (4757, 4781), True, 'import numpy as nm\n'), ((4794, 4827), 'numpy.asarray', 'nm.asarray', (['shape'], {'dtype': 'nm.int32'}), '(shape, dtype=nm.int32)\n', (4804, 4827), True, 'import numpy as nm\n'), ((4841, 4877), 'numpy.asarray', 'nm.asarray', (['centre'], {'dtype': 'nm.float64'}), '(centre, dtype=nm.float64)\n', (4851, 4877), True, 'import numpy as nm\n'), ((5260, 5300), 'numpy.zeros', 'nm.zeros', (['(nr, nnfi, nl)'], {'dtype': 'nm.int32'}), '((nr, nnfi, nl), dtype=nm.int32)\n', (5268, 5300), True, 'import numpy as nm\n'), ((5346, 5384), 'numpy.zeros', 'nm.zeros', (['(n_nod, 3)'], {'dtype': 'nm.float64'}), '((n_nod, 3), dtype=nm.float64)\n', (5354, 5384), True, 'import numpy as nm\n'), ((5399, 5455), 'numpy.linspace', 'nm.linspace', (['open_angle', '(open_angle + nfi * dfi)', '(nfi + 1)'], {}), '(open_angle, open_angle + nfi * dfi, nfi + 1)\n', (5410, 5455), True, 'import numpy as nm\n'), ((5461, 5489), 'numpy.linspace', 'nm.linspace', (['(0.0)', 'length', 'nl'], {}), '(0.0, length, nl)\n', (5472, 5489), True, 'import numpy as nm\n'), ((5961, 6021), 'sfepy.base.base.output', 'output', (["('generating %d vertices...' % n_nod)"], {'verbose': 'verbose'}), "('generating %d vertices...' % n_nod, verbose=verbose)\n", (5967, 6021), False, 'from sfepy.base.base import output, assert_\n'), ((6047, 6056), 'six.moves.range', 'range', (['nr'], {}), '(nr)\n', (6052, 6056), False, 'from six.moves import range\n'), ((6482, 6502), 'sfepy.base.base.assert_', 'assert_', (['(ii == n_nod)'], {}), '(ii == n_nod)\n', (6489, 6502), False, 'from sfepy.base.base import output, assert_\n'), ((6507, 6541), 'sfepy.base.base.output', 'output', (['"""...done"""'], {'verbose': 'verbose'}), "('...done', verbose=verbose)\n", (6513, 6541), False, 'from sfepy.base.base import output, assert_\n'), ((6591, 6626), 'numpy.zeros', 'nm.zeros', (['(n_el, 8)'], {'dtype': 'nm.int32'}), '((n_el, 8), dtype=nm.int32)\n', (6599, 6626), True, 'import numpy as nm\n'), ((6632, 6688), 'sfepy.base.base.output', 'output', (["('generating %d cells...' % n_el)"], {'verbose': 'verbose'}), "('generating %d cells...' 
% n_el, verbose=verbose)\n", (6638, 6688), False, 'from sfepy.base.base import output, assert_\n'), ((6724, 6753), 'sfepy.linalg.cycle', 'cycle', (['[nr - 1, nnfi, nl - 1]'], {}), '([nr - 1, nnfi, nl - 1])\n', (6729, 6753), False, 'from sfepy.linalg import cycle\n'), ((7407, 7440), 'numpy.zeros', 'nm.zeros', (['(n_el,)'], {'dtype': 'nm.int32'}), '((n_el,), dtype=nm.int32)\n', (7415, 7440), True, 'import numpy as nm\n'), ((7504, 7538), 'sfepy.base.base.output', 'output', (['"""...done"""'], {'verbose': 'verbose'}), "('...done', verbose=verbose)\n", (7510, 7538), False, 'from sfepy.base.base import output, assert_\n'), ((7660, 7719), 'sfepy.discrete.fem.mesh.Mesh.from_data', 'Mesh.from_data', (['name', 'coors', 'None', '[conn]', '[mat_id]', '[desc]'], {}), '(name, coors, None, [conn], [mat_id], [desc])\n', (7674, 7719), False, 'from sfepy.discrete.fem.mesh import Mesh\n'), ((8065, 8085), 'numpy.empty_like', 'nm.empty_like', (['coors'], {}), '(coors)\n', (8078, 8085), True, 'import numpy as nm\n'), ((11013, 11049), 'numpy.asarray', 'nm.asarray', (['b_dims'], {'dtype': 'nm.float64'}), '(b_dims, dtype=nm.float64)\n', (11023, 11049), True, 'import numpy as nm\n'), ((11064, 11099), 'numpy.asarray', 'nm.asarray', (['b_shape'], {'dtype': 'nm.int32'}), '(b_shape, dtype=nm.int32)\n', (11074, 11099), True, 'import numpy as nm\n'), ((11113, 11149), 'numpy.asarray', 'nm.asarray', (['e_dims'], {'dtype': 'nm.float64'}), '(e_dims, dtype=nm.float64)\n', (11123, 11149), True, 'import numpy as nm\n'), ((11163, 11199), 'numpy.asarray', 'nm.asarray', (['centre'], {'dtype': 'nm.float64'}), '(centre, dtype=nm.float64)\n', (11173, 11199), True, 'import numpy as nm\n'), ((13654, 13688), 'numpy.zeros', 'nm.zeros', (['(dim,)'], {'dtype': 'nm.float64'}), '((dim,), dtype=nm.float64)\n', (13662, 13688), True, 'import numpy as nm\n'), ((13732, 13776), 'sfepy.discrete.fem.periodic.match_grid_plane', 'match_grid_plane', (['coors[s1]', 'coors[s2]', 'idim'], {}), '(coors[s1], coors[s2], idim)\n', (13748, 13776), False, 'from sfepy.discrete.fem.periodic import match_grid_plane\n'), ((13790, 13827), 'numpy.zeros', 'nm.zeros', (['(nel, nnel)'], {'dtype': 'nm.int32'}), '((nel, nnel), dtype=nm.int32)\n', (13798, 13827), True, 'import numpy as nm\n'), ((13841, 13880), 'numpy.zeros', 'nm.zeros', (['(nnod, dim)'], {'dtype': 'nm.float64'}), '((nnod, dim), dtype=nm.float64)\n', (13849, 13880), True, 'import numpy as nm\n'), ((13894, 13927), 'numpy.zeros', 'nm.zeros', (['(nnod,)'], {'dtype': 'nm.int32'}), '((nnod,), dtype=nm.int32)\n', (13902, 13927), True, 'import numpy as nm\n'), ((14118, 14130), 'six.moves.range', 'range', (['n_rep'], {}), '(n_rep)\n', (14123, 14130), False, 'from six.moves import range\n'), ((16329, 16342), 'numpy.prod', 'nm.prod', (['grid'], {}), '(grid)\n', (16336, 16342), True, 'import numpy as nm\n'), ((16365, 16398), 'sfepy.base.base.output', 'output', (["('repeating %s ...' % grid)"], {}), "('repeating %s ...' 
% grid)\n", (16371, 16398), False, 'from sfepy.base.base import output, assert_\n'), ((16959, 16976), 'sfepy.base.base.output', 'output', (['"""...done"""'], {}), "('...done')\n", (16965, 16976), False, 'from sfepy.base.base import output, assert_\n'), ((16992, 17017), 'numpy.tile', 'nm.tile', (['mat_ids', '(nrep,)'], {}), '(mat_ids, (nrep,))\n', (16999, 17017), True, 'import numpy as nm\n'), ((17033, 17124), 'sfepy.discrete.fem.mesh.Mesh.from_data', 'Mesh.from_data', (['"""tiled mesh"""', '(coors * scale)', 'ngrps', '[conn]', '[mat_ids]', '[mesh.descs[0]]'], {}), "('tiled mesh', coors * scale, ngrps, [conn], [mat_ids], [mesh\n .descs[0]])\n", (17047, 17124), False, 'from sfepy.discrete.fem.mesh import Mesh\n'), ((17536, 17568), 'os.path.join', 'os.path.join', (['data_dir', '"""meshes"""'], {}), "(data_dir, 'meshes')\n", (17548, 17568), False, 'import os\n'), ((19896, 19968), 're.match', 're.match', (['"""^\\\\s*([a-zA-Z]+)[:\\\\(]([^\\\\):]*)[:\\\\)](\\\\*)?\\\\s*$"""', 'mesh_name'], {}), "('^\\\\s*([a-zA-Z]+)[:\\\\(]([^\\\\):]*)[:\\\\)](\\\\*)?\\\\s*$', mesh_name)\n", (19904, 19968), False, 'import re\n'), ((20787, 20805), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (20803, 20805), False, 'import subprocess, shutil, tempfile\n'), ((20825, 20857), 'os.path.join', 'op.join', (['tmp_dir', '"""meshgen.poly"""'], {}), "(tmp_dir, 'meshgen.poly')\n", (20832, 20857), True, 'import os.path as op\n'), ((21331, 21361), 'pexpect.run', 'pexpect.run', (['cmd'], {'timeout': 'None'}), '(cmd, timeout=None)\n', (21342, 21361), False, 'import pexpect\n'), ((21379, 21404), 'os.path.splitext', 'op.splitext', (['polyfilename'], {}), '(polyfilename)\n', (21390, 21404), True, 'import os.path as op\n'), ((21549, 21571), 'shutil.rmtree', 'shutil.rmtree', (['tmp_dir'], {}), '(tmp_dir)\n', (21562, 21571), False, 'import subprocess, shutil, tempfile\n'), ((22157, 22189), 'numpy.zeros', 'nm.zeros', (['nddims'], {'dtype': 'nm.int32'}), '(nddims, dtype=nm.int32)\n', (22165, 22189), True, 'import numpy as nm\n'), ((23659, 23676), 'numpy.where', 'nm.where', (['nodemtx'], {}), '(nodemtx)\n', (23667, 23676), True, 'import numpy as nm\n'), ((23817, 23832), 'numpy.arange', 'nm.arange', (['nnod'], {}), '(nnod)\n', (23826, 23832), True, 'import numpy as nm\n'), ((981, 1016), 'numpy.zeros', 'nm.zeros', (['(n_el, 2)'], {'dtype': 'nm.int32'}), '((n_el, 2), dtype=nm.int32)\n', (989, 1016), True, 'import numpy as nm\n'), ((4959, 4993), 'numpy.array', 'nm.array', (['[0.5 * length, 0.0, 0.0]'], {}), '([0.5 * length, 0.0, 0.0])\n', (4967, 4993), True, 'import numpy as nm\n'), ((5524, 5557), 'numpy.zeros', 'nm.zeros', (['(nr,)'], {'dtype': 'nm.float64'}), '((nr,), dtype=nm.float64)\n', (5532, 5557), True, 'import numpy as nm\n'), ((5572, 5590), 'numpy.zeros_like', 'nm.zeros_like', (['ras'], {}), '(ras)\n', (5585, 5590), True, 'import numpy as nm\n'), ((5727, 5739), 'six.moves.range', 'range', (['(1)', 'nr'], {}), '(1, nr)\n', (5732, 5739), False, 'from six.moves import range\n'), ((5869, 5892), 'numpy.linspace', 'nm.linspace', (['a1', 'a2', 'nr'], {}), '(a1, a2, nr)\n', (5880, 5892), True, 'import numpy as nm\n'), ((5907, 5930), 'numpy.linspace', 'nm.linspace', (['b1', 'b2', 'nr'], {}), '(b1, b2, nr)\n', (5918, 5930), True, 'import numpy as nm\n'), ((11493, 11503), 'numpy.log', 'nm.log', (['nc'], {}), '(nc)\n', (11499, 11503), True, 'import numpy as nm\n'), ((13281, 13321), 'numpy.nonzero', 'nm.nonzero', (['(coors[:, idim] < bb[0] + eps)'], {}), '(coors[:, idim] < bb[0] + eps)\n', (13291, 13321), True, 'import numpy as 
nm\n'), ((13335, 13375), 'numpy.nonzero', 'nm.nonzero', (['(coors[:, idim] > bb[1] - eps)'], {}), '(coors[:, idim] > bb[1] - eps)\n', (13345, 13375), True, 'import numpy as nm\n'), ((14038, 14071), 'numpy.zeros', 'nm.zeros', (['(nnod,)'], {'dtype': 'nm.int32'}), '((nnod,), dtype=nm.int32)\n', (14046, 14071), True, 'import numpy as nm\n'), ((17954, 17999), 'os.path.join', 'os.path.join', (['mesh_dir', '"""sphere-%.2f-%.2f-%i"""'], {}), "(mesh_dir, 'sphere-%.2f-%.2f-%i')\n", (17966, 17999), False, 'import os\n'), ((18019, 18042), 'sfepy.base.base.assert_', 'assert_', (["(kind == 'cube')"], {}), "(kind == 'cube')\n", (18026, 18042), False, 'from sfepy.base.base import output, assert_\n'), ((18233, 18287), 'os.path.join', 'os.path.join', (['mesh_dir', '"""cube-%i_%.2f-%i_%.2f-%i_%.2f"""'], {}), "(mesh_dir, 'cube-%i_%.2f-%i_%.2f-%i_%.2f')\n", (18245, 18287), False, 'import os\n'), ((18313, 18325), 'sfepy.base.base.output', 'output', (['args'], {}), '(args)\n', (18319, 18325), False, 'from sfepy.base.base import output, assert_\n'), ((18398, 18422), 'os.path.exists', 'os.path.exists', (['filename'], {}), '(filename)\n', (18412, 18422), False, 'import os\n'), ((18451, 18485), 'os.path.exists', 'os.path.exists', (["(filename + '.mesh')"], {}), "(filename + '.mesh')\n", (18465, 18485), False, 'import os\n'), ((18525, 18558), 'os.path.exists', 'os.path.exists', (["(filename + '.vtk')"], {}), "(filename + '.vtk')\n", (18539, 18558), False, 'import os\n'), ((18654, 18675), 'sfepy.base.ioutils.ensure_path', 'ensure_path', (['filename'], {}), '(filename)\n', (18665, 18675), False, 'from sfepy.base.ioutils import ensure_path\n'), ((18685, 18717), 'sfepy.base.base.output', 'output', (['"""creating new cube mesh"""'], {}), "('creating new cube mesh')\n", (18691, 18717), False, 'from sfepy.base.base import output, assert_\n'), ((18726, 18803), 'sfepy.base.base.output', 'output', (["('(%i nodes in %.2f) x (%i nodes in %.2f) x (%i nodes in %.2f)' % args)"], {}), "('(%i nodes in %.2f) x (%i nodes in %.2f) x (%i nodes in %.2f)' % args)\n", (18732, 18803), False, 'from sfepy.base.base import output, assert_\n'), ((18827, 18861), 'sfepy.base.base.output', 'output', (["('to file %s...' % filename)"], {}), "('to file %s...' % filename)\n", (18833, 18861), False, 'from sfepy.base.base import output, assert_\n'), ((19027, 19044), 'sfepy.base.base.output', 'output', (['"""...done"""'], {}), "('...done')\n", (19033, 19044), False, 'from sfepy.base.base import output, assert_\n'), ((19146, 19167), 'sfepy.base.ioutils.ensure_path', 'ensure_path', (['filename'], {}), '(filename)\n', (19157, 19167), False, 'from sfepy.base.ioutils import ensure_path\n'), ((19177, 19254), 'sfepy.base.base.output', 'output', (["('creating new sphere mesh (%i nodes, r=%.2f) and gradation %d' % args)"], {}), "('creating new sphere mesh (%i nodes, r=%.2f) and gradation %d' % args)\n", (19183, 19254), False, 'from sfepy.base.base import output, assert_\n'), ((19278, 19312), 'sfepy.base.base.output', 'output', (["('to file %s...' % filename)"], {}), "('to file %s...' 
% filename)\n", (19284, 19312), False, 'from sfepy.base.base import output, assert_\n'), ((19396, 19414), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (19412, 19414), False, 'import subprocess, shutil, tempfile\n'), ((19433, 19473), 'os.path.join', 'os.path.join', (['tmp_dir', '"""sphere.geo.temp"""'], {}), "(tmp_dir, 'sphere.geo.temp')\n", (19445, 19473), False, 'import os\n'), ((19642, 19717), 'subprocess.call', 'subprocess.call', (["['gmsh', '-3', tmpfile, '-format', 'mesh', '-o', filename]"], {}), "(['gmsh', '-3', tmpfile, '-format', 'mesh', '-o', filename])\n", (19657, 19717), False, 'import subprocess, shutil, tempfile\n'), ((19751, 19773), 'shutil.rmtree', 'shutil.rmtree', (['tmp_dir'], {}), '(tmp_dir)\n', (19764, 19773), False, 'import subprocess, shutil, tempfile\n'), ((19782, 19799), 'sfepy.base.base.output', 'output', (['"""...done"""'], {}), "('...done')\n", (19788, 19799), False, 'from sfepy.base.base import output, assert_\n'), ((21441, 21474), 'sfepy.discrete.fem.mesh.Mesh.from_file', 'Mesh.from_file', (["(bname + '.1.node')"], {}), "(bname + '.1.node')\n", (21455, 21474), False, 'from sfepy.discrete.fem.mesh import Mesh\n'), ((21511, 21543), 'sfepy.discrete.fem.mesh.Mesh.from_file', 'Mesh.from_file', (["(bname + '.1.vtk')"], {}), "(bname + '.1.vtk')\n", (21525, 21543), False, 'from sfepy.discrete.fem.mesh import Mesh\n'), ((22115, 22137), 'numpy.array', 'nm.array', (['voxels.shape'], {}), '(voxels.shape)\n', (22123, 22137), True, 'import numpy as nm\n'), ((22272, 22288), 'numpy.where', 'nm.where', (['voxels'], {}), '(voxels)\n', (22280, 22288), True, 'import numpy as nm\n'), ((23765, 23796), 'numpy.ones', 'nm.ones', (['nddims'], {'dtype': 'nm.int32'}), '(nddims, dtype=nm.int32)\n', (23772, 23796), True, 'import numpy as nm\n'), ((24539, 24555), 'sfepy.mesh.mesh_tools.elems_q2t', 'elems_q2t', (['elems'], {}), '(elems)\n', (24548, 24555), False, 'from sfepy.mesh.mesh_tools import elems_q2t\n'), ((24707, 24739), 'numpy.ones', 'nm.ones', (['(nnod,)'], {'dtype': 'nm.int32'}), '((nnod,), dtype=nm.int32)\n', (24714, 24739), True, 'import numpy as nm\n'), ((24973, 24998), 'numpy.array', 'nm.array', (['(1.0, 2.0, 3.0)'], {}), '((1.0, 2.0, 3.0))\n', (24981, 24998), True, 'import numpy as nm\n'), ((25026, 25048), 'numpy.array', 'nm.array', (['(10, 10, 10)'], {}), '((10, 10, 10))\n', (25034, 25048), True, 'import numpy as nm\n'), ((25048, 25073), 'numpy.array', 'nm.array', (['(1.0, 2.0, 3.0)'], {}), '((1.0, 2.0, 3.0))\n', (25056, 25073), True, 'import numpy as nm\n'), ((25178, 25211), 'numpy.array', 'nm.array', (['(1.0, 1.0, 2.0, 2.0, 3)'], {}), '((1.0, 1.0, 2.0, 2.0, 3))\n', (25186, 25211), True, 'import numpy as nm\n'), ((25242, 25264), 'numpy.array', 'nm.array', (['(10, 10, 10)'], {}), '((10, 10, 10))\n', (25250, 25264), True, 'import numpy as nm\n'), ((25264, 25289), 'numpy.array', 'nm.array', (['(1.0, 2.0, 3.0)'], {}), '((1.0, 2.0, 3.0))\n', (25272, 25289), True, 'import numpy as nm\n'), ((25458, 25491), 'numpy.array', 'nm.array', (['(1.0, 1.0, 2.0, 2.0, 3)'], {}), '((1.0, 1.0, 2.0, 2.0, 3))\n', (25466, 25491), True, 'import numpy as nm\n'), ((25522, 25544), 'numpy.array', 'nm.array', (['(10, 10, 10)'], {}), '((10, 10, 10))\n', (25530, 25544), True, 'import numpy as nm\n'), ((25544, 25569), 'numpy.array', 'nm.array', (['(1.0, 2.0, 3.0)'], {}), '((1.0, 2.0, 3.0))\n', (25552, 25569), True, 'import numpy as nm\n'), ((25737, 25770), 'numpy.array', 'nm.array', (['(1.0, 1.0, 2.0, 2.0, 3)'], {}), '((1.0, 1.0, 2.0, 2.0, 3))\n', (25745, 25770), True, 'import numpy as nm\n'), 
((25801, 25823), 'numpy.array', 'nm.array', (['(10, 10, 10)'], {}), '((10, 10, 10))\n', (25809, 25823), True, 'import numpy as nm\n'), ((25823, 25848), 'numpy.array', 'nm.array', (['(1.0, 2.0, 3.0)'], {}), '((1.0, 2.0, 3.0))\n', (25831, 25848), True, 'import numpy as nm\n'), ((26017, 26050), 'numpy.array', 'nm.array', (['(0.0, 0.0, 2.0, 2.0, 3)'], {}), '((0.0, 0.0, 2.0, 2.0, 3))\n', (26025, 26050), True, 'import numpy as nm\n'), ((26081, 26103), 'numpy.array', 'nm.array', (['(10, 10, 10)'], {}), '((10, 10, 10))\n', (26089, 26103), True, 'import numpy as nm\n'), ((26103, 26128), 'numpy.array', 'nm.array', (['(1.0, 2.0, 3.0)'], {}), '((1.0, 2.0, 3.0))\n', (26111, 26128), True, 'import numpy as nm\n'), ((26298, 26331), 'numpy.array', 'nm.array', (['(0.0, 0.0, 1.0, 2.0, 3)'], {}), '((0.0, 0.0, 1.0, 2.0, 3))\n', (26306, 26331), True, 'import numpy as nm\n'), ((26362, 26384), 'numpy.array', 'nm.array', (['(10, 10, 10)'], {}), '((10, 10, 10))\n', (26370, 26384), True, 'import numpy as nm\n'), ((26384, 26409), 'numpy.array', 'nm.array', (['(1.0, 2.0, 3.0)'], {}), '((1.0, 2.0, 3.0))\n', (26392, 26409), True, 'import numpy as nm\n'), ((26578, 26611), 'numpy.array', 'nm.array', (['(0.0, 0.0, 1.0, 2.0, 3)'], {}), '((0.0, 0.0, 1.0, 2.0, 3))\n', (26586, 26611), True, 'import numpy as nm\n'), ((26642, 26664), 'numpy.array', 'nm.array', (['(10, 10, 10)'], {}), '((10, 10, 10))\n', (26650, 26664), True, 'import numpy as nm\n'), ((26664, 26689), 'numpy.array', 'nm.array', (['(1.0, 2.0, 3.0)'], {}), '((1.0, 2.0, 3.0))\n', (26672, 26689), True, 'import numpy as nm\n'), ((26876, 26909), 'numpy.array', 'nm.array', (['(0.5, 0.5, 1.0, 2.0, 3)'], {}), '((0.5, 0.5, 1.0, 2.0, 3))\n', (26884, 26909), True, 'import numpy as nm\n'), ((26940, 26962), 'numpy.array', 'nm.array', (['(10, 10, 10)'], {}), '((10, 10, 10))\n', (26948, 26962), True, 'import numpy as nm\n'), ((26962, 26987), 'numpy.array', 'nm.array', (['(1.0, 2.0, 3.0)'], {}), '((1.0, 2.0, 3.0))\n', (26970, 26987), True, 'import numpy as nm\n'), ((1134, 1169), 'numpy.zeros', 'nm.zeros', (['(n_el, 4)'], {'dtype': 'nm.int32'}), '((n_el, 4), dtype=nm.int32)\n', (1142, 1169), True, 'import numpy as nm\n'), ((1377, 1412), 'numpy.zeros', 'nm.zeros', (['(n_el, 8)'], {'dtype': 'nm.int32'}), '((n_el, 8), dtype=nm.int32)\n', (1385, 1412), True, 'import numpy as nm\n'), ((5763, 5796), 'numpy.sqrt', 'nm.sqrt', (['(advol + ras[ii - 1] ** 2)'], {}), '(advol + ras[ii - 1] ** 2)\n', (5770, 5796), True, 'import numpy as nm\n'), ((5815, 5848), 'numpy.sqrt', 'nm.sqrt', (['(bdvol + rbs[ii - 1] ** 2)'], {}), '(bdvol + rbs[ii - 1] ** 2)\n', (5822, 5848), True, 'import numpy as nm\n'), ((14346, 14379), 'numpy.ones', 'nm.ones', (['(nnod0,)'], {'dtype': 'nm.int32'}), '((nnod0,), dtype=nm.int32)\n', (14353, 14379), True, 'import numpy as nm\n'), ((14506, 14520), 'numpy.where', 'nm.where', (['mask'], {}), '(mask)\n', (14514, 14520), True, 'import numpy as nm\n'), ((15154, 15167), 'numpy.max', 'nm.max', (['ndmap'], {}), '(ndmap)\n', (15160, 15167), True, 'import numpy as nm\n'), ((15187, 15219), 'numpy.where', 'nm.where', (['(ndmap_out > max_nd_ref)'], {}), '(ndmap_out > max_nd_ref)\n', (15195, 15219), True, 'import numpy as nm\n'), ((19331, 19376), 'os.path.join', 'os.path.join', (['defdir', '"""quantum"""', '"""sphere.geo"""'], {}), "(defdir, 'quantum', 'sphere.geo')\n", (19343, 19376), False, 'import os\n'), ((22057, 22071), 'numpy.array', 'nm.array', (['dims'], {}), '(dims)\n', (22065, 22071), True, 'import numpy as nm\n'), ((22777, 22793), 'numpy.where', 'nm.where', (['voxels'], {}), 
'(voxels)\n', (22785, 22793), True, 'import numpy as nm\n'), ((24768, 24795), 'numpy.ascontiguousarray', 'nm.ascontiguousarray', (['elems'], {}), '(elems)\n', (24788, 24795), True, 'import numpy as nm\n'), ((24825, 24856), 'numpy.ones', 'nm.ones', (['(nel,)'], {'dtype': 'nm.int32'}), '((nel,), dtype=nm.int32)\n', (24832, 24856), True, 'import numpy as nm\n'), ((8822, 8831), 'numpy.eye', 'nm.eye', (['(3)'], {}), '(3)\n', (8828, 8831), True, 'import numpy as nm\n'), ((14426, 14441), 'numpy.cumsum', 'nm.cumsum', (['mask'], {}), '(mask)\n', (14435, 14441), True, 'import numpy as nm\n'), ((14584, 14600), 'numpy.arange', 'nm.arange', (['nnod0'], {}), '(nnod0)\n', (14593, 14600), True, 'import numpy as nm\n'), ((23689, 23704), 'numpy.array', 'nm.array', (['ndidx'], {}), '(ndidx)\n', (23697, 23704), True, 'import numpy as nm\n'), ((23891, 23985), 'numpy.array', 'nm.array', (['[nodeid[ix, iy], nodeid[ix + 1, iy], nodeid[ix + 1, iy + 1], nodeid[ix, iy + 1]\n ]'], {}), '([nodeid[ix, iy], nodeid[ix + 1, iy], nodeid[ix + 1, iy + 1],\n nodeid[ix, iy + 1]])\n', (23899, 23985), True, 'import numpy as nm\n'), ((12807, 12833), 'numpy.maximum', 'nm.maximum', (['(b_shape - 2)', '(0)'], {}), '(b_shape - 2, 0)\n', (12817, 12833), True, 'import numpy as nm\n'), ((12841, 12852), 'numpy.prod', 'nm.prod', (['xs'], {}), '(xs)\n', (12848, 12852), True, 'import numpy as nm\n'), ((24104, 24330), 'numpy.array', 'nm.array', (['[nodeid[ix, iy, iz], nodeid[ix + 1, iy, iz], nodeid[ix + 1, iy + 1, iz],\n nodeid[ix, iy + 1, iz], nodeid[ix, iy, iz + 1], nodeid[ix + 1, iy, iz +\n 1], nodeid[ix + 1, iy + 1, iz + 1], nodeid[ix, iy + 1, iz + 1]]'], {}), '([nodeid[ix, iy, iz], nodeid[ix + 1, iy, iz], nodeid[ix + 1, iy + 1,\n iz], nodeid[ix, iy + 1, iz], nodeid[ix, iy, iz + 1], nodeid[ix + 1, iy,\n iz + 1], nodeid[ix + 1, iy + 1, iz + 1], nodeid[ix, iy + 1, iz + 1]])\n', (24112, 24330), True, 'import numpy as nm\n'), ((6259, 6269), 'numpy.cos', 'nm.cos', (['fi'], {}), '(fi)\n', (6265, 6269), True, 'import numpy as nm\n'), ((6275, 6285), 'numpy.sin', 'nm.sin', (['fi'], {}), '(fi)\n', (6281, 6285), True, 'import numpy as nm\n')]
|
import megengine
import megengine.module as M
import megengine.functional as F
def default_init_weights(module, scale=1, nonlinearity="relu"):
"""
nonlinearity: leaky_relu
"""
for m in module.modules():
if isinstance(m, M.Conv2d):
M.init.msra_normal_(m.weight, mode="fan_in", nonlinearity=nonlinearity)
m.weight *= scale
if m.bias is not None:
M.init.zeros_(m.bias)
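

# Hedged usage sketch (not part of the original record; the layer sizes are
# illustrative assumptions): apply the initializer to a small conv block,
# using scale=0.1 as a typical residual-branch rescaling.
if __name__ == "__main__":
    block = M.Sequential(
        M.Conv2d(3, 16, 3, padding=1),
        M.Conv2d(16, 3, 3, padding=1),
    )
    default_init_weights(block, scale=0.1, nonlinearity="relu")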
|
[
"megengine.module.init.zeros_",
"megengine.module.init.msra_normal_"
] |
[((272, 343), 'megengine.module.init.msra_normal_', 'M.init.msra_normal_', (['m.weight'], {'mode': '"""fan_in"""', 'nonlinearity': 'nonlinearity'}), "(m.weight, mode='fan_in', nonlinearity=nonlinearity)\n", (291, 343), True, 'import megengine.module as M\n'), ((425, 446), 'megengine.module.init.zeros_', 'M.init.zeros_', (['m.bias'], {}), '(m.bias)\n', (438, 446), True, 'import megengine.module as M\n')]
|
import os
import megengine as mge
import megengine.functional as F
import argparse
import numpy as np
import cv2
from nets import Model
def load_model(model_path):
print("Loading model:", os.path.abspath(model_path))
pretrained_dict = mge.load(model_path)
model = Model(max_disp=256, mixed_precision=False, test_mode=True)
model.load_state_dict(pretrained_dict["state_dict"], strict=True)
model.eval()
return model
def inference(left, right, model, n_iter=20):
print("Model Forwarding...")
imgL = left.transpose(2, 0, 1)
imgR = right.transpose(2, 0, 1)
imgL = np.ascontiguousarray(imgL[None, :, :, :])
imgR = np.ascontiguousarray(imgR[None, :, :, :])
imgL = mge.tensor(imgL).astype("float32")
imgR = mge.tensor(imgR).astype("float32")
imgL_dw2 = F.nn.interpolate(
imgL,
size=(imgL.shape[2] // 2, imgL.shape[3] // 2),
mode="bilinear",
align_corners=True,
)
imgR_dw2 = F.nn.interpolate(
imgR,
size=(imgL.shape[2] // 2, imgL.shape[3] // 2),
mode="bilinear",
align_corners=True,
)
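    # coarse-to-fine inference: a first half-resolution pass produces a flow
    # estimate that initializes the full-resolution pass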
pred_flow_dw2 = model(imgL_dw2, imgR_dw2, iters=n_iter, flow_init=None)
pred_flow = model(imgL, imgR, iters=n_iter, flow_init=pred_flow_dw2)
pred_disp = F.squeeze(pred_flow[:, 0, :, :]).numpy()
return pred_disp
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="A demo to run CREStereo.")
parser.add_argument(
"--model_path",
default="crestereo_eth3d.mge",
help="The path of pre-trained MegEngine model.",
)
parser.add_argument(
"--left", default="img/test/left.png", help="The path of left image."
)
parser.add_argument(
"--right", default="img/test/right.png", help="The path of right image."
)
parser.add_argument(
"--size",
default="1024x1536",
help="The image size for inference. Te default setting is 1024x1536. \
To evaluate on ETH3D Benchmark, use 768x1024 instead.",
)
parser.add_argument(
"--output", default="disparity.png", help="The path of output disparity."
)
args = parser.parse_args()
    assert os.path.exists(args.model_path), "The model path does not exist."
    assert os.path.exists(args.left), "The left image path does not exist."
    assert os.path.exists(args.right), "The right image path does not exist."
model_func = load_model(args.model_path)
left = cv2.imread(args.left)
right = cv2.imread(args.right)
assert left.shape == right.shape, "The input images have inconsistent shapes."
in_h, in_w = left.shape[:2]
print("Images resized:", args.size)
eval_h, eval_w = [int(e) for e in args.size.split("x")]
left_img = cv2.resize(left, (eval_w, eval_h), interpolation=cv2.INTER_LINEAR)
right_img = cv2.resize(right, (eval_w, eval_h), interpolation=cv2.INTER_LINEAR)
pred = inference(left_img, right_img, model_func, n_iter=20)
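    # disparity is measured in pixels, so rescale it by the width ratio when
    # resizing the prediction back to the original resolution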
t = float(in_w) / float(eval_w)
disp = cv2.resize(pred, (in_w, in_h), interpolation=cv2.INTER_LINEAR) * t
disp_vis = (disp - disp.min()) / (disp.max() - disp.min()) * 255.0
disp_vis = disp_vis.astype("uint8")
disp_vis = cv2.applyColorMap(disp_vis, cv2.COLORMAP_INFERNO)
parent_path = os.path.abspath(os.path.join(args.output, os.pardir))
if not os.path.exists(parent_path):
os.makedirs(parent_path)
cv2.imwrite(args.output, disp_vis)
print("Done! Result path:", os.path.abspath(args.output))
|
[
"megengine.functional.nn.interpolate",
"megengine.functional.squeeze",
"megengine.tensor",
"megengine.load"
] |
[((247, 267), 'megengine.load', 'mge.load', (['model_path'], {}), '(model_path)\n', (255, 267), True, 'import megengine as mge\n'), ((280, 338), 'nets.Model', 'Model', ([], {'max_disp': '(256)', 'mixed_precision': '(False)', 'test_mode': '(True)'}), '(max_disp=256, mixed_precision=False, test_mode=True)\n', (285, 338), False, 'from nets import Model\n'), ((608, 649), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['imgL[None, :, :, :]'], {}), '(imgL[None, :, :, :])\n', (628, 649), True, 'import numpy as np\n'), ((661, 702), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['imgR[None, :, :, :]'], {}), '(imgR[None, :, :, :])\n', (681, 702), True, 'import numpy as np\n'), ((812, 923), 'megengine.functional.nn.interpolate', 'F.nn.interpolate', (['imgL'], {'size': '(imgL.shape[2] // 2, imgL.shape[3] // 2)', 'mode': '"""bilinear"""', 'align_corners': '(True)'}), "(imgL, size=(imgL.shape[2] // 2, imgL.shape[3] // 2), mode=\n 'bilinear', align_corners=True)\n", (828, 923), True, 'import megengine.functional as F\n'), ((973, 1084), 'megengine.functional.nn.interpolate', 'F.nn.interpolate', (['imgR'], {'size': '(imgL.shape[2] // 2, imgL.shape[3] // 2)', 'mode': '"""bilinear"""', 'align_corners': '(True)'}), "(imgR, size=(imgL.shape[2] // 2, imgL.shape[3] // 2), mode=\n 'bilinear', align_corners=True)\n", (989, 1084), True, 'import megengine.functional as F\n'), ((1390, 1453), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""A demo to run CREStereo."""'}), "(description='A demo to run CREStereo.')\n", (1413, 1453), False, 'import argparse\n'), ((2219, 2250), 'os.path.exists', 'os.path.exists', (['args.model_path'], {}), '(args.model_path)\n', (2233, 2250), False, 'import os\n'), ((2294, 2319), 'os.path.exists', 'os.path.exists', (['args.left'], {}), '(args.left)\n', (2308, 2319), False, 'import os\n'), ((2368, 2394), 'os.path.exists', 'os.path.exists', (['args.right'], {}), '(args.right)\n', (2382, 2394), False, 'import os\n'), ((2490, 2511), 'cv2.imread', 'cv2.imread', (['args.left'], {}), '(args.left)\n', (2500, 2511), False, 'import cv2\n'), ((2524, 2546), 'cv2.imread', 'cv2.imread', (['args.right'], {}), '(args.right)\n', (2534, 2546), False, 'import cv2\n'), ((2780, 2846), 'cv2.resize', 'cv2.resize', (['left', '(eval_w, eval_h)'], {'interpolation': 'cv2.INTER_LINEAR'}), '(left, (eval_w, eval_h), interpolation=cv2.INTER_LINEAR)\n', (2790, 2846), False, 'import cv2\n'), ((2863, 2930), 'cv2.resize', 'cv2.resize', (['right', '(eval_w, eval_h)'], {'interpolation': 'cv2.INTER_LINEAR'}), '(right, (eval_w, eval_h), interpolation=cv2.INTER_LINEAR)\n', (2873, 2930), False, 'import cv2\n'), ((3239, 3288), 'cv2.applyColorMap', 'cv2.applyColorMap', (['disp_vis', 'cv2.COLORMAP_INFERNO'], {}), '(disp_vis, cv2.COLORMAP_INFERNO)\n', (3256, 3288), False, 'import cv2\n'), ((3439, 3473), 'cv2.imwrite', 'cv2.imwrite', (['args.output', 'disp_vis'], {}), '(args.output, disp_vis)\n', (3450, 3473), False, 'import cv2\n'), ((196, 223), 'os.path.abspath', 'os.path.abspath', (['model_path'], {}), '(model_path)\n', (211, 223), False, 'import os\n'), ((3045, 3107), 'cv2.resize', 'cv2.resize', (['pred', '(in_w, in_h)'], {'interpolation': 'cv2.INTER_LINEAR'}), '(pred, (in_w, in_h), interpolation=cv2.INTER_LINEAR)\n', (3055, 3107), False, 'import cv2\n'), ((3324, 3360), 'os.path.join', 'os.path.join', (['args.output', 'os.pardir'], {}), '(args.output, os.pardir)\n', (3336, 3360), False, 'import os\n'), ((3373, 3400), 'os.path.exists', 'os.path.exists', (['parent_path'], {}), 
'(parent_path)\n', (3387, 3400), False, 'import os\n'), ((3410, 3434), 'os.makedirs', 'os.makedirs', (['parent_path'], {}), '(parent_path)\n', (3421, 3434), False, 'import os\n'), ((3506, 3534), 'os.path.abspath', 'os.path.abspath', (['args.output'], {}), '(args.output)\n', (3521, 3534), False, 'import os\n'), ((715, 731), 'megengine.tensor', 'mge.tensor', (['imgL'], {}), '(imgL)\n', (725, 731), True, 'import megengine as mge\n'), ((761, 777), 'megengine.tensor', 'mge.tensor', (['imgR'], {}), '(imgR)\n', (771, 777), True, 'import megengine as mge\n'), ((1285, 1317), 'megengine.functional.squeeze', 'F.squeeze', (['pred_flow[:, 0, :, :]'], {}), '(pred_flow[:, 0, :, :])\n', (1294, 1317), True, 'import megengine.functional as F\n')]
|
# This example implements homogenization of piezoeletric porous media.
# The mathematical model and numerical results are described in:
#
# <NAME>., <NAME>.
# Homogenization of the fluid-saturated piezoelectric porous media.
# International Journal of Solids and Structures
# Volume 147, 15 August 2018, Pages 110-125
# https://doi.org/10.1016/j.ijsolstr.2018.05.017
#
# Run simulation:
#
# ./simple.py example_poropiezo-1/poropiezo_macro_dfc.py
#
# The results are stored in `example_poropiezo-1/results` directory.
#
import sys
import numpy as nm
import os.path as osp
from sfepy import data_dir
from sfepy.base.base import Struct
from sfepy.homogenization.micmac import get_homog_coefs_linear
from sfepy.homogenization.recovery import recover_micro_hook_eps
data_dir = 'example_poropiezo-1'
def set_grad(ts, coors, mode=None, problem=None, **kwargs):
if mode == 'qp':
out = problem.data.reshape((coors.shape[0], 1, 1))
return {'cs': out}
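
# set_grad() exposes the per-quadrature-point values stored in problem.data
# as the material constant 'm.cs' consumed by the projection problem below.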
# projection of values from integration points into mesh vertices
def linear_projection(pb, cval):
from sfepy.discrete import (FieldVariable, Material, Integral,
Equation, Equations, Problem)
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.terms import Term
from sfepy.solvers.ls import ScipyDirect
from sfepy.solvers.nls import Newton
from sfepy.base.base import IndexedStruct
mesh = Mesh.from_file(pb.conf.filename_mesh)
domain = FEDomain('domain', mesh)
omega = domain.create_region('Omega', 'all')
field = Field.from_args('scf', nm.float64, 'scalar', omega,
approx_order=1)
g = FieldVariable('g', 'unknown', field)
f = FieldVariable('f', 'test', field, primary_var_name='g')
integral = Integral('i', order=2)
m = Material('m', function=set_grad)
t1 = Term.new('dw_volume_dot(f, g)', integral, omega, f=f, g=g)
t2 = Term.new('dw_volume_lvf(m.cs, f)',
integral, omega, m=m, f=f)
eq = Equation('balance', t1 - t2)
eqs = Equations([eq])
ls = ScipyDirect({})
nls_status = IndexedStruct()
nls = Newton({'eps_a': 1e-15}, lin_solver=ls, status=nls_status)
pb = Problem('elasticity', equations=eqs)
pb.set_solver(nls)
out = nm.empty((g.n_dof, cval.shape[2]), dtype=nm.float64)
for ii in range(cval.shape[2]):
pb.data = nm.ascontiguousarray(cval[:, :, ii, :])
pb.time_update()
state = pb.solve()
out[:, ii] = state.get_parts()['g']
return out
# recover microscopic fields in a given region, see Section 6.2
def recover_micro(pb, state):
rreg = pb.domain.regions['Recovery']
state_dict = state.get_parts()
strain_qp = pb.evaluate('ev_cauchy_strain.i2.Omega(u)', mode='qp')
press_qp = pb.evaluate('ev_volume_integrate.i2.Omega(p)', mode='qp')
pressg_qp = pb.evaluate('ev_grad.i2.Omega(p)', mode='qp')
dim = rreg.dim
displ = state_dict['u']
nodal_data = {
'u': displ.reshape((displ.shape[0] // dim, dim)),
'press': linear_projection(pb, press_qp),
'strain': linear_projection(pb, strain_qp),
'dp': linear_projection(pb, pressg_qp),
}
const_data = {
'phi': pb.conf.phi,
}
pvar = pb.create_variables(['svar'])
def_args = {
'grid0': pb.conf.grid0,
'filename_mesh': pb.conf.filename_mesh_micro,
}
recover_micro_hook_eps(pb.conf.poroela_micro_file, rreg,
pvar['svar'], nodal_data, const_data, pb.conf.eps0,
recovery_file_tag='_%d' % pb.conf.grid0,
define_args=def_args)
# evaluate macroscopic strain and export to output
def post_process(out, pb, state, extend=False):
strain = pb.evaluate('ev_cauchy_strain.i2.Omega(u)', mode='el_avg')
out['e'] = Struct(name='output_data',
mode='cell',
dofs=None,
var_name='u',
data=strain)
if pb.conf.options.get('recover_micro', False):
recover_micro(pb, state)
return out
def coefs2qp(coefs, nqp):
out = {}
for k, v in coefs.items():
if type(v) not in [nm.ndarray, float]:
continue
        if type(v) is nm.ndarray:
            if len(v.shape) >= 3:
                # already has a quadrature-point axis, keep as is
                out[k] = v
                continue
        # tile constant coefficients over all quadrature points
        out[k] = nm.tile(v, (nqp, 1, 1))
return out
# get homogenized coefficients, recalculate them if necessary
def get_homog(coors, mode, pb, micro_filename, **kwargs):
if not (mode == 'qp'):
return
nqp = coors.shape[0]
coefs_filename = 'coefs_poropiezo_%d' % pb.conf.grid0
coefs_filename = osp.join(pb.conf.options.get('output_dir', '.'),
coefs_filename) + '.h5'
def_args = {
'grid0': pb.conf.grid0,
'filename_mesh': pb.conf.filename_mesh_micro,
}
coefs = get_homog_coefs_linear(0, 0, None,
micro_filename=micro_filename,
coefs_filename=coefs_filename,
define_args=def_args)
for k in coefs.keys():
v = coefs[k]
if type(v) is nm.ndarray:
if len(v.shape) == 0:
coefs[k] = v.reshape((1, 1))
elif len(v.shape) == 1:
coefs[k] = v[:, nm.newaxis]
elif isinstance(v, float):
coefs[k] = nm.array([[v]])
out = coefs2qp(coefs, nqp)
phi = pb.conf.phi
Hf, Zf = 0, 0
for ii in range(2):
Hf += out['H%d' % ii] * phi[ii]
Zf += out['Z%d' % ii] * phi[ii]
out['Hf'] = Hf
out['Zf'] = Zf
return out
def define(grid0=16, bcmode='example'):
eps0 = 0.01 / grid0
phi = nm.array([-1, 1]) * 1e3
poroela_micro_file = osp.join(data_dir, 'poropiezo_micro_dfc.py')
fields = {
'displacement': ('real', 'vector', 'Omega', 1),
'pressure': ('real', 'scalar', 'Omega', 0),
'sfield': ('real', 'scalar', 'Omega', 1),
}
variables = {
'u': ('unknown field', 'displacement'),
'v': ('test field', 'displacement', 'u'),
'p': ('unknown field', 'pressure'),
'q': ('test field', 'pressure', 'p'),
'U': ('parameter field', 'displacement', 'u'),
'P': ('parameter field', 'pressure', 'p'),
'svar': ('parameter field', 'sfield', 'p'),
}
functions = {
'get_homog': (lambda ts, coors, mode=None, problem=None, **kwargs:\
get_homog(coors, mode, problem, poroela_micro_file, **kwargs),),
}
materials = {
'hom': 'get_homog',
}
integrals = {
'i2': 2,
}
solvers = {
'ls': ('ls.scipy_direct', {}),
'newton': ('nls.newton',
{'i_max': 10,
'eps_a': 1e-1,
'eps_r': 1e-3,
'problem': 'nonlinear',
})
}
options = {
'output_dir': osp.join(data_dir, 'results'),
'nls': 'newton',
'absolute_mesh_path': True,
'recover_micro': True,
'post_process_hook': 'post_process',
}
regions = {
'Omega': 'all',
'Left': ('vertices in (x < 0.00001)', 'facet'),
'Right': ('vertices in (x > 0.00999)', 'facet'),
'Top': ('vertices in (z > 0.00999)', 'facet'),
'Bottom': ('vertices in (z < 0.00001)', 'facet'),
'Far': ('vertices in (y > 0.00249)', 'facet'),
'Near': ('vertices in (y < 0.00001)', 'facet'),
'Recovery': ('vertices in (x > 0.008) & (z > 0.008) & (y < 0.0013)', 'cell'),
}
filename_mesh = osp.join(data_dir, 'piezo_mesh_macro.vtk')
filename_mesh_micro = osp.join(data_dir, 'piezo_mesh_micro_dfc.vtk')
ebcs = {
'fixed_u_left': ('Left', {'u.all': 0.0}),
}
epbcs = {}
# equations (47)
equations = {
'balance_of_forces': """
dw_lin_elastic.i2.Omega(hom.A, v, u)
- dw_biot.i2.Omega(hom.B, v, p)
=
- dw_lin_prestress.i2.Omega(hom.Hf, v)""",
'mass_conservation': """
- dw_biot.i2.Omega(hom.B, u, q)
- dw_volume_dot.i2.Omega(hom.M, q, p)
=
- dw_volume_integrate.i2.Omega(hom.Zf, q)"""
}
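
    # Written with bilinear forms induced by the homogenized coefficients,
    # the system above corresponds schematically to Eq. (47) of the paper:
    #   a_A(u, v) - b_B(p, v) = -h_Hf(v)      (balance of forces)
    #  -b_B(u, q) - m_M(p, q) = -z_Zf(q)      (mass conservation)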
return locals()
|
[
"sfepy.base.base.IndexedStruct",
"sfepy.discrete.Equation",
"sfepy.discrete.Material",
"sfepy.base.base.Struct",
"sfepy.discrete.fem.Mesh.from_file",
"sfepy.solvers.ls.ScipyDirect",
"sfepy.homogenization.micmac.get_homog_coefs_linear",
"sfepy.homogenization.recovery.recover_micro_hook_eps",
"sfepy.discrete.fem.Field.from_args",
"sfepy.solvers.nls.Newton",
"sfepy.terms.Term.new",
"sfepy.discrete.Equations",
"sfepy.discrete.Integral",
"sfepy.discrete.fem.FEDomain",
"sfepy.discrete.Problem",
"sfepy.discrete.FieldVariable"
] |
[((1432, 1469), 'sfepy.discrete.fem.Mesh.from_file', 'Mesh.from_file', (['pb.conf.filename_mesh'], {}), '(pb.conf.filename_mesh)\n', (1446, 1469), False, 'from sfepy.discrete.fem import Mesh, FEDomain, Field\n'), ((1483, 1507), 'sfepy.discrete.fem.FEDomain', 'FEDomain', (['"""domain"""', 'mesh'], {}), "('domain', mesh)\n", (1491, 1507), False, 'from sfepy.discrete.fem import Mesh, FEDomain, Field\n'), ((1569, 1636), 'sfepy.discrete.fem.Field.from_args', 'Field.from_args', (['"""scf"""', 'nm.float64', '"""scalar"""', 'omega'], {'approx_order': '(1)'}), "('scf', nm.float64, 'scalar', omega, approx_order=1)\n", (1584, 1636), False, 'from sfepy.discrete.fem import Mesh, FEDomain, Field\n'), ((1674, 1710), 'sfepy.discrete.FieldVariable', 'FieldVariable', (['"""g"""', '"""unknown"""', 'field'], {}), "('g', 'unknown', field)\n", (1687, 1710), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Equation, Equations, Problem\n'), ((1719, 1774), 'sfepy.discrete.FieldVariable', 'FieldVariable', (['"""f"""', '"""test"""', 'field'], {'primary_var_name': '"""g"""'}), "('f', 'test', field, primary_var_name='g')\n", (1732, 1774), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Equation, Equations, Problem\n'), ((1791, 1813), 'sfepy.discrete.Integral', 'Integral', (['"""i"""'], {'order': '(2)'}), "('i', order=2)\n", (1799, 1813), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Equation, Equations, Problem\n'), ((1822, 1854), 'sfepy.discrete.Material', 'Material', (['"""m"""'], {'function': 'set_grad'}), "('m', function=set_grad)\n", (1830, 1854), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Equation, Equations, Problem\n'), ((1865, 1923), 'sfepy.terms.Term.new', 'Term.new', (['"""dw_volume_dot(f, g)"""', 'integral', 'omega'], {'f': 'f', 'g': 'g'}), "('dw_volume_dot(f, g)', integral, omega, f=f, g=g)\n", (1873, 1923), False, 'from sfepy.terms import Term\n'), ((1933, 1994), 'sfepy.terms.Term.new', 'Term.new', (['"""dw_volume_lvf(m.cs, f)"""', 'integral', 'omega'], {'m': 'm', 'f': 'f'}), "('dw_volume_lvf(m.cs, f)', integral, omega, m=m, f=f)\n", (1941, 1994), False, 'from sfepy.terms import Term\n'), ((2022, 2050), 'sfepy.discrete.Equation', 'Equation', (['"""balance"""', '(t1 - t2)'], {}), "('balance', t1 - t2)\n", (2030, 2050), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Equation, Equations, Problem\n'), ((2061, 2076), 'sfepy.discrete.Equations', 'Equations', (['[eq]'], {}), '([eq])\n', (2070, 2076), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Equation, Equations, Problem\n'), ((2086, 2101), 'sfepy.solvers.ls.ScipyDirect', 'ScipyDirect', (['{}'], {}), '({})\n', (2097, 2101), False, 'from sfepy.solvers.ls import ScipyDirect\n'), ((2120, 2135), 'sfepy.base.base.IndexedStruct', 'IndexedStruct', ([], {}), '()\n', (2133, 2135), False, 'from sfepy.base.base import IndexedStruct\n'), ((2146, 2204), 'sfepy.solvers.nls.Newton', 'Newton', (["{'eps_a': 1e-15}"], {'lin_solver': 'ls', 'status': 'nls_status'}), "({'eps_a': 1e-15}, lin_solver=ls, status=nls_status)\n", (2152, 2204), False, 'from sfepy.solvers.nls import Newton\n'), ((2214, 2250), 'sfepy.discrete.Problem', 'Problem', (['"""elasticity"""'], {'equations': 'eqs'}), "('elasticity', equations=eqs)\n", (2221, 2250), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Equation, Equations, Problem\n'), ((2285, 2337), 'numpy.empty', 'nm.empty', (['(g.n_dof, cval.shape[2])'], {'dtype': 'nm.float64'}), 
'((g.n_dof, cval.shape[2]), dtype=nm.float64)\n', (2293, 2337), True, 'import numpy as nm\n'), ((3414, 3594), 'sfepy.homogenization.recovery.recover_micro_hook_eps', 'recover_micro_hook_eps', (['pb.conf.poroela_micro_file', 'rreg', "pvar['svar']", 'nodal_data', 'const_data', 'pb.conf.eps0'], {'recovery_file_tag': "('_%d' % pb.conf.grid0)", 'define_args': 'def_args'}), "(pb.conf.poroela_micro_file, rreg, pvar['svar'],\n nodal_data, const_data, pb.conf.eps0, recovery_file_tag='_%d' % pb.conf\n .grid0, define_args=def_args)\n", (3436, 3594), False, 'from sfepy.homogenization.recovery import recover_micro_hook_eps\n'), ((3856, 3933), 'sfepy.base.base.Struct', 'Struct', ([], {'name': '"""output_data"""', 'mode': '"""cell"""', 'dofs': 'None', 'var_name': '"""u"""', 'data': 'strain'}), "(name='output_data', mode='cell', dofs=None, var_name='u', data=strain)\n", (3862, 3933), False, 'from sfepy.base.base import Struct\n'), ((4912, 5034), 'sfepy.homogenization.micmac.get_homog_coefs_linear', 'get_homog_coefs_linear', (['(0)', '(0)', 'None'], {'micro_filename': 'micro_filename', 'coefs_filename': 'coefs_filename', 'define_args': 'def_args'}), '(0, 0, None, micro_filename=micro_filename,\n coefs_filename=coefs_filename, define_args=def_args)\n', (4934, 5034), False, 'from sfepy.homogenization.micmac import get_homog_coefs_linear\n'), ((5811, 5855), 'os.path.join', 'osp.join', (['data_dir', '"""poropiezo_micro_dfc.py"""'], {}), "(data_dir, 'poropiezo_micro_dfc.py')\n", (5819, 5855), True, 'import os.path as osp\n'), ((7649, 7691), 'os.path.join', 'osp.join', (['data_dir', '"""piezo_mesh_macro.vtk"""'], {}), "(data_dir, 'piezo_mesh_macro.vtk')\n", (7657, 7691), True, 'import os.path as osp\n'), ((7718, 7764), 'os.path.join', 'osp.join', (['data_dir', '"""piezo_mesh_micro_dfc.vtk"""'], {}), "(data_dir, 'piezo_mesh_micro_dfc.vtk')\n", (7726, 7764), True, 'import os.path as osp\n'), ((2392, 2431), 'numpy.ascontiguousarray', 'nm.ascontiguousarray', (['cval[:, :, ii, :]'], {}), '(cval[:, :, ii, :])\n', (2412, 2431), True, 'import numpy as nm\n'), ((4378, 4401), 'numpy.tile', 'nm.tile', (['v', '(nqp, 1, 1)'], {}), '(v, (nqp, 1, 1))\n', (4385, 4401), True, 'import numpy as nm\n'), ((5761, 5778), 'numpy.array', 'nm.array', (['[-1, 1]'], {}), '([-1, 1])\n', (5769, 5778), True, 'import numpy as nm\n'), ((6984, 7013), 'os.path.join', 'osp.join', (['data_dir', '"""results"""'], {}), "(data_dir, 'results')\n", (6992, 7013), True, 'import os.path as osp\n'), ((5436, 5451), 'numpy.array', 'nm.array', (['[[v]]'], {}), '([[v]])\n', (5444, 5451), True, 'import numpy as nm\n')]
|
# Copyright 2021 Modelyst LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
from uuid import UUID
import sqlalchemy.types as types
from sqlalchemy import Column
from sqlalchemy.orm import registry
from sqlmodel import Field, select
from dbgen.core.decorators import transform
from dbgen.core.entity import Entity
from dbgen.core.generator import Generator
from dbgen.core.model import Model
from dbgen.core.node.extract import Extract
from dbgen.core.node.query import Query
my_registry = registry()
class Parent(Entity, registry=my_registry, table=True):
__identifying__ = {"label"}
label: str
myColumn: Optional[dict] = Field(None, sa_column=Column(types.JSON()))
class Child(Entity, registry=my_registry, table=True):
__identifying__ = {"label", "parent_id"}
label: str
new_col: str = "test"
parent_id: Optional[UUID] = Field(None, foreign_key="public.parent.id")
class CustomExtractor(Extract):
n: int = 1000
def extract(self):
for i in range(self.n):
yield {'out': str(i)}
def length(self, **_):
return self.n
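

# Each dict yielded by CustomExtractor.extract() is one row; downstream nodes
# reference its keys, e.g. new_extract["out"] inside make_model() below.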
@transform
def failing_func():
raise ValueError("Failed")
@transform
def inputs_skipped():
from dbgen.exceptions import DBgenSkipException
raise DBgenSkipException(msg="Skip!")
def make_model():
with Model(name='new_api', registry=my_registry) as model:
with Generator('add_parent'):
new_extract = CustomExtractor(n=1000)
Parent.load(insert=True, label=new_extract["out"], validation='strict', myColumn={'a': 1})
with Generator('add_parents_v2'):
Parent.load(insert=True, label="parentier")
with Generator('add_parents_v3'):
Parent.load(insert=True, label="parent")
@transform
def concise_func(label: str) -> str:
return f"{label}-test"
with Generator('add_child'):
query = Query(select(Parent.id, Parent.label))
parent_id, parent_label = query.results()
concise_pyblock = concise_func(query["label"])
Child.load(insert=True, label=concise_pyblock.results(), parent_id=query["id"])
with Generator('failing_gen'):
failing_func()
with Generator('skip_gen'):
inputs_skipped()
return model
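

# Hedged usage sketch: only construction is exercised here, since actually
# running the model needs a configured database connection.
if __name__ == "__main__":
    model = make_model()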
|
[
"sqlmodel.select",
"sqlmodel.Field"
] |
[((1041, 1051), 'sqlalchemy.orm.registry', 'registry', ([], {}), '()\n', (1049, 1051), False, 'from sqlalchemy.orm import registry\n'), ((1407, 1450), 'sqlmodel.Field', 'Field', (['None'], {'foreign_key': '"""public.parent.id"""'}), "(None, foreign_key='public.parent.id')\n", (1412, 1450), False, 'from sqlmodel import Field, select\n'), ((1805, 1836), 'dbgen.exceptions.DBgenSkipException', 'DBgenSkipException', ([], {'msg': '"""Skip!"""'}), "(msg='Skip!')\n", (1823, 1836), False, 'from dbgen.exceptions import DBgenSkipException\n'), ((1866, 1909), 'dbgen.core.model.Model', 'Model', ([], {'name': '"""new_api"""', 'registry': 'my_registry'}), "(name='new_api', registry=my_registry)\n", (1871, 1909), False, 'from dbgen.core.model import Model\n'), ((1933, 1956), 'dbgen.core.generator.Generator', 'Generator', (['"""add_parent"""'], {}), "('add_parent')\n", (1942, 1956), False, 'from dbgen.core.generator import Generator\n'), ((2125, 2152), 'dbgen.core.generator.Generator', 'Generator', (['"""add_parents_v2"""'], {}), "('add_parents_v2')\n", (2134, 2152), False, 'from dbgen.core.generator import Generator\n'), ((2224, 2251), 'dbgen.core.generator.Generator', 'Generator', (['"""add_parents_v3"""'], {}), "('add_parents_v3')\n", (2233, 2251), False, 'from dbgen.core.generator import Generator\n'), ((2420, 2442), 'dbgen.core.generator.Generator', 'Generator', (['"""add_child"""'], {}), "('add_child')\n", (2429, 2442), False, 'from dbgen.core.generator import Generator\n'), ((2722, 2746), 'dbgen.core.generator.Generator', 'Generator', (['"""failing_gen"""'], {}), "('failing_gen')\n", (2731, 2746), False, 'from dbgen.core.generator import Generator\n'), ((2789, 2810), 'dbgen.core.generator.Generator', 'Generator', (['"""skip_gen"""'], {}), "('skip_gen')\n", (2798, 2810), False, 'from dbgen.core.generator import Generator\n'), ((1217, 1229), 'sqlalchemy.types.JSON', 'types.JSON', ([], {}), '()\n', (1227, 1229), True, 'import sqlalchemy.types as types\n'), ((2470, 2501), 'sqlmodel.select', 'select', (['Parent.id', 'Parent.label'], {}), '(Parent.id, Parent.label)\n', (2476, 2501), False, 'from sqlmodel import Field, select\n')]
|
import random
from datetime import datetime
from typing import List, Optional
from sqlmodel import or_, select
from config.notif_config import NotifConfig
from src.api.fixtures_client import FixturesClient
from src.db.db_manager import NotifierDBManager
from src.db.notif_sql_models import Fixture as DBFixture
from src.db.notif_sql_models import League as DBLeague
from src.db.notif_sql_models import Team as DBTeam
from src.emojis import Emojis
from src.entities import Fixture, TeamStanding
from src.senders.email_sender import send_email_html
from src.senders.telegram_sender import send_telegram_message
from src.utils.date_utils import get_date_spanish_text_format
from src.utils.fixtures_utils import (
get_image_search,
get_last_fixture,
get_last_fixture_db,
get_next_fixture,
get_next_fixture_db,
get_youtube_highlights_videos,
)
from src.utils.message_utils import (
get_first_phrase_msg,
get_team_intro_message,
is_subscripted_for_team,
)
class TeamFixturesManager:
def __init__(self, season: str, team_id: str) -> None:
self._season = season
self._team_id = team_id
self._fixtures_client = FixturesClient()
self._notifier_db_manager = NotifierDBManager()
def get_next_team_fixture_text(self, user: str = "") -> tuple:
next_team_fixture = self.get_next_team_fixture()
return (
self._telegram_next_fixture_notification(next_team_fixture, True, user)
if next_team_fixture
else ("Fixture para el equipo no encontrado", "")
)
def get_last_team_fixture_text(self, user: str = "") -> tuple:
last_team_fixture = self.get_last_team_fixture()
return (
self._telegram_last_fixture_notification(last_team_fixture, user)
if last_team_fixture
else ("Fixture para el equipo no encontrado", "")
)
def get_next_team_fixture(self) -> Optional[Fixture]:
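        # fixtures where the team plays either at home or away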
fixtures_statement = select(DBFixture).where(
or_(
DBFixture.home_team == self._team_id,
DBFixture.away_team == self._team_id,
)
)
team_fixtures = self._notifier_db_manager.select_records(fixtures_statement)
next_team_fixture = None
if len(team_fixtures):
next_team_fixture = get_next_fixture_db(team_fixtures)
return next_team_fixture
def notify_next_fixture_db(self) -> None:
next_team_fixture = self.get_next_team_fixture()
if next_team_fixture:
if next_team_fixture.remaining_time().days < NotifConfig.NEXT_MATCH_THRESHOLD:
self._perform_fixture_notification(next_team_fixture)
def notify_next_fixture(self) -> None:
team_fixtures = self._fixtures_client.get_fixtures_by(
self._season, self._team_id
)
next_team_fixture = None
if "response" in team_fixtures.as_dict:
next_team_fixture = get_next_fixture(
team_fixtures.as_dict["response"], self._team_id
)
if next_team_fixture:
if next_team_fixture.remaining_time().days < 500:
self._perform_fixture_notification(next_team_fixture)
def notify_fixture_line_up_update(self) -> None:
team_fixtures = self._fixtures_client.get_fixtures_by(
self._season, self._team_id
)
next_team_fixture = None
if "response" in team_fixtures.as_dict:
next_team_fixture = get_next_fixture(
team_fixtures.as_dict["response"], self._team_id
)
if next_team_fixture:
if (
next_team_fixture.remaining_time().days < 1
and next_team_fixture.remaining_time().hours < 6
and next_team_fixture.line_up
):
self._perform_line_up_confirmed_notification(next_team_fixture)
else:
print(
f"There is still no line up for the match of {next_team_fixture.home_team} vs {next_team_fixture.away_team}"
)
print(str(next_team_fixture.remaining_time()))
def get_last_team_fixture(self) -> Optional[Fixture]:
fixtures_statement = select(DBFixture).where(
or_(
DBFixture.home_team == self._team_id,
DBFixture.away_team == self._team_id,
)
)
team_fixtures = self._notifier_db_manager.select_records(fixtures_statement)
last_team_fixture = None
if team_fixtures:
last_team_fixture = get_last_fixture_db(team_fixtures)
return last_team_fixture
    def notify_last_fixture_db(self) -> None:
        last_team_fixture = self.get_last_team_fixture()
if last_team_fixture:
if (
NotifConfig.LAST_MATCH_THRESHOLD_DAYS
<= last_team_fixture.remaining_time().days
<= 0
):
self._perform_last_fixture_notification(last_team_fixture)
def notify_last_fixture(self) -> None:
team_fixtures = self._fixtures_client.get_fixtures_by(
self._season, self._team_id
)
last_team_fixture = get_last_fixture(
team_fixtures.as_dict["response"], self._team_id
)
if last_team_fixture:
if (
-1
<= last_team_fixture.remaining_time().days
<= NotifConfig.LAST_MATCH_THRESHOLD_DAYS
):
last_team_fixture.highlights = get_youtube_highlights_videos(
last_team_fixture.home_team, last_team_fixture.away_team
)
self._perform_last_fixture_notification(last_team_fixture)
def _telegram_last_fixture_notification(
self, team_fixture: Fixture, user: str = ""
) -> tuple:
match_images = self._get_match_images(team_fixture.championship.league_id)
match_image_url = random.choice(match_images)
spanish_format_date = get_date_spanish_text_format(team_fixture.bsas_date)
team_intro_message = get_team_intro_message(
team_fixture.home_team
if str(team_fixture.home_team.id) == str(self._team_id)
else team_fixture.away_team
)["last_match"]
highlights_yt_url = f"https://www.youtube.com/results?search_query={team_fixture.home_team.name}+vs+{team_fixture.away_team.name}+jugadas+resumen"
highlights_text = f"{Emojis.FILM_PROJECTOR.value} <a href='{highlights_yt_url}'>HIGHLIGHTS</a>"
telegram_message = (
f"{Emojis.WAVING_HAND.value}Hola {user}!\n\n"
f"{team_intro_message} "
f"jugó el {spanish_format_date}! \nEste fue el resultado: \n\n"
f"{team_fixture.matched_played_telegram_like_repr()}"
f"{highlights_text}"
)
return (telegram_message, match_image_url)
def _telegram_next_fixture_notification(
        self, team_fixture: Fixture, is_on_demand: bool = False, user: str = ""
) -> tuple:
spanish_format_date = get_date_spanish_text_format(team_fixture.bsas_date)
match_images = self._get_match_images(team_fixture.championship.league_id)
match_image_url = random.choice(match_images)
date_text = (
"es HOY!"
            if team_fixture.bsas_date.date() == datetime.today().date()
else f"es el {Emojis.SPIRAL_CALENDAR.value} {spanish_format_date}."
)
first_phrase = get_first_phrase_msg(True, is_on_demand)
team_intro_message = get_team_intro_message(
team_fixture.home_team
if str(team_fixture.home_team.id) == str(self._team_id)
else team_fixture.away_team
)["next_match"]
intro_message = f"{first_phrase} {team_intro_message}"
telegram_message = (
f"{Emojis.WAVING_HAND.value}Hola {user}! "
f"\n\n{intro_message} {date_text}\n\n{team_fixture.telegram_like_repr()}"
)
return (telegram_message, match_image_url)
def _perform_last_fixture_notification(
self, team_fixture: Fixture, team_standing: TeamStanding = None
) -> None:
match_images = self._get_match_images(team_fixture.championship.league_id)
match_image_url = random.choice(match_images)
# telegram
team_standing_msg = (
f"{Emojis.RED_EXCLAMATION_MARK.value} Situación actual en el campeonato: \n\n{team_standing.telegram_like_repr()}\n"
if team_standing
else ""
)
team_intro_message = get_team_intro_message(
team_fixture.home_team
if str(team_fixture.home_team.id) == str(self._team_id)
else team_fixture.away_team
)["last_match"]
highlights_yt_url = f"https://www.youtube.com/results?search_query={team_fixture.home_team.name}+vs+{team_fixture.away_team.name}+jugadas+resumen"
highlights_text = f"{Emojis.FILM_PROJECTOR.value} <a href='{highlights_yt_url}'>HIGHLIGHTS</a>"
FOOTBALL_TELEGRAM_RECIPIENTS = NotifConfig.TELEGRAM_RECIPIENTS
for recipient in FOOTBALL_TELEGRAM_RECIPIENTS:
if is_subscripted_for_team(recipient, self._team_id):
telegram_message = (
f"{Emojis.WAVING_HAND.value}Hola {recipient.name}!\n\n"
f"{team_intro_message} "
f"jugó ayer! \nEste fue el resultado: \n\n"
f"{team_fixture.matched_played_telegram_like_repr()}"
f"\n{highlights_text}"
)
send_telegram_message(
recipient.telegram_id,
telegram_message,
match_image_url,
)
# email
team_standing_email_msg = (
f"Situación actual en el campeonato: \n\n{team_standing.email_like_repr()}"
if team_standing
else ""
)
match_image_text = f"<img src='{match_image_url}'>"
email_standing_message = (
f"{Emojis.RED_EXCLAMATION_MARK.value}{team_standing_email_msg}\n"
)
highlights_text = f"https://www.youtube.com/results?search_query={team_fixture.home_team.name}+vs+{team_fixture.away_team.name}+jugadas+resumen"
EMAIL_RECIPIENTS = NotifConfig.EMAIL_RECIPIENTS
for recipient in EMAIL_RECIPIENTS:
message = (
f"{Emojis.WAVING_HAND.value}Hola {recipient.name}!\n\n{team_intro_message} "
f"jugó ayer!<br /><br />{match_image_text}<br /><br />Este fue el resultado: \n\n{team_fixture.matched_played_email_like_repr()}"
f"<br /><br />{email_standing_message}<br /><br />{highlights_text}"
)
send_email_html(
f"{team_fixture.home_team.name} ({team_fixture.match_score.home_score}) - "
f"({team_fixture.match_score.away_score}) {team_fixture.away_team.name}",
message,
recipient.email,
)
def _perform_fixture_notification(self, team_fixture: Fixture) -> None:
spanish_format_date = get_date_spanish_text_format(team_fixture.bsas_date)
match_images = self._get_match_images(team_fixture.championship.league_id)
match_image_url = random.choice(match_images)
match_image_text = f"<img width='100%' height='100%' src='{match_image_url}'>"
date_text = (
"es HOY!"
            if team_fixture.bsas_date.date() == datetime.today().date()
else f"es el {Emojis.SPIRAL_CALENDAR.value} {spanish_format_date}."
)
first_phrase = get_first_phrase_msg(True)
team_intro_message = get_team_intro_message(
team_fixture.home_team
if str(team_fixture.home_team.id) == str(self._team_id)
else team_fixture.away_team
)["next_match"]
intro_message = f"{first_phrase} {team_intro_message}"
# telegram
FOOTBALL_TELEGRAM_RECIPIENTS = NotifConfig.TELEGRAM_RECIPIENTS
for recipient in FOOTBALL_TELEGRAM_RECIPIENTS:
if is_subscripted_for_team(recipient, self._team_id):
telegram_message = (
f"{Emojis.WAVING_HAND.value}Hola "
f"{recipient.name}!\n\n{intro_message} {date_text}\n\n{team_fixture.telegram_like_repr()}"
)
send_telegram_message(
recipient.telegram_id,
telegram_message,
photo=match_image_url,
)
# email
EMAIL_RECIPIENTS = NotifConfig.EMAIL_RECIPIENTS
for recipient in EMAIL_RECIPIENTS:
message = f"{Emojis.WAVING_HAND.value}Hola {recipient.name}!\n\n{intro_message} {date_text}\n\n<br /><br />{match_image_text}<br /><br />{team_fixture.email_like_repr()}"
send_email_html(
f"{team_fixture.home_team.name} vs. {team_fixture.away_team.name}",
message,
recipient.email,
)
def _perform_line_up_confirmed_notification(self, team_fixture: Fixture) -> None:
match_teams = f"{team_fixture.home_team.name} vs {team_fixture.away_team.name}"
match_image_url = get_image_search(match_teams)
match_image_text = f"<img src='{match_image_url}'>"
# telegram
FOOTBALL_TELEGRAM_RECIPIENTS = NotifConfig.TELEGRAM_RECIPIENTS
for recipient in FOOTBALL_TELEGRAM_RECIPIENTS:
intro_message = f"Se actualizó la alineación para {match_teams}:"
telegram_message = f"{Emojis.WAVING_HAND.value}Hola {recipient.name}!\n\n{intro_message}\n\n{team_fixture.telegram_like_repr()}"
send_telegram_message(
recipient.telegram_id,
telegram_message,
photo=match_image_url,
)
# email
EMAIL_RECIPIENTS = NotifConfig.EMAIL_RECIPIENTS
for recipient in EMAIL_RECIPIENTS:
message = f"{Emojis.WAVING_HAND.value}Hola {recipient.name}!\n\n{intro_message}\n\n<br /><br />{match_image_text}<br /><br />{team_fixture.email_like_repr()}"
send_email_html(
f"{team_fixture.home_team.name} vs. {team_fixture.away_team.name}",
message,
recipient.email,
)
def _get_match_images(self, league_id: int) -> List[str]:
match_image_url_team_statement = select(DBTeam).where(
DBTeam.id == self._team_id
)
match_image_url_league_statement = select(DBLeague).where(
DBLeague.id == league_id
)
team_image_url = self._notifier_db_manager.select_records(
match_image_url_team_statement
)[0].picture
league_image_url = self._notifier_db_manager.select_records(
match_image_url_league_statement
)[0].logo
return [team_image_url, league_image_url]
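

# Hedged usage sketch (the season and team id below are placeholder values,
# not taken from this record; the call is left commented because it hits the
# database and external APIs):
# manager = TeamFixturesManager(season="2023", team_id="85")
# message, image = manager.get_next_team_fixture_text(user="Juan")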
|
[
"sqlmodel.select",
"sqlmodel.or_"
] |
[((1170, 1186), 'src.api.fixtures_client.FixturesClient', 'FixturesClient', ([], {}), '()\n', (1184, 1186), False, 'from src.api.fixtures_client import FixturesClient\n'), ((1223, 1242), 'src.db.db_manager.NotifierDBManager', 'NotifierDBManager', ([], {}), '()\n', (1240, 1242), False, 'from src.db.db_manager import NotifierDBManager\n'), ((5615, 5681), 'src.utils.fixtures_utils.get_last_fixture', 'get_last_fixture', (["team_fixtures.as_dict['response']", 'self._team_id'], {}), "(team_fixtures.as_dict['response'], self._team_id)\n", (5631, 5681), False, 'from src.utils.fixtures_utils import get_image_search, get_last_fixture, get_last_fixture_db, get_next_fixture, get_next_fixture_db, get_youtube_highlights_videos\n'), ((6373, 6400), 'random.choice', 'random.choice', (['match_images'], {}), '(match_images)\n', (6386, 6400), False, 'import random\n'), ((6431, 6483), 'src.utils.date_utils.get_date_spanish_text_format', 'get_date_spanish_text_format', (['team_fixture.bsas_date'], {}), '(team_fixture.bsas_date)\n', (6459, 6483), False, 'from src.utils.date_utils import get_date_spanish_text_format\n'), ((7492, 7544), 'src.utils.date_utils.get_date_spanish_text_format', 'get_date_spanish_text_format', (['team_fixture.bsas_date'], {}), '(team_fixture.bsas_date)\n', (7520, 7544), False, 'from src.utils.date_utils import get_date_spanish_text_format\n'), ((7654, 7681), 'random.choice', 'random.choice', (['match_images'], {}), '(match_images)\n', (7667, 7681), False, 'import random\n'), ((7906, 7946), 'src.utils.message_utils.get_first_phrase_msg', 'get_first_phrase_msg', (['(True)', 'is_on_demand'], {}), '(True, is_on_demand)\n', (7926, 7946), False, 'from src.utils.message_utils import get_first_phrase_msg, get_team_intro_message, is_subscripted_for_team\n'), ((8705, 8732), 'random.choice', 'random.choice', (['match_images'], {}), '(match_images)\n', (8718, 8732), False, 'import random\n'), ((11566, 11618), 'src.utils.date_utils.get_date_spanish_text_format', 'get_date_spanish_text_format', (['team_fixture.bsas_date'], {}), '(team_fixture.bsas_date)\n', (11594, 11618), False, 'from src.utils.date_utils import get_date_spanish_text_format\n'), ((11728, 11755), 'random.choice', 'random.choice', (['match_images'], {}), '(match_images)\n', (11741, 11755), False, 'import random\n'), ((12067, 12093), 'src.utils.message_utils.get_first_phrase_msg', 'get_first_phrase_msg', (['(True)'], {}), '(True)\n', (12087, 12093), False, 'from src.utils.message_utils import get_first_phrase_msg, get_team_intro_message, is_subscripted_for_team\n'), ((13677, 13706), 'src.utils.fixtures_utils.get_image_search', 'get_image_search', (['match_teams'], {}), '(match_teams)\n', (13693, 13706), False, 'from src.utils.fixtures_utils import get_image_search, get_last_fixture, get_last_fixture_db, get_next_fixture, get_next_fixture_db, get_youtube_highlights_videos\n'), ((2024, 2103), 'sqlmodel.or_', 'or_', (['(DBFixture.home_team == self._team_id)', '(DBFixture.away_team == self._team_id)'], {}), '(DBFixture.home_team == self._team_id, DBFixture.away_team == self._team_id)\n', (2027, 2103), False, 'from sqlmodel import or_, select\n'), ((2344, 2378), 'src.utils.fixtures_utils.get_next_fixture_db', 'get_next_fixture_db', (['team_fixtures'], {}), '(team_fixtures)\n', (2363, 2378), False, 'from src.utils.fixtures_utils import get_image_search, get_last_fixture, get_last_fixture_db, get_next_fixture, get_next_fixture_db, get_youtube_highlights_videos\n'), ((2981, 3047), 'src.utils.fixtures_utils.get_next_fixture', 'get_next_fixture', (["team_fixtures.as_dict['response']", 'self._team_id'], {}), "(team_fixtures.as_dict['response'], self._team_id)\n", (2997, 3047), False, 'from src.utils.fixtures_utils import get_image_search, get_last_fixture, get_last_fixture_db, get_next_fixture, get_next_fixture_db, get_youtube_highlights_videos\n'), ((3523, 3589), 'src.utils.fixtures_utils.get_next_fixture', 'get_next_fixture', (["team_fixtures.as_dict['response']", 'self._team_id'], {}), "(team_fixtures.as_dict['response'], self._team_id)\n", (3539, 3589), False, 'from src.utils.fixtures_utils import get_image_search, get_last_fixture, get_last_fixture_db, get_next_fixture, get_next_fixture_db, get_youtube_highlights_videos\n'), ((4310, 4389), 'sqlmodel.or_', 'or_', (['(DBFixture.home_team == self._team_id)', '(DBFixture.away_team == self._team_id)'], {}), '(DBFixture.home_team == self._team_id, DBFixture.away_team == self._team_id)\n', (4313, 4389), False, 'from sqlmodel import or_, select\n'), ((4625, 4659), 'src.utils.fixtures_utils.get_last_fixture_db', 'get_last_fixture_db', (['team_fixtures'], {}), '(team_fixtures)\n', (4644, 4659), False, 'from src.utils.fixtures_utils import get_image_search, get_last_fixture, get_last_fixture_db, get_next_fixture, get_next_fixture_db, get_youtube_highlights_videos\n'), ((4807, 4886), 'sqlmodel.or_', 'or_', (['(DBFixture.home_team == self._team_id)', '(DBFixture.away_team == self._team_id)'], {}), '(DBFixture.home_team == self._team_id, DBFixture.away_team == self._team_id)\n', (4810, 4886), False, 'from sqlmodel import or_, select\n'), ((5122, 5156), 'src.utils.fixtures_utils.get_last_fixture_db', 'get_last_fixture_db', (['team_fixtures'], {}), '(team_fixtures)\n', (5141, 5156), False, 'from src.utils.fixtures_utils import get_image_search, get_last_fixture, get_last_fixture_db, get_next_fixture, get_next_fixture_db, get_youtube_highlights_videos\n'), ((9594, 9643), 'src.utils.message_utils.is_subscripted_for_team', 'is_subscripted_for_team', (['recipient', 'self._team_id'], {}), '(recipient, self._team_id)\n', (9617, 9643), False, 'from src.utils.message_utils import get_first_phrase_msg, get_team_intro_message, is_subscripted_for_team\n'), ((11188, 11385), 'src.senders.email_sender.send_email_html', 'send_email_html', (['f"""{team_fixture.home_team.name} ({team_fixture.match_score.home_score}) - ({team_fixture.match_score.away_score}) {team_fixture.away_team.name}"""', 'message', 'recipient.email'], {}), "(\n    f'{team_fixture.home_team.name} ({team_fixture.match_score.home_score}) - ({team_fixture.match_score.away_score}) {team_fixture.away_team.name}'\n    , message, recipient.email)\n", (11203, 11385), False, 'from src.senders.email_sender import send_email_html\n'), ((12539, 12588), 'src.utils.message_utils.is_subscripted_for_team', 'is_subscripted_for_team', (['recipient', 'self._team_id'], {}), '(recipient, self._team_id)\n', (12562, 12588), False, 'from src.utils.message_utils import get_first_phrase_msg, get_team_intro_message, is_subscripted_for_team\n'), ((13303, 13421), 'src.senders.email_sender.send_email_html', 'send_email_html', (['f"""{team_fixture.home_team.name} vs. {team_fixture.away_team.name}"""', 'message', 'recipient.email'], {}), "(\n    f'{team_fixture.home_team.name} vs. {team_fixture.away_team.name}',\n    message, recipient.email)\n", (13318, 13421), False, 'from src.senders.email_sender import send_email_html\n'), ((14144, 14234), 'src.senders.telegram_sender.send_telegram_message', 'send_telegram_message', (['recipient.telegram_id', 'telegram_message'], {'photo': 'match_image_url'}), '(recipient.telegram_id, telegram_message, photo=\n    match_image_url)\n', (14165, 14234), False, 'from src.senders.telegram_sender import send_telegram_message\n'), ((14592, 14710), 'src.senders.email_sender.send_email_html', 'send_email_html', (['f"""{team_fixture.home_team.name} vs. {team_fixture.away_team.name}"""', 'message', 'recipient.email'], {}), "(\n    f'{team_fixture.home_team.name} vs. {team_fixture.away_team.name}',\n    message, recipient.email)\n", (14607, 14710), False, 'from src.senders.email_sender import send_email_html\n'), ((1987, 2004), 'sqlmodel.select', 'select', (['DBFixture'], {}), '(DBFixture)\n', (1993, 2004), False, 'from sqlmodel import or_, select\n'), ((4273, 4290), 'sqlmodel.select', 'select', (['DBFixture'], {}), '(DBFixture)\n', (4279, 4290), False, 'from sqlmodel import or_, select\n'), ((4770, 4787), 'sqlmodel.select', 'select', (['DBFixture'], {}), '(DBFixture)\n', (4776, 4787), False, 'from sqlmodel import or_, select\n'), ((5949, 6040), 'src.utils.fixtures_utils.get_youtube_highlights_videos', 'get_youtube_highlights_videos', (['last_team_fixture.home_team', 'last_team_fixture.away_team'], {}), '(last_team_fixture.home_team,\n    last_team_fixture.away_team)\n', (5978, 6040), False, 'from src.utils.fixtures_utils import get_image_search, get_last_fixture, get_last_fixture_db, get_next_fixture, get_next_fixture_db, get_youtube_highlights_videos\n'), ((10018, 10097), 'src.senders.telegram_sender.send_telegram_message', 'send_telegram_message', (['recipient.telegram_id', 'telegram_message', 'match_image_url'], {}), '(recipient.telegram_id, telegram_message, match_image_url)\n', (10039, 10097), False, 'from src.senders.telegram_sender import send_telegram_message\n'), ((12827, 12917), 'src.senders.telegram_sender.send_telegram_message', 'send_telegram_message', (['recipient.telegram_id', 'telegram_message'], {'photo': 'match_image_url'}), '(recipient.telegram_id, telegram_message, photo=\n    match_image_url)\n', (12848, 12917), False, 'from src.senders.telegram_sender import send_telegram_message\n'), ((14869, 14883), 'sqlmodel.select', 'select', (['DBTeam'], {}), '(DBTeam)\n', (14875, 14883), False, 'from sqlmodel import or_, select\n'), ((14983, 14999), 'sqlmodel.select', 'select', (['DBLeague'], {}), '(DBLeague)\n', (14989, 14999), False, 'from sqlmodel import or_, select\n'), ((7771, 7787), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (7785, 7787), False, 'from datetime import datetime\n'), ((11932, 11948), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (11946, 11948), False, 'from datetime import datetime\n')]
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied
import json
import os
import pytest
from megengine import Parameter, tensor
from megengine.core import option
from megengine.module import Module
from megengine.utils.profiler import Profiler, scope
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.23], dtype="float32")
def forward(self, x):
x = x * self.a
return x
def test_profiler():
profile_prefix = "pytest_profile"
profile_format = "chrome_timeline.json"
profile_path = "{}.{}".format(profile_prefix, profile_format)
with Profiler(profile_prefix, format=profile_format):
with scope("my_scope"):
oup = Simple()(tensor([1.23], dtype="float32"))
with open(profile_path, "r") as f:
events = json.load(f)
os.remove(profile_path)
prev_ts = {}
scope_count = 0
for event in events:
if "dur" in event:
assert event["dur"] >= 0
elif "ts" in event and "tid" in event:
ts = event["ts"]
tid = event["tid"]
if ts == 0:
continue
assert (tid not in prev_ts) or prev_ts[tid] <= ts
prev_ts[tid] = ts
if "name" in event and event["name"] == "my_scope":
scope_count += 1
assert scope_count > 0 and scope_count % 2 == 0
|
[
"megengine.tensor",
"megengine.utils.profiler.scope",
"megengine.Parameter",
"megengine.utils.profiler.Profiler"
] |
[((1165, 1188), 'os.remove', 'os.remove', (['profile_path'], {}), '(profile_path)\n', (1174, 1188), False, 'import os\n'), ((669, 703), 'megengine.Parameter', 'Parameter', (['[1.23]'], {'dtype': '"""float32"""'}), "([1.23], dtype='float32')\n", (678, 703), False, 'from megengine import Parameter, tensor\n'), ((951, 998), 'megengine.utils.profiler.Profiler', 'Profiler', (['profile_prefix'], {'format': 'profile_format'}), '(profile_prefix, format=profile_format)\n', (959, 998), False, 'from megengine.utils.profiler import Profiler, scope\n'), ((1148, 1160), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1157, 1160), False, 'import json\n'), ((1013, 1030), 'megengine.utils.profiler.scope', 'scope', (['"""my_scope"""'], {}), "('my_scope')\n", (1018, 1030), False, 'from megengine.utils.profiler import Profiler, scope\n'), ((1059, 1090), 'megengine.tensor', 'tensor', (['[1.23]'], {'dtype': '"""float32"""'}), "([1.23], dtype='float32')\n", (1065, 1090), False, 'from megengine import Parameter, tensor\n')]
|
import numpy as np
from megengine import tensor
def _default_compare_fn(x, y):
np.testing.assert_allclose(x.numpy(), y, rtol=1e-6)
def opr_test(cases, func, compare_fn=_default_compare_fn, ref_fn=None, **kwargs):
"""
    :param cases: a list of dicts; for a dynamic shape test the list length
        should be 2. Each dict must have an "input" entry, and also an
        "output" entry if ref_fn is None. Use lists for multiple inputs
        and outputs in each case.
    :param func: the function that runs the opr.
    :param compare_fn: the function that compares the actual result with the
        expected one; ``np.testing.assert_allclose`` is used by default.
    :param ref_fn: the function that generates the expected data; when it is
        None, each case must provide "output".
Examples:
.. code-block::
dtype = np.float32
cases = [{"input": [10, 20]}, {"input": [20, 30]}]
opr_test(cases,
F.eye,
ref_fn=lambda n, m: np.eye(n, m).astype(dtype),
dtype=dtype)
"""
def check_results(results, expected):
if not isinstance(results, (tuple, list)):
results = (results,)
for r, e in zip(results, expected):
compare_fn(r, e)
def get_param(cases, idx):
case = cases[idx]
inp = case.get("input", None)
outp = case.get("output", None)
if inp is None:
raise ValueError("the test case should have input")
if not isinstance(inp, (tuple, list)):
inp = (inp,)
if ref_fn is not None and callable(ref_fn):
outp = ref_fn(*inp)
if outp is None:
raise ValueError("the test case should have output or reference function")
if not isinstance(outp, (tuple, list)):
outp = (outp,)
return inp, outp
if len(cases) == 0:
raise ValueError("should give one case at least")
if not callable(func):
raise ValueError("the input func should be callable")
inp, outp = get_param(cases, 0)
inp_tensor = [tensor(inpi) for inpi in inp]
results = func(*inp_tensor, **kwargs)
check_results(results, outp)
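# Hedged usage sketch (an addition, not part of the original helper; F.add is
# the real megengine.functional elementwise add, standing in for any opr):
if __name__ == "__main__":
    import megengine.functional as F
    cases = [{"input": [np.array([1.0, 2.0], dtype=np.float32),
                        np.array([3.0, 4.0], dtype=np.float32)]}]
    # The reference function computes the expected numpy result directly.
    opr_test(cases, F.add, ref_fn=lambda a, b: a + b)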
|
[
"megengine.tensor"
] |
[((2055, 2067), 'megengine.tensor', 'tensor', (['inpi'], {}), '(inpi)\n', (2061, 2067), False, 'from megengine import tensor\n')]
|
"""Example code"""
from sqlmodel import Field, SQLModel
class CarBase(SQLModel):
"""
    CarBase is the base model. This is a data-only (Pydantic) model, since it lacks `table=True`.
"""
name: str
manufacturer: str
class Car(CarBase, table=True): # type: ignore
"""
    Add an `id` property to the base model. Since `table=True`, it is both a Pydantic AND a SQLAlchemy model
and represents a database table.
"""
id: int = Field(default=None, primary_key=True)
class CarRead(CarBase):
id: int # Make `id` a required field in our response model
class CarCreate(CarBase):
"""
    This is a data-only Pydantic model, used to create new cars.
"""
pass
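# Hedged usage sketch (added; the in-memory SQLite engine below is an
# assumption, not part of the original example):
from sqlmodel import Session, create_engine
engine = create_engine("sqlite://")
SQLModel.metadata.create_all(engine)  # creates the "car" table
with Session(engine) as session:
    session.add(Car(name="roadster", manufacturer="acme"))  # hypothetical values
    session.commit()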
|
[
"sqlmodel.Field"
] |
[((449, 486), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (454, 486), False, 'from sqlmodel import Field, SQLModel\n')]
|
# 26.02.2007, c
# last revision: 25.02.2008
from sfepy import data_dir
filename_mesh = data_dir + '/meshes/3d/elbow2.mesh'
options = {
'nls' : 'newton',
'ls' : 'ls',
'post_process_hook' : 'verify_incompressibility',
}
field_1 = {
'name' : '3_velocity',
'dtype' : 'real',
'shape' : (3,),
'region' : 'Omega',
'approx_order' : '1B',
}
field_2 = {
'name' : 'pressure',
'dtype' : 'real',
'shape' : (1,),
'region' : 'Omega',
'approx_order' : 1,
}
# Can use logical operations '&' (and), '|' (or).
region_1000 = {
'name' : 'Omega',
'select' : 'elements of group 6',
}
region_0 = {
'name' : 'Walls',
'select' : 'nodes of surface -n (r.Outlet +n r.Inlet)',
'can_cells' : False,
}
region_1 = {
'name' : 'Inlet',
'select' : 'nodes by cinc0', # In
'can_cells' : False,
}
region_2 = {
'name' : 'Outlet',
'select' : 'nodes by cinc1', # Out
'can_cells' : False,
}
ebc_1 = {
'name' : 'Walls',
'region' : 'Walls',
'dofs' : {'u.all' : 0.0},
}
ebc_2 = {
'name' : 'Inlet',
'region' : 'Inlet',
'dofs' : {'u.1' : 1.0, 'u.[0,2]' : 0.0},
}
material_1 = {
'name' : 'fluid',
'values' : {
'viscosity' : 1.25e-3,
'density' : 1e0,
},
}
variable_1 = {
'name' : 'u',
'kind' : 'unknown field',
'field' : '3_velocity',
'order' : 0,
}
variable_2 = {
'name' : 'v',
'kind' : 'test field',
'field' : '3_velocity',
'dual' : 'u',
}
variable_3 = {
'name' : 'p',
'kind' : 'unknown field',
'field' : 'pressure',
'order' : 1,
}
variable_4 = {
'name' : 'q',
'kind' : 'test field',
'field' : 'pressure',
'dual' : 'p',
}
variable_5 = {
'name' : 'pp',
'kind' : 'parameter field',
'field' : 'pressure',
'like' : 'p',
}
integral_1 = {
'name' : 'i1',
'kind' : 'v',
'quadrature' : 'gauss_o2_d3',
}
integral_2 = {
'name' : 'i2',
'kind' : 'v',
'quadrature' : 'gauss_o3_d3',
}
##
# Stationary Navier-Stokes equations.
equations = {
'balance' :
"""+ dw_div_grad.i2.Omega( fluid.viscosity, v, u )
+ dw_convect.i2.Omega( v, u )
- dw_stokes.i1.Omega( v, p ) = 0""",
'incompressibility' :
"""dw_stokes.i1.Omega( u, q ) = 0""",
}
##
# FE assembling parameters.
fe = {
'chunk_size' : 1000
}
solver_0 = {
'name' : 'ls',
'kind' : 'ls.scipy_direct',
}
solver_1 = {
'name' : 'newton',
'kind' : 'nls.newton',
'i_max' : 5,
'eps_a' : 1e-8,
'eps_r' : 1.0,
'macheps' : 1e-16,
'lin_red' : 1e-2, # Linear system error < (eps_a * lin_red).
'ls_red' : 0.1,
'ls_red_warp' : 0.001,
'ls_on' : 0.99999,
'ls_min' : 1e-5,
'check' : 0,
'delta' : 1e-6,
'is_plot' : False,
'problem' : 'nonlinear', # 'nonlinear' or 'linear' (ignore i_max)
}
def verify_incompressibility( out, problem, state, extend = False ):
"""This hook is normally used for post-processing (additional results can
    be inserted into the `out` dictionary), but here we just verify the weak
incompressibility condition."""
from sfepy.base.base import Struct, debug, nm, output, assert_
vv = problem.get_variables()
one = nm.ones( (vv['p'].field.n_nod,), dtype = nm.float64 )
vv['p'].data_from_any( one )
zero = problem.evaluate('dw_stokes.i1.Omega( u, p )', p=one, u=vv['u'](),
call_mode='d_eval')
output('div( u ) = %.3e' % zero)
assert_(abs(zero) < 1e-14)
return out
##
# Functions.
import os.path as op
import utils
cinc_name = 'cinc_' + op.splitext(op.basename(filename_mesh))[0]
cinc = getattr(utils, cinc_name)
functions = {
'cinc0' : (lambda coors, domain=None: cinc(coors, 0),),
'cinc1' : (lambda coors, domain=None: cinc(coors, 1),),
}
|
[
"sfepy.base.base.output",
"sfepy.base.base.nm.ones"
] |
[((3241, 3290), 'sfepy.base.base.nm.ones', 'nm.ones', (["(vv['p'].field.n_nod,)"], {'dtype': 'nm.float64'}), "((vv['p'].field.n_nod,), dtype=nm.float64)\n", (3248, 3290), False, 'from sfepy.base.base import Struct, debug, nm, output, assert_\n'), ((3458, 3490), 'sfepy.base.base.output', 'output', (["('div( u ) = %.3e' % zero)"], {}), "('div( u ) = %.3e' % zero)\n", (3464, 3490), False, 'from sfepy.base.base import Struct, debug, nm, output, assert_\n'), ((3625, 3651), 'os.path.basename', 'op.basename', (['filename_mesh'], {}), '(filename_mesh)\n', (3636, 3651), True, 'import os.path as op\n')]
|
from typing import Optional
from sqlmodel import Field, SQLModel
from datetime import datetime
class Meter(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
serial_number: str
class Measurement(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
meter_id: Optional[int] = Field(default=None, foreign_key="meter.id")
capture_time: datetime = Field()
voltage_phase_1: float = Field(default=0.0)
voltage_phase_2: float = Field(default=0.0)
voltage_phase_3: float = Field(default=0.0)
power: float = Field(default=0.0)
thd_1: float = Field(default=0.0)
thd_2: float = Field(default=0.0)
thd_3: float = Field(default=0.0)
thd_4: float = Field(default=0.0)
thd_5: float = Field(default=0.0)
thd_6: float = Field(default=0.0)
thd_7: float = Field(default=0.0)
thd_8: float = Field(default=0.0)
thd_9: float = Field(default=0.0)
thd_10: float = Field(default=0.0)
class Label(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str = Field()
color: str = Field()
class LabelAssignment(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
label_id: Optional[int] = Field(default=None, foreign_key="label.id")
meter_id: Optional[int] = Field(default=None, foreign_key="meter.id")
start_time: datetime = Field()
end_time: datetime = Field()
|
[
"sqlmodel.Field"
] |
[((156, 193), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (161, 193), False, 'from sqlmodel import Field, SQLModel\n'), ((284, 321), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (289, 321), False, 'from sqlmodel import Field, SQLModel\n'), ((352, 395), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'foreign_key': '"""meter.id"""'}), "(default=None, foreign_key='meter.id')\n", (357, 395), False, 'from sqlmodel import Field, SQLModel\n'), ((425, 432), 'sqlmodel.Field', 'Field', ([], {}), '()\n', (430, 432), False, 'from sqlmodel import Field, SQLModel\n'), ((462, 480), 'sqlmodel.Field', 'Field', ([], {'default': '(0.0)'}), '(default=0.0)\n', (467, 480), False, 'from sqlmodel import Field, SQLModel\n'), ((510, 528), 'sqlmodel.Field', 'Field', ([], {'default': '(0.0)'}), '(default=0.0)\n', (515, 528), False, 'from sqlmodel import Field, SQLModel\n'), ((558, 576), 'sqlmodel.Field', 'Field', ([], {'default': '(0.0)'}), '(default=0.0)\n', (563, 576), False, 'from sqlmodel import Field, SQLModel\n'), ((596, 614), 'sqlmodel.Field', 'Field', ([], {'default': '(0.0)'}), '(default=0.0)\n', (601, 614), False, 'from sqlmodel import Field, SQLModel\n'), ((634, 652), 'sqlmodel.Field', 'Field', ([], {'default': '(0.0)'}), '(default=0.0)\n', (639, 652), False, 'from sqlmodel import Field, SQLModel\n'), ((672, 690), 'sqlmodel.Field', 'Field', ([], {'default': '(0.0)'}), '(default=0.0)\n', (677, 690), False, 'from sqlmodel import Field, SQLModel\n'), ((710, 728), 'sqlmodel.Field', 'Field', ([], {'default': '(0.0)'}), '(default=0.0)\n', (715, 728), False, 'from sqlmodel import Field, SQLModel\n'), ((748, 766), 'sqlmodel.Field', 'Field', ([], {'default': '(0.0)'}), '(default=0.0)\n', (753, 766), False, 'from sqlmodel import Field, SQLModel\n'), ((786, 804), 'sqlmodel.Field', 'Field', ([], {'default': '(0.0)'}), '(default=0.0)\n', (791, 804), False, 'from sqlmodel import Field, SQLModel\n'), ((824, 842), 'sqlmodel.Field', 'Field', ([], {'default': '(0.0)'}), '(default=0.0)\n', (829, 842), False, 'from sqlmodel import Field, SQLModel\n'), ((862, 880), 'sqlmodel.Field', 'Field', ([], {'default': '(0.0)'}), '(default=0.0)\n', (867, 880), False, 'from sqlmodel import Field, SQLModel\n'), ((900, 918), 'sqlmodel.Field', 'Field', ([], {'default': '(0.0)'}), '(default=0.0)\n', (905, 918), False, 'from sqlmodel import Field, SQLModel\n'), ((938, 956), 'sqlmodel.Field', 'Field', ([], {'default': '(0.0)'}), '(default=0.0)\n', (943, 956), False, 'from sqlmodel import Field, SQLModel\n'), ((977, 995), 'sqlmodel.Field', 'Field', ([], {'default': '(0.0)'}), '(default=0.0)\n', (982, 995), False, 'from sqlmodel import Field, SQLModel\n'), ((1057, 1094), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (1062, 1094), False, 'from sqlmodel import Field, SQLModel\n'), ((1112, 1119), 'sqlmodel.Field', 'Field', ([], {}), '()\n', (1117, 1119), False, 'from sqlmodel import Field, SQLModel\n'), ((1138, 1145), 'sqlmodel.Field', 'Field', ([], {}), '()\n', (1143, 1145), False, 'from sqlmodel import Field, SQLModel\n'), ((1217, 1254), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (1222, 1254), False, 'from sqlmodel import Field, SQLModel\n'), ((1285, 1328), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'foreign_key': '"""label.id"""'}), "(default=None, foreign_key='label.id')\n", (1290, 1328), False, 'from sqlmodel import Field, SQLModel\n'), ((1359, 1402), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'foreign_key': '"""meter.id"""'}), "(default=None, foreign_key='meter.id')\n", (1364, 1402), False, 'from sqlmodel import Field, SQLModel\n'), ((1430, 1437), 'sqlmodel.Field', 'Field', ([], {}), '()\n', (1435, 1437), False, 'from sqlmodel import Field, SQLModel\n'), ((1463, 1470), 'sqlmodel.Field', 'Field', ([], {}), '()\n', (1468, 1470), False, 'from sqlmodel import Field, SQLModel\n')]
|
from sqlmodel import Session
from aot_quotes.common.db import Quotes, engine
def seed_data(filename):
with Session(engine) as session:
with open(filename, "r") as fp:
for line in fp.readlines():
quote_obj = Quotes(quote=line.strip())
session.add(quote_obj)
session.commit()
if __name__ == "__main__":
seed_data("./seed/data.txt")
|
[
"sqlmodel.Session"
] |
[((114, 129), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (121, 129), False, 'from sqlmodel import Session\n')]
|
from sqlmodel import Session
from .database import create_db_and_tables, engine
from .hero_model import Hero
from .team_model import Team
def create_heroes():
with Session(engine) as session:
team_z_force = Team(name="Z-Force", headquarters="<NAME>")
hero_deadpond = Hero(
name="Deadpond", secret_name="<NAME>", team=team_z_force, experience_points=1
)
session.add(hero_deadpond)
session.commit()
session.refresh(hero_deadpond)
print("Created hero:", hero_deadpond)
print("Hero's team:", hero_deadpond.team)
def main():
create_db_and_tables()
create_heroes()
if __name__ == "__main__":
main()
|
[
"sqlmodel.Session"
] |
[((171, 186), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (178, 186), False, 'from sqlmodel import Session\n')]
|
"""
Functions for a mesh refinement with hanging nodes.
Notes
-----
Using LCBCs with hanging nodes is not supported.
"""
from __future__ import absolute_import
from six.moves import range, zip
import numpy as nm
from sfepy.base.base import assert_
from sfepy.discrete import Functions, Function
from sfepy.discrete.fem import Mesh, FEDomain
# Rows = facets of reference cell, columns = [sub_cell_i, local facet_i]
refine_edges_2_4 = nm.array([[[0, 0], [1, 3]],
[[1, 0], [2, 3]],
[[2, 0], [3, 3]],
[[3, 0], [0, 3]]])
refine_faces_3_8 = nm.array([[[0, 0], [1, 0], [2, 0], [3, 0]],
[[0, 1], [3, 2], [4, 2], [7, 1]],
[[0, 2], [1, 1], [4, 1], [5, 2]],
[[4, 0], [5, 0], [6, 0], [7, 0]],
[[1, 2], [2, 1], [5, 1], [6, 2]],
[[2, 2], [3, 1], [6, 1], [7, 2]]])
refine_edges_3_8 = nm.array([[[0, 0], [1, 3]],
[[1, 0], [2, 3]],
[[2, 0], [3, 3]],
[[3, 0], [0, 3]],
[[4, 3], [5, 0]],
[[5, 3], [6, 0]],
[[6, 3], [7, 0]],
[[7, 3], [4, 0]],
[[0, 8], [4, 8]],
[[1, 8], [5, 8]],
[[2, 8], [6, 8]],
[[3, 8], [7, 8]]])
def find_level_interface(domain, refine_flag):
"""
Find facets of the coarse mesh that are on the coarse-refined cell
boundary.
ids w.r.t. current mesh:
- facets: global, local w.r.t. cells[:, 0], local w.r.t. cells[:, 1]
- interface cells:
- cells[:, 0] - cells to refine
- cells[:, 1] - their facet sharing neighbors (w.r.t. both meshes)
- cells[:, 2] - facet kind: 0 = face, 1 = edge
"""
if not refine_flag.any():
facets = nm.zeros((0, 3), dtype=nm.uint32)
cells = nm.zeros((0, 3), dtype=nm.uint32)
return facets, cells, 0, None, None
def _get_refine(coors, domain=None):
return nm.nonzero(refine_flag)[0]
def _get_coarse(coors, domain=None):
return nm.nonzero(1 - refine_flag)[0]
get_refine = Function('get_refine', _get_refine)
get_coarse = Function('get_coarse', _get_coarse)
functions = Functions([get_refine, get_coarse])
region0 = domain.create_region('coarse', 'cells by get_coarse',
functions=functions, add_to_regions=False,
allow_empty=True)
region1 = domain.create_region('refine', 'cells by get_refine',
functions=functions, add_to_regions=False)
cmesh = domain.mesh.cmesh
dim = cmesh.dim
if dim == 2:
oe = 0
facets = nm.intersect1d(region0.facets, region1.facets)
cmesh.setup_connectivity(dim - 1, dim)
cells, offs = cmesh.get_incident(dim, facets, dim - 1,
ret_offsets=True)
assert_((nm.diff(offs) == 2).all())
ii = cmesh.get_local_ids(facets, dim - 1, cells, offs, dim)
ii = ii.reshape((-1, 2))
cells = cells.reshape((-1, 2))
ii = nm.where(refine_flag[cells], ii[:, :1], ii[:, 1:])
cells = nm.where(refine_flag[cells], cells[:, :1], cells[:, 1:])
facets = nm.c_[facets, ii]
cells = nm.c_[cells, nm.zeros_like(cells[:, 1])]
else: # if dim == 3:
gel = domain.geom_els['3_8']
epf = gel.get_edges_per_face()
cmesh.setup_connectivity(dim, dim)
fc, coffs = cmesh.get_incident(dim, region1.cells, dim,
ret_offsets=True)
cc = nm.repeat(region1.cells, nm.diff(coffs))
aux = nm.c_[cc, fc]
"""
nnn[:, 0] cells to refine, nnn[:, 1] non-refined neighbours, nnn[:, 2]
neighbour kind : 0 face, 1 edge.
"""
nn = aux[refine_flag[fc] == 0]
cf = nn[:, 0].copy().astype(nm.uint32)
cc = nn[:, 1].copy().astype(nm.uint32)
vc, vco = cmesh.get_incident(0, cc, dim, ret_offsets=True)
vf, vfo = cmesh.get_incident(0, cf, dim, ret_offsets=True)
vc = vc.reshape((-1, 8))
vf = vf.reshape((-1, 8))
nnn = []
oe = 0
ov = nn.shape[0]
for ii in range(vc.shape[0]):
aux = set(vc[ii]).intersection(vf[ii])
nc = len(aux)
if nc == 1:
nnn.append((0, 0, 2))
ov -= 1
elif nc == 4:
nnn.append((nn[ii, 0], nn[ii, 1], 0))
oe += 1
else:
nnn.append((nn[ii, 0], nn[ii, 1], 1))
nnn = nm.array(nnn)
if nnn.shape[0] == 0:
facets = nm.zeros((0, 3), dtype=nm.uint32)
cells = nm.zeros((0, 4), dtype=nm.uint32)
return facets, cells, 0, region0, region1
# Sort by neighbour kind, skip vertex-only neighbours.
ii = nm.argsort(nnn[:, 2])
nnn = nnn[ii][:ov]
cf = cf[ii][:ov]
cc = cc[ii][:ov]
ec, eco = cmesh.get_incident(1, cc, dim, ret_offsets=True)
ef, efo = cmesh.get_incident(1, cf, dim, ret_offsets=True)
ec = ec.reshape((-1, 12))
ef = ef.reshape((-1, 12))
fc, fco = cmesh.get_incident(2, cc, dim, ret_offsets=True)
ff, ffo = cmesh.get_incident(2, cf, dim, ret_offsets=True)
fc = fc.reshape((-1, 6))
ff = ff.reshape((-1, 6))
emask = nm.zeros((domain.shape.n_el, 12), dtype=nm.bool)
ffs = []
for ii in range(oe):
facet = nm.intersect1d(fc[ii], ff[ii])[0]
i1 = nm.where(ff[ii] == facet)[0][0]
i0 = nm.where(fc[ii] == facet)[0][0]
ffs.append((facet, i1, i0))
emask[nnn[ii, 0], epf[i1]] = True
for ii in range(oe, nnn.shape[0]):
facet = nm.intersect1d(ec[ii], ef[ii])[0]
i1 = nm.where(ef[ii] == facet)[0][0]
i0 = nm.where(ec[ii] == facet)[0][0]
ffs.append((facet, i1, i0))
ffs = nm.array(ffs)
ie = nm.where(nnn[:, 2] == 1)[0]
ennn = nnn[ie]
effs = ffs[ie]
omit = ie[emask[ennn[:, 0], effs[:, 1]]]
valid = nm.ones(nnn.shape[0], dtype=nm.bool)
valid[omit] = False
cells = nnn[valid]
facets = ffs[valid]
return facets, cells, oe, region0, region1
def refine_region(domain0, region0, region1):
"""
Coarse cell sub_cells[ii, 0] in mesh0 is split into sub_cells[ii, 1:] in
mesh1.
The new fine cells are interleaved among the original coarse cells so that
the indices of the coarse cells do not change.
The cell groups are preserved. The vertex groups are preserved only in the
coarse (non-refined) cells.
"""
if region1 is None:
return domain0, None
mesh0 = domain0.mesh
mesh1 = Mesh.from_region(region1, mesh0)
domain1 = FEDomain('d', mesh1)
domain1r = domain1.refine()
mesh1r = domain1r.mesh
n_cell = region1.shape.n_cell
n_sub = 4 if mesh0.cmesh.tdim == 2 else 8
sub_cells = nm.empty((n_cell, n_sub + 1), dtype=nm.uint32)
sub_cells[:, 0] = region1.cells
sub_cells[:, 1] = region1.cells
aux = nm.arange((n_sub - 1) * n_cell, dtype=nm.uint32)
sub_cells[:, 2:] = mesh0.n_el + aux.reshape((n_cell, -1))
coors0, vgs0, conns0, mat_ids0, descs0 = mesh0._get_io_data()
coors, vgs, _conns, _mat_ids, descs = mesh1r._get_io_data()
# Preserve vertex groups of non-refined cells.
vgs[:len(vgs0)] = vgs0
def _interleave_refined(c0, c1):
if c1.ndim == 1:
c0 = c0[:, None]
c1 = c1[:, None]
n_row, n_col = c1.shape
n_new = region0.shape.n_cell + n_row
out = nm.empty((n_new, n_col), dtype=c0.dtype)
out[region0.cells] = c0[region0.cells]
out[region1.cells] = c1[::n_sub]
aux = c1.reshape((-1, n_col * n_sub))
out[mesh0.n_el:] = aux[:, n_col:].reshape((-1, n_col))
return out
conn = _interleave_refined(conns0[0], _conns[0])
mat_id = _interleave_refined(mat_ids0[0], _mat_ids[0]).squeeze()
mesh = Mesh.from_data('a', coors, vgs, [conn], [mat_id], descs)
domain = FEDomain('d', mesh)
return domain, sub_cells
def find_facet_substitutions(facets, cells, sub_cells, refine_facets):
"""
Find facet substitutions in connectivity.
sub = [coarse cell, coarse facet, fine1 cell, fine1 facet, fine2 cell,
fine2 facet]
"""
subs = []
for ii, fac in enumerate(facets):
fine = cells[ii, 0]
coarse = cells[ii, 1]
isub = nm.searchsorted(sub_cells[:, 0], fine)
refined = sub_cells[isub, 1:]
rf = refine_facets[fac[1]]
used = refined[rf[:, 0]]
fused = rf[:, 1]
master = [coarse, fac[2]]
slave = list(zip(used, fused))
sub = nm.r_[[master], slave].ravel()
subs.append(sub)
subs = nm.array(subs)
return subs
def refine(domain0, refine, subs=None, ret_sub_cells=False):
desc = domain0.mesh.descs[0]
assert_(desc in ['2_4', '3_8'])
facets, cells, oe, region0, region1 = find_level_interface(domain0, refine)
if region1 is None:
return domain0, None
domain, sub_cells = refine_region(domain0, region0, region1)
if facets.shape[0] > 0:
desc = domain0.mesh.descs[0]
conn0 = domain0.mesh.get_conn(desc)
conn1 = domain.mesh.get_conn(desc)
assert_((conn0[cells[:, 1]] == conn1[cells[:, 1]]).all())
desc = domain0.mesh.descs[0]
if desc == '2_4':
subs1 = find_facet_substitutions(facets, cells,
sub_cells, refine_edges_2_4)
if subs is None:
subs = subs1 if len(subs1) else None
elif len(subs1):
subs = nm.r_[subs, subs1]
else:
subs1f = find_facet_substitutions(facets[:oe], cells[:oe],
sub_cells, refine_faces_3_8)
subs1e = find_facet_substitutions(facets[oe:], cells[oe:],
sub_cells, refine_edges_3_8)
if subs is None:
subs = (subs1f if len(subs1f) else None,
                    subs1e if len(subs1e) else None)
elif len(subs1f):
subsf, subse = subs
subsf = nm.r_[subsf, subs1f]
if len(subse):
if len(subs1e):
subse = nm.r_[subse, subs1e]
subs = (subsf, subse)
if (isinstance(subs, tuple)
and (subs[0] is None) and (subs[1] is None)): subs = None
out = (domain, subs)
if ret_sub_cells:
out += (sub_cells,)
return out
|
[
"sfepy.base.base.assert_",
"sfepy.discrete.fem.Mesh.from_region",
"sfepy.discrete.Function",
"sfepy.discrete.fem.Mesh.from_data",
"sfepy.discrete.fem.FEDomain",
"sfepy.discrete.Functions"
] |
[((437, 523), 'numpy.array', 'nm.array', (['[[[0, 0], [1, 3]], [[1, 0], [2, 3]], [[2, 0], [3, 3]], [[3, 0], [0, 3]]]'], {}), '([[[0, 0], [1, 3]], [[1, 0], [2, 3]], [[2, 0], [3, 3]], [[3, 0], [0,\n    3]]])\n', (445, 523), True, 'import numpy as nm\n'), ((627, 850), 'numpy.array', 'nm.array', (['[[[0, 0], [1, 0], [2, 0], [3, 0]], [[0, 1], [3, 2], [4, 2], [7, 1]], [[0, 2\n    ], [1, 1], [4, 1], [5, 2]], [[4, 0], [5, 0], [6, 0], [7, 0]], [[1, 2],\n    [2, 1], [5, 1], [6, 2]], [[2, 2], [3, 1], [6, 1], [7, 2]]]'], {}), '([[[0, 0], [1, 0], [2, 0], [3, 0]], [[0, 1], [3, 2], [4, 2], [7, 1]\n    ], [[0, 2], [1, 1], [4, 1], [5, 2]], [[4, 0], [5, 0], [6, 0], [7, 0]],\n    [[1, 2], [2, 1], [5, 1], [6, 2]], [[2, 2], [3, 1], [6, 1], [7, 2]]])\n', (635, 850), True, 'import numpy as nm\n'), ((1007, 1245), 'numpy.array', 'nm.array', (['[[[0, 0], [1, 3]], [[1, 0], [2, 3]], [[2, 0], [3, 3]], [[3, 0], [0, 3]], [[\n    4, 3], [5, 0]], [[5, 3], [6, 0]], [[6, 3], [7, 0]], [[7, 3], [4, 0]], [\n    [0, 8], [4, 8]], [[1, 8], [5, 8]], [[2, 8], [6, 8]], [[3, 8], [7, 8]]]'], {}), '([[[0, 0], [1, 3]], [[1, 0], [2, 3]], [[2, 0], [3, 3]], [[3, 0], [0,\n    3]], [[4, 3], [5, 0]], [[5, 3], [6, 0]], [[6, 3], [7, 0]], [[7, 3], [4,\n    0]], [[0, 8], [4, 8]], [[1, 8], [5, 8]], [[2, 8], [6, 8]], [[3, 8], [7,\n    8]]])\n', (1015, 1245), True, 'import numpy as nm\n'), ((2358, 2393), 'sfepy.discrete.Function', 'Function', (['"""get_refine"""', '_get_refine'], {}), "('get_refine', _get_refine)\n", (2366, 2393), False, 'from sfepy.discrete import Functions, Function\n'), ((2411, 2446), 'sfepy.discrete.Function', 'Function', (['"""get_coarse"""', '_get_coarse'], {}), "('get_coarse', _get_coarse)\n", (2419, 2446), False, 'from sfepy.discrete import Functions, Function\n'), ((2463, 2498), 'sfepy.discrete.Functions', 'Functions', (['[get_refine, get_coarse]'], {}), '([get_refine, get_coarse])\n', (2472, 2498), False, 'from sfepy.discrete import Functions, Function\n'), ((7074, 7106), 'sfepy.discrete.fem.Mesh.from_region', 'Mesh.from_region', (['region1', 'mesh0'], {}), '(region1, mesh0)\n', (7090, 7106), False, 'from sfepy.discrete.fem import Mesh, FEDomain\n'), ((7121, 7141), 'sfepy.discrete.fem.FEDomain', 'FEDomain', (['"""d"""', 'mesh1'], {}), "('d', mesh1)\n", (7129, 7141), False, 'from sfepy.discrete.fem import Mesh, FEDomain\n'), ((7299, 7345), 'numpy.empty', 'nm.empty', (['(n_cell, n_sub + 1)'], {'dtype': 'nm.uint32'}), '((n_cell, n_sub + 1), dtype=nm.uint32)\n', (7307, 7345), True, 'import numpy as nm\n'), ((7428, 7476), 'numpy.arange', 'nm.arange', (['((n_sub - 1) * n_cell)'], {'dtype': 'nm.uint32'}), '((n_sub - 1) * n_cell, dtype=nm.uint32)\n', (7437, 7476), True, 'import numpy as nm\n'), ((8356, 8412), 'sfepy.discrete.fem.Mesh.from_data', 'Mesh.from_data', (['"""a"""', 'coors', 'vgs', '[conn]', '[mat_id]', 'descs'], {}), "('a', coors, vgs, [conn], [mat_id], descs)\n", (8370, 8412), False, 'from sfepy.discrete.fem import Mesh, FEDomain\n'), ((8426, 8445), 'sfepy.discrete.fem.FEDomain', 'FEDomain', (['"""d"""', 'mesh'], {}), "('d', mesh)\n", (8434, 8445), False, 'from sfepy.discrete.fem import Mesh, FEDomain\n'), ((9164, 9178), 'numpy.array', 'nm.array', (['subs'], {}), '(subs)\n', (9172, 9178), True, 'import numpy as nm\n'), ((9294, 9325), 'sfepy.base.base.assert_', 'assert_', (["(desc in ['2_4', '3_8'])"], {}), "(desc in ['2_4', '3_8'])\n", (9301, 9325), False, 'from sfepy.base.base import assert_\n'), ((2040, 2073), 'numpy.zeros', 'nm.zeros', (['(0, 3)'], {'dtype': 'nm.uint32'}), '((0, 3), dtype=nm.uint32)\n', (2048, 2073), True, 'import numpy as nm\n'), ((2090, 2123), 'numpy.zeros', 'nm.zeros', (['(0, 3)'], {'dtype': 'nm.uint32'}), '((0, 3), dtype=nm.uint32)\n', (2098, 2123), True, 'import numpy as nm\n'), ((2946, 2992), 'numpy.intersect1d', 'nm.intersect1d', (['region0.facets', 'region1.facets'], {}), '(region0.facets, region1.facets)\n', (2960, 2992), True, 'import numpy as nm\n'), ((3363, 3413), 'numpy.where', 'nm.where', (['refine_flag[cells]', 'ii[:, :1]', 'ii[:, 1:]'], {}), '(refine_flag[cells], ii[:, :1], ii[:, 1:])\n', (3371, 3413), True, 'import numpy as nm\n'), ((3430, 3486), 'numpy.where', 'nm.where', (['refine_flag[cells]', 'cells[:, :1]', 'cells[:, 1:]'], {}), '(refine_flag[cells], cells[:, :1], cells[:, 1:])\n', (3438, 3486), True, 'import numpy as nm\n'), ((4485, 4503), 'six.moves.range', 'range', (['vc.shape[0]'], {}), '(vc.shape[0])\n', (4490, 4503), False, 'from six.moves import range, zip\n'), ((4860, 4873), 'numpy.array', 'nm.array', (['nnn'], {}), '(nnn)\n', (4868, 4873), True, 'import numpy as nm\n'), ((5146, 5167), 'numpy.argsort', 'nm.argsort', (['nnn[:, 2]'], {}), '(nnn[:, 2])\n', (5156, 5167), True, 'import numpy as nm\n'), ((5666, 5714), 'numpy.zeros', 'nm.zeros', (['(domain.shape.n_el, 12)'], {'dtype': 'nm.bool'}), '((domain.shape.n_el, 12), dtype=nm.bool)\n', (5674, 5714), True, 'import numpy as nm\n'), ((5751, 5760), 'six.moves.range', 'range', (['oe'], {}), '(oe)\n', (5756, 5760), False, 'from six.moves import range, zip\n'), ((6020, 6043), 'six.moves.range', 'range', (['oe', 'nnn.shape[0]'], {}), '(oe, nnn.shape[0])\n', (6025, 6043), False, 'from six.moves import range, zip\n'), ((6252, 6265), 'numpy.array', 'nm.array', (['ffs'], {}), '(ffs)\n', (6260, 6265), True, 'import numpy as nm\n'), ((6420, 6456), 'numpy.ones', 'nm.ones', (['nnn.shape[0]'], {'dtype': 'nm.bool'}), '(nnn.shape[0], dtype=nm.bool)\n', (6427, 6456), True, 'import numpy as nm\n'), ((7963, 8003), 'numpy.empty', 'nm.empty', (['(n_new, n_col)'], {'dtype': 'c0.dtype'}), '((n_new, n_col), dtype=c0.dtype)\n', (7971, 8003), True, 'import numpy as nm\n'), ((8836, 8874), 'numpy.searchsorted', 'nm.searchsorted', (['sub_cells[:, 0]', 'fine'], {}), '(sub_cells[:, 0], fine)\n', (8851, 8874), True, 'import numpy as nm\n'), ((2225, 2248), 'numpy.nonzero', 'nm.nonzero', (['refine_flag'], {}), '(refine_flag)\n', (2235, 2248), True, 'import numpy as nm\n'), ((2309, 2336), 'numpy.nonzero', 'nm.nonzero', (['(1 - refine_flag)'], {}), '(1 - refine_flag)\n', (2319, 2336), True, 'import numpy as nm\n'), ((3886, 3900), 'numpy.diff', 'nm.diff', (['coffs'], {}), '(coffs)\n', (3893, 3900), True, 'import numpy as nm\n'), ((4926, 4959), 'numpy.zeros', 'nm.zeros', (['(0, 3)'], {'dtype': 'nm.uint32'}), '((0, 3), dtype=nm.uint32)\n', (4934, 4959), True, 'import numpy as nm\n'), ((4980, 5013), 'numpy.zeros', 'nm.zeros', (['(0, 4)'], {'dtype': 'nm.uint32'}), '((0, 4), dtype=nm.uint32)\n', (4988, 5013), True, 'import numpy as nm\n'), ((6280, 6304), 'numpy.where', 'nm.where', (['(nnn[:, 2] == 1)'], {}), '(nnn[:, 2] == 1)\n', (6288, 6304), True, 'import numpy as nm\n'), ((9063, 9079), 'six.moves.zip', 'zip', (['used', 'fused'], {}), '(used, fused)\n', (9066, 9079), False, 'from six.moves import range, zip\n'), ((3553, 3579), 'numpy.zeros_like', 'nm.zeros_like', (['cells[:, 1]'], {}), '(cells[:, 1])\n', (3566, 3579), True, 'import numpy as nm\n'), ((5782, 5812), 'numpy.intersect1d', 'nm.intersect1d', (['fc[ii]', 'ff[ii]'], {}), '(fc[ii], ff[ii])\n', (5796, 5812), True, 'import numpy as nm\n'), ((6065, 6095), 'numpy.intersect1d', 'nm.intersect1d', (['ec[ii]', 'ef[ii]'], {}), '(ec[ii], ef[ii])\n', (6079, 6095), True, 'import numpy as nm\n'), ((5833, 5858), 'numpy.where', 'nm.where', (['(ff[ii] == facet)'], {}), '(ff[ii] == facet)\n', (5841, 5858), True, 'import numpy as nm\n'), ((5882, 5907), 'numpy.where', 'nm.where', (['(fc[ii] == facet)'], {}), '(fc[ii] == facet)\n', (5890, 5907), True, 'import numpy as nm\n'), ((6116, 6141), 'numpy.where', 'nm.where', (['(ef[ii] == facet)'], {}), '(ef[ii] == facet)\n', (6124, 6141), True, 'import numpy as nm\n'), ((6165, 6190), 'numpy.where', 'nm.where', (['(ec[ii] == facet)'], {}), '(ec[ii] == facet)\n', (6173, 6190), True, 'import numpy as nm\n'), ((3180, 3193), 'numpy.diff', 'nm.diff', (['offs'], {}), '(offs)\n', (3187, 3193), True, 'import numpy as nm\n')]
|
from __future__ import annotations
from sqlmodel import Session, select
from src.models import User
from src.schemas.user import CreateUser
from src.services.auth import check_password_hash, generate_password_hash
def get_user_by_email(db: Session, email: str) -> User | None:
"""
Finds a user with the given email in the database and returns it.
    Returns None if a user with the email does not exist.
"""
stmt = select(User).where(User.email == email)
return db.exec(stmt).first()
def create_user(user_data: CreateUser) -> User:
"""
Creates a user and returns it.
"""
password = generate_password_hash(user_data.password)
created_user = User(name=user_data.name, email=user_data.email, password=password)
return created_user
def get_user_by_email_and_password(
db: Session, email: str, password: str
) -> User | None:
"""
Checks if the given email and password are valid, and returns the user.
Returns None if the email or password is wrong.
"""
user = get_user_by_email(db, email)
if user is None or not check_password_hash(password, user.password):
return None
return user
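# Hedged usage sketch (added; `engine` is a hypothetical SQLAlchemy engine,
# not defined in this module):
#   with Session(engine) as db:
#       user = get_user_by_email_and_password(db, "a@example.com", "secret")
#       if user is None:
#           ...  # wrong email or password - reject the login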
|
[
"sqlmodel.select"
] |
[((627, 669), 'src.services.auth.generate_password_hash', 'generate_password_hash', (['user_data.password'], {}), '(user_data.password)\n', (649, 669), False, 'from src.services.auth import check_password_hash, generate_password_hash\n'), ((690, 757), 'src.models.User', 'User', ([], {'name': 'user_data.name', 'email': 'user_data.email', 'password': 'password'}), '(name=user_data.name, email=user_data.email, password=password)\n', (694, 757), False, 'from src.models import User\n'), ((437, 449), 'sqlmodel.select', 'select', (['User'], {}), '(User)\n', (443, 449), False, 'from sqlmodel import Session, select\n'), ((1094, 1138), 'src.services.auth.check_password_hash', 'check_password_hash', (['password', 'user.password'], {}), '(password, user.password)\n', (1113, 1138), False, 'from src.services.auth import check_password_hash, generate_password_hash\n')]
|
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine.distributed.helper import get_device_count_by_fork
from megengine.quantization.observer import (
ExponentialMovingAverageObserver,
MinMaxObserver,
Observer,
PassiveObserver,
SyncExponentialMovingAverageObserver,
SyncMinMaxObserver,
)
def test_observer():
with pytest.raises(TypeError):
Observer("qint8")
def test_min_max_observer():
x = np.random.rand(3, 3, 3, 3).astype("float32")
np_min, np_max = x.min(), x.max()
x = mge.tensor(x)
m = MinMaxObserver()
m(x)
np.testing.assert_allclose(m.min_val.numpy(), np_min)
np.testing.assert_allclose(m.max_val.numpy(), np_max)
def test_exponential_moving_average_observer():
t = np.random.rand()
x1 = np.random.rand(3, 3, 3, 3).astype("float32")
x2 = np.random.rand(3, 3, 3, 3).astype("float32")
expected_min = x1.min() * t + x2.min() * (1 - t)
expected_max = x1.max() * t + x2.max() * (1 - t)
m = ExponentialMovingAverageObserver(momentum=t)
m(mge.tensor(x1, dtype=np.float32))
m(mge.tensor(x2, dtype=np.float32))
np.testing.assert_allclose(m.min_val.numpy(), expected_min)
np.testing.assert_allclose(m.max_val.numpy(), expected_max)
def test_passive_observer():
q_dict = {"scale": mge.tensor(1.0)}
m = PassiveObserver(q_dict, "qint8")
assert m.orig_scale == 1.0
assert m.scale == 1.0
m.scale = 2.0
assert m.scale == 2.0
assert m.get_qparams() == {"scale": mge.tensor(2.0)}
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM"
)
@pytest.mark.skipif(get_device_count_by_fork("gpu") < 2, reason="need more gpu device")
@pytest.mark.isolated_distributed
def test_sync_min_max_observer():
word_size = get_device_count_by_fork("gpu")
x = np.random.rand(3 * word_size, 3, 3, 3).astype("float32")
np_min, np_max = x.min(), x.max()
@dist.launcher
def worker():
rank = dist.get_rank()
m = SyncMinMaxObserver()
y = mge.tensor(x[rank * 3 : (rank + 1) * 3])
m(y)
assert m.min_val == np_min and m.max_val == np_max
worker()
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM"
)
@pytest.mark.skipif(get_device_count_by_fork("gpu") < 2, reason="need more gpu device")
@pytest.mark.isolated_distributed
def test_sync_exponential_moving_average_observer():
word_size = get_device_count_by_fork("gpu")
t = np.random.rand()
x1 = np.random.rand(3 * word_size, 3, 3, 3).astype("float32")
x2 = np.random.rand(3 * word_size, 3, 3, 3).astype("float32")
expected_min = x1.min() * t + x2.min() * (1 - t)
expected_max = x1.max() * t + x2.max() * (1 - t)
@dist.launcher
def worker():
rank = dist.get_rank()
m = SyncExponentialMovingAverageObserver(momentum=t)
y1 = mge.tensor(x1[rank * 3 : (rank + 1) * 3])
y2 = mge.tensor(x2[rank * 3 : (rank + 1) * 3])
m(y1)
m(y2)
np.testing.assert_allclose(m.min_val.numpy(), expected_min, atol=1e-6)
np.testing.assert_allclose(m.max_val.numpy(), expected_max, atol=1e-6)
worker()
|
[
"megengine.quantization.observer.Observer",
"megengine.tensor",
"megengine.distributed.get_rank",
"megengine.quantization.observer.SyncMinMaxObserver",
"megengine.distributed.helper.get_device_count_by_fork",
"megengine.quantization.observer.PassiveObserver",
"megengine.quantization.observer.ExponentialMovingAverageObserver",
"megengine.quantization.observer.SyncExponentialMovingAverageObserver",
"megengine.quantization.observer.MinMaxObserver"
] |
[((599, 612), 'megengine.tensor', 'mge.tensor', (['x'], {}), '(x)\n', (609, 612), True, 'import megengine as mge\n'), ((621, 637), 'megengine.quantization.observer.MinMaxObserver', 'MinMaxObserver', ([], {}), '()\n', (635, 637), False, 'from megengine.quantization.observer import ExponentialMovingAverageObserver, MinMaxObserver, Observer, PassiveObserver, SyncExponentialMovingAverageObserver, SyncMinMaxObserver\n'), ((821, 837), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (835, 837), True, 'import numpy as np\n'), ((1060, 1104), 'megengine.quantization.observer.ExponentialMovingAverageObserver', 'ExponentialMovingAverageObserver', ([], {'momentum': 't'}), '(momentum=t)\n', (1092, 1104), False, 'from megengine.quantization.observer import ExponentialMovingAverageObserver, MinMaxObserver, Observer, PassiveObserver, SyncExponentialMovingAverageObserver, SyncMinMaxObserver\n'), ((1392, 1424), 'megengine.quantization.observer.PassiveObserver', 'PassiveObserver', (['q_dict', '"""qint8"""'], {}), "(q_dict, 'qint8')\n", (1407, 1424), False, 'from megengine.quantization.observer import ExponentialMovingAverageObserver, MinMaxObserver, Observer, PassiveObserver, SyncExponentialMovingAverageObserver, SyncMinMaxObserver\n'), ((1959, 1990), 'megengine.distributed.helper.get_device_count_by_fork', 'get_device_count_by_fork', (['"""gpu"""'], {}), "('gpu')\n", (1983, 1990), False, 'from megengine.distributed.helper import get_device_count_by_fork\n'), ((2730, 2761), 'megengine.distributed.helper.get_device_count_by_fork', 'get_device_count_by_fork', (['"""gpu"""'], {}), "('gpu')\n", (2754, 2761), False, 'from megengine.distributed.helper import get_device_count_by_fork\n'), ((2770, 2786), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (2784, 2786), True, 'import numpy as np\n'), ((417, 441), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (430, 441), False, 'import pytest\n'), ((451, 468), 'megengine.quantization.observer.Observer', 'Observer', (['"""qint8"""'], {}), "('qint8')\n", (459, 468), False, 'from megengine.quantization.observer import ExponentialMovingAverageObserver, MinMaxObserver, Observer, PassiveObserver, SyncExponentialMovingAverageObserver, SyncMinMaxObserver\n'), ((1111, 1143), 'megengine.tensor', 'mge.tensor', (['x1'], {'dtype': 'np.float32'}), '(x1, dtype=np.float32)\n', (1121, 1143), True, 'import megengine as mge\n'), ((1151, 1183), 'megengine.tensor', 'mge.tensor', (['x2'], {'dtype': 'np.float32'}), '(x2, dtype=np.float32)\n', (1161, 1183), True, 'import megengine as mge\n'), ((1367, 1382), 'megengine.tensor', 'mge.tensor', (['(1.0)'], {}), '(1.0)\n', (1377, 1382), True, 'import megengine as mge\n'), ((2147, 2162), 'megengine.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (2160, 2162), True, 'import megengine.distributed as dist\n'), ((2175, 2195), 'megengine.quantization.observer.SyncMinMaxObserver', 'SyncMinMaxObserver', ([], {}), '()\n', (2193, 2195), False, 'from megengine.quantization.observer import ExponentialMovingAverageObserver, MinMaxObserver, Observer, PassiveObserver, SyncExponentialMovingAverageObserver, SyncMinMaxObserver\n'), ((2208, 2246), 'megengine.tensor', 'mge.tensor', (['x[rank * 3:(rank + 1) * 3]'], {}), '(x[rank * 3:(rank + 1) * 3])\n', (2218, 2246), True, 'import megengine as mge\n'), ((1610, 1627), 'platform.system', 'platform.system', ([], {}), '()\n', (1625, 1627), False, 'import platform\n'), ((1710, 1727), 'platform.system', 'platform.system', ([], {}), '()\n', (1725, 1727), False, 'import platform\n'), ((1807, 1838), 'megengine.distributed.helper.get_device_count_by_fork', 'get_device_count_by_fork', (['"""gpu"""'], {}), "('gpu')\n", (1831, 1838), False, 'from megengine.distributed.helper import get_device_count_by_fork\n'), ((3078, 3093), 'megengine.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (3091, 3093), True, 'import megengine.distributed as dist\n'), ((3106, 3154), 'megengine.quantization.observer.SyncExponentialMovingAverageObserver', 'SyncExponentialMovingAverageObserver', ([], {'momentum': 't'}), '(momentum=t)\n', (3142, 3154), False, 'from megengine.quantization.observer import ExponentialMovingAverageObserver, MinMaxObserver, Observer, PassiveObserver, SyncExponentialMovingAverageObserver, SyncMinMaxObserver\n'), ((3168, 3207), 'megengine.tensor', 'mge.tensor', (['x1[rank * 3:(rank + 1) * 3]'], {}), '(x1[rank * 3:(rank + 1) * 3])\n', (3178, 3207), True, 'import megengine as mge\n'), ((3223, 3262), 'megengine.tensor', 'mge.tensor', (['x2[rank * 3:(rank + 1) * 3]'], {}), '(x2[rank * 3:(rank + 1) * 3])\n', (3233, 3262), True, 'import megengine as mge\n'), ((2362, 2379), 'platform.system', 'platform.system', ([], {}), '()\n', (2377, 2379), False, 'import platform\n'), ((2462, 2479), 'platform.system', 'platform.system', ([], {}), '()\n', (2477, 2479), False, 'import platform\n'), ((2559, 2590), 'megengine.distributed.helper.get_device_count_by_fork', 'get_device_count_by_fork', (['"""gpu"""'], {}), "('gpu')\n", (2583, 2590), False, 'from megengine.distributed.helper import get_device_count_by_fork\n'), ((508, 534), 'numpy.random.rand', 'np.random.rand', (['(3)', '(3)', '(3)', '(3)'], {}), '(3, 3, 3, 3)\n', (522, 534), True, 'import numpy as np\n'), ((847, 873), 'numpy.random.rand', 'np.random.rand', (['(3)', '(3)', '(3)', '(3)'], {}), '(3, 3, 3, 3)\n', (861, 873), True, 'import numpy as np\n'), ((901, 927), 'numpy.random.rand', 'np.random.rand', (['(3)', '(3)', '(3)', '(3)'], {}), '(3, 3, 3, 3)\n', (915, 927), True, 'import numpy as np\n'), ((1566, 1581), 'megengine.tensor', 'mge.tensor', (['(2.0)'], {}), '(2.0)\n', (1576, 1581), True, 'import megengine as mge\n'), ((1999, 2037), 'numpy.random.rand', 'np.random.rand', (['(3 * word_size)', '(3)', '(3)', '(3)'], {}), '(3 * word_size, 3, 3, 3)\n', (2013, 2037), True, 'import numpy as np\n'), ((2796, 2834), 'numpy.random.rand', 'np.random.rand', (['(3 * word_size)', '(3)', '(3)', '(3)'], {}), '(3 * word_size, 3, 3, 3)\n', (2810, 2834), True, 'import numpy as np\n'), ((2862, 2900), 'numpy.random.rand', 'np.random.rand', (['(3 * word_size)', '(3)', '(3)', '(3)'], {}), '(3 * word_size, 3, 3, 3)\n', (2876, 2900), True, 'import numpy as np\n')]
|
from __future__ import absolute_import
import re
from copy import copy
import numpy as nm
from sfepy.base.base import (as_float_or_complex, get_default, assert_,
Container, Struct, basestr, goptions)
from sfepy.base.compat import in1d
# Used for imports in term files.
from sfepy.terms.extmods import terms
import six
from six.moves import range
from functools import reduce
_match_args = re.compile('^([^\(\}]*)\((.*)\)$').match
_match_virtual = re.compile('^virtual$').match
_match_state = re.compile('^state(_[_a-zA-Z0-9]+)?$').match
_match_parameter = re.compile('^parameter(_[_a-zA-Z0-9]+)?$').match
_match_material = re.compile('^material(_[_a-zA-Z0-9]+)?$').match
_match_material_opt = re.compile('^opt_material(_[_a-zA-Z0-9]+)?$').match
_match_material_root = re.compile('(.+)\.(.*)').match
_match_ts = re.compile('^ts$').match
def get_arg_kinds(arg_types):
"""
Translate `arg_types` of a Term to a canonical form.
Parameters
----------
arg_types : tuple of strings
The term argument types, as given in the `arg_types` attribute.
Returns
-------
arg_kinds : list of strings
The argument kinds - one of 'virtual_variable', 'state_variable',
'parameter_variable', 'opt_material', 'ts', 'user'.
"""
arg_kinds = []
for ii, arg_type in enumerate(arg_types):
if _match_virtual(arg_type):
arg_kinds.append('virtual_variable')
elif _match_state(arg_type):
arg_kinds.append('state_variable')
elif _match_parameter(arg_type):
arg_kinds.append('parameter_variable')
elif _match_material(arg_type):
arg_kinds.append('material')
elif _match_material_opt(arg_type):
arg_kinds.append('opt_material')
if ii > 0:
msg = 'opt_material at position %d, must be at 0!' % ii
raise ValueError(msg)
elif _match_ts(arg_type):
arg_kinds.append('ts')
else:
arg_kinds.append('user')
return arg_kinds
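# Illustration (added, not in the original source): a typical term signature
# translates as
#   get_arg_kinds(('opt_material', 'virtual', 'state'))
#   -> ['opt_material', 'virtual_variable', 'state_variable']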
def get_shape_kind(integration):
"""
Get data shape kind for given integration type.
"""
if integration == 'surface':
shape_kind = 'surface'
elif integration in ('volume', 'plate', 'surface_extra'):
shape_kind = 'volume'
elif integration == 'point':
shape_kind = 'point'
else:
raise NotImplementedError('unsupported term integration! (%s)'
% integration)
return shape_kind
def split_complex_args(args):
"""
Split complex arguments to real and imaginary parts.
Returns
-------
newargs : dictionary
Dictionary with lists corresponding to `args` such that each
argument of numpy.complex128 data type is split to its real and
imaginary part. The output depends on the number of complex
arguments in 'args':
- 0: list (key 'r') identical to input one
- 1: two lists with keys 'r', 'i' corresponding to real
and imaginary parts
- 2: output dictionary contains four lists:
- 'r' - real(arg1), real(arg2)
- 'i' - imag(arg1), imag(arg2)
- 'ri' - real(arg1), imag(arg2)
- 'ir' - imag(arg1), real(arg2)
"""
newargs = {}
cai = []
for ii, arg in enumerate(args):
if isinstance(arg, nm.ndarray) and (arg.dtype == nm.complex128):
cai.append(ii)
if len(cai) > 0:
newargs['r'] = list(args[:])
newargs['i'] = list(args[:])
arg1 = cai[0]
newargs['r'][arg1] = args[arg1].real.copy()
newargs['i'][arg1] = args[arg1].imag.copy()
if len(cai) == 2:
arg2 = cai[1]
newargs['r'][arg2] = args[arg2].real.copy()
newargs['i'][arg2] = args[arg2].imag.copy()
newargs['ri'] = list(args[:])
newargs['ir'] = list(args[:])
newargs['ri'][arg1] = newargs['r'][arg1]
newargs['ri'][arg2] = newargs['i'][arg2]
newargs['ir'][arg1] = newargs['i'][arg1]
newargs['ir'][arg2] = newargs['r'][arg2]
elif len(cai) > 2:
raise NotImplementedError('more than 2 complex arguments! (%d)'
% len(cai))
else:
newargs['r'] = args[:]
return newargs
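# Illustration (added, not in the original source): with one complex argument,
# e.g. args = [nm.ones(2), nm.array([1 + 2j, 3 + 4j])], split_complex_args()
# returns the keys 'r' and 'i', where the complex argument is replaced by its
# real part in newargs['r'] and by its imaginary part in newargs['i'].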
def create_arg_parser():
from pyparsing import Literal, Word, delimitedList, Group, \
StringStart, StringEnd, Optional, nums, alphas, alphanums
inumber = Word("+-" + nums, nums)
history = Optional(Literal('[').suppress() + inumber
+ Literal(']').suppress(), default=0)("history")
history.setParseAction(lambda str, loc, toks: int(toks[0]))
variable = Group(Word(alphas, alphanums + '._') + history)
derivative = Group(Literal('d') + variable\
+ Literal('/').suppress() + Literal('dt'))
trace = Group(Literal('tr') + Literal('(').suppress() + variable \
+ Literal(')').suppress())
generalized_var = derivative | trace | variable
args = StringStart() + delimitedList(generalized_var) + StringEnd()
return args
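# Illustration (added, not in the original source): the grammar accepts plain
# variables, material components, histories, time derivatives and traces, e.g.
#   create_arg_parser().parseString('m.D, du/dt, tr(v), p[-1]')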
class ConnInfo(Struct):
def get_region(self, can_trace=True):
if self.is_trace and can_trace:
return self.region.get_mirror_region()
else:
return self.region
def get_region_name(self, can_trace=True):
if self.is_trace and can_trace:
reg = self.region.get_mirror_region()
else:
reg = self.region
if reg is not None:
return reg.name
else:
return None
class Terms(Container):
@staticmethod
def from_desc(term_descs, regions, integrals=None):
"""
Create terms, assign each term its region.
"""
from sfepy.terms import term_table
terms = Terms()
for td in term_descs:
try:
constructor = term_table[td.name]
except:
msg = "term '%s' is not in %s" % (td.name,
sorted(term_table.keys()))
raise ValueError(msg)
try:
region = regions[td.region]
except IndexError:
raise KeyError('region "%s" does not exist!' % td.region)
term = Term.from_desc(constructor, td, region, integrals=integrals)
terms.append(term)
return terms
def __init__(self, objs=None):
Container.__init__(self, objs=objs)
self.update_expression()
def insert(self, ii, obj):
Container.insert(self, ii, obj)
self.update_expression()
def append(self, obj):
Container.append(self, obj)
self.update_expression()
def update_expression(self):
self.expression = []
for term in self:
aux = [term.sign, term.name, term.arg_str,
term.integral_name, term.region.name]
self.expression.append(aux)
def __mul__(self, other):
out = Terms()
for name, term in self.iteritems():
out.append(term * other)
return out
def __rmul__(self, other):
return self * other
def __add__(self, other):
if isinstance(other, Term):
out = self.copy()
out.append(other)
elif isinstance(other, Terms):
out = Terms(self._objs + other._objs)
else:
raise ValueError('cannot add Terms with %s!' % other)
return out
def __radd__(self, other):
return self + other
def __sub__(self, other):
if isinstance(other, Term):
out = self + (-other)
elif isinstance(other, Terms):
out = self + (-other)
else:
raise ValueError('cannot subtract Terms with %s!' % other)
return out
def __rsub__(self, other):
return -self + other
def __pos__(self):
return self
def __neg__(self):
return -1.0 * self
def setup(self):
for term in self:
term.setup()
def assign_args(self, variables, materials, user=None):
"""
Assign all term arguments.
"""
for term in self:
term.assign_args(variables, materials, user)
def get_variable_names(self):
out = []
for term in self:
out.extend(term.get_variable_names())
return list(set(out))
def get_material_names(self):
out = []
for term in self:
out.extend(term.get_material_names())
return list(set(out))
def get_user_names(self):
out = []
for term in self:
out.extend(term.get_user_names())
return list(set(out))
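# Illustration (added, not in the original source; t1, t2 stand for already
# constructed Term instances): Term and Terms support a small algebra, e.g.
# `2.0 * t1 - t2` yields a Terms container holding t1 with sign +2.0 and t2
# with sign -1.0.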
class Term(Struct):
name = ''
arg_types = ()
arg_shapes = {}
integration = 'volume'
geometries = ['1_2', '2_3', '2_4', '3_4', '3_8']
@staticmethod
def new(name, integral, region, **kwargs):
from sfepy.terms import term_table
arg_str = _match_args(name)
if arg_str is not None:
name, arg_str = arg_str.groups()
else:
raise ValueError('bad term syntax! (%s)' % name)
if name in term_table:
constructor = term_table[name]
else:
msg = "term '%s' is not in %s" % (name, sorted(term_table.keys()))
raise ValueError(msg)
obj = constructor(name, arg_str, integral, region, **kwargs)
return obj
@staticmethod
def from_desc(constructor, desc, region, integrals=None):
from sfepy.discrete import Integrals
if integrals is None:
integrals = Integrals()
integral = integrals.get(desc.integral)
obj = constructor(desc.name, desc.args, integral, region)
obj.sign = desc.sign
return obj
def __init__(self, name, arg_str, integral, region, **kwargs):
self.name = name
self.arg_str = arg_str
self.region = region
self._kwargs = kwargs
self._integration = self.integration
self.sign = 1.0
self.set_integral(integral)
def __mul__(self, other):
try:
mul = as_float_or_complex(other)
except ValueError:
raise ValueError('cannot multiply Term with %s!' % other)
out = self.copy(name=self.name)
out.sign = mul * self.sign
return out
def __rmul__(self, other):
return self * other
def __add__(self, other):
if isinstance(other, Term):
out = Terms([self, other])
else:
out = NotImplemented
return out
def __sub__(self, other):
if isinstance(other, Term):
out = Terms([self, -1.0 * other])
else:
out = NotImplemented
return out
def __pos__(self):
return self
def __neg__(self):
out = -1.0 * self
return out
def get_str(self):
return ('%+.2e * %s.%d.%s(%s)'
% (self.sign, self.name, self.integral.order,
self.region.name, self.arg_str))
def set_integral(self, integral):
"""
Set the term integral.
"""
self.integral = integral
if self.integral is not None:
self.integral_name = self.integral.name
def setup(self):
self.function = Struct.get(self, 'function', None)
self.step = 0
self.dt = 1.0
self.is_quasistatic = False
self.has_region = True
self.setup_formal_args()
if self._kwargs:
self.setup_args(**self._kwargs)
else:
self.args = []
def setup_formal_args(self):
self.arg_names = []
self.arg_steps = {}
self.arg_derivatives = {}
self.arg_traces = {}
parser = create_arg_parser()
self.arg_desc = parser.parseString(self.arg_str)
for arg in self.arg_desc:
trace = False
derivative = None
if isinstance(arg[1], int):
name, step = arg
else:
kind = arg[0]
name, step = arg[1]
if kind == 'd':
derivative = arg[2]
elif kind == 'tr':
trace = True
match = _match_material_root(name)
if match:
name = (match.group(1), match.group(2))
self.arg_names.append(name)
self.arg_steps[name] = step
self.arg_derivatives[name] = derivative
self.arg_traces[name] = trace
def setup_args(self, **kwargs):
self._kwargs = kwargs
self.args = []
for arg_name in self.arg_names:
if isinstance(arg_name, basestr):
self.args.append(self._kwargs[arg_name])
else:
self.args.append((self._kwargs[arg_name[0]], arg_name[1]))
self.classify_args()
self.check_args()
def assign_args(self, variables, materials, user=None):
"""
Check term argument existence in variables, materials, user data
and assign the arguments to terms. Also check compatibility of
field and term regions.
"""
if user is None:
user = {}
user.setdefault('ts', Struct())
kwargs = {}
for arg_name in self.arg_names:
if isinstance(arg_name, basestr):
if arg_name in variables.names:
kwargs[arg_name] = variables[arg_name]
elif arg_name in user:
kwargs[arg_name] = user[arg_name]
else:
raise ValueError('argument %s not found!' % arg_name)
else:
arg_name = arg_name[0]
if arg_name in materials.names:
kwargs[arg_name] = materials[arg_name]
else:
raise ValueError('material argument %s not found!'
% arg_name)
self.setup_args(**kwargs)
def classify_args(self):
"""
Classify types of the term arguments and find matching call
signature.
A state variable can be in place of a parameter variable and
vice versa.
"""
self.names = Struct(name='arg_names',
material=[], variable=[], user=[],
state=[], virtual=[], parameter=[])
# Prepare for 'opt_material' - just prepend a None argument if needed.
if isinstance(self.arg_types[0], tuple):
arg_types = self.arg_types[0]
else:
arg_types = self.arg_types
if len(arg_types) == (len(self.args) + 1):
self.args.insert(0, (None, None))
self.arg_names.insert(0, (None, None))
if isinstance(self.arg_types[0], tuple):
assert_(len(self.modes) == len(self.arg_types))
# Find matching call signature using variable arguments - material
# and user arguments are ignored!
matched = []
for it, arg_types in enumerate(self.arg_types):
arg_kinds = get_arg_kinds(arg_types)
if self._check_variables(arg_kinds):
matched.append((it, arg_kinds))
if len(matched) == 1:
i_match, arg_kinds = matched[0]
arg_types = self.arg_types[i_match]
self.mode = self.modes[i_match]
elif len(matched) == 0:
msg = 'cannot match arguments! (%s)' % self.arg_names
raise ValueError(msg)
else:
msg = 'ambiguous arguments! (%s)' % self.arg_names
raise ValueError(msg)
else:
arg_types = self.arg_types
arg_kinds = get_arg_kinds(self.arg_types)
self.mode = Struct.get(self, 'mode', None)
if not self._check_variables(arg_kinds):
raise ValueError('cannot match variables! (%s)'
% self.arg_names)
# Set actual argument types.
self.ats = list(arg_types)
for ii, arg_kind in enumerate(arg_kinds):
name = self.arg_names[ii]
if arg_kind.endswith('variable'):
names = self.names.variable
if arg_kind == 'virtual_variable':
self.names.virtual.append(name)
elif arg_kind == 'state_variable':
self.names.state.append(name)
elif arg_kind == 'parameter_variable':
self.names.parameter.append(name)
elif arg_kind.endswith('material'):
names = self.names.material
else:
names = self.names.user
names.append(name)
self.n_virtual = len(self.names.virtual)
if self.n_virtual > 1:
raise ValueError('at most one virtual variable is allowed! (%d)'
% self.n_virtual)
self.set_arg_types()
self.setup_integration()
def _check_variables(self, arg_kinds):
for ii, arg_kind in enumerate(arg_kinds):
if arg_kind.endswith('variable'):
var = self.args[ii]
check = {'virtual_variable' : var.is_virtual,
'state_variable' : var.is_state_or_parameter,
'parameter_variable' : var.is_state_or_parameter}
if not check[arg_kind]():
return False
else:
return True
def set_arg_types(self):
pass
def check_args(self):
"""
Common checking to all terms.
Check compatibility of field and term regions.
"""
vns = self.get_variable_names()
for name in vns:
field = self._kwargs[name].get_field()
if field is None:
continue
if not nm.all(in1d(self.region.vertices,
field.region.vertices)):
msg = ('%s: incompatible regions: (self, field %s)'
+ '(%s in %s)') %\
(self.name, field.name,
self.region.vertices, field.region.vertices)
raise ValueError(msg)
def get_variable_names(self):
return self.names.variable
def get_material_names(self):
out = []
for aux in self.names.material:
if aux[0] is not None:
out.append(aux[0])
return out
def get_user_names(self):
return self.names.user
def get_virtual_name(self):
if not self.names.virtual:
return None
var = self.get_virtual_variable()
return var.name
def get_state_names(self):
"""
If variables are given, return only true unknowns whose data are of
the current time step (0).
"""
variables = self.get_state_variables()
return [var.name for var in variables]
def get_parameter_names(self):
return copy(self.names.parameter)
def get_conn_key(self):
"""The key to be used in DOF connectivity information."""
key = (self.name,) + tuple(self.arg_names)
key += (self.integral_name, self.region.name)
return key
def get_conn_info(self):
vvar = self.get_virtual_variable()
svars = self.get_state_variables()
pvars = self.get_parameter_variables()
all_vars = self.get_variables()
dc_type = self.get_dof_conn_type()
tgs = self.get_geometry_types()
v_tg = None
if vvar is not None:
field = vvar.get_field()
if field is not None:
if vvar.name in tgs:
v_tg = tgs[vvar.name]
else:
v_tg = None
else:
# No virtual variable -> all unknowns are in fact known parameters.
pvars += svars
svars = []
region = self.get_region()
if region is not None:
is_any_trace = reduce(lambda x, y: x or y,
list(self.arg_traces.values()))
if is_any_trace:
region.setup_mirror_region()
vals = []
aux_pvars = []
for svar in svars:
# Allow only true state variables.
if not svar.is_state():
aux_pvars.append(svar)
continue
field = svar.get_field()
is_trace = self.arg_traces[svar.name]
if svar.name in tgs:
ps_tg = tgs[svar.name]
else:
ps_tg = v_tg
val = ConnInfo(virtual=vvar,
state=svar,
primary=svar,
has_virtual=True,
has_state=True,
is_trace=is_trace,
dc_type=dc_type,
v_tg=v_tg,
ps_tg=ps_tg,
region=region,
all_vars=all_vars)
vals.append(val)
pvars += aux_pvars
for pvar in pvars:
field = pvar.get_field()
is_trace = self.arg_traces[pvar.name]
if pvar.name in tgs:
ps_tg = tgs[pvar.name]
else:
ps_tg = v_tg
val = ConnInfo(virtual=vvar,
state=None,
primary=pvar.get_primary(),
has_virtual=vvar is not None,
has_state=False,
is_trace=is_trace,
dc_type=dc_type,
v_tg=v_tg,
ps_tg=ps_tg,
region=region,
all_vars=all_vars)
vals.append(val)
if vvar and (len(vals) == 0):
# No state, parameter variables, just the virtual one.
val = ConnInfo(virtual=vvar,
state=vvar.get_primary(),
primary=vvar.get_primary(),
has_virtual=True,
has_state=False,
is_trace=False,
dc_type=dc_type,
v_tg=v_tg,
ps_tg=v_tg,
region=region,
all_vars=all_vars)
vals.append(val)
return vals
def get_args_by_name(self, arg_names):
"""
Return arguments by name.
"""
out = []
for name in arg_names:
try:
ii = self.arg_names.index(name)
except ValueError:
raise ValueError('non-existing argument! (%s)' % name)
out.append(self.args[ii])
return out
def get_args(self, arg_types=None, **kwargs):
"""
Return arguments by type as specified in arg_types (or
self.ats). Arguments in **kwargs can override the ones assigned
at the term construction - this is useful for passing user data.
"""
ats = self.ats
if arg_types is None:
arg_types = ats
args = []
region_name, iorder = self.region.name, self.integral.order
for at in arg_types:
ii = ats.index(at)
arg_name = self.arg_names[ii]
if isinstance(arg_name, basestr):
if arg_name in kwargs:
args.append(kwargs[arg_name])
else:
args.append(self.args[ii])
else:
mat, par_name = self.args[ii]
if mat is not None:
mat_data = mat.get_data((region_name, iorder), par_name)
else:
mat_data = None
args.append(mat_data)
return args
def get_kwargs(self, keys, **kwargs):
"""Extract arguments from **kwargs listed in keys (default is
None)."""
return [kwargs.get(name) for name in keys]
def get_arg_name(self, arg_type, full=False, join=None):
"""
        Get the name of the argument specified by `arg_type`.
Parameters
----------
arg_type : str
The argument type string.
full : bool
If True, return the full name. For example, if the name of a
variable argument is 'u' and its time derivative is
requested, the full name is 'du/dt'.
join : str, optional
Optionally, the material argument name tuple can be joined
to a single string using the `join` string.
Returns
-------
name : str
The argument name.
"""
try:
ii = self.ats.index(arg_type)
except ValueError:
return None
name = self.arg_names[ii]
if full:
# Include derivatives.
if self.arg_derivatives[name]:
name = 'd%s/%s' % (name, self.arg_derivatives[name])
if (join is not None) and isinstance(name, tuple):
name = join.join(name)
return name
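    # Sketch of get_arg_name() results for a hypothetical term
    # 'dw_volume_dot(m.c, v, u)' with a time derivative requested on 'u':
    #
    #     self.get_arg_name('virtual')             # -> 'v'
    #     self.get_arg_name('state', full=True)    # -> 'du/dt'
    #     self.get_arg_name('material', join='.')  # -> 'm.c'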
def setup_integration(self):
self.has_geometry = True
self.geometry_types = {}
if isinstance(self.integration, basestr):
for var in self.get_variables():
self.geometry_types[var.name] = self.integration
else:
if self.mode is not None:
self.integration = self._integration[self.mode]
if self.integration is not None:
for arg_type, gtype in six.iteritems(self.integration):
var = self.get_args(arg_types=[arg_type])[0]
self.geometry_types[var.name] = gtype
gtypes = list(set(self.geometry_types.values()))
if 'surface_extra' in gtypes:
self.dof_conn_type = 'volume'
elif len(gtypes):
self.dof_conn_type = gtypes[0]
def get_region(self):
return self.region
def get_geometry_types(self):
"""
Returns
-------
out : dict
The required geometry types for each variable argument.
"""
return self.geometry_types
def get_dof_conn_type(self):
return Struct(name='dof_conn_info', type=self.dof_conn_type,
region_name=self.region.name)
def get_assembling_cells(self, shape=None):
"""
Return the assembling cell indices into a DOF connectivity.
"""
cells = nm.arange(shape[0], dtype=nm.int32)
return cells
def time_update(self, ts):
if ts is not None:
self.step = ts.step
self.dt = ts.dt
self.is_quasistatic = ts.is_quasistatic
if 'ts' in self._kwargs:
self._kwargs['ts'].update(ts)
def advance(self, ts):
"""
Advance to the next time step. Implemented in subclasses.
"""
def get_vector(self, variable):
"""Get the vector stored in `variable` according to self.arg_steps
and self.arg_derivatives. Supports only the backward difference w.r.t.
time."""
name = variable.name
return variable(step=self.arg_steps[name],
derivative=self.arg_derivatives[name])
def get_variables(self, as_list=True):
if as_list:
variables = self.get_args_by_name(self.names.variable)
else:
variables = {}
for var in self.get_args_by_name(self.names.variable):
variables[var.name] = var
return variables
def get_virtual_variable(self):
aux = self.get_args_by_name(self.names.virtual)
if len(aux) == 1:
var = aux[0]
else:
var = None
return var
def get_state_variables(self, unknown_only=False):
variables = self.get_args_by_name(self.names.state)
if unknown_only:
variables = [var for var in variables
if (var.kind == 'unknown') and
(self.arg_steps[var.name] == 0)]
return variables
def get_parameter_variables(self):
return self.get_args_by_name(self.names.parameter)
def get_materials(self, join=False):
materials = self.get_args_by_name(self.names.material)
for mat in materials:
if mat[0] is None:
materials.remove(mat)
if join:
materials = list(set(mat[0] for mat in materials))
return materials
def get_qp_key(self):
"""
Return a key identifying uniquely the term quadrature points.
"""
return (self.region.name, self.integral.order)
def get_physical_qps(self):
"""
Get physical quadrature points corresponding to the term region
and integral.
"""
from sfepy.discrete.common.mappings import get_physical_qps, PhysicalQPs
if self.integration == 'point':
phys_qps = PhysicalQPs()
else:
phys_qps = get_physical_qps(self.region, self.integral)
return phys_qps
def get_mapping(self, variable, get_saved=False, return_key=False):
"""
Get the reference mapping from a variable.
Notes
-----
This is a convenience wrapper of Field.get_mapping() that
initializes the arguments using the term data.
"""
integration = self.geometry_types[variable.name]
is_trace = self.arg_traces[variable.name]
if is_trace:
region = self.region.get_mirror_region()
else:
region = self.region
out = variable.field.get_mapping(region,
self.integral, integration,
get_saved=get_saved,
return_key=return_key)
return out
def get_data_shape(self, variable):
"""
Get data shape information from variable.
Notes
-----
This is a convenience wrapper of FieldVariable.get_data_shape() that
initializes the arguments using the term data.
"""
integration = self.geometry_types[variable.name]
is_trace = self.arg_traces[variable.name]
if is_trace:
region = self.region.get_mirror_region()
else:
region = self.region
out = variable.get_data_shape(self.integral, integration, region.name)
return out
def get(self, variable, quantity_name, bf=None, integration=None,
step=None, time_derivative=None):
"""
Get the named quantity related to the variable.
Notes
-----
This is a convenience wrapper of Variable.evaluate() that
initializes the arguments using the term data.
"""
name = variable.name
step = get_default(step, self.arg_steps[name])
time_derivative = get_default(time_derivative,
self.arg_derivatives[name])
integration = get_default(integration, self.geometry_types[name])
data = variable.evaluate(mode=quantity_name,
region=self.region, integral=self.integral,
integration=integration,
step=step, time_derivative=time_derivative,
is_trace=self.arg_traces[name], bf=bf)
return data
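    # Typical use of get() inside a term's get_fargs() (the variable
    # `state` and the quantity names are illustrative):
    #
    #     val_qp  = self.get(state, 'val')    # values in quadrature points
    #     grad_qp = self.get(state, 'grad')   # gradients in quadrature points
    #
    # Step, derivative and integration default to the values parsed from
    # the term's argument string.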
def check_shapes(self, *args, **kwargs):
"""
Check term argument shapes at run-time.
"""
from sfepy.base.base import output
from sfepy.mechanics.tensors import dim2sym
dim = self.region.dim
sym = dim2sym(dim)
def _parse_scalar_shape(sh):
if isinstance(sh, basestr):
if sh == 'D':
return dim
elif sh == 'D2':
return dim**2
elif sh == 'S':
return sym
elif sh == 'N': # General number.
return nm.inf
elif sh == 'str':
return 'str'
else:
return int(sh)
else:
return sh
def _parse_tuple_shape(sh):
if isinstance(sh, basestr):
return tuple((_parse_scalar_shape(ii.strip())
for ii in sh.split(',')))
else:
return (int(sh),)
arg_kinds = get_arg_kinds(self.ats)
arg_shapes_list = self.arg_shapes
if not isinstance(arg_shapes_list, list):
arg_shapes_list = [arg_shapes_list]
# Loop allowed shapes until a match is found, else error.
allowed_shapes = []
prev_shapes = {}
actual_shapes = {}
for _arg_shapes in arg_shapes_list:
# Unset shapes are taken from the previous iteration.
arg_shapes = copy(prev_shapes)
arg_shapes.update(_arg_shapes)
prev_shapes = arg_shapes
allowed_shapes.append(arg_shapes)
n_ok = 0
for ii, arg_kind in enumerate(arg_kinds):
if arg_kind in ('user', 'ts'):
n_ok += 1
continue
arg = args[ii]
key = '%s:%s' % (self.ats[ii], self.arg_names[ii])
if self.mode is not None:
extended_ats = self.ats[ii] + ('/%s' % self.mode)
else:
extended_ats = self.ats[ii]
try:
sh = arg_shapes[self.ats[ii]]
except KeyError:
sh = arg_shapes[extended_ats]
if arg_kind.endswith('variable'):
n_el, n_qp, _dim, n_en, n_c = self.get_data_shape(arg)
actual_shapes[key] = (n_c,)
shape = _parse_scalar_shape(sh[0] if isinstance(sh, tuple)
else sh)
if nm.isinf(shape):
n_ok += 1
else:
n_ok += shape == n_c
elif arg_kind.endswith('material'):
if arg is None: # Switched-off opt_material.
n_ok += sh is None
continue
if sh is None:
continue
prefix = ''
if isinstance(sh, basestr):
aux = sh.split(':')
if len(aux) == 2:
prefix, sh = aux
if sh == 'str':
n_ok += isinstance(arg, basestr)
continue
shape = _parse_tuple_shape(sh)
ls = len(shape)
aarg = nm.array(arg, ndmin=1)
actual_shapes[key] = aarg.shape
                # Substitute the general dimension 'N' with the actual value.
iinfs = nm.where(nm.isinf(shape))[0]
if len(iinfs):
shape = list(shape)
for iinf in iinfs:
shape[iinf] = aarg.shape[-ls+iinf]
shape = tuple(shape)
if (ls > 1) or (shape[0] > 1):
# Array.
n_ok += shape == aarg.shape[-ls:]
actual_shapes[key] = aarg.shape[-ls:]
elif (ls == 1) and (shape[0] == 1):
# Scalar constant.
from numbers import Number
n_ok += isinstance(arg, Number)
else:
n_ok += 1
if n_ok == len(arg_kinds):
break
else:
term_str = self.get_str()
output('allowed argument shapes for term "%s":' % term_str)
output(allowed_shapes)
output('actual argument shapes:')
output(actual_shapes)
raise ValueError('wrong arguments shapes for "%s" term! (see above)'
% term_str)
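    # The arg_shapes attribute verified above follows this convention
    # (a sketch; the concrete dict lives on each Term subclass):
    #
    #     arg_shapes = {'material': 'D, D',        # a dim x dim array
    #                   'virtual': (1, 'state'),   # scalar, tied to 'state'
    #                   'state': 1}
    #
    # 'D', 'D2', 'S' and 'N' expand to dim, dim**2, sym and "any number",
    # as implemented in _parse_scalar_shape() above.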
def standalone_setup(self):
from sfepy.discrete import create_adof_conns, Variables
conn_info = {'aux' : self.get_conn_info()}
adcs = create_adof_conns(conn_info, None)
variables = Variables(self.get_variables())
variables.set_adof_conns(adcs)
materials = self.get_materials(join=True)
for mat in materials:
mat.time_update(None, [Struct(terms=[self])])
def call_get_fargs(self, args, kwargs):
try:
fargs = self.get_fargs(*args, **kwargs)
except (RuntimeError, ValueError):
terms.errclear()
raise
return fargs
def call_function(self, out, fargs):
try:
status = self.function(out, *fargs)
except (RuntimeError, ValueError):
terms.errclear()
raise
if status:
terms.errclear()
raise ValueError('term evaluation failed! (%s)' % self.name)
return status
def eval_real(self, shape, fargs, mode='eval', term_mode=None,
diff_var=None, **kwargs):
out = nm.empty(shape, dtype=nm.float64)
if mode == 'eval':
status = self.call_function(out, fargs)
# Sum over elements but not over components.
out1 = nm.sum(out, 0).squeeze()
return out1, status
else:
status = self.call_function(out, fargs)
return out, status
def eval_complex(self, shape, fargs, mode='eval', term_mode=None,
diff_var=None, **kwargs):
rout = nm.empty(shape, dtype=nm.float64)
fargsd = split_complex_args(fargs)
        # Assuming linear forms, the matrix is the same for both the real
        # and imaginary parts.
rstatus = self.call_function(rout, fargsd['r'])
if (diff_var is None) and len(fargsd) >= 2:
iout = nm.empty(shape, dtype=nm.float64)
istatus = self.call_function(iout, fargsd['i'])
if mode == 'eval' and len(fargsd) >= 4:
irout = nm.empty(shape, dtype=nm.float64)
irstatus = self.call_function(irout, fargsd['ir'])
riout = nm.empty(shape, dtype=nm.float64)
ristatus = self.call_function(riout, fargsd['ri'])
out = (rout - iout) + (riout + irout) * 1j
status = rstatus or istatus or ristatus or irstatus
else:
out = rout + 1j * iout
status = rstatus or istatus
else:
out, status = rout + 0j, rstatus
if mode == 'eval':
out1 = nm.sum(out, 0).squeeze()
return out1, status
else:
return out, status
def evaluate(self, mode='eval', diff_var=None,
standalone=True, ret_status=False, **kwargs):
"""
Evaluate the term.
Parameters
----------
mode : 'eval' (default), or 'weak'
The term evaluation mode.
Returns
-------
val : float or array
            In 'eval' mode, the term returns a single value (the
            integral; it need not be a scalar), while in 'weak'
mode it returns an array for each element.
status : int, optional
The flag indicating evaluation success (0) or failure
(nonzero). Only provided if `ret_status` is True.
iels : array of ints, optional
The local elements indices in 'weak' mode. Only provided in
non-'eval' modes.
"""
if standalone:
self.standalone_setup()
kwargs = kwargs.copy()
term_mode = kwargs.pop('term_mode', None)
if mode in ('eval', 'el_eval', 'el_avg', 'qp'):
args = self.get_args(**kwargs)
self.check_shapes(*args)
emode = 'eval' if mode == 'el_eval' else mode
_args = tuple(args) + (emode, term_mode, diff_var)
fargs = self.call_get_fargs(_args, kwargs)
shape, dtype = self.get_eval_shape(*_args, **kwargs)
if dtype == nm.float64:
val, status = self.eval_real(shape, fargs, mode, term_mode,
**kwargs)
elif dtype == nm.complex128:
val, status = self.eval_complex(shape, fargs, mode, term_mode,
**kwargs)
else:
raise ValueError('unsupported term dtype! (%s)' % dtype)
val *= self.sign
out = (val,)
elif mode == 'weak':
varr = self.get_virtual_variable()
if varr is None:
raise ValueError('no virtual variable in weak mode! (in "%s")'
% self.get_str())
if diff_var is not None:
varc = self.get_variables(as_list=False)[diff_var]
args = self.get_args(**kwargs)
self.check_shapes(*args)
_args = tuple(args) + (mode, term_mode, diff_var)
fargs = self.call_get_fargs(_args, kwargs)
n_elr, n_qpr, dim, n_enr, n_cr = self.get_data_shape(varr)
n_row = n_cr * n_enr
if diff_var is None:
shape = (n_elr, 1, n_row, 1)
else:
n_elc, n_qpc, dim, n_enc, n_cc = self.get_data_shape(varc)
n_col = n_cc * n_enc
shape = (n_elr, 1, n_row, n_col)
if varr.dtype == nm.float64:
vals, status = self.eval_real(shape, fargs, mode, term_mode,
diff_var, **kwargs)
elif varr.dtype == nm.complex128:
vals, status = self.eval_complex(shape, fargs, mode, term_mode,
diff_var, **kwargs)
else:
raise ValueError('unsupported term dtype! (%s)'
% varr.dtype)
if not isinstance(vals, tuple):
vals *= self.sign
iels = self.get_assembling_cells(vals.shape)
else:
vals = (self.sign * vals[0],) + vals[1:]
iels = None
out = (vals, iels)
if goptions['check_term_finiteness']:
assert_(nm.isfinite(out[0]).all(),
msg='"%s" term values not finite!' % self.get_str())
if ret_status:
out = out + (status,)
if len(out) == 1:
out = out[0]
return out
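    # Minimal standalone evaluation sketch (integral, region and the
    # variable u are assumed to exist):
    #
    #     t = Term.new('ev_volume_integrate(u)', integral, region, u=u)
    #     t.setup()
    #     val = t.evaluate()   # mode='eval' by default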
def assemble_to(self, asm_obj, val, iels, mode='vector', diff_var=None):
"""
Assemble the results of term evaluation.
For standard terms, assemble the values in `val` corresponding to
elements/cells `iels` into a vector or a CSR sparse matrix `asm_obj`,
depending on `mode`.
For terms with a dynamic connectivity (e.g. contact terms), in
`'matrix'` mode, return the extra COO sparse matrix instead. The extra
matrix has to be added to the global matrix by the caller. By default,
this is done in :func:`Equations.evaluate()
<sfepy.discrete.equations.Equations.evaluate()>`.
"""
import sfepy.discrete.common.extmods.assemble as asm
vvar = self.get_virtual_variable()
dc_type = self.get_dof_conn_type()
extra = None
if mode == 'vector':
if asm_obj.dtype == nm.float64:
assemble = asm.assemble_vector
else:
assert_(asm_obj.dtype == nm.complex128)
assemble = asm.assemble_vector_complex
for ii in range(len(val)):
if not(val[ii].dtype == nm.complex128):
val[ii] = nm.complex128(val[ii])
if not isinstance(val, tuple):
dc = vvar.get_dof_conn(dc_type)
assert_(val.shape[2] == dc.shape[1])
assemble(asm_obj, val, iels, 1.0, dc)
else:
vals, rows, var = val
if var.eq_map is not None:
eq = var.eq_map.eq
rows = eq[rows]
active = (rows >= 0)
vals, rows = vals[active], rows[active]
# Assumes no repeated indices in rows!
asm_obj[rows] += vals
elif mode == 'matrix':
if asm_obj.dtype == nm.float64:
assemble = asm.assemble_matrix
else:
assert_(asm_obj.dtype == nm.complex128)
assemble = asm.assemble_matrix_complex
svar = diff_var
tmd = (asm_obj.data, asm_obj.indptr, asm_obj.indices)
if ((asm_obj.dtype == nm.complex128)
and (val.dtype == nm.float64)):
val = val.astype(nm.complex128)
sign = 1.0
if self.arg_derivatives[svar.name]:
if not self.is_quasistatic or (self.step > 0):
sign *= 1.0 / self.dt
else:
sign = 0.0
if not isinstance(val, tuple):
rdc = vvar.get_dof_conn(dc_type)
is_trace = self.arg_traces[svar.name]
cdc = svar.get_dof_conn(dc_type, is_trace=is_trace)
assert_(val.shape[2:] == (rdc.shape[1], cdc.shape[1]))
assemble(tmd[0], tmd[1], tmd[2], val, iels, sign, rdc, cdc)
else:
from scipy.sparse import coo_matrix
vals, rows, cols, rvar, cvar = val
if rvar.eq_map is not None:
req, ceq = rvar.eq_map.eq, cvar.eq_map.eq
rows, cols = req[rows], ceq[cols]
active = (rows >= 0) & (cols >= 0)
vals, rows, cols = vals[active], rows[active], cols[active]
extra = coo_matrix((sign * vals, (rows, cols)),
shape=asm_obj.shape)
else:
raise ValueError('unknown assembling mode! (%s)' % mode)
return extra
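    # assemble_to() is normally driven by the equations machinery during
    # assembling; a sketch of a matrix-mode call (all objects assumed
    # prepared elsewhere):
    #
    #     extra = term.assemble_to(mtx, vals, iels, mode='matrix',
    #                              diff_var=svar)
    #     # `extra` is a COO matrix only for dynamic-connectivity terms,
    #     # otherwise None.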
|
[
"sfepy.terms.term_table.keys",
"sfepy.base.base.Container.append",
"sfepy.terms.extmods.terms.append",
"sfepy.base.base.assert_",
"sfepy.discrete.Integrals",
"sfepy.discrete.common.mappings.get_physical_qps",
"sfepy.base.base.Struct",
"sfepy.discrete.create_adof_conns",
"sfepy.discrete.common.mappings.PhysicalQPs",
"sfepy.base.base.get_default",
"sfepy.base.base.as_float_or_complex",
"sfepy.base.base.Container.__init__",
"sfepy.base.base.Container.insert",
"sfepy.mechanics.tensors.dim2sym",
"sfepy.base.base.output",
"sfepy.terms.extmods.terms.errclear",
"sfepy.base.compat.in1d",
"sfepy.base.base.Struct.get"
] |
[((422, 460), 're.compile', 're.compile', (['"""^([^\\\\(\\\\}]*)\\\\((.*)\\\\)$"""'], {}), "('^([^\\\\(\\\\}]*)\\\\((.*)\\\\)$')\n", (432, 460), False, 'import re\n'), ((480, 503), 're.compile', 're.compile', (['"""^virtual$"""'], {}), "('^virtual$')\n", (490, 503), False, 'import re\n'), ((525, 563), 're.compile', 're.compile', (['"""^state(_[_a-zA-Z0-9]+)?$"""'], {}), "('^state(_[_a-zA-Z0-9]+)?$')\n", (535, 563), False, 'import re\n'), ((589, 631), 're.compile', 're.compile', (['"""^parameter(_[_a-zA-Z0-9]+)?$"""'], {}), "('^parameter(_[_a-zA-Z0-9]+)?$')\n", (599, 631), False, 'import re\n'), ((656, 697), 're.compile', 're.compile', (['"""^material(_[_a-zA-Z0-9]+)?$"""'], {}), "('^material(_[_a-zA-Z0-9]+)?$')\n", (666, 697), False, 'import re\n'), ((726, 771), 're.compile', 're.compile', (['"""^opt_material(_[_a-zA-Z0-9]+)?$"""'], {}), "('^opt_material(_[_a-zA-Z0-9]+)?$')\n", (736, 771), False, 'import re\n'), ((801, 826), 're.compile', 're.compile', (['"""(.+)\\\\.(.*)"""'], {}), "('(.+)\\\\.(.*)')\n", (811, 826), False, 'import re\n'), ((844, 862), 're.compile', 're.compile', (['"""^ts$"""'], {}), "('^ts$')\n", (854, 862), False, 'import re\n'), ((4570, 4593), 'pyparsing.Word', 'Word', (["('+-' + nums)", 'nums'], {}), "('+-' + nums, nums)\n", (4574, 4593), False, 'from pyparsing import Literal, Word, delimitedList, Group, StringStart, StringEnd, Optional, nums, alphas, alphanums\n'), ((5198, 5209), 'pyparsing.StringEnd', 'StringEnd', ([], {}), '()\n', (5207, 5209), False, 'from pyparsing import Literal, Word, delimitedList, Group, StringStart, StringEnd, Optional, nums, alphas, alphanums\n'), ((6587, 6622), 'sfepy.base.base.Container.__init__', 'Container.__init__', (['self'], {'objs': 'objs'}), '(self, objs=objs)\n', (6605, 6622), False, 'from sfepy.base.base import as_float_or_complex, get_default, assert_, Container, Struct, basestr, goptions\n'), ((6697, 6728), 'sfepy.base.base.Container.insert', 'Container.insert', (['self', 'ii', 'obj'], {}), '(self, ii, obj)\n', (6713, 6728), False, 'from sfepy.base.base import as_float_or_complex, get_default, assert_, Container, Struct, basestr, goptions\n'), ((6798, 6825), 'sfepy.base.base.Container.append', 'Container.append', (['self', 'obj'], {}), '(self, obj)\n', (6814, 6825), False, 'from sfepy.base.base import as_float_or_complex, get_default, assert_, Container, Struct, basestr, goptions\n'), ((11514, 11548), 'sfepy.base.base.Struct.get', 'Struct.get', (['self', '"""function"""', 'None'], {}), "(self, 'function', None)\n", (11524, 11548), False, 'from sfepy.base.base import as_float_or_complex, get_default, assert_, Container, Struct, basestr, goptions\n'), ((14479, 14578), 'sfepy.base.base.Struct', 'Struct', ([], {'name': '"""arg_names"""', 'material': '[]', 'variable': '[]', 'user': '[]', 'state': '[]', 'virtual': '[]', 'parameter': '[]'}), "(name='arg_names', material=[], variable=[], user=[], state=[],\n virtual=[], parameter=[])\n", (14485, 14578), False, 'from sfepy.base.base import as_float_or_complex, get_default, assert_, Container, Struct, basestr, goptions\n'), ((19303, 19329), 'copy.copy', 'copy', (['self.names.parameter'], {}), '(self.names.parameter)\n', (19307, 19329), False, 'from copy import copy\n'), ((26721, 26809), 'sfepy.base.base.Struct', 'Struct', ([], {'name': '"""dof_conn_info"""', 'type': 'self.dof_conn_type', 'region_name': 'self.region.name'}), "(name='dof_conn_info', type=self.dof_conn_type, region_name=self.\n region.name)\n", (26727, 26809), False, 'from sfepy.base.base import as_float_or_complex, 
get_default, assert_, Container, Struct, basestr, goptions\n'), ((26984, 27019), 'numpy.arange', 'nm.arange', (['shape[0]'], {'dtype': 'nm.int32'}), '(shape[0], dtype=nm.int32)\n', (26993, 27019), True, 'import numpy as nm\n'), ((31397, 31436), 'sfepy.base.base.get_default', 'get_default', (['step', 'self.arg_steps[name]'], {}), '(step, self.arg_steps[name])\n', (31408, 31436), False, 'from sfepy.base.base import as_float_or_complex, get_default, assert_, Container, Struct, basestr, goptions\n'), ((31463, 31519), 'sfepy.base.base.get_default', 'get_default', (['time_derivative', 'self.arg_derivatives[name]'], {}), '(time_derivative, self.arg_derivatives[name])\n', (31474, 31519), False, 'from sfepy.base.base import as_float_or_complex, get_default, assert_, Container, Struct, basestr, goptions\n'), ((31580, 31631), 'sfepy.base.base.get_default', 'get_default', (['integration', 'self.geometry_types[name]'], {}), '(integration, self.geometry_types[name])\n', (31591, 31631), False, 'from sfepy.base.base import as_float_or_complex, get_default, assert_, Container, Struct, basestr, goptions\n'), ((32248, 32260), 'sfepy.mechanics.tensors.dim2sym', 'dim2sym', (['dim'], {}), '(dim)\n', (32255, 32260), False, 'from sfepy.mechanics.tensors import dim2sym\n'), ((36934, 36968), 'sfepy.discrete.create_adof_conns', 'create_adof_conns', (['conn_info', 'None'], {}), '(conn_info, None)\n', (36951, 36968), False, 'from sfepy.discrete import create_adof_conns, Variables\n'), ((37889, 37922), 'numpy.empty', 'nm.empty', (['shape'], {'dtype': 'nm.float64'}), '(shape, dtype=nm.float64)\n', (37897, 37922), True, 'import numpy as nm\n'), ((38368, 38401), 'numpy.empty', 'nm.empty', (['shape'], {'dtype': 'nm.float64'}), '(shape, dtype=nm.float64)\n', (38376, 38401), True, 'import numpy as nm\n'), ((4810, 4840), 'pyparsing.Word', 'Word', (['alphas', "(alphanums + '._')"], {}), "(alphas, alphanums + '._')\n", (4814, 4840), False, 'from pyparsing import Literal, Word, delimitedList, Group, StringStart, StringEnd, Optional, nums, alphas, alphanums\n'), ((4952, 4965), 'pyparsing.Literal', 'Literal', (['"""dt"""'], {}), "('dt')\n", (4959, 4965), False, 'from pyparsing import Literal, Word, delimitedList, Group, StringStart, StringEnd, Optional, nums, alphas, alphanums\n'), ((5149, 5162), 'pyparsing.StringStart', 'StringStart', ([], {}), '()\n', (5160, 5162), False, 'from pyparsing import Literal, Word, delimitedList, Group, StringStart, StringEnd, Optional, nums, alphas, alphanums\n'), ((5165, 5195), 'pyparsing.delimitedList', 'delimitedList', (['generalized_var'], {}), '(generalized_var)\n', (5178, 5195), False, 'from pyparsing import Literal, Word, delimitedList, Group, StringStart, StringEnd, Optional, nums, alphas, alphanums\n'), ((6502, 6520), 'sfepy.terms.extmods.terms.append', 'terms.append', (['term'], {}), '(term)\n', (6514, 6520), False, 'from sfepy.terms.extmods import terms\n'), ((9797, 9808), 'sfepy.discrete.Integrals', 'Integrals', ([], {}), '()\n', (9806, 9808), False, 'from sfepy.discrete import Integrals\n'), ((10324, 10350), 'sfepy.base.base.as_float_or_complex', 'as_float_or_complex', (['other'], {}), '(other)\n', (10343, 10350), False, 'from sfepy.base.base import as_float_or_complex, get_default, assert_, Container, Struct, basestr, goptions\n'), ((13469, 13477), 'sfepy.base.base.Struct', 'Struct', ([], {}), '()\n', (13475, 13477), False, 'from sfepy.base.base import as_float_or_complex, get_default, assert_, Container, Struct, basestr, goptions\n'), ((16067, 16097), 'sfepy.base.base.Struct.get', 
'Struct.get', (['self', '"""mode"""', 'None'], {}), "(self, 'mode', None)\n", (16077, 16097), False, 'from sfepy.base.base import as_float_or_complex, get_default, assert_, Container, Struct, basestr, goptions\n'), ((29485, 29498), 'sfepy.discrete.common.mappings.PhysicalQPs', 'PhysicalQPs', ([], {}), '()\n', (29496, 29498), False, 'from sfepy.discrete.common.mappings import get_physical_qps, PhysicalQPs\n'), ((29537, 29581), 'sfepy.discrete.common.mappings.get_physical_qps', 'get_physical_qps', (['self.region', 'self.integral'], {}), '(self.region, self.integral)\n', (29553, 29581), False, 'from sfepy.discrete.common.mappings import get_physical_qps, PhysicalQPs\n'), ((33504, 33521), 'copy.copy', 'copy', (['prev_shapes'], {}), '(prev_shapes)\n', (33508, 33521), False, 'from copy import copy\n'), ((36473, 36532), 'sfepy.base.base.output', 'output', (['(\'allowed argument shapes for term "%s":\' % term_str)'], {}), '(\'allowed argument shapes for term "%s":\' % term_str)\n', (36479, 36532), False, 'from sfepy.base.base import output\n'), ((36545, 36567), 'sfepy.base.base.output', 'output', (['allowed_shapes'], {}), '(allowed_shapes)\n', (36551, 36567), False, 'from sfepy.base.base import output\n'), ((36580, 36613), 'sfepy.base.base.output', 'output', (['"""actual argument shapes:"""'], {}), "('actual argument shapes:')\n", (36586, 36613), False, 'from sfepy.base.base import output\n'), ((36626, 36647), 'sfepy.base.base.output', 'output', (['actual_shapes'], {}), '(actual_shapes)\n', (36632, 36647), False, 'from sfepy.base.base import output\n'), ((37650, 37666), 'sfepy.terms.extmods.terms.errclear', 'terms.errclear', ([], {}), '()\n', (37664, 37666), False, 'from sfepy.terms.extmods import terms\n'), ((38679, 38712), 'numpy.empty', 'nm.empty', (['shape'], {'dtype': 'nm.float64'}), '(shape, dtype=nm.float64)\n', (38687, 38712), True, 'import numpy as nm\n'), ((26040, 26071), 'six.iteritems', 'six.iteritems', (['self.integration'], {}), '(self.integration)\n', (26053, 26071), False, 'import six\n'), ((37367, 37383), 'sfepy.terms.extmods.terms.errclear', 'terms.errclear', ([], {}), '()\n', (37381, 37383), False, 'from sfepy.terms.extmods import terms\n'), ((37583, 37599), 'sfepy.terms.extmods.terms.errclear', 'terms.errclear', ([], {}), '()\n', (37597, 37599), False, 'from sfepy.terms.extmods import terms\n'), ((38850, 38883), 'numpy.empty', 'nm.empty', (['shape'], {'dtype': 'nm.float64'}), '(shape, dtype=nm.float64)\n', (38858, 38883), True, 'import numpy as nm\n'), ((38975, 39008), 'numpy.empty', 'nm.empty', (['shape'], {'dtype': 'nm.float64'}), '(shape, dtype=nm.float64)\n', (38983, 39008), True, 'import numpy as nm\n'), ((44359, 44398), 'sfepy.base.base.assert_', 'assert_', (['(asm_obj.dtype == nm.complex128)'], {}), '(asm_obj.dtype == nm.complex128)\n', (44366, 44398), False, 'from sfepy.base.base import as_float_or_complex, get_default, assert_, Container, Struct, basestr, goptions\n'), ((44722, 44758), 'sfepy.base.base.assert_', 'assert_', (['(val.shape[2] == dc.shape[1])'], {}), '(val.shape[2] == dc.shape[1])\n', (44729, 44758), False, 'from sfepy.base.base import as_float_or_complex, get_default, assert_, Container, Struct, basestr, goptions\n'), ((4876, 4888), 'pyparsing.Literal', 'Literal', (['"""d"""'], {}), "('d')\n", (4883, 4888), False, 'from pyparsing import Literal, Word, delimitedList, Group, StringStart, StringEnd, Optional, nums, alphas, alphanums\n'), ((4986, 4999), 'pyparsing.Literal', 'Literal', (['"""tr"""'], {}), "('tr')\n", (4993, 4999), False, 'from pyparsing import 
Literal, Word, delimitedList, Group, StringStart, StringEnd, Optional, nums, alphas, alphanums\n'), ((5059, 5071), 'pyparsing.Literal', 'Literal', (['""")"""'], {}), "(')')\n", (5066, 5071), False, 'from pyparsing import Literal, Word, delimitedList, Group, StringStart, StringEnd, Optional, nums, alphas, alphanums\n'), ((18172, 18221), 'sfepy.base.compat.in1d', 'in1d', (['self.region.vertices', 'field.region.vertices'], {}), '(self.region.vertices, field.region.vertices)\n', (18176, 18221), False, 'from sfepy.base.compat import in1d\n'), ((34603, 34618), 'numpy.isinf', 'nm.isinf', (['shape'], {}), '(shape)\n', (34611, 34618), True, 'import numpy as nm\n'), ((37178, 37198), 'sfepy.base.base.Struct', 'Struct', ([], {'terms': '[self]'}), '(terms=[self])\n', (37184, 37198), False, 'from sfepy.base.base import as_float_or_complex, get_default, assert_, Container, Struct, basestr, goptions\n'), ((38079, 38093), 'numpy.sum', 'nm.sum', (['out', '(0)'], {}), '(out, 0)\n', (38085, 38093), True, 'import numpy as nm\n'), ((39413, 39427), 'numpy.sum', 'nm.sum', (['out', '(0)'], {}), '(out, 0)\n', (39419, 39427), True, 'import numpy as nm\n'), ((45343, 45382), 'sfepy.base.base.assert_', 'assert_', (['(asm_obj.dtype == nm.complex128)'], {}), '(asm_obj.dtype == nm.complex128)\n', (45350, 45382), False, 'from sfepy.base.base import as_float_or_complex, get_default, assert_, Container, Struct, basestr, goptions\n'), ((46142, 46196), 'sfepy.base.base.assert_', 'assert_', (['(val.shape[2:] == (rdc.shape[1], cdc.shape[1]))'], {}), '(val.shape[2:] == (rdc.shape[1], cdc.shape[1]))\n', (46149, 46196), False, 'from sfepy.base.base import as_float_or_complex, get_default, assert_, Container, Struct, basestr, goptions\n'), ((46718, 46778), 'scipy.sparse.coo_matrix', 'coo_matrix', (['(sign * vals, (rows, cols))'], {'shape': 'asm_obj.shape'}), '((sign * vals, (rows, cols)), shape=asm_obj.shape)\n', (46728, 46778), False, 'from scipy.sparse import coo_matrix\n'), ((4677, 4689), 'pyparsing.Literal', 'Literal', (['"""]"""'], {}), "(']')\n", (4684, 4689), False, 'from pyparsing import Literal, Word, delimitedList, Group, StringStart, StringEnd, Optional, nums, alphas, alphanums\n'), ((4926, 4938), 'pyparsing.Literal', 'Literal', (['"""/"""'], {}), "('/')\n", (4933, 4938), False, 'from pyparsing import Literal, Word, delimitedList, Group, StringStart, StringEnd, Optional, nums, alphas, alphanums\n'), ((9473, 9490), 'sfepy.terms.term_table.keys', 'term_table.keys', ([], {}), '()\n', (9488, 9490), False, 'from sfepy.terms import term_table\n'), ((35444, 35466), 'numpy.array', 'nm.array', (['arg'], {'ndmin': '(1)'}), '(arg, ndmin=1)\n', (35452, 35466), True, 'import numpy as nm\n'), ((43130, 43149), 'numpy.isfinite', 'nm.isfinite', (['out[0]'], {}), '(out[0])\n', (43141, 43149), True, 'import numpy as nm\n'), ((44591, 44613), 'numpy.complex128', 'nm.complex128', (['val[ii]'], {}), '(val[ii])\n', (44604, 44613), True, 'import numpy as nm\n'), ((4618, 4630), 'pyparsing.Literal', 'Literal', (['"""["""'], {}), "('[')\n", (4625, 4630), False, 'from pyparsing import Literal, Word, delimitedList, Group, StringStart, StringEnd, Optional, nums, alphas, alphanums\n'), ((5002, 5014), 'pyparsing.Literal', 'Literal', (['"""("""'], {}), "('(')\n", (5009, 5014), False, 'from pyparsing import Literal, Word, delimitedList, Group, StringStart, StringEnd, Optional, nums, alphas, alphanums\n'), ((6184, 6201), 'sfepy.terms.term_table.keys', 'term_table.keys', ([], {}), '()\n', (6199, 6201), False, 'from sfepy.terms import term_table\n'), ((35631, 
35646), 'numpy.isinf', 'nm.isinf', (['shape'], {}), '(shape)\n', (35639, 35646), True, 'import numpy as nm\n')]
|
# Copyright 2021 Modelyst LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
from typing import Optional
from sqlmodel import Field, Session, SQLModel, create_engine, func, select
class Item(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
created: datetime
deleted: bool = False
category: str
version: float = 1
data: str
# Create and save records to show that the query itself is working.
item_1 = Item(created=datetime.now(), category="category_1", data="❤️ I love SQLModel.")
item_2 = Item(
created=datetime.now(),
category="category_1",
data="❤️ I love FastAPI.",
deleted=True,
)
item_3 = Item(
created=datetime.now(),
category="category_2",
data="🥰 I appreciate your work on all of it!",
)
engine = create_engine("sqlite://")
SQLModel.metadata.create_all(engine)
with Session(engine) as session:
session.add(item_1)
session.add(item_2)
session.add(item_3)
session.commit()
# This "statement" is where the issue presents itself in PyCharm
statement = (
select(
Item.category,
func.count(Item.id).label("my_count"),
func.total(Item.deleted).label("delete_count"),
func.min(Item.created).label("oldest_timestamp"),
func.max(Item.created).label("newest_timestamp"),
func.group_concat(Item.version).label("version_list"),
)
.distinct()
.group_by(Item.category)
)
category_metadata = session.exec(statement)
for result in category_metadata:
print(dict(result))
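# Illustrative output for the records above (the exact values are an
# assumption about SQLite's aggregates, e.g. total() returns a float):
#
#   {'category': 'category_1', 'my_count': 2, 'delete_count': 1.0, ...}
#   {'category': 'category_2', 'my_count': 1, 'delete_count': 0.0, ...}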
|
[
"sqlmodel.func.min",
"sqlmodel.SQLModel.metadata.create_all",
"sqlmodel.func.total",
"sqlmodel.func.group_concat",
"sqlmodel.Session",
"sqlmodel.Field",
"sqlmodel.func.count",
"sqlmodel.func.max",
"sqlmodel.create_engine"
] |
[((1345, 1371), 'sqlmodel.create_engine', 'create_engine', (['"""sqlite://"""'], {}), "('sqlite://')\n", (1358, 1371), False, 'from sqlmodel import Field, Session, SQLModel, create_engine, func, select\n'), ((1373, 1409), 'sqlmodel.SQLModel.metadata.create_all', 'SQLModel.metadata.create_all', (['engine'], {}), '(engine)\n', (1401, 1409), False, 'from sqlmodel import Field, Session, SQLModel, create_engine, func, select\n'), ((791, 828), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (796, 828), False, 'from sqlmodel import Field, Session, SQLModel, create_engine, func, select\n'), ((1416, 1431), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (1423, 1431), False, 'from sqlmodel import Field, Session, SQLModel, create_engine, func, select\n'), ((1024, 1038), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1036, 1038), False, 'from datetime import datetime\n'), ((1118, 1132), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1130, 1132), False, 'from datetime import datetime\n'), ((1239, 1253), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1251, 1253), False, 'from datetime import datetime\n'), ((1680, 1699), 'sqlmodel.func.count', 'func.count', (['Item.id'], {}), '(Item.id)\n', (1690, 1699), False, 'from sqlmodel import Field, Session, SQLModel, create_engine, func, select\n'), ((1731, 1755), 'sqlmodel.func.total', 'func.total', (['Item.deleted'], {}), '(Item.deleted)\n', (1741, 1755), False, 'from sqlmodel import Field, Session, SQLModel, create_engine, func, select\n'), ((1791, 1813), 'sqlmodel.func.min', 'func.min', (['Item.created'], {}), '(Item.created)\n', (1799, 1813), False, 'from sqlmodel import Field, Session, SQLModel, create_engine, func, select\n'), ((1853, 1875), 'sqlmodel.func.max', 'func.max', (['Item.created'], {}), '(Item.created)\n', (1861, 1875), False, 'from sqlmodel import Field, Session, SQLModel, create_engine, func, select\n'), ((1915, 1946), 'sqlmodel.func.group_concat', 'func.group_concat', (['Item.version'], {}), '(Item.version)\n', (1932, 1946), False, 'from sqlmodel import Field, Session, SQLModel, create_engine, func, select\n')]
|
# A file containing fixtures for testing
# Fixtures defined here are available for the whole scope
from fastapi.testclient import TestClient
import pytest
import os
from ..main import app
from sqlmodel import SQLModel, Session, create_engine
from sqlmodel.pool import StaticPool
from ..utils import get_session
db_name = "test_db.sqlite"
test_con = f"sqlite:///{db_name}"
test_engine = create_engine(
test_con, connect_args={"check_same_thread": False}, echo=True
)
@pytest.fixture(name="create_db", scope="session")
def create_db():
# setup
SQLModel.metadata.create_all(test_engine)
yield
# teardown
os.remove(db_name)
@pytest.fixture(name="session")
def session_fixture(create_db):
    # The create_db fixture is already requested via the parameter above.
with Session(test_engine) as session:
yield session
@pytest.fixture(name="client")
def client_fixture(session: Session):
def get_session_override():
return session
app.dependency_overrides[get_session] = get_session_override
client = TestClient(app)
yield client
app.dependency_overrides.clear()
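# A test module can then request the override-aware client by fixture name;
# a minimal sketch (the '/items/' route is hypothetical):
#
#   def test_read_items(client):
#       response = client.get("/items/")
#       assert response.status_code == 200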
|
[
"sqlmodel.create_engine",
"sqlmodel.Session",
"sqlmodel.SQLModel.metadata.create_all"
] |
[((397, 474), 'sqlmodel.create_engine', 'create_engine', (['test_con'], {'connect_args': "{'check_same_thread': False}", 'echo': '(True)'}), "(test_con, connect_args={'check_same_thread': False}, echo=True)\n", (410, 474), False, 'from sqlmodel import SQLModel, Session, create_engine\n'), ((484, 533), 'pytest.fixture', 'pytest.fixture', ([], {'name': '"""create_db"""', 'scope': '"""session"""'}), "(name='create_db', scope='session')\n", (498, 533), False, 'import pytest\n'), ((660, 690), 'pytest.fixture', 'pytest.fixture', ([], {'name': '"""session"""'}), "(name='session')\n", (674, 690), False, 'import pytest\n'), ((805, 834), 'pytest.fixture', 'pytest.fixture', ([], {'name': '"""client"""'}), "(name='client')\n", (819, 834), False, 'import pytest\n'), ((567, 608), 'sqlmodel.SQLModel.metadata.create_all', 'SQLModel.metadata.create_all', (['test_engine'], {}), '(test_engine)\n', (595, 608), False, 'from sqlmodel import SQLModel, Session, create_engine\n'), ((638, 656), 'os.remove', 'os.remove', (['db_name'], {}), '(db_name)\n', (647, 656), False, 'import os\n'), ((1007, 1022), 'fastapi.testclient.TestClient', 'TestClient', (['app'], {}), '(app)\n', (1017, 1022), False, 'from fastapi.testclient import TestClient\n'), ((747, 767), 'sqlmodel.Session', 'Session', (['test_engine'], {}), '(test_engine)\n', (754, 767), False, 'from sqlmodel import SQLModel, Session, create_engine\n')]
|
"""
Time stepping solvers.
"""
import numpy as nm
from sfepy.base.base import output, Struct, IndexedStruct, basestr
from sfepy.solvers.solvers import make_get_conf, TimeSteppingSolver
from sfepy.discrete.mass_operator import MassOperator
from sfepy.solvers.ts import TimeStepper, VariableTimeStepper
class StationarySolver(TimeSteppingSolver):
"""
Solver for stationary problems without time stepping.
This class is provided to have a unified interface of the time stepping
solvers also for stationary problems.
"""
name = 'ts.stationary'
def __init__(self, conf, **kwargs):
TimeSteppingSolver.__init__(self, conf, ts=None, **kwargs)
def __call__(self, state0=None, save_results=True, step_hook=None,
post_process_hook=None, nls_status=None):
problem = self.problem
problem.time_update()
state = problem.solve(state0=state0, nls_status=nls_status)
if step_hook is not None:
step_hook(problem, None, state)
if save_results:
problem.save_state(problem.get_output_name(), state,
post_process_hook=post_process_hook,
file_per_var=None)
return state
def replace_virtuals(deps, pairs):
out = {}
    for key, val in deps.items():
out[pairs[key]] = val
return out
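# A sketch of what replace_virtuals() does (names are illustrative): with
# deps = {'v': ['u']} and pairs = {'v': 'u'} mapping virtual to state
# names, it returns {'u': ['u']}, i.e. the same dependencies keyed by
# state variables.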
class EquationSequenceSolver(TimeSteppingSolver):
"""
Solver for stationary problems with an equation sequence.
"""
name = 'ts.equation_sequence'
def __init__(self, conf, **kwargs):
TimeSteppingSolver.__init__(self, conf, ts=None, **kwargs)
def __call__(self, state0=None, save_results=True, step_hook=None,
post_process_hook=None, nls_status=None):
from sfepy.base.base import invert_dict, get_subdict
from sfepy.base.resolve_deps import resolve
problem = self.problem
if state0 is None:
state0 = problem.create_state()
variables = problem.get_variables()
vtos = variables.get_dual_names()
vdeps = problem.equations.get_variable_dependencies()
sdeps = replace_virtuals(vdeps, vtos)
sorder = resolve(sdeps)
stov = invert_dict(vtos)
vorder = [[stov[ii] for ii in block] for block in sorder]
parts0 = state0.get_parts()
state = state0.copy()
solved = []
for ib, block in enumerate(vorder):
output('solving for %s...' % sorder[ib])
subpb = problem.create_subproblem(block, solved)
subpb.equations.print_terms()
subpb.time_update()
substate0 = subpb.create_state()
vals = get_subdict(parts0, block)
substate0.set_parts(vals)
substate = subpb.solve(state0=substate0, nls_status=nls_status)
state.set_parts(substate.get_parts())
solved.extend(sorder[ib])
output('...done')
if step_hook is not None:
step_hook(problem, None, state)
if save_results:
problem.save_state(problem.get_output_name(), state,
post_process_hook=post_process_hook,
file_per_var=None)
return state
def get_initial_state(problem):
"""
Create a zero state vector and apply initial conditions.
"""
state = problem.create_state()
problem.setup_ic()
state.apply_ic()
return state
def prepare_save_data(ts, conf):
"""
Given a time stepper configuration, return a list of time steps when the
state should be saved.
"""
try:
save_steps = conf.options.save_steps
    except AttributeError:
save_steps = -1
if save_steps == -1:
save_steps = ts.n_step
is_save = nm.linspace(0, ts.n_step - 1, save_steps).astype(nm.int32)
is_save = nm.unique(is_save)
return ts.suffix, is_save
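# For example (a sketch): with ts.n_step = 100 and save_steps = 11,
# nm.linspace(0, 99, 11) truncated to int32 yields
# is_save = [0, 9, 19, 29, 39, 49, 59, 69, 79, 89, 99].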
def prepare_matrix(problem, state):
"""
Pre-assemble tangent system matrix.
"""
problem.update_materials()
ev = problem.get_evaluator()
try:
mtx = ev.eval_tangent_matrix(state(), is_full=True)
except ValueError:
output('matrix evaluation failed, giving up...')
raise
return mtx
def make_implicit_step(ts, state0, problem, nls_status=None):
"""
Make a step of an implicit time stepping solver.
"""
problem.time_update(ts)
if ts.step == 0:
state0.apply_ebc()
state = state0.copy(deep=True)
if not ts.is_quasistatic:
problem.init_time(ts)
ev = problem.get_evaluator()
try:
vec_r = ev.eval_residual(state(), is_full=True)
except ValueError:
output('initial residual evaluation failed, giving up...')
raise
else:
err = nm.linalg.norm(vec_r)
output('initial residual: %e' % err)
if problem.is_linear():
mtx = prepare_matrix(problem, state)
else:
mtx = None
# Initialize solvers (and possibly presolve the matrix).
presolve = mtx is not None
problem.init_solvers(nls_status=nls_status, mtx=mtx, presolve=presolve)
# Initialize variables with history.
state0.init_history()
if ts.is_quasistatic:
# Ordinary solve.
state = problem.solve(state0=state0, nls_status=nls_status)
else:
if (ts.step == 1) and ts.is_quasistatic and problem.is_linear():
mtx = prepare_matrix(problem, state0)
problem.init_solvers(nls_status=nls_status, mtx=mtx)
state = problem.solve(state0=state0, nls_status=nls_status)
return state
def make_explicit_step(ts, state0, problem, mass, nls_status=None):
"""
Make a step of an explicit time stepping solver.
"""
problem.time_update(ts)
if ts.step == 0:
state0.apply_ebc()
state = state0.copy(deep=True)
problem.init_time(ts)
# Initialize variables with history.
state0.init_history()
ev = problem.get_evaluator()
try:
vec_r = ev.eval_residual(state0(), is_full=True)
except ValueError:
output('residual evaluation failed, giving up...')
raise
else:
err = nm.linalg.norm(vec_r)
output('residual: %e' % err)
if ts.step > 0:
variables = problem.get_variables()
vec_rf = variables.make_full_vec(vec_r, force_value=0.0)
rhs = -ts.dt * vec_rf + mass.action(state0())
vec = mass.inverse_action(rhs)
state = state0.copy(preserve_caches=True)
state.set_full(vec)
state.apply_ebc()
return state
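# The update above is a forward Euler step in disguise:
# M u_new = M u_old - dt * r(u_old), i.e. u_new = u_old - dt * M^{-1} r(u_old).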
def get_min_dt(adt):
red = adt.red
while red >= adt.red_max:
red *= adt.red_factor
dt = adt.dt0 * red
return dt
def adapt_time_step(ts, status, adt, problem=None):
"""
Adapt the time step of `ts` according to the exit status of the
nonlinear solver.
    The time step dt is reduced if the nonlinear solver did not converge. If it
    converged in fewer than a specified number of iterations for several time
steps, the time step is increased. This is governed by the following
parameters:
- red_factor : time step reduction factor
- red_max : maximum time step reduction factor
- inc_factor : time step increase factor
- inc_on_iter : increase time step if the nonlinear solver converged in
      fewer than this number of iterations...
- inc_wait : ...for this number of consecutive time steps
Parameters
----------
ts : VariableTimeStepper instance
The time stepper.
status : IndexedStruct instance
The nonlinear solver exit status.
adt : Struct instance
        The adaptivity parameters of the time solver.
    problem : Problem instance, optional
        This can be used in user-defined adaptivity functions. Not used here.
Returns
-------
is_break : bool
If True, the adaptivity loop should stop.
"""
is_break = False
if status.condition == 0:
if status.n_iter <= adt.inc_on_iter:
adt.wait += 1
if adt.wait > adt.inc_wait:
if adt.red < 1.0:
adt.red = adt.red * adt.inc_factor
ts.set_time_step(adt.dt0 * adt.red)
output('+++++ new time step: %e +++++' % ts.dt)
adt.wait = 0
else:
adt.wait = 0
is_break = True
else:
adt.red = adt.red * adt.red_factor
if adt.red < adt.red_max:
is_break = True
else:
ts.set_time_step(adt.dt0 * adt.red, update_time=True)
output('----- new time step: %e -----' % ts.dt)
adt.wait = 0
return is_break
class SimpleTimeSteppingSolver(TimeSteppingSolver):
"""
Implicit time stepping solver with a fixed time step.
"""
name = 'ts.simple'
@staticmethod
def process_conf(conf, kwargs):
"""
Process configuration options.
"""
get = make_get_conf(conf, kwargs)
common = TimeSteppingSolver.process_conf(conf)
return Struct(t0=get('t0', 0.0),
t1=get('t1', 1.0),
dt=get('dt', None),
n_step=get('n_step', 10),
quasistatic=get('quasistatic', False)) + common
def __init__(self, conf, **kwargs):
TimeSteppingSolver.__init__(self, conf, **kwargs)
self.ts = TimeStepper.from_conf(self.conf)
nd = self.ts.n_digit
format = '====== time %%e (step %%%dd of %%%dd) =====' % (nd, nd)
self.format = format
def __call__(self, state0=None, save_results=True, step_hook=None,
post_process_hook=None, nls_status=None):
"""
Solve the time-dependent problem.
"""
problem = self.problem
ts = self.ts
suffix, is_save = prepare_save_data(ts, problem.conf)
if state0 is None:
state0 = get_initial_state(problem)
ii = 0
for step, time in ts:
output(self.format % (time, step + 1, ts.n_step))
state = self.solve_step(ts, state0, nls_status=nls_status)
state0 = state.copy(deep=True)
if step_hook is not None:
step_hook(problem, ts, state)
if save_results and (is_save[ii] == ts.step):
filename = problem.get_output_name(suffix=suffix % ts.step)
problem.save_state(filename, state,
post_process_hook=post_process_hook,
file_per_var=None,
ts=ts)
ii += 1
problem.advance(ts)
return state
def solve_step(self, ts, state0, nls_status=None):
"""
Solve a single time step.
"""
state = make_implicit_step(ts, state0, self.problem,
nls_status=nls_status)
return state
class ExplicitTimeSteppingSolver(SimpleTimeSteppingSolver):
"""
Explicit time stepping solver with a fixed time step.
"""
name = 'ts.explicit'
@staticmethod
def process_conf(conf, kwargs):
"""
Process configuration options.
"""
get = make_get_conf(conf, kwargs)
common = SimpleTimeSteppingSolver.process_conf(conf, kwargs)
return Struct(mass=get('mass', None,
'missing "mass" in options!'),
lumped=get('lumped', False)) + common
def __init__(self, conf, **kwargs):
SimpleTimeSteppingSolver.__init__(self, conf, **kwargs)
self.mass = MassOperator(self.problem, self.conf)
def solve_step(self, ts, state0, nls_status=None):
"""
Solve a single time step.
"""
state = make_explicit_step(ts, state0, self.problem, self.mass,
nls_status=nls_status)
return state
class AdaptiveTimeSteppingSolver(SimpleTimeSteppingSolver):
"""
Implicit time stepping solver with an adaptive time step.
Either the built-in or user supplied function can be used to adapt the time
step.
"""
name = 'ts.adaptive'
@staticmethod
def process_conf(conf, kwargs):
"""
Process configuration options.
"""
get = make_get_conf(conf, kwargs)
common = SimpleTimeSteppingSolver.process_conf(conf, kwargs)
adt = Struct(red_factor=get('dt_red_factor', 0.2),
red_max=get('dt_red_max', 1e-3),
inc_factor=get('dt_inc_factor', 1.25),
inc_on_iter=get('dt_inc_on_iter', 4),
inc_wait=get('dt_inc_wait', 5),
red=1.0, wait=0, dt0=0.0)
return Struct(adapt_fun=get('adapt_fun', adapt_time_step),
adt=adt) + common
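    # A sketch of a matching problem-description entry (keys mirror the
    # get() calls above; the values shown are the defaults):
    #
    #     solver_ts = {
    #         'name': 'ts', 'kind': 'ts.adaptive',
    #         't0': 0.0, 't1': 1.0, 'dt': None, 'n_step': 10,
    #         'dt_red_factor': 0.2, 'dt_red_max': 1e-3,
    #         'dt_inc_factor': 1.25, 'dt_inc_on_iter': 4, 'dt_inc_wait': 5,
    #     }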
def __init__(self, conf, **kwargs):
TimeSteppingSolver.__init__(self, conf, **kwargs)
self.ts = VariableTimeStepper.from_conf(self.conf)
self.adt = adt = self.conf.adt
adt.dt0 = self.ts.get_default_time_step()
self.ts.set_n_digit_from_min_dt(get_min_dt(adt))
self.format = '====== time %e (dt %e, wait %d, step %d of %d) ====='
if isinstance(self.conf.adapt_fun, basestr):
self.adapt_time_step = self.problem.functions[self.conf.adapt_fun]
else:
self.adapt_time_step = self.conf.adapt_fun
def __call__(self, state0=None, save_results=True, step_hook=None,
post_process_hook=None, nls_status=None):
"""
Solve the time-dependent problem.
"""
problem = self.problem
ts = self.ts
if state0 is None:
state0 = get_initial_state(problem)
ii = 0
for step, time in ts:
output(self.format % (time, ts.dt, self.adt.wait,
step + 1, ts.n_step))
state = self.solve_step(ts, state0, nls_status=nls_status)
state0 = state.copy(deep=True)
if step_hook is not None:
step_hook(problem, ts, state)
if save_results:
filename = problem.get_output_name(suffix=ts.suffix % ts.step)
problem.save_state(filename, state,
post_process_hook=post_process_hook,
file_per_var=None,
ts=ts)
ii += 1
problem.advance(ts)
return state
def solve_step(self, ts, state0, nls_status=None):
"""
Solve a single time step.
"""
status = IndexedStruct(n_iter=0, condition=0)
        while True:
state = make_implicit_step(ts, state0, self.problem,
nls_status=status)
is_break = self.adapt_time_step(ts, status, self.adt, self.problem)
if is_break:
break
if nls_status is not None:
nls_status.update(status)
return state
|
[
"sfepy.base.base.invert_dict",
"sfepy.base.base.output",
"sfepy.solvers.solvers.TimeSteppingSolver.__init__",
"sfepy.solvers.solvers.TimeSteppingSolver.process_conf",
"sfepy.discrete.mass_operator.MassOperator",
"sfepy.solvers.ts.VariableTimeStepper.from_conf",
"sfepy.base.base.IndexedStruct",
"sfepy.base.base.get_subdict",
"sfepy.solvers.solvers.make_get_conf",
"sfepy.solvers.ts.TimeStepper.from_conf",
"sfepy.base.resolve_deps.resolve"
] |
[((3883, 3901), 'numpy.unique', 'nm.unique', (['is_save'], {}), '(is_save)\n', (3892, 3901), True, 'import numpy as nm\n'), ((616, 674), 'sfepy.solvers.solvers.TimeSteppingSolver.__init__', 'TimeSteppingSolver.__init__', (['self', 'conf'], {'ts': 'None'}), '(self, conf, ts=None, **kwargs)\n', (643, 674), False, 'from sfepy.solvers.solvers import make_get_conf, TimeSteppingSolver\n'), ((1592, 1650), 'sfepy.solvers.solvers.TimeSteppingSolver.__init__', 'TimeSteppingSolver.__init__', (['self', 'conf'], {'ts': 'None'}), '(self, conf, ts=None, **kwargs)\n', (1619, 1650), False, 'from sfepy.solvers.solvers import make_get_conf, TimeSteppingSolver\n'), ((2212, 2226), 'sfepy.base.resolve_deps.resolve', 'resolve', (['sdeps'], {}), '(sdeps)\n', (2219, 2226), False, 'from sfepy.base.resolve_deps import resolve\n'), ((2243, 2260), 'sfepy.base.base.invert_dict', 'invert_dict', (['vtos'], {}), '(vtos)\n', (2254, 2260), False, 'from sfepy.base.base import invert_dict, get_subdict\n'), ((6330, 6351), 'numpy.linalg.norm', 'nm.linalg.norm', (['vec_r'], {}), '(vec_r)\n', (6344, 6351), True, 'import numpy as nm\n'), ((6360, 6388), 'sfepy.base.base.output', 'output', (["('residual: %e' % err)"], {}), "('residual: %e' % err)\n", (6366, 6388), False, 'from sfepy.base.base import output, Struct, IndexedStruct, basestr\n'), ((9131, 9158), 'sfepy.solvers.solvers.make_get_conf', 'make_get_conf', (['conf', 'kwargs'], {}), '(conf, kwargs)\n', (9144, 9158), False, 'from sfepy.solvers.solvers import make_get_conf, TimeSteppingSolver\n'), ((9176, 9213), 'sfepy.solvers.solvers.TimeSteppingSolver.process_conf', 'TimeSteppingSolver.process_conf', (['conf'], {}), '(conf)\n', (9207, 9213), False, 'from sfepy.solvers.solvers import make_get_conf, TimeSteppingSolver\n'), ((9506, 9555), 'sfepy.solvers.solvers.TimeSteppingSolver.__init__', 'TimeSteppingSolver.__init__', (['self', 'conf'], {}), '(self, conf, **kwargs)\n', (9533, 9555), False, 'from sfepy.solvers.solvers import make_get_conf, TimeSteppingSolver\n'), ((9575, 9607), 'sfepy.solvers.ts.TimeStepper.from_conf', 'TimeStepper.from_conf', (['self.conf'], {}), '(self.conf)\n', (9596, 9607), False, 'from sfepy.solvers.ts import TimeStepper, VariableTimeStepper\n'), ((11419, 11446), 'sfepy.solvers.solvers.make_get_conf', 'make_get_conf', (['conf', 'kwargs'], {}), '(conf, kwargs)\n', (11432, 11446), False, 'from sfepy.solvers.solvers import make_get_conf, TimeSteppingSolver\n'), ((11810, 11847), 'sfepy.discrete.mass_operator.MassOperator', 'MassOperator', (['self.problem', 'self.conf'], {}), '(self.problem, self.conf)\n', (11822, 11847), False, 'from sfepy.discrete.mass_operator import MassOperator\n'), ((12501, 12528), 'sfepy.solvers.solvers.make_get_conf', 'make_get_conf', (['conf', 'kwargs'], {}), '(conf, kwargs)\n', (12514, 12528), False, 'from sfepy.solvers.solvers import make_get_conf, TimeSteppingSolver\n'), ((13088, 13137), 'sfepy.solvers.solvers.TimeSteppingSolver.__init__', 'TimeSteppingSolver.__init__', (['self', 'conf'], {}), '(self, conf, **kwargs)\n', (13115, 13137), False, 'from sfepy.solvers.solvers import make_get_conf, TimeSteppingSolver\n'), ((13157, 13197), 'sfepy.solvers.ts.VariableTimeStepper.from_conf', 'VariableTimeStepper.from_conf', (['self.conf'], {}), '(self.conf)\n', (13186, 13197), False, 'from sfepy.solvers.ts import TimeStepper, VariableTimeStepper\n'), ((14854, 14890), 'sfepy.base.base.IndexedStruct', 'IndexedStruct', ([], {'n_iter': '(0)', 'condition': '(0)'}), '(n_iter=0, condition=0)\n', (14867, 14890), False, 'from sfepy.base.base import output, Struct, IndexedStruct, basestr\n'), ((2470, 2510), 'sfepy.base.base.output', 'output', (["('solving for %s...' % sorder[ib])"], {}), "('solving for %s...' % sorder[ib])\n", (2476, 2510), False, 'from sfepy.base.base import output, Struct, IndexedStruct, basestr\n'), ((2714, 2740), 'sfepy.base.base.get_subdict', 'get_subdict', (['parts0', 'block'], {}), '(parts0, block)\n', (2725, 2740), False, 'from sfepy.base.base import invert_dict, get_subdict\n'), ((2958, 2975), 'sfepy.base.base.output', 'output', (['"""...done"""'], {}), "('...done')\n", (2964, 2975), False, 'from sfepy.base.base import output, Struct, IndexedStruct, basestr\n'), ((3810, 3851), 'numpy.linspace', 'nm.linspace', (['(0)', '(ts.n_step - 1)', 'save_steps'], {}), '(0, ts.n_step - 1, save_steps)\n', (3821, 3851), True, 'import numpy as nm\n'), ((4192, 4240), 'sfepy.base.base.output', 'output', (['"""matrix evaluation failed, giving up..."""'], {}), "('matrix evaluation failed, giving up...')\n", (4198, 4240), False, 'from sfepy.base.base import output, Struct, IndexedStruct, basestr\n'), ((6241, 6291), 'sfepy.base.base.output', 'output', (['"""residual evaluation failed, giving up..."""'], {}), "('residual evaluation failed, giving up...')\n", (6247, 6291), False, 'from sfepy.base.base import output, Struct, IndexedStruct, basestr\n'), ((8755, 8802), 'sfepy.base.base.output', 'output', (["('----- new time step: %e -----' % ts.dt)"], {}), "('----- new time step: %e -----' % ts.dt)\n", (8761, 8802), False, 'from sfepy.base.base import output, Struct, IndexedStruct, basestr\n'), ((10188, 10237), 'sfepy.base.base.output', 'output', (['(self.format % (time, step + 1, ts.n_step))'], {}), '(self.format % (time, step + 1, ts.n_step))\n', (10194, 10237), False, 'from sfepy.base.base import output, Struct, IndexedStruct, basestr\n'), ((14009, 14080), 'sfepy.base.base.output', 'output', (['(self.format % (time, ts.dt, self.adt.wait, step + 1, ts.n_step))'], {}), '(self.format % (time, ts.dt, self.adt.wait, step + 1, ts.n_step))\n', (14015, 14080), False, 'from sfepy.base.base import output, Struct, IndexedStruct, basestr\n'), ((4879, 4900), 'numpy.linalg.norm', 'nm.linalg.norm', (['vec_r'], {}), '(vec_r)\n', (4893, 4900), True, 'import numpy as nm\n'), ((4917, 4953), 'sfepy.base.base.output', 'output', (["('initial residual: %e' % err)"], {}), "('initial residual: %e' % err)\n", (4923, 4953), False, 'from sfepy.base.base import output, Struct, IndexedStruct, basestr\n'), ((4758, 4816), 'sfepy.base.base.output', 'output', (['"""initial residual evaluation failed, giving up..."""'], {}), "('initial residual evaluation failed, giving up...')\n", (4764, 4816), False, 'from sfepy.base.base import output, Struct, IndexedStruct, basestr\n'), ((8404, 8451), 'sfepy.base.base.output', 'output', (["('+++++ new time step: %e +++++' % ts.dt)"], {}), "('+++++ new time step: %e +++++' % ts.dt)\n", (8410, 8451), False, 'from sfepy.base.base import output, Struct, IndexedStruct, basestr\n')]
|
import uuid
from datetime import datetime
from typing import Optional
from pydantic import UUID4
from sqlmodel import Field, SQLModel
class DocumentInput(SQLModel):
id: UUID4 = Field(default_factory=uuid.uuid4, primary_key=True)
content: str
class Document(DocumentInput, table=True):
created_at: datetime = Field(default_factory=datetime.utcnow)
updated_at: datetime = Field(default_factory=datetime.utcnow)
category: Optional[str] = Field(default=None)
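# Hypothetical usage sketch for the models above, using only stock sqlmodel
# calls; the in-memory SQLite URL is a placeholder.
from sqlmodel import Session, create_engine

engine = create_engine("sqlite://")
SQLModel.metadata.create_all(engine)  # creates the "document" table

with Session(engine) as session:
    doc = Document(content="hello")  # id/timestamps come from the default factories
    session.add(doc)
    session.commit()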
|
[
"sqlmodel.Field"
] |
[((184, 235), 'sqlmodel.Field', 'Field', ([], {'default_factory': 'uuid.uuid4', 'primary_key': '(True)'}), '(default_factory=uuid.uuid4, primary_key=True)\n', (189, 235), False, 'from sqlmodel import Field, SQLModel\n'), ((325, 363), 'sqlmodel.Field', 'Field', ([], {'default_factory': 'datetime.utcnow'}), '(default_factory=datetime.utcnow)\n', (330, 363), False, 'from sqlmodel import Field, SQLModel\n'), ((391, 429), 'sqlmodel.Field', 'Field', ([], {'default_factory': 'datetime.utcnow'}), '(default_factory=datetime.utcnow)\n', (396, 429), False, 'from sqlmodel import Field, SQLModel\n'), ((460, 479), 'sqlmodel.Field', 'Field', ([], {'default': 'None'}), '(default=None)\n', (465, 479), False, 'from sqlmodel import Field, SQLModel\n')]
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""Test int8 quantizated model on ImageNet.
Note:
* QAT simulate int8 with fp32, gpu only.
* Quantized use real int8, cpu only, a bit slow.
* Results may be slightly different between qat and quantized mode.
"""
import argparse
import time
# pylint: disable=import-error
import models
import megengine as mge
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.quantization as Q
from megengine.quantization.quantize import quantize, quantize_qat
logger = mge.get_logger(__name__)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-a", "--arch", default="resnet18", type=str)
parser.add_argument("-d", "--data", default=None, type=str)
parser.add_argument("-s", "--save", default="/data/models", type=str)
parser.add_argument(
"-c",
"--checkpoint",
default=None,
type=str,
help="pretrained model to finetune",
)
parser.add_argument(
"-m",
"--mode",
default="qat",
type=str,
choices=["normal", "qat", "quantized"],
help="Quantization Mode\n"
"normal: no quantization, using float32\n"
"qat: quantization aware training, simulate int8\n"
"quantized: convert mode to int8 quantized, inference only",
)
parser.add_argument("-n", "--ngpus", default=None, type=int)
parser.add_argument("-w", "--workers", default=4, type=int)
parser.add_argument("--report-freq", default=50, type=int)
args = parser.parse_args()
world_size = (
dist.helper.get_device_count_by_fork("gpu")
if args.ngpus is None
else args.ngpus
)
world_size = 1 if world_size == 0 else world_size
test_proc = dist.launcher(worker) if world_size > 1 else worker
test_proc(world_size, args)
def worker(world_size, args):
# pylint: disable=too-many-statements
rank = dist.get_rank()
if world_size > 1:
# Initialize distributed process group
logger.info("init distributed process group {} / {}".format(rank, world_size))
model = models.__dict__[args.arch]()
if args.mode != "normal":
quantize_qat(model, qconfig=Q.ema_fakequant_qconfig)
if args.checkpoint:
logger.info("Load pretrained weights from %s", args.checkpoint)
ckpt = mge.load(args.checkpoint)
ckpt = ckpt["state_dict"] if "state_dict" in ckpt else ckpt
model.load_state_dict(ckpt, strict=False)
if args.mode == "quantized":
quantize(model)
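# Flow note: quantize_qat() above swaps float modules for fake-quant QAT
# modules (fp32 simulating int8), and quantize() then converts those QAT
# modules into real int8 quantized modules for CPU inference.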
# Define valid graph
def valid_func(image, label):
model.eval()
logits = model(image)
loss = F.loss.cross_entropy(logits, label, label_smooth=0.1)
acc1, acc5 = F.topk_accuracy(logits, label, (1, 5))
if dist.is_distributed(): # all_reduce_mean
loss = dist.functional.all_reduce_sum(loss) / dist.get_world_size()
acc1 = dist.functional.all_reduce_sum(acc1) / dist.get_world_size()
acc5 = dist.functional.all_reduce_sum(acc5) / dist.get_world_size()
return loss, acc1, acc5
# Build valid datasets
logger.info("preparing dataset..")
valid_dataset = data.dataset.ImageNet(args.data, train=False)
valid_sampler = data.SequentialSampler(
valid_dataset, batch_size=100, drop_last=False
)
valid_queue = data.DataLoader(
valid_dataset,
sampler=valid_sampler,
transform=T.Compose(
[T.Resize(256), T.CenterCrop(224), T.Normalize(mean=128), T.ToMode("CHW")]
),
num_workers=args.workers,
)
_, valid_acc, valid_acc5 = infer(valid_func, valid_queue, args)
if rank == 0:
logger.info("TEST %f, %f", valid_acc, valid_acc5)
def infer(model, data_queue, args):
objs = AverageMeter("Loss")
top1 = AverageMeter("Acc@1")
top5 = AverageMeter("Acc@5")
total_time = AverageMeter("Time")
t = time.time()
for step, (image, label) in enumerate(data_queue):
n = image.shape[0]
image = mge.tensor(image, dtype="float32")
label = mge.tensor(label, dtype="int32")
loss, acc1, acc5 = model(image, label)
objs.update(loss.numpy()[0], n)
top1.update(100 * acc1.numpy()[0], n)
top5.update(100 * acc5.numpy()[0], n)
total_time.update(time.time() - t)
t = time.time()
if step % args.report_freq == 0 and dist.get_rank() == 0:
logger.info("Step %d, %s %s %s %s", step, objs, top1, top5, total_time)
return objs.avg, top1.avg, top5.avg
class AverageMeter:
"""Computes and stores the average and current value"""
def __init__(self, name, fmt=":.3f"):
self.name = name
self.fmt = fmt
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __str__(self):
fmtstr = "{name} {val" + self.fmt + "} ({avg" + self.fmt + "})"
return fmtstr.format(**self.__dict__)
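# Meter semantics, for reference: update(val, n) records a value that is
# already averaged over n samples, so after
#   meter = AverageMeter("Acc@1"); meter.update(0.5, 10); meter.update(1.0, 30)
# meter.avg == 0.875 and str(meter) == "Acc@1 1.000 (0.875)".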
if __name__ == "__main__":
main()
|
[
"megengine.distributed.is_distributed",
"megengine.data.transform.ToMode",
"megengine.data.transform.CenterCrop",
"megengine.data.transform.Normalize",
"megengine.data.SequentialSampler",
"megengine.distributed.get_rank",
"megengine.distributed.get_world_size",
"megengine.get_logger",
"megengine.data.dataset.ImageNet",
"megengine.data.transform.Resize",
"megengine.tensor",
"megengine.distributed.helper.get_device_count_by_fork",
"megengine.functional.loss.cross_entropy",
"megengine.quantization.quantize.quantize",
"megengine.quantization.quantize.quantize_qat",
"megengine.distributed.functional.all_reduce_sum",
"megengine.functional.topk_accuracy",
"megengine.load",
"megengine.distributed.launcher"
] |
[((950, 974), 'megengine.get_logger', 'mge.get_logger', (['__name__'], {}), '(__name__)\n', (964, 974), True, 'import megengine as mge\n'), ((1002, 1027), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1025, 1027), False, 'import argparse\n'), ((2354, 2369), 'megengine.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (2367, 2369), True, 'import megengine.distributed as dist\n'), ((3627, 3672), 'megengine.data.dataset.ImageNet', 'data.dataset.ImageNet', (['args.data'], {'train': '(False)'}), '(args.data, train=False)\n', (3648, 3672), True, 'import megengine.data as data\n'), ((3693, 3763), 'megengine.data.SequentialSampler', 'data.SequentialSampler', (['valid_dataset'], {'batch_size': '(100)', 'drop_last': '(False)'}), '(valid_dataset, batch_size=100, drop_last=False)\n', (3715, 3763), True, 'import megengine.data as data\n'), ((4362, 4373), 'time.time', 'time.time', ([], {}), '()\n', (4371, 4373), False, 'import time\n'), ((2010, 2053), 'megengine.distributed.helper.get_device_count_by_fork', 'dist.helper.get_device_count_by_fork', (['"""gpu"""'], {}), "('gpu')\n", (2046, 2053), True, 'import megengine.distributed as dist\n'), ((2184, 2205), 'megengine.distributed.launcher', 'dist.launcher', (['worker'], {}), '(worker)\n', (2197, 2205), True, 'import megengine.distributed as dist\n'), ((2608, 2660), 'megengine.quantization.quantize.quantize_qat', 'quantize_qat', (['model'], {'qconfig': 'Q.ema_fakequant_qconfig'}), '(model, qconfig=Q.ema_fakequant_qconfig)\n', (2620, 2660), False, 'from megengine.quantization.quantize import quantize, quantize_qat\n'), ((2773, 2798), 'megengine.load', 'mge.load', (['args.checkpoint'], {}), '(args.checkpoint)\n', (2781, 2798), True, 'import megengine as mge\n'), ((2959, 2974), 'megengine.quantization.quantize.quantize', 'quantize', (['model'], {}), '(model)\n', (2967, 2974), False, 'from megengine.quantization.quantize import quantize, quantize_qat\n'), ((3101, 3154), 'megengine.functional.loss.cross_entropy', 'F.loss.cross_entropy', (['logits', 'label'], {'label_smooth': '(0.1)'}), '(logits, label, label_smooth=0.1)\n', (3121, 3154), True, 'import megengine.functional as F\n'), ((3176, 3214), 'megengine.functional.topk_accuracy', 'F.topk_accuracy', (['logits', 'label', '(1, 5)'], {}), '(logits, label, (1, 5))\n', (3191, 3214), True, 'import megengine.functional as F\n'), ((3226, 3247), 'megengine.distributed.is_distributed', 'dist.is_distributed', ([], {}), '()\n', (3245, 3247), True, 'import megengine.distributed as dist\n'), ((4472, 4506), 'megengine.tensor', 'mge.tensor', (['image'], {'dtype': '"""float32"""'}), "(image, dtype='float32')\n", (4482, 4506), True, 'import megengine as mge\n'), ((4523, 4555), 'megengine.tensor', 'mge.tensor', (['label'], {'dtype': '"""int32"""'}), "(label, dtype='int32')\n", (4533, 4555), True, 'import megengine as mge\n'), ((4792, 4803), 'time.time', 'time.time', ([], {}), '()\n', (4801, 4803), False, 'import time\n'), ((3287, 3323), 'megengine.distributed.functional.all_reduce_sum', 'dist.functional.all_reduce_sum', (['loss'], {}), '(loss)\n', (3317, 3323), True, 'import megengine.distributed as dist\n'), ((3326, 3347), 'megengine.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (3345, 3347), True, 'import megengine.distributed as dist\n'), ((3367, 3403), 'megengine.distributed.functional.all_reduce_sum', 'dist.functional.all_reduce_sum', (['acc1'], {}), '(acc1)\n', (3397, 3403), True, 'import megengine.distributed as dist\n'), ((3406, 3427), 'megengine.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (3425, 3427), True, 'import megengine.distributed as dist\n'), ((3447, 3483), 'megengine.distributed.functional.all_reduce_sum', 'dist.functional.all_reduce_sum', (['acc5'], {}), '(acc5)\n', (3477, 3483), True, 'import megengine.distributed as dist\n'), ((3486, 3507), 'megengine.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (3505, 3507), True, 'import megengine.distributed as dist\n'), ((4763, 4774), 'time.time', 'time.time', ([], {}), '()\n', (4772, 4774), False, 'import time\n'), ((4849, 4864), 'megengine.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (4862, 4864), True, 'import megengine.distributed as dist\n'), ((3909, 3922), 'megengine.data.transform.Resize', 'T.Resize', (['(256)'], {}), '(256)\n', (3917, 3922), True, 'import megengine.data.transform as T\n'), ((3924, 3941), 'megengine.data.transform.CenterCrop', 'T.CenterCrop', (['(224)'], {}), '(224)\n', (3936, 3941), True, 'import megengine.data.transform as T\n'), ((3943, 3964), 'megengine.data.transform.Normalize', 'T.Normalize', ([], {'mean': '(128)'}), '(mean=128)\n', (3954, 3964), True, 'import megengine.data.transform as T\n'), ((3966, 3981), 'megengine.data.transform.ToMode', 'T.ToMode', (['"""CHW"""'], {}), "('CHW')\n", (3974, 3981), True, 'import megengine.data.transform as T\n')]
|
import numpy as nm
from sfepy.linalg import dot_sequences
from sfepy.terms.terms import Term, terms
from sfepy.terms.terms_th import THTerm, ETHTerm
from sfepy.terms.terms_elastic import CauchyStressTerm
class BiotTerm(Term):
r"""
Biot coupling term with :math:`\alpha_{ij}` given in:
* vector form exploiting symmetry - in 3D it has the
indices ordered as :math:`[11, 22, 33, 12, 13, 23]`, in 2D it has
the indices ordered as :math:`[11, 22, 12]`,
* matrix form - non-symmetric coupling parameter.
Corresponds to weak forms of Biot gradient and divergence terms.
Can be evaluated. Can use derivatives.
:Definition:
.. math::
\int_{\Omega} p\ \alpha_{ij} e_{ij}(\ul{v}) \mbox{ , } \int_{\Omega}
q\ \alpha_{ij} e_{ij}(\ul{u})
:Arguments 1:
- material : :math:`\alpha_{ij}`
- virtual : :math:`\ul{v}`
- state : :math:`p`
:Arguments 2:
- material : :math:`\alpha_{ij}`
- state : :math:`\ul{u}`
- virtual : :math:`q`
:Arguments 3:
- material : :math:`\alpha_{ij}`
- parameter_v : :math:`\ul{u}`
- parameter_s : :math:`p`
"""
name = 'dw_biot'
arg_types = (('material', 'virtual', 'state'),
('material', 'state', 'virtual'),
('material', 'parameter_v', 'parameter_s'))
arg_shapes = [{'material' : 'S, 1',
'virtual/grad' : ('D', None), 'state/grad' : 1,
'virtual/div' : (1, None), 'state/div' : 'D',
'parameter_v' : 'D', 'parameter_s' : 1},
{'material' : 'D, D'}]
modes = ('grad', 'div', 'eval')
def get_fargs(self, mat, vvar, svar,
mode=None, term_mode=None, diff_var=None, **kwargs):
sym_mode = False if mat.shape[-2] == mat.shape[-1] > 1 else True
if not sym_mode:
sh = mat.shape
# the gradient given by 'self.get' is transposed
mat = nm.swapaxes(mat, 2, 3)
mat = mat.reshape(sh[:2] + (sh[2]**2, 1))
if self.mode == 'grad':
qp_var, qp_name = svar, 'val'
else:
if sym_mode:
qp_var, qp_name = vvar, 'cauchy_strain'
else:
qp_var, qp_name = vvar, 'grad'
if mode == 'weak':
vvg, _ = self.get_mapping(vvar)
svg, _ = self.get_mapping(svar)
if diff_var is None:
val_qp = self.get(qp_var, qp_name)
if qp_name == 'grad':
sh = val_qp.shape
val_qp = val_qp.reshape(sh[:2] + (sh[2]**2, 1))
fmode = 0
else:
val_qp = nm.array([0], ndmin=4, dtype=nm.float64)
fmode = 1
return 1.0, val_qp, mat, svg, vvg, fmode
elif mode == 'eval':
vvg, _ = self.get_mapping(vvar)
if sym_mode:
strain = self.get(vvar, 'cauchy_strain')
else:
strain = self.get(vvar, 'grad')
sh = strain.shape
strain = strain.reshape(sh[:2] + (sh[2]**2, 1))
pval = self.get(svar, 'val')
return 1.0, pval, strain, mat, vvg
else:
raise ValueError('unsupported evaluation mode in %s! (%s)'
% (self.name, mode))
def get_eval_shape(self, mat, vvar, svar,
mode=None, term_mode=None, diff_var=None, **kwargs):
n_el, n_qp, dim, n_en, n_c = self.get_data_shape(vvar)
return (n_el, 1, 1, 1), vvar.dtype
def set_arg_types(self):
self.function = {
'grad' : terms.dw_biot_grad,
'div' : terms.dw_biot_div,
'eval' : terms.d_biot_div,
}[self.mode]
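# A hedged usage sketch: in an sfepy problem description, this term is invoked
# by name inside an equation string; the region 'Omega', material 'm' and
# variables 'u', 'v', 'p' below are hypothetical placeholders.
example_equations = {
    'balance': """dw_lin_elastic.2.Omega(m.D, v, u)
                - dw_biot.2.Omega(m.alpha, v, p) = 0""",
}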
class BiotStressTerm(CauchyStressTerm):
r"""
Evaluate Biot stress tensor.
It is given in the usual vector form exploiting symmetry: in 3D it has 6
components with the indices ordered as :math:`[11, 22, 33, 12, 13, 23]`, in
2D it has 3 components with the indices ordered as :math:`[11, 22, 12]`.
Supports 'eval', 'el_avg' and 'qp' evaluation modes.
:Definition:
.. math::
- \int_{\Omega} \alpha_{ij} \bar{p}
.. math::
\mbox{vector for } K \from \Ical_h:
- \int_{T_K} \alpha_{ij} \bar{p} / \int_{T_K} 1
.. math::
- \alpha_{ij} \bar{p}|_{qp}
:Arguments:
- material : :math:`\alpha_{ij}`
- parameter : :math:`\bar{p}`
"""
name = 'ev_biot_stress'
arg_types = ('material', 'parameter')
arg_shapes = {'material' : 'S, 1', 'parameter' : 1}
@staticmethod
def function(out, val_qp, mat, vg, fmode):
if fmode == 2:
out[:] = dot_sequences(mat, val_qp)
status = 0
else:
status = terms.de_cauchy_stress(out, val_qp, mat, vg, fmode)
out *= -1.0
return status
def get_fargs(self, mat, parameter,
mode=None, term_mode=None, diff_var=None, **kwargs):
vg, _ = self.get_mapping(parameter)
val_qp = self.get(parameter, 'val')
fmode = {'eval' : 0, 'el_avg' : 1, 'qp' : 2}.get(mode, 1)
return val_qp, mat, vg, fmode
class BiotTHTerm(BiotTerm, THTerm):
r"""
Fading memory Biot term. Can use derivatives.
:Definition:
.. math::
\begin{array}{l}
\int_{\Omega} \left [\int_0^t \alpha_{ij}(t-\tau)\,p(\tau)) \difd{\tau}
\right]\,e_{ij}(\ul{v}) \mbox{ ,} \\
\int_{\Omega} \left [\int_0^t
\alpha_{ij}(t-\tau) e_{kl}(\ul{u}(\tau)) \difd{\tau} \right] q
\end{array}
:Arguments 1:
- ts : :class:`TimeStepper` instance
- material : :math:`\alpha_{ij}(\tau)`
- virtual : :math:`\ul{v}`
- state : :math:`p`
:Arguments 2:
- ts : :class:`TimeStepper` instance
- material : :math:`\alpha_{ij}(\tau)`
- state : :math:`\ul{u}`
- virtual : :math:`q`
"""
name = 'dw_biot_th'
arg_types = (('ts', 'material', 'virtual', 'state'),
('ts', 'material', 'state', 'virtual'))
arg_shapes = {'material' : '.: N, S, 1',
'virtual/grad' : ('D', None), 'state/grad' : 1,
'virtual/div' : (1, None), 'state/div' : 'D'}
modes = ('grad', 'div')
def get_fargs(self, ts, mats, vvar, svar,
mode=None, term_mode=None, diff_var=None, **kwargs):
if self.mode == 'grad':
qp_var, qp_name = svar, 'val'
else:
qp_var, qp_name = vvar, 'cauchy_strain'
n_el, n_qp, dim, n_en, n_c = self.get_data_shape(svar)
if mode == 'weak':
vvg, _ = self.get_mapping(vvar)
svg, _ = self.get_mapping(svar)
if diff_var is None:
def iter_kernel():
for ii, mat in enumerate(mats):
val_qp = self.get(qp_var, qp_name, step=-ii)
mat = nm.tile(mat, (n_el, n_qp, 1, 1))
yield ii, (ts.dt, val_qp, mat, svg, vvg, 0)
fargs = iter_kernel
else:
val_qp = nm.array([0], ndmin=4, dtype=nm.float64)
mat = nm.tile(mats[0], (n_el, n_qp, 1, 1))
fargs = ts.dt, val_qp, mat, svg, vvg, 1
return fargs
else:
raise ValueError('unsupported evaluation mode in %s! (%s)'
% (self.name, mode))
class BiotETHTerm(BiotTerm, ETHTerm):
r"""
This term has the same definition as dw_biot_th, but assumes an
exponential approximation of the convolution kernel resulting in much
higher efficiency. Can use derivatives.
:Definition:
.. math::
\begin{array}{l}
\int_{\Omega} \left [\int_0^t \alpha_{ij}(t-\tau)\,p(\tau)) \difd{\tau}
\right]\,e_{ij}(\ul{v}) \mbox{ ,} \\
\int_{\Omega} \left [\int_0^t
\alpha_{ij}(t-\tau) e_{kl}(\ul{u}(\tau)) \difd{\tau} \right] q
\end{array}
:Arguments 1:
- ts : :class:`TimeStepper` instance
- material_0 : :math:`\alpha_{ij}(0)`
- material_1 : :math:`\exp(-\lambda \Delta t)` (decay at :math:`t_1`)
- virtual : :math:`\ul{v}`
- state : :math:`p`
:Arguments 2:
- ts : :class:`TimeStepper` instance
- material_0 : :math:`\alpha_{ij}(0)`
- material_1 : :math:`\exp(-\lambda \Delta t)` (decay at :math:`t_1`)
- state : :math:`\ul{u}`
- virtual : :math:`q`
"""
name = 'dw_biot_eth'
arg_types = (('ts', 'material_0', 'material_1', 'virtual', 'state'),
('ts', 'material_0', 'material_1', 'state', 'virtual'))
arg_shapes = {'material_0' : 'S, 1', 'material_1' : '1, 1',
'virtual/grad' : ('D', None), 'state/grad' : 1,
'virtual/div' : (1, None), 'state/div' : 'D'}
modes = ('grad', 'div')
def get_fargs(self, ts, mat0, mat1, vvar, svar,
mode=None, term_mode=None, diff_var=None, **kwargs):
if self.mode == 'grad':
qp_var, qp_name, iv = svar, 'val', 4
else:
qp_var, qp_name, iv = vvar, 'cauchy_strain', 3
if mode == 'weak':
vvg, _, key = self.get_mapping(vvar, return_key=True)
svg, _ = self.get_mapping(svar)
if diff_var is None:
val_qp = self.get(qp_var, qp_name)
key += tuple(self.arg_names[ii] for ii in [1, 2, iv])
data = self.get_eth_data(key, qp_var, mat1, val_qp)
val = data.history + data.values
fargs = (ts.dt, val, mat0, svg, vvg, 0)
else:
val_qp = nm.array([0], ndmin=4, dtype=nm.float64)
fargs = (ts.dt, val_qp, mat0, svg, vvg, 1)
return fargs
else:
raise ValueError('unsupported evaluation mode in %s! (%s)'
% (self.name, mode))
|
[
"sfepy.linalg.dot_sequences",
"sfepy.terms.terms.terms.de_cauchy_stress"
] |
[((2000, 2022), 'numpy.swapaxes', 'nm.swapaxes', (['mat', '(2)', '(3)'], {}), '(mat, 2, 3)\n', (2011, 2022), True, 'import numpy as nm\n'), ((4783, 4809), 'sfepy.linalg.dot_sequences', 'dot_sequences', (['mat', 'val_qp'], {}), '(mat, val_qp)\n', (4796, 4809), False, 'from sfepy.linalg import dot_sequences\n'), ((4869, 4920), 'sfepy.terms.terms.terms.de_cauchy_stress', 'terms.de_cauchy_stress', (['out', 'val_qp', 'mat', 'vg', 'fmode'], {}), '(out, val_qp, mat, vg, fmode)\n', (4891, 4920), False, 'from sfepy.terms.terms import Term, terms\n'), ((2729, 2769), 'numpy.array', 'nm.array', (['[0]'], {'ndmin': '(4)', 'dtype': 'nm.float64'}), '([0], ndmin=4, dtype=nm.float64)\n', (2737, 2769), True, 'import numpy as nm\n'), ((7237, 7277), 'numpy.array', 'nm.array', (['[0]'], {'ndmin': '(4)', 'dtype': 'nm.float64'}), '([0], ndmin=4, dtype=nm.float64)\n', (7245, 7277), True, 'import numpy as nm\n'), ((7300, 7336), 'numpy.tile', 'nm.tile', (['mats[0]', '(n_el, n_qp, 1, 1)'], {}), '(mats[0], (n_el, n_qp, 1, 1))\n', (7307, 7336), True, 'import numpy as nm\n'), ((9827, 9867), 'numpy.array', 'nm.array', (['[0]'], {'ndmin': '(4)', 'dtype': 'nm.float64'}), '([0], ndmin=4, dtype=nm.float64)\n', (9835, 9867), True, 'import numpy as nm\n'), ((7056, 7088), 'numpy.tile', 'nm.tile', (['mat', '(n_el, n_qp, 1, 1)'], {}), '(mat, (n_el, n_qp, 1, 1))\n', (7063, 7088), True, 'import numpy as nm\n')]
|
# BSD 3-Clause License
# Copyright (c) <NAME> 2016,
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# ------------------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2019 Megvii Inc. All rights reserved.
# ------------------------------------------------------------------------------
import megengine.functional as F
import megengine.module as M
__all__ = ['MobileNetV2', 'mobilenet_v2']
def _make_divisible(v, divisor, min_value=None):
"""
This function is taken from the original tf repo.
It ensures that all layers have a channel number that is divisible by 8
It can be seen here:
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
:param v:
:param divisor:
:param min_value:
:return:
"""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
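# Small worked examples of the rounding rule above (divisor=8, default
# min_value); safe to execute at import time:
assert _make_divisible(37, 8) == 40  # round to the nearest multiple of 8
assert _make_divisible(4, 8) == 8    # clamped up to min_value (= divisor)
assert _make_divisible(10, 8) == 16  # 8 would lose >10% of 10, so bump a step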
class InvertedResidual(M.Module):
def __init__(self, inp, oup, stride, expand_ratio):
super(InvertedResidual, self).__init__()
self.stride = stride
assert stride in [1, 2]
hidden_dim = int(round(inp * expand_ratio))
self.use_res_connect = self.stride == 1 and inp == oup
layers = []
if expand_ratio != 1:
# pw
layers.append(M.ConvBnRelu2d(inp, hidden_dim, kernel_size=1, bias=False))
layers.extend([
# dw
M.ConvBnRelu2d(hidden_dim, hidden_dim, kernel_size=3, padding=1,
stride=stride, groups=hidden_dim, bias=False),
# pw-linear
M.ConvBn2d(hidden_dim, oup, kernel_size=1, bias=False)
])
self.conv = M.Sequential(*layers)
self.add = M.Elemwise("ADD")
def forward(self, x):
if self.use_res_connect:
return self.add(x, self.conv(x))
else:
return self.conv(x)
class MobileNetV2(M.Module):
def __init__(self, num_classes=1000, width_mult=1.0, inverted_residual_setting=None, round_nearest=8):
"""
MobileNet V2 main class
Args:
num_classes (int): Number of classes
width_mult (float): Width multiplier - adjusts number of channels in each layer by this amount
inverted_residual_setting: Network structure
round_nearest (int): Round the number of channels in each layer to be a multiple of this number
Set to 1 to turn off rounding
"""
super(MobileNetV2, self).__init__()
block = InvertedResidual
input_channel = 32
last_channel = 1280
if inverted_residual_setting is None:
inverted_residual_setting = [
# t, c, n, s
[1, 16, 1, 1],
[6, 24, 2, 2],
[6, 32, 3, 2],
[6, 64, 4, 2],
[6, 96, 3, 1],
[6, 160, 3, 2],
[6, 320, 1, 1],
]
# only check the first element, assuming user knows t,c,n,s are required
if len(inverted_residual_setting) == 0 or len(inverted_residual_setting[0]) != 4:
raise ValueError("inverted_residual_setting should be non-empty "
"or a 4-element list, got {}".format(inverted_residual_setting))
# building first layer
input_channel = _make_divisible(input_channel * width_mult, round_nearest)
self.last_channel = _make_divisible(last_channel * max(1.0, width_mult), round_nearest)
features = [M.ConvBnRelu2d(3, input_channel, kernel_size=3, padding=1, stride=2, bias=False)]
# building inverted residual blocks
for t, c, n, s in inverted_residual_setting:
output_channel = _make_divisible(c * width_mult, round_nearest)
for i in range(n):
stride = s if i == 0 else 1
features.append(block(input_channel, output_channel, stride, expand_ratio=t))
input_channel = output_channel
# building last several layers
features.append(M.ConvBnRelu2d(input_channel, self.last_channel, kernel_size=1, bias=False))
# make it M.Sequential
self.features = M.Sequential(*features)
# building classifier
self.classifier = M.Sequential(
M.Dropout(0.2),
M.Linear(self.last_channel, num_classes),
)
self.quant = M.QuantStub()
self.dequant = M.DequantStub()
# weight initialization
for m in self.modules():
if isinstance(m, M.Conv2d):
M.init.msra_normal_(m.weight, mode='fan_out')
if m.bias is not None:
M.init.zeros_(m.bias)
elif isinstance(m, M.BatchNorm2d):
M.init.ones_(m.weight)
M.init.zeros_(m.bias)
elif isinstance(m, M.Linear):
M.init.normal_(m.weight, 0, 0.01)
M.init.zeros_(m.bias)
def forward(self, x):
x = self.quant(x)
x = self.features(x)
x = F.avg_pool2d(x, 7)
x = F.flatten(x, 1)
x = self.dequant(x)
x = self.classifier(x)
return x
def mobilenet_v2(**kwargs):
"""
Constructs a MobileNetV2 architecture from
`"MobileNetV2: Inverted Residuals and Linear Bottlenecks" <https://arxiv.org/abs/1801.04381>`_.
"""
model = MobileNetV2(**kwargs)
return model
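# A hedged smoke-test sketch for the factory above, assuming only stock
# megengine/numpy APIs; input is NCHW 224x224, matching the 7x7 average pool.
if __name__ == "__main__":
    import numpy as np
    import megengine as mge

    net = mobilenet_v2()
    net.eval()
    inp = mge.tensor(np.random.rand(1, 3, 224, 224).astype("float32"))
    print(net(inp).shape)  # (1, 1000)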
|
[
"megengine.module.Elemwise",
"megengine.module.ConvBnRelu2d",
"megengine.module.Dropout",
"megengine.functional.flatten",
"megengine.module.DequantStub",
"megengine.module.Linear",
"megengine.module.ConvBn2d",
"megengine.module.init.msra_normal_",
"megengine.module.init.zeros_",
"megengine.module.init.normal_",
"megengine.module.init.ones_",
"megengine.functional.avg_pool2d",
"megengine.module.Sequential",
"megengine.module.QuantStub"
] |
[((3759, 3780), 'megengine.module.Sequential', 'M.Sequential', (['*layers'], {}), '(*layers)\n', (3771, 3780), True, 'import megengine.module as M\n'), ((3800, 3817), 'megengine.module.Elemwise', 'M.Elemwise', (['"""ADD"""'], {}), "('ADD')\n", (3810, 3817), True, 'import megengine.module as M\n'), ((6265, 6288), 'megengine.module.Sequential', 'M.Sequential', (['*features'], {}), '(*features)\n', (6277, 6288), True, 'import megengine.module as M\n'), ((6474, 6487), 'megengine.module.QuantStub', 'M.QuantStub', ([], {}), '()\n', (6485, 6487), True, 'import megengine.module as M\n'), ((6511, 6526), 'megengine.module.DequantStub', 'M.DequantStub', ([], {}), '()\n', (6524, 6526), True, 'import megengine.module as M\n'), ((7124, 7142), 'megengine.functional.avg_pool2d', 'F.avg_pool2d', (['x', '(7)'], {}), '(x, 7)\n', (7136, 7142), True, 'import megengine.functional as F\n'), ((7155, 7170), 'megengine.functional.flatten', 'F.flatten', (['x', '(1)'], {}), '(x, 1)\n', (7164, 7170), True, 'import megengine.functional as F\n'), ((5599, 5684), 'megengine.module.ConvBnRelu2d', 'M.ConvBnRelu2d', (['(3)', 'input_channel'], {'kernel_size': '(3)', 'padding': '(1)', 'stride': '(2)', 'bias': '(False)'}), '(3, input_channel, kernel_size=3, padding=1, stride=2, bias=False\n )\n', (5613, 5684), True, 'import megengine.module as M\n'), ((6133, 6208), 'megengine.module.ConvBnRelu2d', 'M.ConvBnRelu2d', (['input_channel', 'self.last_channel'], {'kernel_size': '(1)', 'bias': '(False)'}), '(input_channel, self.last_channel, kernel_size=1, bias=False)\n', (6147, 6208), True, 'import megengine.module as M\n'), ((6372, 6386), 'megengine.module.Dropout', 'M.Dropout', (['(0.2)'], {}), '(0.2)\n', (6381, 6386), True, 'import megengine.module as M\n'), ((6400, 6440), 'megengine.module.Linear', 'M.Linear', (['self.last_channel', 'num_classes'], {}), '(self.last_channel, num_classes)\n', (6408, 6440), True, 'import megengine.module as M\n'), ((3385, 3443), 'megengine.module.ConvBnRelu2d', 'M.ConvBnRelu2d', (['inp', 'hidden_dim'], {'kernel_size': '(1)', 'bias': '(False)'}), '(inp, hidden_dim, kernel_size=1, bias=False)\n', (3399, 3443), True, 'import megengine.module as M\n'), ((3498, 3613), 'megengine.module.ConvBnRelu2d', 'M.ConvBnRelu2d', (['hidden_dim', 'hidden_dim'], {'kernel_size': '(3)', 'padding': '(1)', 'stride': 'stride', 'groups': 'hidden_dim', 'bias': '(False)'}), '(hidden_dim, hidden_dim, kernel_size=3, padding=1, stride=\n stride, groups=hidden_dim, bias=False)\n', (3512, 3613), True, 'import megengine.module as M\n'), ((3673, 3727), 'megengine.module.ConvBn2d', 'M.ConvBn2d', (['hidden_dim', 'oup'], {'kernel_size': '(1)', 'bias': '(False)'}), '(hidden_dim, oup, kernel_size=1, bias=False)\n', (3683, 3727), True, 'import megengine.module as M\n'), ((6649, 6694), 'megengine.module.init.msra_normal_', 'M.init.msra_normal_', (['m.weight'], {'mode': '"""fan_out"""'}), "(m.weight, mode='fan_out')\n", (6668, 6694), True, 'import megengine.module as M\n'), ((6754, 6775), 'megengine.module.init.zeros_', 'M.init.zeros_', (['m.bias'], {}), '(m.bias)\n', (6767, 6775), True, 'import megengine.module as M\n'), ((6839, 6861), 'megengine.module.init.ones_', 'M.init.ones_', (['m.weight'], {}), '(m.weight)\n', (6851, 6861), True, 'import megengine.module as M\n'), ((6878, 6899), 'megengine.module.init.zeros_', 'M.init.zeros_', (['m.bias'], {}), '(m.bias)\n', (6891, 6899), True, 'import megengine.module as M\n'), ((6958, 6991), 'megengine.module.init.normal_', 'M.init.normal_', (['m.weight', '(0)', '(0.01)'], {}), '(m.weight, 0, 0.01)\n', (6972, 6991), True, 'import megengine.module as M\n'), ((7008, 7029), 'megengine.module.init.zeros_', 'M.init.zeros_', (['m.bias'], {}), '(m.bias)\n', (7021, 7029), True, 'import megengine.module as M\n')]
|
import typing as ty
from nepali_dictionary.common.db import Dictionary
from sqlmodel import Session, select
class SearchService:
def search(self, query: str, session: ty.Type[Session], engine) -> ty.Optional[dict]:
with session(engine) as s:
statement = select(Dictionary).where(Dictionary.word == query)
result: ty.Any = s.execute(statement).fetchone()
if result:
return result[0].dict()
|
[
"sqlmodel.select"
] |
[((282, 300), 'sqlmodel.select', 'select', (['Dictionary'], {}), '(Dictionary)\n', (288, 300), False, 'from sqlmodel import Session, select\n')]
|
from typing import TYPE_CHECKING, Optional
from uuid import UUID
from sqlalchemy.orm import joinedload
from sqlalchemy.schema import Column, ForeignKey
from sqlmodel import Field, Relationship
from sqlmodel.sql.sqltypes import GUID
from joj.horse.models.base import ORMUtils
from joj.horse.utils.base import is_uuid
if TYPE_CHECKING:
from joj.horse.models import Problem, ProblemSet
class ProblemProblemSetLink(ORMUtils, table=True): # type: ignore[call-arg]
__tablename__ = "problem_problem_set_links"
problem_id: UUID = Field(
sa_column=Column(
GUID, ForeignKey("problems.id", ondelete="CASCADE"), primary_key=True
),
)
problem_set_id: UUID = Field(
sa_column=Column(
GUID, ForeignKey("problem_sets.id", ondelete="CASCADE"), primary_key=True
),
)
position: int = Field(
index=True, nullable=False, sa_column_kwargs={"server_default": "0"}
)
problem: "Problem" = Relationship(back_populates="problem_problem_set_links")
problem_set: "ProblemSet" = Relationship(back_populates="problem_problem_set_links")
@classmethod
async def find_by_problem_set_and_problem(
cls, problem_set: str, problem: str
) -> Optional["ProblemProblemSetLink"]:
# this is buggy, do not use!
# not sure how much it's better than three queries (maybe even worse)
from joj.horse import models
statement = cls.sql_select().options(
joinedload(cls.problem_set, innerjoin=True),
joinedload(cls.problem, innerjoin=True),
)
if is_uuid(problem_set):
statement = statement.where(cls.problem_set_id == problem_set)
else:
statement = statement.where(models.ProblemSet.url == problem_set)
if is_uuid(problem):
statement = statement.where(cls.problem_id == problem)
else:
statement = statement.where(models.Problem.url == problem)
from loguru import logger
logger.info(statement)
result = await cls.session_exec(statement)
logger.info(result.all())
return result.one_or_none()
|
[
"sqlmodel.Relationship",
"sqlmodel.Field"
] |
[((856, 931), 'sqlmodel.Field', 'Field', ([], {'index': '(True)', 'nullable': '(False)', 'sa_column_kwargs': "{'server_default': '0'}"}), "(index=True, nullable=False, sa_column_kwargs={'server_default': '0'})\n", (861, 931), False, 'from sqlmodel import Field, Relationship\n'), ((972, 1028), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""problem_problem_set_links"""'}), "(back_populates='problem_problem_set_links')\n", (984, 1028), False, 'from sqlmodel import Field, Relationship\n'), ((1061, 1117), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""problem_problem_set_links"""'}), "(back_populates='problem_problem_set_links')\n", (1073, 1117), False, 'from sqlmodel import Field, Relationship\n'), ((1602, 1622), 'joj.horse.utils.base.is_uuid', 'is_uuid', (['problem_set'], {}), '(problem_set)\n', (1609, 1622), False, 'from joj.horse.utils.base import is_uuid\n'), ((1802, 1818), 'joj.horse.utils.base.is_uuid', 'is_uuid', (['problem'], {}), '(problem)\n', (1809, 1818), False, 'from joj.horse.utils.base import is_uuid\n'), ((2015, 2037), 'loguru.logger.info', 'logger.info', (['statement'], {}), '(statement)\n', (2026, 2037), False, 'from loguru import logger\n'), ((1483, 1526), 'sqlalchemy.orm.joinedload', 'joinedload', (['cls.problem_set'], {'innerjoin': '(True)'}), '(cls.problem_set, innerjoin=True)\n', (1493, 1526), False, 'from sqlalchemy.orm import joinedload\n'), ((1540, 1579), 'sqlalchemy.orm.joinedload', 'joinedload', (['cls.problem'], {'innerjoin': '(True)'}), '(cls.problem, innerjoin=True)\n', (1550, 1579), False, 'from sqlalchemy.orm import joinedload\n'), ((592, 637), 'sqlalchemy.schema.ForeignKey', 'ForeignKey', (['"""problems.id"""'], {'ondelete': '"""CASCADE"""'}), "('problems.id', ondelete='CASCADE')\n", (602, 637), False, 'from sqlalchemy.schema import Column, ForeignKey\n'), ((751, 800), 'sqlalchemy.schema.ForeignKey', 'ForeignKey', (['"""problem_sets.id"""'], {'ondelete': '"""CASCADE"""'}), "('problem_sets.id', ondelete='CASCADE')\n", (761, 800), False, 'from sqlalchemy.schema import Column, ForeignKey\n')]
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
import math
import megengine as mge
import megengine.functional as F
import megengine.module as M
from yolox.utils import bboxes_iou
from .losses import binary_cross_entropy, iou_loss, l1_loss
from .network_blocks import BaseConv, DWConv
def meshgrid(x, y):
"""meshgrid wrapper for megengine"""
assert len(x.shape) == 1
assert len(y.shape) == 1
mesh_shape = (y.shape[0], x.shape[0])
mesh_x = F.broadcast_to(x, mesh_shape)
mesh_y = F.broadcast_to(y.reshape(-1, 1), mesh_shape)
return mesh_x, mesh_y
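# Shape convention of the wrapper above: for x of length W and y of length H,
# both outputs are (H, W), matching numpy.meshgrid's default 'xy' indexing,
# e.g. meshgrid(F.arange(3), F.arange(2)) gives
#   mesh_x = [[0, 1, 2], [0, 1, 2]],  mesh_y = [[0, 0, 0], [1, 1, 1]].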
class YOLOXHead(M.Module):
def __init__(
self, num_classes, width=1.0, strides=[8, 16, 32, 64],
in_channels=[256, 512, 1024, 1024], act="silu", depthwise=False
):
"""
Args:
act (str): activation type of conv. Default value: "silu".
depthwise (bool): whether to apply depthwise conv in conv branch. Default value: False.
"""
super().__init__()
self.n_anchors = 1
self.num_classes = num_classes
self.decode_in_inference = True # for deploy, set to False
self.cls_convs = []
self.reg_convs = []
self.cls_preds = []
self.reg_preds = []
self.obj_preds = []
self.stems = []
Conv = DWConv if depthwise else BaseConv
for i in range(len(in_channels)):
self.stems.append(
BaseConv(
in_channels=int(in_channels[i] * width),
out_channels=int(256 * width),
ksize=1,
stride=1,
act=act,
)
)
self.cls_convs.append(
M.Sequential(
*[
Conv(
in_channels=int(256 * width),
out_channels=int(256 * width),
ksize=3,
stride=1,
act=act,
),
Conv(
in_channels=int(256 * width),
out_channels=int(256 * width),
ksize=3,
stride=1,
act=act,
),
]
)
)
self.reg_convs.append(
M.Sequential(
*[
Conv(
in_channels=int(256 * width),
out_channels=int(256 * width),
ksize=3,
stride=1,
act=act,
),
Conv(
in_channels=int(256 * width),
out_channels=int(256 * width),
ksize=3,
stride=1,
act=act,
),
]
)
)
self.cls_preds.append(
M.Conv2d(
in_channels=int(256 * width),
out_channels=self.n_anchors * self.num_classes,
kernel_size=1,
stride=1,
padding=0,
)
)
self.reg_preds.append(
M.Conv2d(
in_channels=int(256 * width),
out_channels=4,
kernel_size=1,
stride=1,
padding=0,
)
)
self.obj_preds.append(
M.Conv2d(
in_channels=int(256 * width),
out_channels=self.n_anchors * 1,
kernel_size=1,
stride=1,
padding=0,
)
)
self.use_l1 = False
self.strides = strides
self.grids = [F.zeros(1)] * len(in_channels)
self.expanded_strides = [None] * len(in_channels)
def initialize_biases(self, prior_prob):
for conv in self.cls_preds:
bias_value = -math.log((1 - prior_prob) / prior_prob)
M.init.fill_(conv.bias, bias_value)
for conv in self.obj_preds:
bias_value = -math.log((1 - prior_prob) / prior_prob)
M.init.fill_(conv.bias, bias_value)
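# Note on the init above: with bias b = -log((1 - p) / p), sigmoid(b) = p, so
# every anchor starts out predicting the prior probability, e.g. 0.01
# (the RetinaNet focal-loss initialization trick).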
def forward(self, xin, labels=None, imgs=None):
outputs = []
origin_preds = []
x_shifts = []
y_shifts = []
expanded_strides = []
for k, (cls_conv, reg_conv, stride_this_level, x) in enumerate(
zip(self.cls_convs, self.reg_convs, self.strides, xin)
):
x = self.stems[k](x)
cls_x = x
reg_x = x
cls_feat = cls_conv(cls_x)
cls_output = self.cls_preds[k](cls_feat)
reg_feat = reg_conv(reg_x)
reg_output = self.reg_preds[k](reg_feat)
obj_output = self.obj_preds[k](reg_feat)
if self.training:
output = F.concat([reg_output, obj_output, cls_output], 1)
output, grid = self.get_output_and_grid(output, k, stride_this_level)
x_shifts.append(grid[:, :, 0])
y_shifts.append(grid[:, :, 1])
expanded_strides.append(F.full((1, grid.shape[1]), stride_this_level))
if self.use_l1:
batch_size = reg_output.shape[0]
hsize, wsize = reg_output.shape[-2:]
reg_output = reg_output.reshape(batch_size, self.n_anchors, 4, hsize, wsize)
reg_output = (
F.transpose(reg_output, (0, 1, 3, 4, 2)).reshape(batch_size, -1, 4)
)
origin_preds.append(mge.Tensor(reg_output))
else:
output = F.concat([reg_output, F.sigmoid(obj_output), F.sigmoid(cls_output)], 1)
outputs.append(output)
if self.training:
return self.get_losses(
imgs, x_shifts, y_shifts, expanded_strides,
labels, F.concat(outputs, 1), origin_preds,
)
else:
self.hw = [x.shape[-2:] for x in outputs]
# [batch, n_anchors_all, 85]
outputs = F.concat([F.flatten(x, start_axis=2) for x in outputs], axis=2)
outputs = F.transpose(outputs, (0, 2, 1))
if self.decode_in_inference:
return self.decode_outputs(outputs)
else:
return outputs
def get_output_and_grid(self, output, k, stride):
grid = self.grids[k]
batch_size = output.shape[0]
n_ch = 5 + self.num_classes
hsize, wsize = output.shape[-2:]
if grid.shape[2:4] != output.shape[2:4]:
xv, yv = meshgrid(F.arange(hsize), F.arange(wsize))
grid = F.stack((xv, yv), 2).reshape(1, 1, hsize, wsize, 2)
self.grids[k] = grid
output = output.reshape(batch_size, self.n_anchors, n_ch, hsize, wsize)
output = (
F.transpose(output, (0, 1, 3, 4, 2))
.reshape(batch_size, self.n_anchors * hsize * wsize, -1)
)
grid = grid.reshape(1, -1, 2)
output[..., :2] = (output[..., :2] + grid) * stride
output[..., 2:4] = F.exp(output[..., 2:4]) * stride
return output, grid
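# Decode convention used above (and mirrored in decode_outputs below): raw xy
# offsets are relative to the grid cell and scaled by stride, while wh is
# predicted in log space. E.g. at stride 8, a location with grid entry (3, 2)
# and raw output (0.5, 0.5, 0.0, 0.0) decodes to center (28, 20) and size
# (8, 8) in input-image pixels.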
def decode_outputs(self, outputs):
grids = []
strides = []
for (hsize, wsize), stride in zip(self.hw, self.strides):
xv, yv = meshgrid(F.arange(hsize), F.arange(wsize))
grid = F.stack((xv, yv), 2).reshape(1, -1, 2)
grids.append(grid)
shape = grid.shape[:2]
strides.append(F.full((*shape, 1), stride))
grids = F.concat(grids, axis=1)
strides = F.concat(strides, axis=1)
outputs[..., :2] = (outputs[..., :2] + grids) * strides
outputs[..., 2:4] = F.exp(outputs[..., 2:4]) * strides
return outputs
def focal_loss_discrite(self, pred, gt):
pos_inds = F.equal(gt, 1).astype("float32")
neg_inds = F.equal(gt, 0).astype("float32")
pos_loss = F.log(pred+1e-5) * F.pow(1-pred, 2) * pos_inds * 0.75
neg_loss = F.log(1-pred+1e-5) * F.pow(pred, 2) * neg_inds * 0.25
loss = -(pos_loss + neg_loss)
return loss
def get_losses(
self, imgs, x_shifts, y_shifts, expanded_strides, labels, outputs, origin_preds,
):
bbox_preds = outputs[:, :, :4] # [batch, n_anchors_all, 4]
obj_preds = F.expand_dims(outputs[:, :, 4], axis=-1) # [batch, n_anchors_all, 1]
cls_preds = outputs[:, :, 5:] # [batch, n_anchors_all, n_cls]
# calculate targets
mixup = labels.shape[2] > 5
if mixup:
label_cut = labels[..., :5]
else:
label_cut = labels
nlabel = (label_cut.sum(axis=2) > 0).sum(axis=1) # number of objects
total_num_anchors = outputs.shape[1]
x_shifts = F.concat(x_shifts, 1) # [1, n_anchors_all]
y_shifts = F.concat(y_shifts, 1) # [1, n_anchors_all]
expanded_strides = F.concat(expanded_strides, 1)
if self.use_l1:
origin_preds = F.concat(origin_preds, 1)
cls_targets = []
reg_targets = []
l1_targets = []
obj_targets = []
fg_masks = []
num_fg = 0.0
num_gts = 0.0
for batch_idx in range(outputs.shape[0]):
num_gt = int(nlabel[batch_idx])
num_gts += num_gt
if num_gt == 0:
cls_target = F.zeros((0, self.num_classes))
reg_target = F.zeros((0, 4))
l1_target = F.zeros((0, 4))
obj_target = F.zeros((total_num_anchors, 1))
fg_mask = F.zeros(total_num_anchors).astype("bool")
else:
gt_bboxes_per_image = labels[batch_idx, :num_gt, 1:5]
gt_classes = labels[batch_idx, :num_gt, 0]
bboxes_preds_per_image = bbox_preds[batch_idx]
gt_matched_classes, fg_mask, pred_ious_this_matching, matched_gt_inds, num_fg_img = self.get_assignments( # noqa
batch_idx, num_gt, total_num_anchors, gt_bboxes_per_image, gt_classes,
bboxes_preds_per_image, expanded_strides, x_shifts, y_shifts,
cls_preds, bbox_preds, obj_preds, labels, imgs,
)
num_fg += num_fg_img
cls_target = F.one_hot(
gt_matched_classes.astype("int32"), self.num_classes
) * F.expand_dims(pred_ious_this_matching, axis=-1)
obj_target = F.expand_dims(fg_mask, axis=-1)
reg_target = gt_bboxes_per_image[matched_gt_inds]
if self.use_l1:
l1_target = self.get_l1_target(
F.zeros((num_fg_img, 4)),
gt_bboxes_per_image[matched_gt_inds],
expanded_strides[0][fg_mask],
x_shifts=x_shifts[0][fg_mask],
y_shifts=y_shifts[0][fg_mask],
)
cls_targets.append(cls_target)
reg_targets.append(reg_target)
obj_targets.append(obj_target)
fg_masks.append(fg_mask)
if self.use_l1:
l1_targets.append(l1_target)
cls_targets = F.concat(cls_targets, 0)
reg_targets = F.concat(reg_targets, 0)
obj_targets = F.concat(obj_targets, 0)
fg_masks = F.concat(fg_masks, 0)
num_fg = max(num_fg, 1)
loss_iou = (iou_loss(bbox_preds.reshape(-1, 4)[fg_masks], reg_targets)).sum() / num_fg
loss_obj = (
# todo: change to focal loss
self.focal_loss_discrite(F.sigmoid(obj_preds).reshape(-1, 1), obj_targets)
# self.bcewithlog_loss(obj_preds.view(-1, 1), obj_targets)  # the original loss
).sum() / num_fg
# loss_obj = (binary_cross_entropy(obj_preds.reshape(-1, 1), obj_targets)).sum() / num_fg
loss_cls = (
binary_cross_entropy(cls_preds.reshape(-1, self.num_classes)[fg_masks], cls_targets)
).sum() / num_fg
if self.use_l1:
l1_targets = F.concat(l1_targets, 0)
loss_l1 = (l1_loss(origin_preds.reshape(-1, 4)[fg_masks], l1_targets)).sum() / num_fg
else:
loss_l1 = mge.Tensor(0.0)
reg_weight = 5.0
loss = reg_weight * loss_iou + loss_obj + loss_cls + loss_l1
return loss, reg_weight * loss_iou, loss_obj, loss_cls, loss_l1, num_fg / max(num_gts, 1)
def get_l1_target(self, l1_target, gt, stride, x_shifts, y_shifts, eps=1e-8):
l1_target[:, 0] = gt[:, 0] / stride - x_shifts
l1_target[:, 1] = gt[:, 1] / stride - y_shifts
l1_target[:, 2] = F.log(gt[:, 2] / stride + eps)
l1_target[:, 3] = F.log(gt[:, 3] / stride + eps)
return l1_target
def get_assignments(
self, batch_idx, num_gt, total_num_anchors, gt_bboxes_per_image, gt_classes,
bboxes_preds_per_image, expanded_strides, x_shifts, y_shifts,
cls_preds, bbox_preds, obj_preds, labels, imgs
):
fg_mask, is_in_boxes_and_center = self.get_in_boxes_info(
gt_bboxes_per_image, expanded_strides, x_shifts, y_shifts, total_num_anchors, num_gt,
)
bboxes_preds_per_image = bboxes_preds_per_image[fg_mask]
cls_preds_ = cls_preds[batch_idx][fg_mask]
obj_preds_ = obj_preds[batch_idx][fg_mask]
num_in_boxes_anchor = bboxes_preds_per_image.shape[0]
pair_wise_ious = bboxes_iou(
gt_bboxes_per_image, bboxes_preds_per_image, False
)
# MGE might bring a bad experience (performance) here
gt_cls_per_image = (
F.repeat(
F.expand_dims(
F.one_hot(gt_classes.astype("int32"), self.num_classes).astype("float32"),
axis=1,
),
repeats=num_in_boxes_anchor, axis=1,
)
)
pair_wise_ious_loss = -F.log(pair_wise_ious + 1e-8)
# ditto
cls_preds_ = F.sigmoid(
F.repeat(F.expand_dims(cls_preds_.astype("float32"), axis=0), repeats=num_gt, axis=0)
) * F.sigmoid(F.repeat(F.expand_dims(obj_preds_, axis=0), repeats=num_gt, axis=0))
pair_wise_cls_loss = binary_cross_entropy(
F.sqrt(cls_preds_), gt_cls_per_image, with_logits=False,
).sum(-1)
del cls_preds_
cost = (
pair_wise_cls_loss
+ 3.0 * pair_wise_ious_loss
+ 100000.0 * (~is_in_boxes_and_center)
)
(
num_fg, gt_matched_classes, pred_ious_this_matching, matched_gt_inds
) = self.dynamic_k_matching(cost, pair_wise_ious, gt_classes, num_gt, fg_mask)
del pair_wise_cls_loss, cost, pair_wise_ious, pair_wise_ious_loss
return (
gt_matched_classes.detach(),
fg_mask,
pred_ious_this_matching,
matched_gt_inds,
num_fg
)
def get_in_boxes_info(
self, gt_bboxes_per_image, expanded_strides, x_shifts, y_shifts, total_num_anchors, num_gt,
):
expanded_strides_per_image = expanded_strides[0]
x_shifts_per_image = x_shifts[0] * expanded_strides_per_image
y_shifts_per_image = y_shifts[0] * expanded_strides_per_image
x_centers_per_image = (
F.repeat(
F.expand_dims(x_shifts_per_image + 0.5 * expanded_strides_per_image, axis=0),
repeats=num_gt, axis=0,
)
) # [n_anchor] -> [n_gt, n_anchor]
y_centers_per_image = F.repeat(
F.expand_dims(y_shifts_per_image + 0.5 * expanded_strides_per_image, axis=0),
repeats=num_gt, axis=0,
)
gt_bboxes_per_image_l = F.repeat(
F.expand_dims(gt_bboxes_per_image[:, 0] - 0.5 * gt_bboxes_per_image[:, 2], axis=1),
repeats=total_num_anchors, axis=1,
)
gt_bboxes_per_image_r = F.repeat(
F.expand_dims(gt_bboxes_per_image[:, 0] + 0.5 * gt_bboxes_per_image[:, 2], axis=1),
repeats=total_num_anchors, axis=1,
)
gt_bboxes_per_image_t = F.repeat(
F.expand_dims(gt_bboxes_per_image[:, 1] - 0.5 * gt_bboxes_per_image[:, 3], axis=1),
repeats=total_num_anchors, axis=1,
)
gt_bboxes_per_image_b = F.repeat(
F.expand_dims(gt_bboxes_per_image[:, 1] + 0.5 * gt_bboxes_per_image[:, 3], axis=1),
repeats=total_num_anchors, axis=1,
)
b_l = x_centers_per_image - gt_bboxes_per_image_l
b_r = gt_bboxes_per_image_r - x_centers_per_image
b_t = y_centers_per_image - gt_bboxes_per_image_t
b_b = gt_bboxes_per_image_b - y_centers_per_image
bbox_deltas = F.stack([b_l, b_t, b_r, b_b], 2)
is_in_boxes = bbox_deltas.min(axis=-1) > 0.0
is_in_boxes_all = is_in_boxes.sum(axis=0) > 0
# in fixed center
center_radius = 2.5
gt_bboxes_per_image_l = F.repeat(
F.expand_dims(gt_bboxes_per_image[:, 0], axis=1),
repeats=total_num_anchors, axis=1,
) - center_radius * F.expand_dims(expanded_strides_per_image, axis=0)
gt_bboxes_per_image_r = F.repeat(
F.expand_dims(gt_bboxes_per_image[:, 0], axis=1),
repeats=total_num_anchors, axis=1,
) + center_radius * F.expand_dims(expanded_strides_per_image, axis=0)
gt_bboxes_per_image_t = F.repeat(
F.expand_dims(gt_bboxes_per_image[:, 1], axis=1),
repeats=total_num_anchors, axis=1,
) - center_radius * F.expand_dims(expanded_strides_per_image, axis=0)
gt_bboxes_per_image_b = F.repeat(
F.expand_dims(gt_bboxes_per_image[:, 1], axis=1),
repeats=total_num_anchors, axis=1,
) + center_radius * F.expand_dims(expanded_strides_per_image, axis=0)
c_l = x_centers_per_image - gt_bboxes_per_image_l
c_r = gt_bboxes_per_image_r - x_centers_per_image
c_t = y_centers_per_image - gt_bboxes_per_image_t
c_b = gt_bboxes_per_image_b - y_centers_per_image
center_deltas = F.stack([c_l, c_t, c_r, c_b], 2)
is_in_centers = center_deltas.min(axis=-1) > 0.0
is_in_centers_all = is_in_centers.sum(axis=0) > 0
# in boxes and in centers
is_in_boxes_anchor = is_in_boxes_all | is_in_centers_all
is_in_boxes_and_center = (
is_in_boxes[:, is_in_boxes_anchor] & is_in_centers[:, is_in_boxes_anchor]
)
return is_in_boxes_anchor.detach(), is_in_boxes_and_center.detach()
def dynamic_k_matching(self, cost, pair_wise_ious, gt_classes, num_gt, fg_mask):
# Dynamic K
# ---------------------------------------------------------------
matching_matrix = F.zeros_like(cost)
ious_in_boxes_matrix = pair_wise_ious
n_candidate_k = min(10, ious_in_boxes_matrix.shape[1])
topk_ious, _ = F.topk(ious_in_boxes_matrix, n_candidate_k, descending=True)
dynamic_ks = F.clip(topk_ious.sum(1).astype("int32"), lower=1)
for gt_idx in range(num_gt):
_, pos_idx = F.topk(cost[gt_idx], k=dynamic_ks[gt_idx], descending=False)
matching_matrix[gt_idx, pos_idx] = 1.0
del topk_ious, dynamic_ks, pos_idx
anchor_matching_gt = matching_matrix.sum(0)
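        # Conflict resolution: an anchor claimed by more than one gt is zeroed
        # everywhere and reassigned only to its lowest-cost gt, so every anchor
        # ends up matched to at most one gt.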
if (anchor_matching_gt > 1).sum() > 0:
cost_argmin = F.argmin(cost[:, anchor_matching_gt > 1], axis=0)
matching_matrix[:, anchor_matching_gt > 1] = 0.0
matching_matrix[cost_argmin, anchor_matching_gt > 1] = 1.0
fg_mask_inboxes = matching_matrix.sum(0) > 0.0
num_fg = fg_mask_inboxes.sum()
        # Narrow fg_mask in place: among anchors that passed the box/center
        # prior, keep only those actually matched to a gt by dynamic-k.
fg_mask[fg_mask] = fg_mask_inboxes
matched_gt_inds = F.argmax(matching_matrix[:, fg_mask_inboxes], axis=0)
gt_matched_classes = gt_classes[matched_gt_inds]
pred_ious_this_matching = (matching_matrix * pair_wise_ious).sum(0)[fg_mask_inboxes]
return (
num_fg.detach(),
gt_matched_classes.detach(),
pred_ious_this_matching.detach(),
matched_gt_inds.detach(),
)
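

# ---------------------------------------------------------------------------
# Hedged sketch (added for illustration; not part of the original file): a
# minimal, self-contained run of the dynamic-k rule above on toy tensors.
# Shapes and random values are assumptions; the F.topk / F.clip calls mirror
# exactly how dynamic_k_matching uses them.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import numpy as np
    import megengine as mge
    import megengine.functional as F

    rng = np.random.RandomState(0)
    num_gt, num_candidates = 3, 12
    # toy IoU matrix between gts and candidate anchors, values in [0, 1)
    pair_wise_ious = mge.Tensor(rng.rand(num_gt, num_candidates).astype("float32"))

    n_candidate_k = min(10, pair_wise_ious.shape[1])
    topk_ious, _ = F.topk(pair_wise_ious, n_candidate_k, descending=True)
    # each gt claims roughly "sum of its top-k IoUs" anchors, at least one
    dynamic_ks = F.clip(topk_ious.sum(1).astype("int32"), lower=1)
    print("per-gt k:", dynamic_ks.numpy())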
|
[
"megengine.functional.equal",
"megengine.functional.pow",
"megengine.functional.transpose",
"megengine.functional.zeros",
"megengine.functional.argmax",
"megengine.functional.log",
"megengine.functional.broadcast_to",
"megengine.functional.stack",
"megengine.functional.flatten",
"megengine.functional.sqrt",
"megengine.functional.concat",
"megengine.functional.exp",
"megengine.functional.zeros_like",
"megengine.functional.argmin",
"megengine.module.init.fill_",
"megengine.functional.sigmoid",
"megengine.functional.arange",
"megengine.functional.expand_dims",
"megengine.functional.topk",
"megengine.Tensor",
"megengine.functional.full"
]
|
[((523, 552), 'megengine.functional.broadcast_to', 'F.broadcast_to', (['x', 'mesh_shape'], {}), '(x, mesh_shape)\n', (537, 552), True, 'import megengine.functional as F\n'), ((8005, 8028), 'megengine.functional.concat', 'F.concat', (['grids'], {'axis': '(1)'}), '(grids, axis=1)\n', (8013, 8028), True, 'import megengine.functional as F\n'), ((8047, 8072), 'megengine.functional.concat', 'F.concat', (['strides'], {'axis': '(1)'}), '(strides, axis=1)\n', (8055, 8072), True, 'import megengine.functional as F\n'), ((8787, 8827), 'megengine.functional.expand_dims', 'F.expand_dims', (['outputs[:, :, 4]'], {'axis': '(-1)'}), '(outputs[:, :, 4], axis=-1)\n', (8800, 8827), True, 'import megengine.functional as F\n'), ((9239, 9260), 'megengine.functional.concat', 'F.concat', (['x_shifts', '(1)'], {}), '(x_shifts, 1)\n', (9247, 9260), True, 'import megengine.functional as F\n'), ((9302, 9323), 'megengine.functional.concat', 'F.concat', (['y_shifts', '(1)'], {}), '(y_shifts, 1)\n', (9310, 9323), True, 'import megengine.functional as F\n'), ((9373, 9402), 'megengine.functional.concat', 'F.concat', (['expanded_strides', '(1)'], {}), '(expanded_strides, 1)\n', (9381, 9402), True, 'import megengine.functional as F\n'), ((11669, 11693), 'megengine.functional.concat', 'F.concat', (['cls_targets', '(0)'], {}), '(cls_targets, 0)\n', (11677, 11693), True, 'import megengine.functional as F\n'), ((11716, 11740), 'megengine.functional.concat', 'F.concat', (['reg_targets', '(0)'], {}), '(reg_targets, 0)\n', (11724, 11740), True, 'import megengine.functional as F\n'), ((11763, 11787), 'megengine.functional.concat', 'F.concat', (['obj_targets', '(0)'], {}), '(obj_targets, 0)\n', (11771, 11787), True, 'import megengine.functional as F\n'), ((11807, 11828), 'megengine.functional.concat', 'F.concat', (['fg_masks', '(0)'], {}), '(fg_masks, 0)\n', (11815, 11828), True, 'import megengine.functional as F\n'), ((13081, 13111), 'megengine.functional.log', 'F.log', (['(gt[:, 2] / stride + eps)'], {}), '(gt[:, 2] / stride + eps)\n', (13086, 13111), True, 'import megengine.functional as F\n'), ((13138, 13168), 'megengine.functional.log', 'F.log', (['(gt[:, 3] / stride + eps)'], {}), '(gt[:, 3] / stride + eps)\n', (13143, 13168), True, 'import megengine.functional as F\n'), ((13867, 13929), 'yolox.utils.bboxes_iou', 'bboxes_iou', (['gt_bboxes_per_image', 'bboxes_preds_per_image', '(False)'], {}), '(gt_bboxes_per_image, bboxes_preds_per_image, False)\n', (13877, 13929), False, 'from yolox.utils import bboxes_iou\n'), ((17118, 17150), 'megengine.functional.stack', 'F.stack', (['[b_l, b_t, b_r, b_b]', '(2)'], {}), '([b_l, b_t, b_r, b_b], 2)\n', (17125, 17150), True, 'import megengine.functional as F\n'), ((18488, 18520), 'megengine.functional.stack', 'F.stack', (['[c_l, c_t, c_r, c_b]', '(2)'], {}), '([c_l, c_t, c_r, c_b], 2)\n', (18495, 18520), True, 'import megengine.functional as F\n'), ((19150, 19168), 'megengine.functional.zeros_like', 'F.zeros_like', (['cost'], {}), '(cost)\n', (19162, 19168), True, 'import megengine.functional as F\n'), ((19302, 19362), 'megengine.functional.topk', 'F.topk', (['ious_in_boxes_matrix', 'n_candidate_k'], {'descending': '(True)'}), '(ious_in_boxes_matrix, n_candidate_k, descending=True)\n', (19308, 19362), True, 'import megengine.functional as F\n'), ((20168, 20221), 'megengine.functional.argmax', 'F.argmax', (['matching_matrix[:, fg_mask_inboxes]'], {'axis': '(0)'}), '(matching_matrix[:, fg_mask_inboxes], axis=0)\n', (20176, 20221), True, 'import megengine.functional as F\n'), ((4374, 4409), 
'megengine.module.init.fill_', 'M.init.fill_', (['conv.bias', 'bias_value'], {}), '(conv.bias, bias_value)\n', (4386, 4409), True, 'import megengine.module as M\n'), ((4525, 4560), 'megengine.module.init.fill_', 'M.init.fill_', (['conv.bias', 'bias_value'], {}), '(conv.bias, bias_value)\n', (4537, 4560), True, 'import megengine.module as M\n'), ((6593, 6624), 'megengine.functional.transpose', 'F.transpose', (['outputs', '(0, 2, 1)'], {}), '(outputs, (0, 2, 1))\n', (6604, 6624), True, 'import megengine.functional as F\n'), ((7537, 7560), 'megengine.functional.exp', 'F.exp', (['output[..., 2:4]'], {}), '(output[..., 2:4])\n', (7542, 7560), True, 'import megengine.functional as F\n'), ((8166, 8190), 'megengine.functional.exp', 'F.exp', (['outputs[..., 2:4]'], {}), '(outputs[..., 2:4])\n', (8171, 8190), True, 'import megengine.functional as F\n'), ((9454, 9479), 'megengine.functional.concat', 'F.concat', (['origin_preds', '(1)'], {}), '(origin_preds, 1)\n', (9462, 9479), True, 'import megengine.functional as F\n'), ((12494, 12517), 'megengine.functional.concat', 'F.concat', (['l1_targets', '(0)'], {}), '(l1_targets, 0)\n', (12502, 12517), True, 'import megengine.functional as F\n'), ((12652, 12667), 'megengine.Tensor', 'mge.Tensor', (['(0.0)'], {}), '(0.0)\n', (12662, 12667), True, 'import megengine as mge\n'), ((14321, 14350), 'megengine.functional.log', 'F.log', (['(pair_wise_ious + 1e-08)'], {}), '(pair_wise_ious + 1e-08)\n', (14326, 14350), True, 'import megengine.functional as F\n'), ((15730, 15806), 'megengine.functional.expand_dims', 'F.expand_dims', (['(x_shifts_per_image + 0.5 * expanded_strides_per_image)'], {'axis': '(0)'}), '(x_shifts_per_image + 0.5 * expanded_strides_per_image, axis=0)\n', (15743, 15806), True, 'import megengine.functional as F\n'), ((15958, 16034), 'megengine.functional.expand_dims', 'F.expand_dims', (['(y_shifts_per_image + 0.5 * expanded_strides_per_image)'], {'axis': '(0)'}), '(y_shifts_per_image + 0.5 * expanded_strides_per_image, axis=0)\n', (15971, 16034), True, 'import megengine.functional as F\n'), ((16137, 16223), 'megengine.functional.expand_dims', 'F.expand_dims', (['(gt_bboxes_per_image[:, 0] - 0.5 * gt_bboxes_per_image[:, 2])'], {'axis': '(1)'}), '(gt_bboxes_per_image[:, 0] - 0.5 * gt_bboxes_per_image[:, 2],\n axis=1)\n', (16150, 16223), True, 'import megengine.functional as F\n'), ((16332, 16418), 'megengine.functional.expand_dims', 'F.expand_dims', (['(gt_bboxes_per_image[:, 0] + 0.5 * gt_bboxes_per_image[:, 2])'], {'axis': '(1)'}), '(gt_bboxes_per_image[:, 0] + 0.5 * gt_bboxes_per_image[:, 2],\n axis=1)\n', (16345, 16418), True, 'import megengine.functional as F\n'), ((16527, 16613), 'megengine.functional.expand_dims', 'F.expand_dims', (['(gt_bboxes_per_image[:, 1] - 0.5 * gt_bboxes_per_image[:, 3])'], {'axis': '(1)'}), '(gt_bboxes_per_image[:, 1] - 0.5 * gt_bboxes_per_image[:, 3],\n axis=1)\n', (16540, 16613), True, 'import megengine.functional as F\n'), ((16722, 16808), 'megengine.functional.expand_dims', 'F.expand_dims', (['(gt_bboxes_per_image[:, 1] + 0.5 * gt_bboxes_per_image[:, 3])'], {'axis': '(1)'}), '(gt_bboxes_per_image[:, 1] + 0.5 * gt_bboxes_per_image[:, 3],\n axis=1)\n', (16735, 16808), True, 'import megengine.functional as F\n'), ((19496, 19556), 'megengine.functional.topk', 'F.topk', (['cost[gt_idx]'], {'k': 'dynamic_ks[gt_idx]', 'descending': '(False)'}), '(cost[gt_idx], k=dynamic_ks[gt_idx], descending=False)\n', (19502, 19556), True, 'import megengine.functional as F\n'), ((19778, 19827), 'megengine.functional.argmin', 
'F.argmin', (['cost[:, anchor_matching_gt > 1]'], {'axis': '(0)'}), '(cost[:, anchor_matching_gt > 1], axis=0)\n', (19786, 19827), True, 'import megengine.functional as F\n'), ((4125, 4135), 'megengine.functional.zeros', 'F.zeros', (['(1)'], {}), '(1)\n', (4132, 4135), True, 'import megengine.functional as F\n'), ((4322, 4361), 'math.log', 'math.log', (['((1 - prior_prob) / prior_prob)'], {}), '((1 - prior_prob) / prior_prob)\n', (4330, 4361), False, 'import math\n'), ((4473, 4512), 'math.log', 'math.log', (['((1 - prior_prob) / prior_prob)'], {}), '((1 - prior_prob) / prior_prob)\n', (4481, 4512), False, 'import math\n'), ((5258, 5307), 'megengine.functional.concat', 'F.concat', (['[reg_output, obj_output, cls_output]', '(1)'], {}), '([reg_output, obj_output, cls_output], 1)\n', (5266, 5307), True, 'import megengine.functional as F\n'), ((6326, 6346), 'megengine.functional.concat', 'F.concat', (['outputs', '(1)'], {}), '(outputs, 1)\n', (6334, 6346), True, 'import megengine.functional as F\n'), ((7046, 7061), 'megengine.functional.arange', 'F.arange', (['hsize'], {}), '(hsize)\n', (7054, 7061), True, 'import megengine.functional as F\n'), ((7063, 7078), 'megengine.functional.arange', 'F.arange', (['wsize'], {}), '(wsize)\n', (7071, 7078), True, 'import megengine.functional as F\n'), ((7296, 7332), 'megengine.functional.transpose', 'F.transpose', (['output', '(0, 1, 3, 4, 2)'], {}), '(output, (0, 1, 3, 4, 2))\n', (7307, 7332), True, 'import megengine.functional as F\n'), ((7774, 7789), 'megengine.functional.arange', 'F.arange', (['hsize'], {}), '(hsize)\n', (7782, 7789), True, 'import megengine.functional as F\n'), ((7791, 7806), 'megengine.functional.arange', 'F.arange', (['wsize'], {}), '(wsize)\n', (7799, 7806), True, 'import megengine.functional as F\n'), ((7959, 7986), 'megengine.functional.full', 'F.full', (['(*shape, 1)', 'stride'], {}), '((*shape, 1), stride)\n', (7965, 7986), True, 'import megengine.functional as F\n'), ((8293, 8307), 'megengine.functional.equal', 'F.equal', (['gt', '(1)'], {}), '(gt, 1)\n', (8300, 8307), True, 'import megengine.functional as F\n'), ((8345, 8359), 'megengine.functional.equal', 'F.equal', (['gt', '(0)'], {}), '(gt, 0)\n', (8352, 8359), True, 'import megengine.functional as F\n'), ((9828, 9858), 'megengine.functional.zeros', 'F.zeros', (['(0, self.num_classes)'], {}), '((0, self.num_classes))\n', (9835, 9858), True, 'import megengine.functional as F\n'), ((9888, 9903), 'megengine.functional.zeros', 'F.zeros', (['(0, 4)'], {}), '((0, 4))\n', (9895, 9903), True, 'import megengine.functional as F\n'), ((9932, 9947), 'megengine.functional.zeros', 'F.zeros', (['(0, 4)'], {}), '((0, 4))\n', (9939, 9947), True, 'import megengine.functional as F\n'), ((9977, 10008), 'megengine.functional.zeros', 'F.zeros', (['(total_num_anchors, 1)'], {}), '((total_num_anchors, 1))\n', (9984, 10008), True, 'import megengine.functional as F\n'), ((10926, 10957), 'megengine.functional.expand_dims', 'F.expand_dims', (['fg_mask'], {'axis': '(-1)'}), '(fg_mask, axis=-1)\n', (10939, 10957), True, 'import megengine.functional as F\n'), ((17369, 17417), 'megengine.functional.expand_dims', 'F.expand_dims', (['gt_bboxes_per_image[:, 0]'], {'axis': '(1)'}), '(gt_bboxes_per_image[:, 0], axis=1)\n', (17382, 17417), True, 'import megengine.functional as F\n'), ((17494, 17543), 'megengine.functional.expand_dims', 'F.expand_dims', (['expanded_strides_per_image'], {'axis': '(0)'}), '(expanded_strides_per_image, axis=0)\n', (17507, 17543), True, 'import megengine.functional as F\n'), 
((17598, 17646), 'megengine.functional.expand_dims', 'F.expand_dims', (['gt_bboxes_per_image[:, 0]'], {'axis': '(1)'}), '(gt_bboxes_per_image[:, 0], axis=1)\n', (17611, 17646), True, 'import megengine.functional as F\n'), ((17723, 17772), 'megengine.functional.expand_dims', 'F.expand_dims', (['expanded_strides_per_image'], {'axis': '(0)'}), '(expanded_strides_per_image, axis=0)\n', (17736, 17772), True, 'import megengine.functional as F\n'), ((17827, 17875), 'megengine.functional.expand_dims', 'F.expand_dims', (['gt_bboxes_per_image[:, 1]'], {'axis': '(1)'}), '(gt_bboxes_per_image[:, 1], axis=1)\n', (17840, 17875), True, 'import megengine.functional as F\n'), ((17952, 18001), 'megengine.functional.expand_dims', 'F.expand_dims', (['expanded_strides_per_image'], {'axis': '(0)'}), '(expanded_strides_per_image, axis=0)\n', (17965, 18001), True, 'import megengine.functional as F\n'), ((18056, 18104), 'megengine.functional.expand_dims', 'F.expand_dims', (['gt_bboxes_per_image[:, 1]'], {'axis': '(1)'}), '(gt_bboxes_per_image[:, 1], axis=1)\n', (18069, 18104), True, 'import megengine.functional as F\n'), ((18181, 18230), 'megengine.functional.expand_dims', 'F.expand_dims', (['expanded_strides_per_image'], {'axis': '(0)'}), '(expanded_strides_per_image, axis=0)\n', (18194, 18230), True, 'import megengine.functional as F\n'), ((5528, 5573), 'megengine.functional.full', 'F.full', (['(1, grid.shape[1])', 'stride_this_level'], {}), '((1, grid.shape[1]), stride_this_level)\n', (5534, 5573), True, 'import megengine.functional as F\n'), ((6517, 6543), 'megengine.functional.flatten', 'F.flatten', (['x'], {'start_axis': '(2)'}), '(x, start_axis=2)\n', (6526, 6543), True, 'import megengine.functional as F\n'), ((7099, 7119), 'megengine.functional.stack', 'F.stack', (['(xv, yv)', '(2)'], {}), '((xv, yv), 2)\n', (7106, 7119), True, 'import megengine.functional as F\n'), ((7827, 7847), 'megengine.functional.stack', 'F.stack', (['(xv, yv)', '(2)'], {}), '((xv, yv), 2)\n', (7834, 7847), True, 'import megengine.functional as F\n'), ((8397, 8416), 'megengine.functional.log', 'F.log', (['(pred + 1e-05)'], {}), '(pred + 1e-05)\n', (8402, 8416), True, 'import megengine.functional as F\n'), ((8416, 8434), 'megengine.functional.pow', 'F.pow', (['(1 - pred)', '(2)'], {}), '(1 - pred, 2)\n', (8421, 8434), True, 'import megengine.functional as F\n'), ((8470, 8493), 'megengine.functional.log', 'F.log', (['(1 - pred + 1e-05)'], {}), '(1 - pred + 1e-05)\n', (8475, 8493), True, 'import megengine.functional as F\n'), ((8491, 8505), 'megengine.functional.pow', 'F.pow', (['pred', '(2)'], {}), '(pred, 2)\n', (8496, 8505), True, 'import megengine.functional as F\n'), ((10849, 10896), 'megengine.functional.expand_dims', 'F.expand_dims', (['pred_ious_this_matching'], {'axis': '(-1)'}), '(pred_ious_this_matching, axis=-1)\n', (10862, 10896), True, 'import megengine.functional as F\n'), ((14528, 14561), 'megengine.functional.expand_dims', 'F.expand_dims', (['obj_preds_'], {'axis': '(0)'}), '(obj_preds_, axis=0)\n', (14541, 14561), True, 'import megengine.functional as F\n'), ((14652, 14670), 'megengine.functional.sqrt', 'F.sqrt', (['cls_preds_'], {}), '(cls_preds_)\n', (14658, 14670), True, 'import megengine.functional as F\n'), ((6003, 6025), 'megengine.Tensor', 'mge.Tensor', (['reg_output'], {}), '(reg_output)\n', (6013, 6025), True, 'import megengine as mge\n'), ((6093, 6114), 'megengine.functional.sigmoid', 'F.sigmoid', (['obj_output'], {}), '(obj_output)\n', (6102, 6114), True, 'import megengine.functional as F\n'), ((6116, 6137), 
'megengine.functional.sigmoid', 'F.sigmoid', (['cls_output'], {}), '(cls_output)\n', (6125, 6137), True, 'import megengine.functional as F\n'), ((10035, 10061), 'megengine.functional.zeros', 'F.zeros', (['total_num_anchors'], {}), '(total_num_anchors)\n', (10042, 10061), True, 'import megengine.functional as F\n'), ((11132, 11156), 'megengine.functional.zeros', 'F.zeros', (['(num_fg_img, 4)'], {}), '((num_fg_img, 4))\n', (11139, 11156), True, 'import megengine.functional as F\n'), ((5873, 5913), 'megengine.functional.transpose', 'F.transpose', (['reg_output', '(0, 1, 3, 4, 2)'], {}), '(reg_output, (0, 1, 3, 4, 2))\n', (5884, 5913), True, 'import megengine.functional as F\n'), ((12047, 12067), 'megengine.functional.sigmoid', 'F.sigmoid', (['obj_preds'], {}), '(obj_preds)\n', (12056, 12067), True, 'import megengine.functional as F\n')]
|