code | apis | extract_api
---|---|---|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine import Parameter, tensor
from megengine.core._imperative_rt.core2 import sync
from megengine.device import get_default_device, set_default_device
from megengine.functional.distributed import (
all_gather,
all_reduce_max,
all_reduce_min,
all_reduce_sum,
all_to_all,
broadcast,
gather,
reduce_scatter_sum,
reduce_sum,
remote_recv,
remote_send,
scatter,
)
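# Each run_* helper below launches two GPU workers via dist.launcher; every worker feeds its
# per-rank slice of `data` into the collective under test and compares the result with a
# NumPy reference computed on the host.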
def run_reduce_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = reduce_sum(inp)
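# Only the root (rank 0) receives the reduced tensor; the other rank gets None.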
if rank == 0:
assert np.allclose(output.numpy(), expect[rank])
else:
assert output is None
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z, None)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multishape(shape):
run_reduce_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multidtype(dtype):
run_reduce_sum((8, 10), dtype)
def run_broadcast(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = broadcast(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = x + 1
data = (x, y)
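# broadcast replicates rank 0's tensor on every rank, so both workers expect x.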
expect = (x, x)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_broadcast_multishape(shape):
run_broadcast(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_broadcast_multidtype(dtype):
run_broadcast((8, 10), dtype)
def run_all_gather(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_gather(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
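# all_gather concatenates the per-rank tensors along the first axis and every rank receives the full result.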
z = np.concatenate((x, y))
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather_multishape(shape):
run_all_gather(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather_multidtype(dtype):
run_all_gather((8, 10), dtype)
def run_reduce_scatter_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = reduce_scatter_sum(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
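# reduce_scatter_sum adds the tensors elementwise and splits the sum along the first axis, one half per rank.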
expect = (z[: shape[0] // 2], z[shape[0] // 2 :])
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (88, 44)], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum_multishape(shape):
run_reduce_scatter_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum_multidtype(dtype):
run_reduce_scatter_sum((8, 10), dtype)
def run_all_reduce_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_reduce_sum(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_sum_multishape(shape):
run_all_reduce_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_sum_multidtype(dtype):
run_all_reduce_sum((8, 10), dtype)
def run_all_reduce_max(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_reduce_max(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.maximum(x, y)
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_max_multishape(shape):
run_all_reduce_max(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_max_multidtype(dtype):
run_all_reduce_max((8, 10), dtype)
def run_all_reduce_min(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_reduce_min(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.minimum(x, y)
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_min_multishape(shape):
run_all_reduce_min(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_min_multidtype(dtype):
run_all_reduce_min((8, 10), dtype)
def run_gather(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = gather(inp)
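# gather concatenates the inputs on rank 0 only; the other rank receives None.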
if rank == 0:
assert np.allclose(output.numpy(), expect[rank])
else:
assert output is None
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.concatenate((x, y))
data = (x, y)
expect = (z, None)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_gather_multishape(shape):
run_gather(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_gather_multidtype(dtype):
run_gather((8, 10), dtype)
def run_scatter(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = scatter(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = x + 1
data = (x, y)
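# scatter splits rank 0's tensor along the first axis; rank 1's own input (y) is ignored.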
expect = (x[: shape[0] // 2], x[shape[0] // 2 :])
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (100, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_scatter_multishape(shape):
run_scatter(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_scatter_multidtype(dtype):
run_scatter((8, 10), dtype)
def run_all_to_all(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_to_all(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
a = np.concatenate((x[: shape[0] // 2], y[: shape[0] // 2]))
b = np.concatenate((x[shape[0] // 2 :], y[shape[0] // 2 :]))
data = (x, y)
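# all_to_all exchanges slices: rank 0 ends up with the first halves of both inputs, rank 1 with the second halves.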
expect = (a, b)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (100, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_to_all_multishape(shape):
run_all_to_all(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_to_all_multidtype(dtype):
run_all_to_all((8, 10), dtype)
def run_io_remote(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(val, shape):
rank = dist.get_rank()
if rank == 0: # remote send
x = tensor(val, device="xpu0")
remote_send(x, 1)
sync()
else: # remote recv
y = remote_recv(0)
assert y.device == get_default_device()
np.testing.assert_almost_equal(val, y.numpy())
val = np.random.random_sample(shape).astype(dtype)
worker(val, shape)
@pytest.mark.require_ngpu(2)
@pytest.mark.isolated_distributed
@pytest.mark.parametrize("shape", [(), (1,), (4, 5)], ids=str)
def test_io_remote_multishape(shape):
run_io_remote(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.isolated_distributed
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
def test_io_remote_multidtype(dtype):
run_io_remote((8, 10), dtype)
@pytest.mark.require_ngpu(2)
def test_cuda_init_before_fork():
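# CUDA is initialized in the parent process here, so forking workers afterwards is expected to fail with an AssertionError.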
a = mge.tensor(1, device="gpu0")
@dist.launcher(n_gpus=2)
def worker():
a += 1
b = mge.tensor(2)
with pytest.raises(AssertionError):
worker()
|
[
"megengine.functional.distributed.all_reduce_sum",
"megengine.functional.distributed.all_reduce_max",
"megengine.functional.distributed.broadcast",
"megengine.distributed.get_rank",
"megengine.device.get_default_device",
"megengine.functional.distributed.gather",
"megengine.functional.distributed.all_to_all",
"megengine.functional.distributed.remote_send",
"megengine.tensor",
"megengine.core._imperative_rt.core2.sync",
"megengine.functional.distributed.all_reduce_min",
"megengine.functional.distributed.remote_recv",
"megengine.functional.distributed.all_gather",
"megengine.functional.distributed.reduce_scatter_sum",
"megengine.functional.distributed.scatter",
"megengine.distributed.launcher",
"megengine.functional.distributed.reduce_sum"
] |
[((1418, 1445), 'pytest.mark.require_ngpu', 'pytest.mark.require_ngpu', (['(2)'], {}), '(2)\n', (1442, 1445), False, 'import pytest\n'), ((1447, 1532), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""shape"""', '[(), (1,), (2, 3), (8, 10), (99, 77)]'], {'ids': 'str'}), "('shape', [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str\n )\n", (1470, 1532), False, 'import pytest\n'), ((1641, 1668), 'pytest.mark.require_ngpu', 'pytest.mark.require_ngpu', (['(2)'], {}), '(2)\n', (1665, 1668), False, 'import pytest\n'), ((1670, 1755), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', "['float32', 'int32', 'int8', 'uint8']"], {'ids': 'str'}), "('dtype', ['float32', 'int32', 'int8', 'uint8'], ids=str\n )\n", (1693, 1755), False, 'import pytest\n'), ((2240, 2267), 'pytest.mark.require_ngpu', 'pytest.mark.require_ngpu', (['(2)'], {}), '(2)\n', (2264, 2267), False, 'import pytest\n'), ((2269, 2354), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""shape"""', '[(), (1,), (2, 3), (8, 10), (99, 77)]'], {'ids': 'str'}), "('shape', [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str\n )\n", (2292, 2354), False, 'import pytest\n'), ((2461, 2488), 'pytest.mark.require_ngpu', 'pytest.mark.require_ngpu', (['(2)'], {}), '(2)\n', (2485, 2488), False, 'import pytest\n'), ((2490, 2575), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', "['float32', 'int32', 'int8', 'uint8']"], {'ids': 'str'}), "('dtype', ['float32', 'int32', 'int8', 'uint8'], ids=str\n )\n", (2513, 2575), False, 'import pytest\n'), ((3130, 3157), 'pytest.mark.require_ngpu', 'pytest.mark.require_ngpu', (['(2)'], {}), '(2)\n', (3154, 3157), False, 'import pytest\n'), ((3159, 3235), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""shape"""', '[(1,), (2, 3), (8, 10), (99, 77)]'], {'ids': 'str'}), "('shape', [(1,), (2, 3), (8, 10), (99, 77)], ids=str)\n", (3182, 3235), False, 'import pytest\n'), ((3349, 3376), 'pytest.mark.require_ngpu', 'pytest.mark.require_ngpu', (['(2)'], {}), '(2)\n', (3373, 3376), False, 'import pytest\n'), ((3378, 3463), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', "['float32', 'int32', 'int8', 'uint8']"], {'ids': 'str'}), "('dtype', ['float32', 'int32', 'int8', 'uint8'], ids=str\n )\n", (3401, 3463), False, 'import pytest\n'), ((4053, 4080), 'pytest.mark.require_ngpu', 'pytest.mark.require_ngpu', (['(2)'], {}), '(2)\n', (4077, 4080), False, 'import pytest\n'), ((4082, 4152), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""shape"""', '[(2, 3), (8, 10), (88, 44)]'], {'ids': 'str'}), "('shape', [(2, 3), (8, 10), (88, 44)], ids=str)\n", (4105, 4152), False, 'import pytest\n'), ((4282, 4309), 'pytest.mark.require_ngpu', 'pytest.mark.require_ngpu', (['(2)'], {}), '(2)\n', (4306, 4309), False, 'import pytest\n'), ((4311, 4396), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', "['float32', 'int32', 'int8', 'uint8']"], {'ids': 'str'}), "('dtype', ['float32', 'int32', 'int8', 'uint8'], ids=str\n )\n", (4334, 4396), False, 'import pytest\n'), ((4960, 4987), 'pytest.mark.require_ngpu', 'pytest.mark.require_ngpu', (['(2)'], {}), '(2)\n', (4984, 4987), False, 'import pytest\n'), ((4989, 5074), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""shape"""', '[(), (1,), (2, 3), (8, 10), (99, 77)]'], {'ids': 'str'}), "('shape', [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str\n )\n", (5012, 5074), False, 'import pytest\n'), ((5191, 5218), 'pytest.mark.require_ngpu', 'pytest.mark.require_ngpu', (['(2)'], {}), '(2)\n', 
(5215, 5218), False, 'import pytest\n'), ((5220, 5305), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', "['float32', 'int32', 'int8', 'uint8']"], {'ids': 'str'}), "('dtype', ['float32', 'int32', 'int8', 'uint8'], ids=str\n )\n", (5243, 5305), False, 'import pytest\n'), ((5872, 5899), 'pytest.mark.require_ngpu', 'pytest.mark.require_ngpu', (['(2)'], {}), '(2)\n', (5896, 5899), False, 'import pytest\n'), ((5901, 5986), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""shape"""', '[(), (1,), (2, 3), (8, 10), (99, 77)]'], {'ids': 'str'}), "('shape', [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str\n )\n", (5924, 5986), False, 'import pytest\n'), ((6103, 6130), 'pytest.mark.require_ngpu', 'pytest.mark.require_ngpu', (['(2)'], {}), '(2)\n', (6127, 6130), False, 'import pytest\n'), ((6132, 6217), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', "['float32', 'int32', 'int8', 'uint8']"], {'ids': 'str'}), "('dtype', ['float32', 'int32', 'int8', 'uint8'], ids=str\n )\n", (6155, 6217), False, 'import pytest\n'), ((6784, 6811), 'pytest.mark.require_ngpu', 'pytest.mark.require_ngpu', (['(2)'], {}), '(2)\n', (6808, 6811), False, 'import pytest\n'), ((6813, 6898), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""shape"""', '[(), (1,), (2, 3), (8, 10), (99, 77)]'], {'ids': 'str'}), "('shape', [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str\n )\n", (6836, 6898), False, 'import pytest\n'), ((7015, 7042), 'pytest.mark.require_ngpu', 'pytest.mark.require_ngpu', (['(2)'], {}), '(2)\n', (7039, 7042), False, 'import pytest\n'), ((7044, 7129), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', "['float32', 'int32', 'int8', 'uint8']"], {'ids': 'str'}), "('dtype', ['float32', 'int32', 'int8', 'uint8'], ids=str\n )\n", (7067, 7129), False, 'import pytest\n'), ((7763, 7790), 'pytest.mark.require_ngpu', 'pytest.mark.require_ngpu', (['(2)'], {}), '(2)\n', (7787, 7790), False, 'import pytest\n'), ((7792, 7862), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""shape"""', '[(2, 3), (8, 10), (99, 77)]'], {'ids': 'str'}), "('shape', [(2, 3), (8, 10), (99, 77)], ids=str)\n", (7815, 7862), False, 'import pytest\n'), ((7968, 7995), 'pytest.mark.require_ngpu', 'pytest.mark.require_ngpu', (['(2)'], {}), '(2)\n', (7992, 7995), False, 'import pytest\n'), ((7997, 8082), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', "['float32', 'int32', 'int8', 'uint8']"], {'ids': 'str'}), "('dtype', ['float32', 'int32', 'int8', 'uint8'], ids=str\n )\n", (8020, 8082), False, 'import pytest\n'), ((8589, 8616), 'pytest.mark.require_ngpu', 'pytest.mark.require_ngpu', (['(2)'], {}), '(2)\n', (8613, 8616), False, 'import pytest\n'), ((8618, 8689), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""shape"""', '[(2, 3), (8, 10), (100, 77)]'], {'ids': 'str'}), "('shape', [(2, 3), (8, 10), (100, 77)], ids=str)\n", (8641, 8689), False, 'import pytest\n'), ((8797, 8824), 'pytest.mark.require_ngpu', 'pytest.mark.require_ngpu', (['(2)'], {}), '(2)\n', (8821, 8824), False, 'import pytest\n'), ((8826, 8911), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', "['float32', 'int32', 'int8', 'uint8']"], {'ids': 'str'}), "('dtype', ['float32', 'int32', 'int8', 'uint8'], ids=str\n )\n", (8849, 8911), False, 'import pytest\n'), ((9561, 9588), 'pytest.mark.require_ngpu', 'pytest.mark.require_ngpu', (['(2)'], {}), '(2)\n', (9585, 9588), False, 'import pytest\n'), ((9590, 9661), 'pytest.mark.parametrize', 
'pytest.mark.parametrize', (['"""shape"""', '[(2, 3), (8, 10), (100, 77)]'], {'ids': 'str'}), "('shape', [(2, 3), (8, 10), (100, 77)], ids=str)\n", (9613, 9661), False, 'import pytest\n'), ((9775, 9802), 'pytest.mark.require_ngpu', 'pytest.mark.require_ngpu', (['(2)'], {}), '(2)\n', (9799, 9802), False, 'import pytest\n'), ((9804, 9889), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', "['float32', 'int32', 'int8', 'uint8']"], {'ids': 'str'}), "('dtype', ['float32', 'int32', 'int8', 'uint8'], ids=str\n )\n", (9827, 9889), False, 'import pytest\n'), ((10498, 10525), 'pytest.mark.require_ngpu', 'pytest.mark.require_ngpu', (['(2)'], {}), '(2)\n', (10522, 10525), False, 'import pytest\n'), ((10561, 10622), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""shape"""', '[(), (1,), (4, 5)]'], {'ids': 'str'}), "('shape', [(), (1,), (4, 5)], ids=str)\n", (10584, 10622), False, 'import pytest\n'), ((10700, 10727), 'pytest.mark.require_ngpu', 'pytest.mark.require_ngpu', (['(2)'], {}), '(2)\n', (10724, 10727), False, 'import pytest\n'), ((10763, 10848), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', "['float32', 'int32', 'int8', 'uint8']"], {'ids': 'str'}), "('dtype', ['float32', 'int32', 'int8', 'uint8'], ids=str\n )\n", (10786, 10848), False, 'import pytest\n'), ((10919, 10946), 'pytest.mark.require_ngpu', 'pytest.mark.require_ngpu', (['(2)'], {}), '(2)\n', (10943, 10946), False, 'import pytest\n'), ((946, 969), 'megengine.distributed.launcher', 'dist.launcher', ([], {'n_gpus': '(2)'}), '(n_gpus=2)\n', (959, 969), True, 'import megengine.distributed as dist\n'), ((1899, 1922), 'megengine.distributed.launcher', 'dist.launcher', ([], {'n_gpus': '(2)'}), '(n_gpus=2)\n', (1912, 1922), True, 'import megengine.distributed as dist\n'), ((2718, 2741), 'megengine.distributed.launcher', 'dist.launcher', ([], {'n_gpus': '(2)'}), '(n_gpus=2)\n', (2731, 2741), True, 'import megengine.distributed as dist\n'), ((3041, 3063), 'numpy.concatenate', 'np.concatenate', (['(x, y)'], {}), '((x, y))\n', (3055, 3063), True, 'import numpy as np\n'), ((3616, 3639), 'megengine.distributed.launcher', 'dist.launcher', ([], {'n_gpus': '(2)'}), '(n_gpus=2)\n', (3629, 3639), True, 'import megengine.distributed as dist\n'), ((4561, 4584), 'megengine.distributed.launcher', 'dist.launcher', ([], {'n_gpus': '(2)'}), '(n_gpus=2)\n', (4574, 4584), True, 'import megengine.distributed as dist\n'), ((5462, 5485), 'megengine.distributed.launcher', 'dist.launcher', ([], {'n_gpus': '(2)'}), '(n_gpus=2)\n', (5475, 5485), True, 'import megengine.distributed as dist\n'), ((5789, 5805), 'numpy.maximum', 'np.maximum', (['x', 'y'], {}), '(x, y)\n', (5799, 5805), True, 'import numpy as np\n'), ((6374, 6397), 'megengine.distributed.launcher', 'dist.launcher', ([], {'n_gpus': '(2)'}), '(n_gpus=2)\n', (6387, 6397), True, 'import megengine.distributed as dist\n'), ((6701, 6717), 'numpy.minimum', 'np.minimum', (['x', 'y'], {}), '(x, y)\n', (6711, 6717), True, 'import numpy as np\n'), ((7278, 7301), 'megengine.distributed.launcher', 'dist.launcher', ([], {'n_gpus': '(2)'}), '(n_gpus=2)\n', (7291, 7301), True, 'import megengine.distributed as dist\n'), ((7671, 7693), 'numpy.concatenate', 'np.concatenate', (['(x, y)'], {}), '((x, y))\n', (7685, 7693), True, 'import numpy as np\n'), ((8216, 8239), 'megengine.distributed.launcher', 'dist.launcher', ([], {'n_gpus': '(2)'}), '(n_gpus=2)\n', (8229, 8239), True, 'import megengine.distributed as dist\n'), ((9050, 9073), 'megengine.distributed.launcher', 
'dist.launcher', ([], {'n_gpus': '(2)'}), '(n_gpus=2)\n', (9063, 9073), True, 'import megengine.distributed as dist\n'), ((9373, 9427), 'numpy.concatenate', 'np.concatenate', (['(x[:shape[0] // 2], y[:shape[0] // 2])'], {}), '((x[:shape[0] // 2], y[:shape[0] // 2]))\n', (9387, 9427), True, 'import numpy as np\n'), ((9438, 9492), 'numpy.concatenate', 'np.concatenate', (['(x[shape[0] // 2:], y[shape[0] // 2:])'], {}), '((x[shape[0] // 2:], y[shape[0] // 2:]))\n', (9452, 9492), True, 'import numpy as np\n'), ((10033, 10056), 'megengine.distributed.launcher', 'dist.launcher', ([], {'n_gpus': '(2)'}), '(n_gpus=2)\n', (10046, 10056), True, 'import megengine.distributed as dist\n'), ((10989, 11017), 'megengine.tensor', 'mge.tensor', (['(1)'], {'device': '"""gpu0"""'}), "(1, device='gpu0')\n", (10999, 11017), True, 'import megengine as mge\n'), ((11024, 11047), 'megengine.distributed.launcher', 'dist.launcher', ([], {'n_gpus': '(2)'}), '(n_gpus=2)\n', (11037, 11047), True, 'import megengine.distributed as dist\n'), ((1015, 1030), 'megengine.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (1028, 1030), True, 'import megengine.distributed as dist\n'), ((1045, 1063), 'megengine.tensor', 'tensor', (['data[rank]'], {}), '(data[rank])\n', (1051, 1063), False, 'from megengine import Parameter, tensor\n'), ((1081, 1096), 'megengine.functional.distributed.reduce_sum', 'reduce_sum', (['inp'], {}), '(inp)\n', (1091, 1096), False, 'from megengine.functional.distributed import all_gather, all_reduce_max, all_reduce_min, all_reduce_sum, all_to_all, broadcast, gather, reduce_scatter_sum, reduce_sum, remote_recv, remote_send, scatter\n'), ((1968, 1983), 'megengine.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (1981, 1983), True, 'import megengine.distributed as dist\n'), ((1998, 2016), 'megengine.tensor', 'tensor', (['data[rank]'], {}), '(data[rank])\n', (2004, 2016), False, 'from megengine import Parameter, tensor\n'), ((2034, 2048), 'megengine.functional.distributed.broadcast', 'broadcast', (['inp'], {}), '(inp)\n', (2043, 2048), False, 'from megengine.functional.distributed import all_gather, all_reduce_max, all_reduce_min, all_reduce_sum, all_to_all, broadcast, gather, reduce_scatter_sum, reduce_sum, remote_recv, remote_send, scatter\n'), ((2787, 2802), 'megengine.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (2800, 2802), True, 'import megengine.distributed as dist\n'), ((2817, 2835), 'megengine.tensor', 'tensor', (['data[rank]'], {}), '(data[rank])\n', (2823, 2835), False, 'from megengine import Parameter, tensor\n'), ((2853, 2868), 'megengine.functional.distributed.all_gather', 'all_gather', (['inp'], {}), '(inp)\n', (2863, 2868), False, 'from megengine.functional.distributed import all_gather, all_reduce_max, all_reduce_min, all_reduce_sum, all_to_all, broadcast, gather, reduce_scatter_sum, reduce_sum, remote_recv, remote_send, scatter\n'), ((3685, 3700), 'megengine.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (3698, 3700), True, 'import megengine.distributed as dist\n'), ((3715, 3733), 'megengine.tensor', 'tensor', (['data[rank]'], {}), '(data[rank])\n', (3721, 3733), False, 'from megengine import Parameter, tensor\n'), ((3751, 3774), 'megengine.functional.distributed.reduce_scatter_sum', 'reduce_scatter_sum', (['inp'], {}), '(inp)\n', (3769, 3774), False, 'from megengine.functional.distributed import all_gather, all_reduce_max, all_reduce_min, all_reduce_sum, all_to_all, broadcast, gather, reduce_scatter_sum, reduce_sum, remote_recv, remote_send, 
scatter\n'), ((4630, 4645), 'megengine.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (4643, 4645), True, 'import megengine.distributed as dist\n'), ((4660, 4678), 'megengine.tensor', 'tensor', (['data[rank]'], {}), '(data[rank])\n', (4666, 4678), False, 'from megengine import Parameter, tensor\n'), ((4696, 4715), 'megengine.functional.distributed.all_reduce_sum', 'all_reduce_sum', (['inp'], {}), '(inp)\n', (4710, 4715), False, 'from megengine.functional.distributed import all_gather, all_reduce_max, all_reduce_min, all_reduce_sum, all_to_all, broadcast, gather, reduce_scatter_sum, reduce_sum, remote_recv, remote_send, scatter\n'), ((5531, 5546), 'megengine.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (5544, 5546), True, 'import megengine.distributed as dist\n'), ((5561, 5579), 'megengine.tensor', 'tensor', (['data[rank]'], {}), '(data[rank])\n', (5567, 5579), False, 'from megengine import Parameter, tensor\n'), ((5597, 5616), 'megengine.functional.distributed.all_reduce_max', 'all_reduce_max', (['inp'], {}), '(inp)\n', (5611, 5616), False, 'from megengine.functional.distributed import all_gather, all_reduce_max, all_reduce_min, all_reduce_sum, all_to_all, broadcast, gather, reduce_scatter_sum, reduce_sum, remote_recv, remote_send, scatter\n'), ((6443, 6458), 'megengine.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (6456, 6458), True, 'import megengine.distributed as dist\n'), ((6473, 6491), 'megengine.tensor', 'tensor', (['data[rank]'], {}), '(data[rank])\n', (6479, 6491), False, 'from megengine import Parameter, tensor\n'), ((6509, 6528), 'megengine.functional.distributed.all_reduce_min', 'all_reduce_min', (['inp'], {}), '(inp)\n', (6523, 6528), False, 'from megengine.functional.distributed import all_gather, all_reduce_max, all_reduce_min, all_reduce_sum, all_to_all, broadcast, gather, reduce_scatter_sum, reduce_sum, remote_recv, remote_send, scatter\n'), ((7347, 7362), 'megengine.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (7360, 7362), True, 'import megengine.distributed as dist\n'), ((7377, 7395), 'megengine.tensor', 'tensor', (['data[rank]'], {}), '(data[rank])\n', (7383, 7395), False, 'from megengine import Parameter, tensor\n'), ((7413, 7424), 'megengine.functional.distributed.gather', 'gather', (['inp'], {}), '(inp)\n', (7419, 7424), False, 'from megengine.functional.distributed import all_gather, all_reduce_max, all_reduce_min, all_reduce_sum, all_to_all, broadcast, gather, reduce_scatter_sum, reduce_sum, remote_recv, remote_send, scatter\n'), ((8285, 8300), 'megengine.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (8298, 8300), True, 'import megengine.distributed as dist\n'), ((8315, 8333), 'megengine.tensor', 'tensor', (['data[rank]'], {}), '(data[rank])\n', (8321, 8333), False, 'from megengine import Parameter, tensor\n'), ((8351, 8363), 'megengine.functional.distributed.scatter', 'scatter', (['inp'], {}), '(inp)\n', (8358, 8363), False, 'from megengine.functional.distributed import all_gather, all_reduce_max, all_reduce_min, all_reduce_sum, all_to_all, broadcast, gather, reduce_scatter_sum, reduce_sum, remote_recv, remote_send, scatter\n'), ((9119, 9134), 'megengine.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (9132, 9134), True, 'import megengine.distributed as dist\n'), ((9149, 9167), 'megengine.tensor', 'tensor', (['data[rank]'], {}), '(data[rank])\n', (9155, 9167), False, 'from megengine import Parameter, tensor\n'), ((9185, 9200), 'megengine.functional.distributed.all_to_all', 'all_to_all', 
(['inp'], {}), '(inp)\n', (9195, 9200), False, 'from megengine.functional.distributed import all_gather, all_reduce_max, all_reduce_min, all_reduce_sum, all_to_all, broadcast, gather, reduce_scatter_sum, reduce_sum, remote_recv, remote_send, scatter\n'), ((10100, 10115), 'megengine.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (10113, 10115), True, 'import megengine.distributed as dist\n'), ((11093, 11106), 'megengine.tensor', 'mge.tensor', (['(2)'], {}), '(2)\n', (11103, 11106), True, 'import megengine as mge\n'), ((11117, 11146), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (11130, 11146), False, 'import pytest\n'), ((1237, 1267), 'numpy.random.random_sample', 'np.random.random_sample', (['shape'], {}), '(shape)\n', (1260, 1267), True, 'import numpy as np\n'), ((1290, 1320), 'numpy.random.random_sample', 'np.random.random_sample', (['shape'], {}), '(shape)\n', (1313, 1320), True, 'import numpy as np\n'), ((2115, 2145), 'numpy.random.random_sample', 'np.random.random_sample', (['shape'], {}), '(shape)\n', (2138, 2145), True, 'import numpy as np\n'), ((2935, 2965), 'numpy.random.random_sample', 'np.random.random_sample', (['shape'], {}), '(shape)\n', (2958, 2965), True, 'import numpy as np\n'), ((2988, 3018), 'numpy.random.random_sample', 'np.random.random_sample', (['shape'], {}), '(shape)\n', (3011, 3018), True, 'import numpy as np\n'), ((3841, 3871), 'numpy.random.random_sample', 'np.random.random_sample', (['shape'], {}), '(shape)\n', (3864, 3871), True, 'import numpy as np\n'), ((3894, 3924), 'numpy.random.random_sample', 'np.random.random_sample', (['shape'], {}), '(shape)\n', (3917, 3924), True, 'import numpy as np\n'), ((4782, 4812), 'numpy.random.random_sample', 'np.random.random_sample', (['shape'], {}), '(shape)\n', (4805, 4812), True, 'import numpy as np\n'), ((4835, 4865), 'numpy.random.random_sample', 'np.random.random_sample', (['shape'], {}), '(shape)\n', (4858, 4865), True, 'import numpy as np\n'), ((5683, 5713), 'numpy.random.random_sample', 'np.random.random_sample', (['shape'], {}), '(shape)\n', (5706, 5713), True, 'import numpy as np\n'), ((5736, 5766), 'numpy.random.random_sample', 'np.random.random_sample', (['shape'], {}), '(shape)\n', (5759, 5766), True, 'import numpy as np\n'), ((6595, 6625), 'numpy.random.random_sample', 'np.random.random_sample', (['shape'], {}), '(shape)\n', (6618, 6625), True, 'import numpy as np\n'), ((6648, 6678), 'numpy.random.random_sample', 'np.random.random_sample', (['shape'], {}), '(shape)\n', (6671, 6678), True, 'import numpy as np\n'), ((7565, 7595), 'numpy.random.random_sample', 'np.random.random_sample', (['shape'], {}), '(shape)\n', (7588, 7595), True, 'import numpy as np\n'), ((7618, 7648), 'numpy.random.random_sample', 'np.random.random_sample', (['shape'], {}), '(shape)\n', (7641, 7648), True, 'import numpy as np\n'), ((8430, 8460), 'numpy.random.random_sample', 'np.random.random_sample', (['shape'], {}), '(shape)\n', (8453, 8460), True, 'import numpy as np\n'), ((9267, 9297), 'numpy.random.random_sample', 'np.random.random_sample', (['shape'], {}), '(shape)\n', (9290, 9297), True, 'import numpy as np\n'), ((9320, 9350), 'numpy.random.random_sample', 'np.random.random_sample', (['shape'], {}), '(shape)\n', (9343, 9350), True, 'import numpy as np\n'), ((10169, 10195), 'megengine.tensor', 'tensor', (['val'], {'device': '"""xpu0"""'}), "(val, device='xpu0')\n", (10175, 10195), False, 'from megengine import Parameter, tensor\n'), ((10208, 10225), 
'megengine.functional.distributed.remote_send', 'remote_send', (['x', '(1)'], {}), '(x, 1)\n', (10219, 10225), False, 'from megengine.functional.distributed import all_gather, all_reduce_max, all_reduce_min, all_reduce_sum, all_to_all, broadcast, gather, reduce_scatter_sum, reduce_sum, remote_recv, remote_send, scatter\n'), ((10238, 10244), 'megengine.core._imperative_rt.core2.sync', 'sync', ([], {}), '()\n', (10242, 10244), False, 'from megengine.core._imperative_rt.core2 import sync\n'), ((10290, 10304), 'megengine.functional.distributed.remote_recv', 'remote_recv', (['(0)'], {}), '(0)\n', (10301, 10304), False, 'from megengine.functional.distributed import all_gather, all_reduce_max, all_reduce_min, all_reduce_sum, all_to_all, broadcast, gather, reduce_scatter_sum, reduce_sum, remote_recv, remote_send, scatter\n'), ((10427, 10457), 'numpy.random.random_sample', 'np.random.random_sample', (['shape'], {}), '(shape)\n', (10450, 10457), True, 'import numpy as np\n'), ((10336, 10356), 'megengine.device.get_default_device', 'get_default_device', ([], {}), '()\n', (10354, 10356), False, 'from megengine.device import get_default_device, set_default_device\n')]
|
"""
Quadratic eigenvalue problem solvers.
"""
from __future__ import absolute_import
from sfepy.base.timing import Timer
import numpy as nm
import scipy.sparse as sps
from sfepy.base.base import output, get_default
from sfepy.linalg.utils import max_diff_csr
from sfepy.solvers.solvers import QuadraticEVPSolver
def standard_call(call):
"""
Decorator handling argument preparation and timing for quadratic
eigensolvers.
"""
def _standard_call(self, mtx_m, mtx_d, mtx_k, n_eigs=None,
eigenvectors=None, status=None, conf=None, **kwargs):
timer = Timer(start=True)
conf = get_default(conf, self.conf)
mtx_m = get_default(mtx_m, self.mtx_m)
mtx_d = get_default(mtx_d, self.mtx_d)
mtx_k = get_default(mtx_k, self.mtx_k)
n_eigs = get_default(n_eigs, self.n_eigs)
eigenvectors = get_default(eigenvectors, self.eigenvectors)
status = get_default(status, self.status)
result = call(self, mtx_m, mtx_d, mtx_k,
n_eigs, eigenvectors, status, conf,
**kwargs)
elapsed = timer.stop()
if status is not None:
status['time'] = elapsed
return result
return _standard_call
class LQuadraticEVPSolver(QuadraticEVPSolver):
"""
Quadratic eigenvalue problem solver based on the problem linearization.
(w^2 M + w D + K) x = 0.
"""
name = 'eig.qevp'
_parameters = [
('method', "{'companion', 'cholesky'}", 'companion', False,
'The linearization method.'),
('solver', 'dict', {'kind': 'eig.scipy', 'method': 'eig'}, False,
"""The configuration of an eigenvalue solver for
the linearized problem (A - w B) x = 0."""),
('mode', "{'normal', 'inverted'}", 'normal', False,
'Solve either A - w B (normal), or B - 1/w A (inverted).'),
('debug', 'bool', False, False,
'If True, print debugging information.'),
]
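# For illustration only (hypothetical values): a matching solver conf could look like
# {'kind': 'eig.qevp', 'method': 'cholesky',
#  'solver': {'kind': 'eig.scipy', 'method': 'eig'}, 'mode': 'normal', 'debug': True}.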
@standard_call
def __call__(self, mtx_m, mtx_d, mtx_k, n_eigs=None,
eigenvectors=None, status=None, conf=None):
if conf.debug:
ssym = status['matrix_info'] = {}
ssym['|M - M^T|'] = max_diff_csr(mtx_m, mtx_m.T)
ssym['|D - D^T|'] = max_diff_csr(mtx_d, mtx_d.T)
ssym['|K - K^T|'] = max_diff_csr(mtx_k, mtx_k.T)
ssym['|M - M^H|'] = max_diff_csr(mtx_m, mtx_m.H)
ssym['|D - D^H|'] = max_diff_csr(mtx_d, mtx_d.H)
ssym['|K - K^H|'] = max_diff_csr(mtx_k, mtx_k.H)
if conf.method == 'companion':
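# Companion linearization: with z = [w x; x], (w^2 M + w D + K) x = 0 becomes
# (A - w B) z = 0, where A = [[D, K], [-I, 0]] and B = [[-M, 0], [0, -I]].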
mtx_eye = -sps.eye(mtx_m.shape[0], dtype=mtx_m.dtype)
mtx_a = sps.bmat([[mtx_d, mtx_k],
[mtx_eye, None]])
mtx_b = sps.bmat([[-mtx_m, None],
[None, mtx_eye]])
elif conf.method == 'cholesky':
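# Cholesky-based linearization: factor M = L L^T (P is the fill-reducing permutation);
# with z = [x; w L^T x], A = [[-K, 0], [0, I]] and B = [[D, L], [L^T, 0]] give an
# equivalent symmetric pencil (A - w B) z = 0.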
from sksparse.cholmod import cholesky
factor = cholesky(mtx_m)
perm = factor.P()
ir = nm.arange(len(perm))
mtx_p = sps.coo_matrix((nm.ones_like(perm), (ir, perm)))
mtx_l = mtx_p.T * factor.L()
if conf.debug:
ssym['|S - LL^T|'] = max_diff_csr(mtx_m, mtx_l * mtx_l.T)
mtx_eye = sps.eye(mtx_l.shape[0], dtype=nm.float64)
mtx_a = sps.bmat([[-mtx_k, None],
[None, mtx_eye]])
mtx_b = sps.bmat([[mtx_d, mtx_l],
[mtx_l.T, None]])
else:
raise ValueError('unknown method! (%s)' % conf.method)
if conf.debug:
ssym['|A - A^T|'] = max_diff_csr(mtx_a, mtx_a.T)
ssym['|A - A^H|'] = max_diff_csr(mtx_a, mtx_a.H)
ssym['|B - B^T|'] = max_diff_csr(mtx_b, mtx_b.T)
ssym['|B - B^H|'] = max_diff_csr(mtx_b, mtx_b.H)
for key, val in sorted(ssym.items()):
output('{}: {}'.format(key, val))
if conf.mode == 'normal':
out = self.solver(mtx_a, mtx_b, n_eigs=n_eigs,
eigenvectors=eigenvectors, status=status)
if eigenvectors:
eigs, vecs = out
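# Keep only the first block of each linearized eigenvector, which is proportional to the physical eigenvector x.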
out = (eigs, vecs[:mtx_m.shape[0], :])
if conf.debug:
res = mtx_a.dot(vecs) - eigs * mtx_b.dot(vecs)
status['lin. error'] = nm.linalg.norm(res, nm.inf)
else:
out = self.solver(mtx_b, mtx_a, n_eigs=n_eigs,
eigenvectors=eigenvectors, status=status)
if eigenvectors:
eigs, vecs = out
out = (1.0 / eigs, vecs[:mtx_m.shape[0], :])
if conf.debug:
res = (1.0 / eigs) * mtx_b.dot(vecs) - mtx_a.dot(vecs)
status['lin. error'] = nm.linalg.norm(res, nm.inf)
else:
out = 1.0 / out
if conf.debug and eigenvectors:
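# Final check: residual of the original quadratic problem, (w^2 M + w D + K) x.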
eigs, vecs = out
res = ((eigs**2 * (mtx_m.dot(vecs)))
+ (eigs * (mtx_d.dot(vecs)))
+ (mtx_k.dot(vecs)))
status['error'] = nm.linalg.norm(res, nm.inf)
return out
|
[
"sfepy.base.base.get_default",
"sfepy.linalg.utils.max_diff_csr"
] |
[((609, 637), 'sfepy.base.base.get_default', 'get_default', (['conf', 'self.conf'], {}), '(conf, self.conf)\n', (620, 637), False, 'from sfepy.base.base import output, get_default\n'), ((654, 684), 'sfepy.base.base.get_default', 'get_default', (['mtx_m', 'self.mtx_m'], {}), '(mtx_m, self.mtx_m)\n', (665, 684), False, 'from sfepy.base.base import output, get_default\n'), ((701, 731), 'sfepy.base.base.get_default', 'get_default', (['mtx_d', 'self.mtx_d'], {}), '(mtx_d, self.mtx_d)\n', (712, 731), False, 'from sfepy.base.base import output, get_default\n'), ((748, 778), 'sfepy.base.base.get_default', 'get_default', (['mtx_k', 'self.mtx_k'], {}), '(mtx_k, self.mtx_k)\n', (759, 778), False, 'from sfepy.base.base import output, get_default\n'), ((796, 828), 'sfepy.base.base.get_default', 'get_default', (['n_eigs', 'self.n_eigs'], {}), '(n_eigs, self.n_eigs)\n', (807, 828), False, 'from sfepy.base.base import output, get_default\n'), ((852, 896), 'sfepy.base.base.get_default', 'get_default', (['eigenvectors', 'self.eigenvectors'], {}), '(eigenvectors, self.eigenvectors)\n', (863, 896), False, 'from sfepy.base.base import output, get_default\n'), ((914, 946), 'sfepy.base.base.get_default', 'get_default', (['status', 'self.status'], {}), '(status, self.status)\n', (925, 946), False, 'from sfepy.base.base import output, get_default\n'), ((2211, 2239), 'sfepy.linalg.utils.max_diff_csr', 'max_diff_csr', (['mtx_m', 'mtx_m.T'], {}), '(mtx_m, mtx_m.T)\n', (2223, 2239), False, 'from sfepy.linalg.utils import max_diff_csr\n'), ((2272, 2300), 'sfepy.linalg.utils.max_diff_csr', 'max_diff_csr', (['mtx_d', 'mtx_d.T'], {}), '(mtx_d, mtx_d.T)\n', (2284, 2300), False, 'from sfepy.linalg.utils import max_diff_csr\n'), ((2333, 2361), 'sfepy.linalg.utils.max_diff_csr', 'max_diff_csr', (['mtx_k', 'mtx_k.T'], {}), '(mtx_k, mtx_k.T)\n', (2345, 2361), False, 'from sfepy.linalg.utils import max_diff_csr\n'), ((2394, 2422), 'sfepy.linalg.utils.max_diff_csr', 'max_diff_csr', (['mtx_m', 'mtx_m.H'], {}), '(mtx_m, mtx_m.H)\n', (2406, 2422), False, 'from sfepy.linalg.utils import max_diff_csr\n'), ((2455, 2483), 'sfepy.linalg.utils.max_diff_csr', 'max_diff_csr', (['mtx_d', 'mtx_d.H'], {}), '(mtx_d, mtx_d.H)\n', (2467, 2483), False, 'from sfepy.linalg.utils import max_diff_csr\n'), ((2516, 2544), 'sfepy.linalg.utils.max_diff_csr', 'max_diff_csr', (['mtx_k', 'mtx_k.H'], {}), '(mtx_k, mtx_k.H)\n', (2528, 2544), False, 'from sfepy.linalg.utils import max_diff_csr\n'), ((2672, 2715), 'scipy.sparse.bmat', 'sps.bmat', (['[[mtx_d, mtx_k], [mtx_eye, None]]'], {}), '([[mtx_d, mtx_k], [mtx_eye, None]])\n', (2680, 2715), True, 'import scipy.sparse as sps\n'), ((2766, 2809), 'scipy.sparse.bmat', 'sps.bmat', (['[[-mtx_m, None], [None, mtx_eye]]'], {}), '([[-mtx_m, None], [None, mtx_eye]])\n', (2774, 2809), True, 'import scipy.sparse as sps\n'), ((3641, 3669), 'sfepy.linalg.utils.max_diff_csr', 'max_diff_csr', (['mtx_a', 'mtx_a.T'], {}), '(mtx_a, mtx_a.T)\n', (3653, 3669), False, 'from sfepy.linalg.utils import max_diff_csr\n'), ((3702, 3730), 'sfepy.linalg.utils.max_diff_csr', 'max_diff_csr', (['mtx_a', 'mtx_a.H'], {}), '(mtx_a, mtx_a.H)\n', (3714, 3730), False, 'from sfepy.linalg.utils import max_diff_csr\n'), ((3763, 3791), 'sfepy.linalg.utils.max_diff_csr', 'max_diff_csr', (['mtx_b', 'mtx_b.T'], {}), '(mtx_b, mtx_b.T)\n', (3775, 3791), False, 'from sfepy.linalg.utils import max_diff_csr\n'), ((3824, 3852), 'sfepy.linalg.utils.max_diff_csr', 'max_diff_csr', (['mtx_b', 'mtx_b.H'], {}), '(mtx_b, mtx_b.H)\n', (3836, 3852), False, 'from 
sfepy.linalg.utils import max_diff_csr\n'), ((5145, 5172), 'numpy.linalg.norm', 'nm.linalg.norm', (['res', 'nm.inf'], {}), '(res, nm.inf)\n', (5159, 5172), True, 'import numpy as nm\n'), ((2608, 2650), 'scipy.sparse.eye', 'sps.eye', (['mtx_m.shape[0]'], {'dtype': 'mtx_m.dtype'}), '(mtx_m.shape[0], dtype=mtx_m.dtype)\n', (2615, 2650), True, 'import scipy.sparse as sps\n'), ((2953, 2968), 'sksparse.cholmod.cholesky', 'cholesky', (['mtx_m'], {}), '(mtx_m)\n', (2961, 2968), False, 'from sksparse.cholmod import cholesky\n'), ((3272, 3313), 'scipy.sparse.eye', 'sps.eye', (['mtx_l.shape[0]'], {'dtype': 'nm.float64'}), '(mtx_l.shape[0], dtype=nm.float64)\n', (3279, 3313), True, 'import scipy.sparse as sps\n'), ((3335, 3378), 'scipy.sparse.bmat', 'sps.bmat', (['[[-mtx_k, None], [None, mtx_eye]]'], {}), '([[-mtx_k, None], [None, mtx_eye]])\n', (3343, 3378), True, 'import scipy.sparse as sps\n'), ((3429, 3472), 'scipy.sparse.bmat', 'sps.bmat', (['[[mtx_d, mtx_l], [mtx_l.T, None]]'], {}), '([[mtx_d, mtx_l], [mtx_l.T, None]])\n', (3437, 3472), True, 'import scipy.sparse as sps\n'), ((3212, 3248), 'sfepy.linalg.utils.max_diff_csr', 'max_diff_csr', (['mtx_m', '(mtx_l * mtx_l.T)'], {}), '(mtx_m, mtx_l * mtx_l.T)\n', (3224, 3248), False, 'from sfepy.linalg.utils import max_diff_csr\n'), ((4380, 4407), 'numpy.linalg.norm', 'nm.linalg.norm', (['res', 'nm.inf'], {}), '(res, nm.inf)\n', (4394, 4407), True, 'import numpy as nm\n'), ((4829, 4856), 'numpy.linalg.norm', 'nm.linalg.norm', (['res', 'nm.inf'], {}), '(res, nm.inf)\n', (4843, 4856), True, 'import numpy as nm\n'), ((3073, 3091), 'numpy.ones_like', 'nm.ones_like', (['perm'], {}), '(perm)\n', (3085, 3091), True, 'import numpy as nm\n')]
|
from fastapi import APIRouter, Depends
from ..utils import engine, get_session
from sqlmodel import Session, select, SQLModel, or_
from sqlalchemy.exc import NoResultFound
from ..models.role import Role
from datetime import datetime
router = APIRouter(prefix="/api/roles", tags=["role"])
session = Session(engine)
@router.post("/")
async def post_role(*, role: Role, session: Session = Depends(get_session)):
"""
Post a new role.
Parameters
----------
role : Role
Role that is to be added to the database.
session : Session
SQL session that is to be used to add the role.
Defaults to creating a dependency on the running SQL model session.
"""
statement = select(Role).where(Role.id == role.id)
try:
result = session.exec(statement).one()
return False
except NoResultFound:
session.add(role)
session.commit()
session.refresh(role)
return role
@router.get("/")
async def read_roles(session: Session = Depends(get_session)):
"""
Get list of all roles.
Parameters
----------
session : Session
SQL session that is to be used to get the roles.
Defaults to creating a dependency on the running SQL model session.
"""
statement = select(Role)
results = session.exec(statement).all()
return results
@router.get("/active")
async def read_roles(session: Session = Depends(get_session)):
"""
Get list of active roles.
Parameters
----------
session : Session
SQL session that is to be used to get the roles.
Defaults to creating a dependency on the running SQL model session.
"""
statement = select(Role).where(Role.is_active == True)
results = session.exec(statement).all()
return results
@router.put("/{role_id}/activate")
async def activate_role(
role_id: str = None,
session: Session = Depends(get_session),
):
"""
Activate a role using the role ID as a key.
Parameters
----------
role_id : str
ID of role to be activated.
session : Session
SQL session that is to be used to activate the role.
Defaults to creating a dependency on the running SQL model session.
"""
statement = select(Role).where(Role.id == role_id)
role_to_activate = session.exec(statement).one()
role_to_activate.is_active = True
role_to_activate.updated_at = datetime.now()
session.add(role_to_activate)
session.commit()
session.refresh(role_to_activate)
return role_to_activate
@router.put("/{role_id}/deactivate")
async def deactivate_role(
role_id: str = None,
session: Session = Depends(get_session),
):
"""
Deactivate a role using the role ID as a key.
Parameters
----------
role_id : str
ID of role to be deactivated.
session : Session
SQL session that is to be used to deactivate the role.
Defaults to creating a dependency on the running SQL model session.
"""
statement = select(Role).where(Role.id == role_id)
role_to_deactivate = session.exec(statement).one()
role_to_deactivate.is_active = False
role_to_deactivate.updated_at = datetime.now()
session.add(role_to_deactivate)
session.commit()
session.refresh(role_to_deactivate)
return role_to_deactivate
@router.put("/")
async def update_role(
id: str = None,
new_name: str = None,
new_short_name: str = None,
is_active: bool = None,
session: Session = Depends(get_session),
):
"""
Update a role.
Parameters
----------
id : str
ID of role to be updated.
new_name : str
New name of the role.
new_short_name : str
New short name of the role.
is_active : bool
New status of the role.
session : Session
SQL session that is to be used to update the role.
Defaults to creating a dependency on the running SQL model session.
"""
statement = select(Role.is_active).where(Role.id == id)
result = session.exec(statement).first()
if result == True:
statement = select(Role).where(Role.id == id)
role_to_update = session.exec(statement).one()
if new_name != None:
role_to_update.name = new_name
if new_short_name != None:
role_to_update.short_name = new_short_name
if is_active != None:
role_to_update.is_active = is_active
session.add(role_to_update)
role_to_update.updated_at = datetime.now()
session.commit()
session.refresh(role_to_update)
return role_to_update
else:
return False
|
[
"sqlmodel.select",
"sqlmodel.Session"
] |
[((243, 288), 'fastapi.APIRouter', 'APIRouter', ([], {'prefix': '"""/api/roles"""', 'tags': "['role']"}), "(prefix='/api/roles', tags=['role'])\n", (252, 288), False, 'from fastapi import APIRouter, Depends\n'), ((299, 314), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (306, 314), False, 'from sqlmodel import Session, select, SQLModel, or_\n'), ((389, 409), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (396, 409), False, 'from fastapi import APIRouter, Depends\n'), ((1018, 1038), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (1025, 1038), False, 'from fastapi import APIRouter, Depends\n'), ((1286, 1298), 'sqlmodel.select', 'select', (['Role'], {}), '(Role)\n', (1292, 1298), False, 'from sqlmodel import Session, select, SQLModel, or_\n'), ((1427, 1447), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (1434, 1447), False, 'from fastapi import APIRouter, Depends\n'), ((1914, 1934), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (1921, 1934), False, 'from fastapi import APIRouter, Depends\n'), ((2427, 2441), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2439, 2441), False, 'from datetime import datetime\n'), ((2677, 2697), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (2684, 2697), False, 'from fastapi import APIRouter, Depends\n'), ((3203, 3217), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3215, 3217), False, 'from datetime import datetime\n'), ((3516, 3536), 'fastapi.Depends', 'Depends', (['get_session'], {}), '(get_session)\n', (3523, 3536), False, 'from fastapi import APIRouter, Depends\n'), ((4524, 4538), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4536, 4538), False, 'from datetime import datetime\n'), ((716, 728), 'sqlmodel.select', 'select', (['Role'], {}), '(Role)\n', (722, 728), False, 'from sqlmodel import Session, select, SQLModel, or_\n'), ((1698, 1710), 'sqlmodel.select', 'select', (['Role'], {}), '(Role)\n', (1704, 1710), False, 'from sqlmodel import Session, select, SQLModel, or_\n'), ((2263, 2275), 'sqlmodel.select', 'select', (['Role'], {}), '(Role)\n', (2269, 2275), False, 'from sqlmodel import Session, select, SQLModel, or_\n'), ((3032, 3044), 'sqlmodel.select', 'select', (['Role'], {}), '(Role)\n', (3038, 3044), False, 'from sqlmodel import Session, select, SQLModel, or_\n'), ((3990, 4012), 'sqlmodel.select', 'select', (['Role.is_active'], {}), '(Role.is_active)\n', (3996, 4012), False, 'from sqlmodel import Session, select, SQLModel, or_\n'), ((4122, 4134), 'sqlmodel.select', 'select', (['Role'], {}), '(Role)\n', (4128, 4134), False, 'from sqlmodel import Session, select, SQLModel, or_\n')]
|
import pytest
from fastapi.testclient import TestClient
from sqlmodel import Session, SQLModel, create_engine
from sqlmodel.pool import StaticPool
from api.main import app, get_session
from api.models import Measurement, Observer
@pytest.fixture(name="session")
def session_fixture():
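# In-memory SQLite database; StaticPool keeps a single shared connection so the fixture session and the TestClient see the same data.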
engine = create_engine(
"sqlite://", connect_args={"check_same_thread": False}, poolclass=StaticPool
)
SQLModel.metadata.create_all(engine)
with Session(engine) as session:
yield session
@pytest.fixture(name="client")
def client_fixture(session: Session):
def get_session_override():
return session
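# Route handlers resolve get_session through this override, so requests run against the test session.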
app.dependency_overrides[get_session] = get_session_override
client = TestClient(app)
yield client
app.dependency_overrides.clear()
@pytest.fixture(name="observer_1")
def observer_fixture(session: Session):
observer = Observer(phone="+1555-555-5555", email="<EMAIL>")
session.add(observer)
session.commit()
yield observer
session.delete(observer)
def test_create_observer(client: TestClient):
response = client.post(
"/observers/", json={"phone": "+1555-555-5555", "email": "<EMAIL>"}
)
data = response.json()
assert response.status_code == 200
assert data["phone"] == "+1555-555-5555"
assert data["email"] == "<EMAIL>"
assert data["id"] is not None
def test_create_observer_incomplete(client: TestClient):
# No email
response = client.post("/observers/", json={"phone": "+1555-555-5555"})
assert response.status_code == 422
def test_create_observer_invalid(client: TestClient):
# email has an invalid type
response = client.post(
"/observers/", json={"phone": "+1555-555-5555", "email": {"key": "value"}}
)
assert response.status_code == 422
def test_delete_observer(session: Session, client: TestClient, observer_1: Observer):
response = client.delete(f"/observers/{observer_1.id}")
observer_in_db = session.get(Observer, observer_1.id)
assert response.status_code == 200
assert observer_in_db is None
def test_create_measurement_wrong_observer(client: TestClient, observer_1: Observer):
response = client.post(
"/measurements/",
json={
"temperaturescale": "C",
"temperature": 4,
"organizationid": 876543,
"siteid": 65432,
"observer_id": 8,
},
)
data = response.json()
assert response.status_code == 400
assert data["detail"] == "Not a valid observer id"
|
[
"sqlmodel.SQLModel.metadata.create_all",
"sqlmodel.Session",
"sqlmodel.create_engine"
] |
[((234, 264), 'pytest.fixture', 'pytest.fixture', ([], {'name': '"""session"""'}), "(name='session')\n", (248, 264), False, 'import pytest\n'), ((510, 539), 'pytest.fixture', 'pytest.fixture', ([], {'name': '"""client"""'}), "(name='client')\n", (524, 539), False, 'import pytest\n'), ((785, 818), 'pytest.fixture', 'pytest.fixture', ([], {'name': '"""observer_1"""'}), "(name='observer_1')\n", (799, 818), False, 'import pytest\n'), ((301, 396), 'sqlmodel.create_engine', 'create_engine', (['"""sqlite://"""'], {'connect_args': "{'check_same_thread': False}", 'poolclass': 'StaticPool'}), "('sqlite://', connect_args={'check_same_thread': False},\n poolclass=StaticPool)\n", (314, 396), False, 'from sqlmodel import Session, SQLModel, create_engine\n'), ((411, 447), 'sqlmodel.SQLModel.metadata.create_all', 'SQLModel.metadata.create_all', (['engine'], {}), '(engine)\n', (439, 447), False, 'from sqlmodel import Session, SQLModel, create_engine\n'), ((712, 727), 'fastapi.testclient.TestClient', 'TestClient', (['app'], {}), '(app)\n', (722, 727), False, 'from fastapi.testclient import TestClient\n'), ((749, 781), 'api.main.app.dependency_overrides.clear', 'app.dependency_overrides.clear', ([], {}), '()\n', (779, 781), False, 'from api.main import app, get_session\n'), ((874, 923), 'api.models.Observer', 'Observer', ([], {'phone': '"""+1555-555-5555"""', 'email': '"""<EMAIL>"""'}), "(phone='+1555-555-5555', email='<EMAIL>')\n", (882, 923), False, 'from api.models import Measurement, Observer\n'), ((457, 472), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (464, 472), False, 'from sqlmodel import Session, SQLModel, create_engine\n')]
|
from typing import Optional
from sqlmodel import SQLModel, Field, create_engine, Session
engine = create_engine(url="sqlite:///users.db", echo=False)
class User(SQLModel, table=True):
id: Optional[int] = Field(None, primary_key=True)
username: str
password: str
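# Generator dependency: yields a session bound to the engine and closes it when the caller finishes.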
def get_session():
with Session(engine) as session:
yield session
def init_db():
SQLModel.metadata.create_all(engine)
|
[
"sqlmodel.Session",
"sqlmodel.create_engine",
"sqlmodel.Field",
"sqlmodel.SQLModel.metadata.create_all"
] |
[((100, 151), 'sqlmodel.create_engine', 'create_engine', ([], {'url': '"""sqlite:///users.db"""', 'echo': '(False)'}), "(url='sqlite:///users.db', echo=False)\n", (113, 151), False, 'from sqlmodel import SQLModel, Field, create_engine, Session\n'), ((212, 241), 'sqlmodel.Field', 'Field', (['None'], {'primary_key': '(True)'}), '(None, primary_key=True)\n', (217, 241), False, 'from sqlmodel import SQLModel, Field, create_engine, Session\n'), ((378, 414), 'sqlmodel.SQLModel.metadata.create_all', 'SQLModel.metadata.create_all', (['engine'], {}), '(engine)\n', (406, 414), False, 'from sqlmodel import SQLModel, Field, create_engine, Session\n'), ((307, 322), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (314, 322), False, 'from sqlmodel import SQLModel, Field, create_engine, Session\n')]
|
import numpy as nm
from sfepy.base.conf import transform_functions
from sfepy.base.testing import TestCommon
def get_nodes(coors, domain=None):
x, z = coors[:,0], coors[:,2]
return nm.where((z < 0.1) & (x < 0.1))[0]
def get_elements(coors, domain=None):
return {0 : [1, 4, 5]}
class Test(TestCommon):
@staticmethod
def from_conf( conf, options ):
from sfepy import data_dir
from sfepy.fem import Mesh, Domain, Functions
mesh = Mesh('test mesh',
data_dir + '/meshes/various_formats/abaqus_tet.inp')
domain = Domain('test domain', mesh)
conf_functions = {
'get_nodes' : (get_nodes,),
'get_elements' : (get_elements,),
}
functions = Functions.from_conf(transform_functions(conf_functions))
test = Test(conf=conf, options=options,
domain=domain, functions=functions)
return test
def test_selectors(self):
"""
Test basic region selectors.
"""
selectors = [
'all',
'nodes of surface',
'nodes of group 0',
'nodes in (z < 0.1) & (x < 0.1)',
'nodes by get_nodes',
'node 0, 1, 2',
'elements of group 0',
'elements by get_elements',
'element 1, 4, 5',
'element (0, 1), (0, 4), (0, 5)'
]
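# Expected all_vertices of the region created by each selector above, in the same order.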
all_vertices = [
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
[0, 1, 3, 7],
[1, 2, 3, 4, 5, 9, 11],
[1, 2, 3, 4, 5, 9, 11],
[0, 1, 2],
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
[0, 1, 2, 3, 4, 5, 6, 8],
[0, 1, 2, 3, 4, 5, 6, 8],
[0, 1, 2, 3, 4, 5, 6, 8],
]
ok = True
for ii, sel in enumerate(selectors):
self.report('select:', sel)
reg = self.domain.create_region('r', sel, functions=self.functions)
_ok = (reg.all_vertices == all_vertices[ii]).all()
self.report(' all_vertices:', _ok)
ok = ok and _ok
return ok
def test_operators(self):
"""
Test operators in region selectors.
"""
ok = True
r1 = self.domain.create_region('r1', 'nodes of surface')
sel = 'r.r1 -n nodes of group 0'
self.report('select:', sel)
reg = self.domain.create_region('reg', sel)
av = [2, 4, 5, 6, 8, 9, 10, 11, 12]
_ok = (reg.all_vertices == nm.array(av)).all()
self.report(' all_vertices:', _ok)
ok = ok and _ok
sel = 'node 0, 1, 2 +n nodes of group 0'
self.report('select:', sel)
reg = self.domain.create_region('reg', sel)
av = [0, 1, 2, 3, 7]
_ok = (reg.all_vertices == nm.array(av)).all()
self.report(' all_vertices:', _ok)
ok = ok and _ok
sel = 'node 0, 1, 2 *n nodes of group 0'
self.report('select:', sel)
reg = self.domain.create_region('reg', sel)
av = [0, 1]
_ok = (reg.all_vertices == nm.array(av)).all()
self.report(' all_vertices:', _ok)
ok = ok and _ok
sel = 'r.r1 -e element 1, 4, 5'
self.report('select:', sel)
reg = self.domain.create_region('reg', sel)
_ok = (nm.setdiff1d(r1.cells[0], [1, 4, 5]) == reg.cells[0]).all()
self.report(' cells:', _ok)
ok = ok and _ok
sel = 'element 8, 3 +e element 1, 4, 5'
self.report('select:', sel)
reg = self.domain.create_region('reg', sel)
cells = [1, 3, 4, 5, 8]
_ok = (reg.cells[0] == nm.array(cells)).all()
self.report(' cells:', _ok)
ok = ok and _ok
sel = 'element 8, 3, 2 *e element 8, 4, 2, 7'
self.report('select:', sel)
reg = self.domain.create_region('reg', sel)
cells = [2, 8]
_ok = (reg.cells[0] == nm.array(cells)).all()
self.report(' cells:', _ok)
ok = ok and _ok
return ok
|
[
"sfepy.base.conf.transform_functions",
"sfepy.fem.Domain",
"sfepy.fem.Mesh"
] |
[((192, 223), 'numpy.where', 'nm.where', (['((z < 0.1) & (x < 0.1))'], {}), '((z < 0.1) & (x < 0.1))\n', (200, 223), True, 'import numpy as nm\n'), ((478, 548), 'sfepy.fem.Mesh', 'Mesh', (['"""test mesh"""', "(data_dir + '/meshes/various_formats/abaqus_tet.inp')"], {}), "('test mesh', data_dir + '/meshes/various_formats/abaqus_tet.inp')\n", (482, 548), False, 'from sfepy.fem import Mesh, Domain, Functions\n'), ((586, 613), 'sfepy.fem.Domain', 'Domain', (['"""test domain"""', 'mesh'], {}), "('test domain', mesh)\n", (592, 613), False, 'from sfepy.fem import Mesh, Domain, Functions\n'), ((778, 813), 'sfepy.base.conf.transform_functions', 'transform_functions', (['conf_functions'], {}), '(conf_functions)\n', (797, 813), False, 'from sfepy.base.conf import transform_functions\n'), ((2649, 2661), 'numpy.array', 'nm.array', (['av'], {}), '(av)\n', (2657, 2661), True, 'import numpy as nm\n'), ((2943, 2955), 'numpy.array', 'nm.array', (['av'], {}), '(av)\n', (2951, 2955), True, 'import numpy as nm\n'), ((3225, 3237), 'numpy.array', 'nm.array', (['av'], {}), '(av)\n', (3233, 3237), True, 'import numpy as nm\n'), ((3457, 3493), 'numpy.setdiff1d', 'nm.setdiff1d', (['r1.cells[0]', '[1, 4, 5]'], {}), '(r1.cells[0], [1, 4, 5])\n', (3469, 3493), True, 'import numpy as nm\n'), ((3782, 3797), 'numpy.array', 'nm.array', (['cells'], {}), '(cells)\n', (3790, 3797), True, 'import numpy as nm\n'), ((4064, 4079), 'numpy.array', 'nm.array', (['cells'], {}), '(cells)\n', (4072, 4079), True, 'import numpy as nm\n')]
|
from datetime import date
from typing import List, Optional
from rich.console import Console, ConsoleOptions, RenderResult
from rich.text import Text
from sqlmodel import Field, Relationship, SQLModel
class SpokenLanguageMovieLink(SQLModel, table=True):
spoken_language_id: Optional[int] = Field(
default=None, foreign_key="spoken_language.local_id", primary_key=True
)
movie_id: Optional[int] = Field(
default=None, foreign_key="movie.local_id", primary_key=True
)
class SpokenLanguage(SQLModel, table=True):
__tablename__ = "spoken_language"
local_id: Optional[int] = Field(default=None, primary_key=True)
english_name: Optional[str] = None
iso_639_1: Optional[str] = None
name: str
movies: List["Movie"] = Relationship(
back_populates="spoken_languages", link_model=SpokenLanguageMovieLink
)
def __rich_repr__(self):
yield self.name
class ProductionCountryMovieLink(SQLModel, table=True):
production_country_id: Optional[int] = Field(
default=None, foreign_key="production_country.local_id", primary_key=True
)
movie_id: Optional[int] = Field(
default=None, foreign_key="movie.local_id", primary_key=True
)
class ProductionCountry(SQLModel, table=True):
__tablename__ = "production_country"
local_id: Optional[int] = Field(default=None, primary_key=True)
iso_3166_1: Optional[str] = None
name: str
movies: List["Movie"] = Relationship(
back_populates="production_countries", link_model=ProductionCountryMovieLink
)
def __rich_repr__(self):
yield self.name
class ProductionCompanyMovieLink(SQLModel, table=True):
production_company_id: Optional[int] = Field(
default=None, foreign_key="production_company.local_id", primary_key=True
)
movie_id: Optional[int] = Field(
default=None, foreign_key="movie.local_id", primary_key=True
)
class ProductionCompany(SQLModel, table=True):
__tablename__ = "production_company"
local_id: Optional[int] = Field(default=None, primary_key=True)
id: int
name: str
origin_country: Optional[str] = None
logo_path: Optional[str] = None
movies: List["Movie"] = Relationship(
back_populates="production_companies", link_model=ProductionCompanyMovieLink
)
def __rich_repr__(self):
yield self.name
class Collection(SQLModel, table=True):
local_id: Optional[int] = Field(default=None, primary_key=True)
id: int
name: str
poster_path: Optional[str] = None
backdrop_path: Optional[str] = None
movies: List["Movie"] = Relationship(back_populates="collection")
def __rich_repr__(self):
yield self.name
class GenreMovieLink(SQLModel, table=True):
genre_id: Optional[int] = Field(
default=None, foreign_key="genre.local_id", primary_key=True
)
movie_id: Optional[int] = Field(
default=None, foreign_key="movie.local_id", primary_key=True
)
class Genre(SQLModel, table=True):
local_id: Optional[int] = Field(default=None, primary_key=True)
id: int
name: str
movies: List["Movie"] = Relationship(
back_populates="genres", link_model=GenreMovieLink
)
def __rich_repr__(self):
yield self.name
class Movie(SQLModel, table=True):
local_id: Optional[int] = Field(default=None, primary_key=True)
adult: Optional[bool] = None
backdrop_path: Optional[str] = None
collection_id: Optional[int] = Field(
default=None, foreign_key="collection.local_id"
)
collection: Optional[Collection] = Relationship(back_populates="movies")
budget: Optional[int] = None
genres: List[Genre] = Relationship(
back_populates="movies", link_model=GenreMovieLink
)
homepage: Optional[str] = None
id: int
imdb_id: Optional[str] = None
original_language: Optional[str] = None
original_title: Optional[str] = None
overview: Optional[str] = None
popularity: Optional[float] = None
poster_path: Optional[str] = None
production_companies: List[ProductionCompany] = Relationship(
back_populates="movies", link_model=ProductionCompanyMovieLink
)
production_countries: List[ProductionCountry] = Relationship(
back_populates="movies", link_model=ProductionCountryMovieLink
)
release_date: Optional[date] = Field(None, index=True)
revenue: Optional[int] = None
runtime: Optional[int] = None
spoken_languages: List[SpokenLanguage] = Relationship(
back_populates="movies", link_model=SpokenLanguageMovieLink
)
status: Optional[str] = None
tagline: Optional[str] = None
title: str = Field(..., index=True)
video: Optional[bool] = None
vote_average: Optional[float] = None
vote_count: Optional[int] = None
def __rich_repr__(self):
yield self.title
yield "overview", self.overview
yield "release_date", self.release_date
yield "runtime", f"{self.runtime} min"
yield "genres", self.genres
yield "collection", self.collection
yield "spoken_languages", self.spoken_languages
yield "revenue", f"{self.revenue / 1e6:.1f}M"
def __rich_console__(
self, console: Console, options: ConsoleOptions
) -> RenderResult:
yield Text(f"{self.title}", justify="center", style="bold magenta")
release_date_str = self.release_date.strftime("%b %d, %Y")
yield Text(f"Released: {release_date_str}")
yield Text(f"Runtime: {self.runtime} min")
genres_str = ", ".join([g.name for g in self.genres])
yield Text(f"Genres: {genres_str}")
return
|
[
"sqlmodel.Relationship",
"sqlmodel.Field"
] |
[((297, 374), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'foreign_key': '"""spoken_language.local_id"""', 'primary_key': '(True)'}), "(default=None, foreign_key='spoken_language.local_id', primary_key=True)\n", (302, 374), False, 'from sqlmodel import Field, Relationship, SQLModel\n'), ((419, 486), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'foreign_key': '"""movie.local_id"""', 'primary_key': '(True)'}), "(default=None, foreign_key='movie.local_id', primary_key=True)\n", (424, 486), False, 'from sqlmodel import Field, Relationship, SQLModel\n'), ((615, 652), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (620, 652), False, 'from sqlmodel import Field, Relationship, SQLModel\n'), ((770, 858), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""spoken_languages"""', 'link_model': 'SpokenLanguageMovieLink'}), "(back_populates='spoken_languages', link_model=\n SpokenLanguageMovieLink)\n", (782, 858), False, 'from sqlmodel import Field, Relationship, SQLModel\n'), ((1023, 1108), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'foreign_key': '"""production_country.local_id"""', 'primary_key': '(True)'}), "(default=None, foreign_key='production_country.local_id', primary_key=True\n )\n", (1028, 1108), False, 'from sqlmodel import Field, Relationship, SQLModel\n'), ((1148, 1215), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'foreign_key': '"""movie.local_id"""', 'primary_key': '(True)'}), "(default=None, foreign_key='movie.local_id', primary_key=True)\n", (1153, 1215), False, 'from sqlmodel import Field, Relationship, SQLModel\n'), ((1350, 1387), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (1355, 1387), False, 'from sqlmodel import Field, Relationship, SQLModel\n'), ((1467, 1562), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""production_countries"""', 'link_model': 'ProductionCountryMovieLink'}), "(back_populates='production_countries', link_model=\n ProductionCountryMovieLink)\n", (1479, 1562), False, 'from sqlmodel import Field, Relationship, SQLModel\n'), ((1727, 1812), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'foreign_key': '"""production_company.local_id"""', 'primary_key': '(True)'}), "(default=None, foreign_key='production_company.local_id', primary_key=True\n )\n", (1732, 1812), False, 'from sqlmodel import Field, Relationship, SQLModel\n'), ((1852, 1919), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'foreign_key': '"""movie.local_id"""', 'primary_key': '(True)'}), "(default=None, foreign_key='movie.local_id', primary_key=True)\n", (1857, 1919), False, 'from sqlmodel import Field, Relationship, SQLModel\n'), ((2054, 2091), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (2059, 2091), False, 'from sqlmodel import Field, Relationship, SQLModel\n'), ((2223, 2318), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""production_companies"""', 'link_model': 'ProductionCompanyMovieLink'}), "(back_populates='production_companies', link_model=\n ProductionCompanyMovieLink)\n", (2235, 2318), False, 'from sqlmodel import Field, Relationship, SQLModel\n'), ((2454, 2491), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (2459, 2491), False, 'from sqlmodel import Field, Relationship, SQLModel\n'), ((2624, 2665), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""collection"""'}), "(back_populates='collection')\n", (2636, 2665), False, 'from sqlmodel import Field, Relationship, SQLModel\n'), ((2796, 2863), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'foreign_key': '"""genre.local_id"""', 'primary_key': '(True)'}), "(default=None, foreign_key='genre.local_id', primary_key=True)\n", (2801, 2863), False, 'from sqlmodel import Field, Relationship, SQLModel\n'), ((2908, 2975), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'foreign_key': '"""movie.local_id"""', 'primary_key': '(True)'}), "(default=None, foreign_key='movie.local_id', primary_key=True)\n", (2913, 2975), False, 'from sqlmodel import Field, Relationship, SQLModel\n'), ((3057, 3094), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (3062, 3094), False, 'from sqlmodel import Field, Relationship, SQLModel\n'), ((3149, 3213), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""genres"""', 'link_model': 'GenreMovieLink'}), "(back_populates='genres', link_model=GenreMovieLink)\n", (3161, 3213), False, 'from sqlmodel import Field, Relationship, SQLModel\n'), ((3349, 3386), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (3354, 3386), False, 'from sqlmodel import Field, Relationship, SQLModel\n'), ((3495, 3549), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'foreign_key': '"""collection.local_id"""'}), "(default=None, foreign_key='collection.local_id')\n", (3500, 3549), False, 'from sqlmodel import Field, Relationship, SQLModel\n'), ((3603, 3640), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""movies"""'}), "(back_populates='movies')\n", (3615, 3640), False, 'from sqlmodel import Field, Relationship, SQLModel\n'), ((3700, 3764), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""movies"""', 'link_model': 'GenreMovieLink'}), "(back_populates='movies', link_model=GenreMovieLink)\n", (3712, 3764), False, 'from sqlmodel import Field, Relationship, SQLModel\n'), ((4109, 4185), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""movies"""', 'link_model': 'ProductionCompanyMovieLink'}), "(back_populates='movies', link_model=ProductionCompanyMovieLink)\n", (4121, 4185), False, 'from sqlmodel import Field, Relationship, SQLModel\n'), ((4252, 4328), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""movies"""', 'link_model': 'ProductionCountryMovieLink'}), "(back_populates='movies', link_model=ProductionCountryMovieLink)\n", (4264, 4328), False, 'from sqlmodel import Field, Relationship, SQLModel\n'), ((4378, 4401), 'sqlmodel.Field', 'Field', (['None'], {'index': '(True)'}), '(None, index=True)\n', (4383, 4401), False, 'from sqlmodel import Field, Relationship, SQLModel\n'), ((4515, 4588), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""movies"""', 'link_model': 'SpokenLanguageMovieLink'}), "(back_populates='movies', link_model=SpokenLanguageMovieLink)\n", (4527, 4588), False, 'from sqlmodel import Field, Relationship, SQLModel\n'), ((4687, 4709), 'sqlmodel.Field', 'Field', (['...'], {'index': '(True)'}), '(..., index=True)\n', (4692, 4709), False, 'from sqlmodel import Field, Relationship, SQLModel\n'), ((5322, 5383), 'rich.text.Text', 'Text', (['f"""{self.title}"""'], {'justify': '"""center"""', 'style': '"""bold magenta"""'}), "(f'{self.title}', justify='center', style='bold magenta')\n", (5326, 5383), False, 'from rich.text import Text\n'), ((5466, 5503), 'rich.text.Text', 'Text', (['f"""Released: {release_date_str}"""'], {}), "(f'Released: {release_date_str}')\n", (5470, 5503), False, 'from rich.text import Text\n'), ((5518, 5554), 'rich.text.Text', 'Text', (['f"""Runtime: {self.runtime} min"""'], {}), "(f'Runtime: {self.runtime} min')\n", (5522, 5554), False, 'from rich.text import Text\n'), ((5632, 5661), 'rich.text.Text', 'Text', (['f"""Genres: {genres_str}"""'], {}), "(f'Genres: {genres_str}')\n", (5636, 5661), False, 'from rich.text import Text\n')]
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine as mge
import megengine.functional as F
import megengine.hub as hub
import megengine.module as M
import official.vision.classification.resnet.model as resnet
import numpy as np
class DeconvLayers(M.Module):
def __init__(self, nf1, nf2s, kernels, num_layers, bias=True, norm=M.BatchNorm2d):
super(DeconvLayers, self).__init__()
_body = []
for i in range(num_layers):
kernel = kernels[i]
padding = (
kernel // 3
) # padding=0 when kernel=2 and padding=1 when kernel=4 or kernel=3
_body += [
M.ConvTranspose2d(nf1, nf2s[i], kernel, 2, padding, bias=bias),
norm(nf2s[i]),
M.ReLU(),
]
nf1 = nf2s[i]
self.body = M.Sequential(*_body)
def forward(self, x):
return self.body(x)
class SimpleBaseline(M.Module):
def __init__(self, backbone, cfg):
super(SimpleBaseline, self).__init__()
norm = M.BatchNorm2d
self.backbone = getattr(resnet, backbone)(
norm=norm, pretrained=cfg.backbone_pretrained
)
del self.backbone.fc
self.cfg = cfg
self.deconv_layers = DeconvLayers(
cfg.initial_deconv_channels,
cfg.deconv_channels,
cfg.deconv_kernel_sizes,
cfg.num_deconv_layers,
cfg.deconv_with_bias,
norm,
)
self.last_layer = M.Conv2d(cfg.deconv_channels[-1], cfg.keypoint_num, 3, 1, 1)
self._initialize_weights()
self.inputs = {
"image": mge.tensor(dtype="float32"),
"heatmap": mge.tensor(dtype="float32"),
"heat_valid": mge.tensor(dtype="float32"),
}
def calc_loss(self):
out = self.forward(self.inputs["image"])
valid = self.inputs["heat_valid"][:, :, None, None]
label = self.inputs["heatmap"][:, -1]
loss = F.square_loss(out * valid, label * valid)
return loss
def predict(self):
return self.forward(self.inputs["image"])
def _initialize_weights(self):
for k, m in self.deconv_layers.named_modules():
if isinstance(m, M.ConvTranspose2d):
M.init.normal_(m.weight, std=0.001)
if self.cfg.deconv_with_bias:
M.init.zeros_(m.bias)
if isinstance(m, M.BatchNorm2d):
M.init.ones_(m.weight)
M.init.zeros_(m.bias)
M.init.normal_(self.last_layer.weight, std=0.001)
M.init.zeros_(self.last_layer.bias)
def forward(self, x):
f = self.backbone.extract_features(x)["res5"]
f = self.deconv_layers(f)
pred = self.last_layer(f)
return pred
class SimpleBaseline_Config:
initial_deconv_channels = 2048
num_deconv_layers = 3
deconv_channels = [256, 256, 256]
deconv_kernel_sizes = [4, 4, 4]
deconv_with_bias = False
keypoint_num = 17
backbone_pretrained = True
cfg = SimpleBaseline_Config()
@hub.pretrained(
"https://data.megengine.org.cn/models/weights/simplebaseline50_256x192_0_255_71_2.pkl"
)
def simplebaseline_res50(**kwargs):
model = SimpleBaseline(backbone="resnet50", cfg=cfg, **kwargs)
return model
@hub.pretrained(
"https://data.megengine.org.cn/models/weights/simplebaseline101_256x192_0_255_72_2.pkl"
)
def simplebaseline_res101(**kwargs):
model = SimpleBaseline(backbone="resnet101", cfg=cfg, **kwargs)
return model
@hub.pretrained(
"https://data.megengine.org.cn/models/weights/simplebaseline152_256x192_0_255_72_4.pkl"
)
def simplebaseline_res152(**kwargs):
model = SimpleBaseline(backbone="resnet152", cfg=cfg, **kwargs)
return model
|
[
"megengine.module.init.zeros_",
"megengine.module.init.normal_",
"megengine.tensor",
"megengine.module.ReLU",
"megengine.module.Conv2d",
"megengine.module.init.ones_",
"megengine.module.ConvTranspose2d",
"megengine.functional.square_loss",
"megengine.module.Sequential",
"megengine.hub.pretrained"
] |
[((3431, 3543), 'megengine.hub.pretrained', 'hub.pretrained', (['"""https://data.megengine.org.cn/models/weights/simplebaseline50_256x192_0_255_71_2.pkl"""'], {}), "(\n 'https://data.megengine.org.cn/models/weights/simplebaseline50_256x192_0_255_71_2.pkl'\n )\n", (3445, 3543), True, 'import megengine.hub as hub\n'), ((3664, 3777), 'megengine.hub.pretrained', 'hub.pretrained', (['"""https://data.megengine.org.cn/models/weights/simplebaseline101_256x192_0_255_72_2.pkl"""'], {}), "(\n 'https://data.megengine.org.cn/models/weights/simplebaseline101_256x192_0_255_72_2.pkl'\n )\n", (3678, 3777), True, 'import megengine.hub as hub\n'), ((3900, 4013), 'megengine.hub.pretrained', 'hub.pretrained', (['"""https://data.megengine.org.cn/models/weights/simplebaseline152_256x192_0_255_72_4.pkl"""'], {}), "(\n 'https://data.megengine.org.cn/models/weights/simplebaseline152_256x192_0_255_72_4.pkl'\n )\n", (3914, 4013), True, 'import megengine.hub as hub\n'), ((1176, 1196), 'megengine.module.Sequential', 'M.Sequential', (['*_body'], {}), '(*_body)\n', (1188, 1196), True, 'import megengine.module as M\n'), ((1851, 1911), 'megengine.module.Conv2d', 'M.Conv2d', (['cfg.deconv_channels[-1]', 'cfg.keypoint_num', '(3)', '(1)', '(1)'], {}), '(cfg.deconv_channels[-1], cfg.keypoint_num, 3, 1, 1)\n', (1859, 1911), True, 'import megengine.module as M\n'), ((2336, 2377), 'megengine.functional.square_loss', 'F.square_loss', (['(out * valid)', '(label * valid)'], {}), '(out * valid, label * valid)\n', (2349, 2377), True, 'import megengine.functional as F\n'), ((2885, 2934), 'megengine.module.init.normal_', 'M.init.normal_', (['self.last_layer.weight'], {'std': '(0.001)'}), '(self.last_layer.weight, std=0.001)\n', (2899, 2934), True, 'import megengine.module as M\n'), ((2943, 2978), 'megengine.module.init.zeros_', 'M.init.zeros_', (['self.last_layer.bias'], {}), '(self.last_layer.bias)\n', (2956, 2978), True, 'import megengine.module as M\n'), ((1994, 2021), 'megengine.tensor', 'mge.tensor', ([], {'dtype': '"""float32"""'}), "(dtype='float32')\n", (2004, 2021), True, 'import megengine as mge\n'), ((2046, 2073), 'megengine.tensor', 'mge.tensor', ([], {'dtype': '"""float32"""'}), "(dtype='float32')\n", (2056, 2073), True, 'import megengine as mge\n'), ((2101, 2128), 'megengine.tensor', 'mge.tensor', ([], {'dtype': '"""float32"""'}), "(dtype='float32')\n", (2111, 2128), True, 'import megengine as mge\n'), ((995, 1057), 'megengine.module.ConvTranspose2d', 'M.ConvTranspose2d', (['nf1', 'nf2s[i]', 'kernel', '(2)', 'padding'], {'bias': 'bias'}), '(nf1, nf2s[i], kernel, 2, padding, bias=bias)\n', (1012, 1057), True, 'import megengine.module as M\n'), ((1106, 1114), 'megengine.module.ReLU', 'M.ReLU', ([], {}), '()\n', (1112, 1114), True, 'import megengine.module as M\n'), ((2630, 2665), 'megengine.module.init.normal_', 'M.init.normal_', (['m.weight'], {'std': '(0.001)'}), '(m.weight, std=0.001)\n', (2644, 2665), True, 'import megengine.module as M\n'), ((2815, 2837), 'megengine.module.init.ones_', 'M.init.ones_', (['m.weight'], {}), '(m.weight)\n', (2827, 2837), True, 'import megengine.module as M\n'), ((2854, 2875), 'megengine.module.init.zeros_', 'M.init.zeros_', (['m.bias'], {}), '(m.bias)\n', (2867, 2875), True, 'import megengine.module as M\n'), ((2732, 2753), 'megengine.module.init.zeros_', 'M.init.zeros_', (['m.bias'], {}), '(m.bias)\n', (2745, 2753), True, 'import megengine.module as M\n')]
|
from datetime import datetime
from typing import Optional
from sqlmodel import Field, Enum, Column
from sqlmodel.main import SQLModel
from graphene_example.app.core.structures import TaskStatusEnum
class User(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
full_name: str
email: Optional[str]
hashed_password: str
is_active: bool = True
is_superuser: bool = True
class Task(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
created_at: datetime = Field(default=datetime.utcnow)
title: str
status: TaskStatusEnum = Field(sa_column=Column(Enum(TaskStatusEnum)), default=TaskStatusEnum.draft)
user_id: Optional[int] = Field(default=None, foreign_key="user.id")
|
[
"sqlmodel.Enum",
"sqlmodel.Field"
] |
[((260, 297), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (265, 297), False, 'from sqlmodel import Field, Enum, Column\n'), ((484, 521), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (489, 521), False, 'from sqlmodel import Field, Enum, Column\n'), ((549, 579), 'sqlmodel.Field', 'Field', ([], {'default': 'datetime.utcnow'}), '(default=datetime.utcnow)\n', (554, 579), False, 'from sqlmodel import Field, Enum, Column\n'), ((729, 771), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'foreign_key': '"""user.id"""'}), "(default=None, foreign_key='user.id')\n", (734, 771), False, 'from sqlmodel import Field, Enum, Column\n'), ((647, 667), 'sqlmodel.Enum', 'Enum', (['TaskStatusEnum'], {}), '(TaskStatusEnum)\n', (651, 667), False, 'from sqlmodel import Field, Enum, Column\n')]
|
from datetime import datetime
import dateutil.parser
import json
import requests
from requests.models import to_key_val_list
from sqlmodel import Field, Session, SQLModel, create_engine, select
from fastapi.logger import logger
from database import engine
from models import Listing, Facility, Image, InterestPoint, Route, RouteCreate, PlaceNearby
def get_daft_search_result():
try:
response = requests.get('http://daft:8000/search_result/')
response.raise_for_status()
# Additional code will only run if the request is successful
except requests.exceptions.HTTPError as error:
print(error)
return response.json()
def get_daft_details(url):
try:
print(url)
params = {
'url': url,
'method': 'json_details',
}
response = requests.get(
'http://daft:8000/listing_details/', params=params)
response.raise_for_status()
return response.json()
# Additional code will only run if the request is successful
except requests.exceptions.HTTPError as error:
logger.error(error)
return response.json()
def get_routes_json(from_lat, from_long, to_lat, to_long):
try:
data = {
"from_point": {"lat": from_lat, "long": from_long},
"to_point": {"lat": to_lat, "long": to_long}
}
response = requests.post(
'http://location:8000/route/', data=json.dumps(data))
response.raise_for_status()
return response.json()
# Additional code will only run if the request is successful
except requests.exceptions.HTTPError as error:
logger.error(error)
return {}
def get_routes(listing: Listing):
ret_ = []
with Session(engine) as session:
interest_points_sttm = select(InterestPoint).\
where(InterestPoint.is_active == True)
interest_points = session.exec(interest_points_sttm).all()
for interest_point in interest_points:
routes = get_routes_json(
listing.latitude, listing.longitude,
interest_point.latitude, interest_point.longitude)
print('routes')
print(routes)
for route in routes:
ret_.append(Route(
interest_point_id=interest_point.id,
waking_distance=route['waking_distance'],
total_distance=route['total_distance'],
total_time=route['total_time'],
public_transport_count=route['public_transport_count'],
))
print(ret_)
return ret_
def get_places_nearby_json(from_lat, from_long, query):
try:
data = {"lat": from_lat, "long": from_long}
response = requests.post(
'http://location:8000/interest_places_nearby/', data=json.dumps(data))
response.raise_for_status()
# Additional code will only run if the request is successful
except requests.exceptions.HTTPError as error:
print(error)
return response.json()
def get_places_nearby(listing: Listing):
ret_ = []
query = 'Grocery'
places = get_places_nearby_json(
from_lat=listing.latitude, from_long=listing.longitude,
query=query)
for place in places:
ret_.append(PlaceNearby(
name=place['name'],
latitude=place['lat'],
longitude=place['long'],
address=place['address'],
distance=place['distance'],
website=place['website'],
website_domain=place['website_domain'],
chain_name=place['chain_name'],
query=query,
))
return ret_
def save_new_listing(search_result, listing_d):
with Session(engine) as session:
listing = Listing()
# Search Result
listing.source = 'daft'
listing.is_active = True
listing.url = search_result['url']
listing.address = search_result['title']
listing.price = search_result['monthly_price']
listing.latitude = search_result['latitude']
listing.longitude = search_result['longitude']
listing.publish_date = dateutil.parser.isoparse(
search_result['publish_date'])
# Details:
listing.source_id = listing_d['id']
listing.source_code = listing_d['daftShortcode']
listing.title = listing_d['title']
listing.bedrooms = listing_d['numBedrooms']
listing.bathrooms = listing_d['numBathrooms']
listing.description = listing_d['description']
listing.last_updated = listing_d['lastUpdateDate']
listing.images_count = listing_d['totalImages']
listing.views = listing_d['listingViews']
facilities_arr = []
for facility in listing_d['facilities']:
facility_sttm = select(Facility).\
where(Facility.name == facility.title()).\
where(Facility.category == 'facilities')
facility_obj = session.exec(facility_sttm).first()
if(not facility_obj):
facility_obj = Facility(
name=facility.title(),
category='facilities'
)
facilities_arr.append(facility_obj)
for facility in listing_d['propertyOverview']:
facility_sttm = select(Facility).\
where(Facility.name == facility.title()).\
where(Facility.category == 'overview')
facility_obj = session.exec(facility_sttm).first()
if(not facility_obj):
facility_obj = Facility(
name=facility.title(),
category='overview'
)
facilities_arr.append(facility_obj)
listing.facilities = facilities_arr
listing.images = [Image(url=x['url'], url_600=x['url_600']) for x in listing_d['images']]
listing.routes = get_routes(listing)
listing.places_nearby = get_places_nearby(listing)
# Saving it
session.add(listing)
session.commit()
def give_it_a_try(how_many = 25):
ret_ = {}
daft_search_results = get_daft_search_result()
daft_result_list = daft_search_results['result_list']
c = 0
details = []
with Session(engine) as session:
for daft_result in daft_result_list:
statement = select(Listing).\
where(Listing.source == 'daft').\
where(Listing.url == daft_result['url']).\
where(Listing.price == daft_result['monthly_price'])
results = session.exec(statement).first()
if results:
continue
pass # Check telegram sent message
else:
print(daft_result['url'])
details = get_daft_details(daft_result['url'])
save_new_listing(daft_result, details)
c += 1
if c < how_many:
continue
break
return details
|
[
"sqlmodel.Session",
"sqlmodel.select"
] |
[((413, 460), 'requests.get', 'requests.get', (['"""http://daft:8000/search_result/"""'], {}), "('http://daft:8000/search_result/')\n", (425, 460), False, 'import requests\n'), ((833, 897), 'requests.get', 'requests.get', (['"""http://daft:8000/listing_details/"""'], {'params': 'params'}), "('http://daft:8000/listing_details/', params=params)\n", (845, 897), False, 'import requests\n'), ((1773, 1788), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (1780, 1788), False, 'from sqlmodel import Field, Session, SQLModel, create_engine, select\n'), ((3788, 3803), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (3795, 3803), False, 'from sqlmodel import Field, Session, SQLModel, create_engine, select\n'), ((3834, 3843), 'models.Listing', 'Listing', ([], {}), '()\n', (3841, 3843), False, 'from models import Listing, Facility, Image, InterestPoint, Route, RouteCreate, PlaceNearby\n'), ((6332, 6347), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (6339, 6347), False, 'from sqlmodel import Field, Session, SQLModel, create_engine, select\n'), ((1106, 1125), 'fastapi.logger.logger.error', 'logger.error', (['error'], {}), '(error)\n', (1118, 1125), False, 'from fastapi.logger import logger\n'), ((1676, 1695), 'fastapi.logger.logger.error', 'logger.error', (['error'], {}), '(error)\n', (1688, 1695), False, 'from fastapi.logger import logger\n'), ((3347, 3606), 'models.PlaceNearby', 'PlaceNearby', ([], {'name': "place['name']", 'latitude': "place['lat']", 'longitude': "place['long']", 'address': "place['address']", 'distance': "place['distance']", 'website': "place['website']", 'website_domain': "place['website_domain']", 'chain_name': "place['chain_name']", 'query': 'query'}), "(name=place['name'], latitude=place['lat'], longitude=place[\n 'long'], address=place['address'], distance=place['distance'], website=\n place['website'], website_domain=place['website_domain'], chain_name=\n place['chain_name'], query=query)\n", (3358, 3606), False, 'from models import Listing, Facility, Image, InterestPoint, Route, RouteCreate, PlaceNearby\n'), ((5885, 5926), 'models.Image', 'Image', ([], {'url': "x['url']", 'url_600': "x['url_600']"}), "(url=x['url'], url_600=x['url_600'])\n", (5890, 5926), False, 'from models import Listing, Facility, Image, InterestPoint, Route, RouteCreate, PlaceNearby\n'), ((1463, 1479), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (1473, 1479), False, 'import json\n'), ((1832, 1853), 'sqlmodel.select', 'select', (['InterestPoint'], {}), '(InterestPoint)\n', (1838, 1853), False, 'from sqlmodel import Field, Session, SQLModel, create_engine, select\n'), ((2879, 2895), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (2889, 2895), False, 'import json\n'), ((2295, 2522), 'models.Route', 'Route', ([], {'interest_point_id': 'interest_point.id', 'waking_distance': "route['waking_distance']", 'total_distance': "route['total_distance']", 'total_time': "route['total_time']", 'public_transport_count': "route['public_transport_count']"}), "(interest_point_id=interest_point.id, waking_distance=route[\n 'waking_distance'], total_distance=route['total_distance'], total_time=\n route['total_time'], public_transport_count=route['public_transport_count']\n )\n", (2300, 2522), False, 'from models import Listing, Facility, Image, InterestPoint, Route, RouteCreate, PlaceNearby\n'), ((4884, 4900), 'sqlmodel.select', 'select', (['Facility'], {}), '(Facility)\n', (4890, 4900), False, 'from sqlmodel import Field, Session, SQLModel, create_engine, select\n'), ((5393, 5409), 'sqlmodel.select', 'select', (['Facility'], {}), '(Facility)\n', (5399, 5409), False, 'from sqlmodel import Field, Session, SQLModel, create_engine, select\n'), ((6430, 6445), 'sqlmodel.select', 'select', (['Listing'], {}), '(Listing)\n', (6436, 6445), False, 'from sqlmodel import Field, Session, SQLModel, create_engine, select\n')]
|
# Copyright (c) Megvii, Inc. and its affiliates.
import megengine.functional as F
import megengine.module as M
from .head import get_head
from .loss import get_loss
from .resnet import get_backbone
from .stn import STN
class FaceRecognitionModel(M.Module):
"""combination of all building blocks, including backbone, head and loss
"""
def __init__(self, configs):
"""initialize with configuration
Args:
configs (dict): configuration, required fields include:
backbone: custom name of backbone
                output_head: custom name of output head
feature_dim: dimension number of output embedding
                loss_type: custom name of loss function
num_class: classification number of dataset
loss_scale: used in loss function
loss_m1: used in loss function
loss_m2: used in loss function
loss_m3: used in loss function
use_stn: whether or not use stn
"""
super().__init__()
backbone_constructor = get_backbone(configs["backbone"])
self.backbone = backbone_constructor()
head_constructor = get_head(configs["output_head"])
self.head = head_constructor(feature_dim=configs["feature_dim"], channel=self.backbone.output_channel)
metric_constructor = get_loss(configs["loss_type"])
self.metric = metric_constructor(
num_class=configs["num_class"],
scale=configs["loss_scale"],
m1=configs["loss_m1"],
m2=configs["loss_m2"],
m3=configs["loss_m3"],
feature_dim=configs["feature_dim"],
)
if configs["use_stn"]:
self.stn = STN()
self.use_stn = True
else:
self.use_stn = False
def forward_embedding_only(self, images):
"""run forward pass without calculating loss, expected useful during evaluation.
Args:
images (Tensor): preprocessed images (shape: n * 3 * 112 * 112)
Returns:
embedding (Tensor): embedding
"""
if self.use_stn:
images = self.stn(images)
feature_map = self.backbone(images)
embedding = self.head(feature_map)
embedding = F.normalize(embedding, axis=1)
return embedding
def forward(self, images, labels):
"""run forward pass and calculate loss, expected useful during training.
Args:
images (Tensor): preprocessed images (shape: n * 3 * 112 * 112)
labels (Tensor): ground truth class id (shape: n)
Returns:
loss (Tensor): loss
accuracy (Tensor): top1 accuracy (range: 0~1)
embedding (Tensor): embedding
"""
embedding = self.forward_embedding_only(images)
loss, accuracy = self.metric(embedding, labels)
return loss, accuracy, embedding
|
[
"megengine.functional.normalize"
] |
[((2314, 2344), 'megengine.functional.normalize', 'F.normalize', (['embedding'], {'axis': '(1)'}), '(embedding, axis=1)\n', (2325, 2344), True, 'import megengine.functional as F\n')]
|
"""Initial 5
Revision ID: 0101e666f4e9
Revises: 6c98e82ae2b5
Create Date: 2021-11-14 01:40:19.792380
"""
from alembic import op
import sqlalchemy as sa
import sqlmodel
# revision identifiers, used by Alembic.
revision = '0101e666f4e9'
down_revision = '6c98e82ae2b5'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('interest_points',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('latitude', sa.Float(), nullable=True),
sa.Column('longitude', sa.Float(), nullable=True),
sa.Column('name', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('address', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('distance', sa.Integer(), nullable=False),
sa.Column('website', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('website_domain', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('chain_name', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('listing_id', sa.Integer(), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['listing_id'], ['listings.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_interest_points_address'), 'interest_points', ['address'], unique=False)
op.create_index(op.f('ix_interest_points_chain_name'), 'interest_points', ['chain_name'], unique=False)
op.create_index(op.f('ix_interest_points_created_at'), 'interest_points', ['created_at'], unique=False)
op.create_index(op.f('ix_interest_points_distance'), 'interest_points', ['distance'], unique=False)
op.create_index(op.f('ix_interest_points_id'), 'interest_points', ['id'], unique=False)
op.create_index(op.f('ix_interest_points_latitude'), 'interest_points', ['latitude'], unique=False)
op.create_index(op.f('ix_interest_points_listing_id'), 'interest_points', ['listing_id'], unique=False)
op.create_index(op.f('ix_interest_points_longitude'), 'interest_points', ['longitude'], unique=False)
op.create_index(op.f('ix_interest_points_name'), 'interest_points', ['name'], unique=False)
op.create_index(op.f('ix_interest_points_updated_at'), 'interest_points', ['updated_at'], unique=False)
op.create_index(op.f('ix_interest_points_website'), 'interest_points', ['website'], unique=False)
op.create_index(op.f('ix_interest_points_website_domain'), 'interest_points', ['website_domain'], unique=False)
op.create_table('places_nearby',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('latitude', sa.Float(), nullable=True),
sa.Column('longitude', sa.Float(), nullable=True),
sa.Column('name', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('address', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('distance', sa.Integer(), nullable=False),
sa.Column('website', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('website_domain', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('chain_name', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('listing_id', sa.Integer(), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['listing_id'], ['listings.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_places_nearby_address'), 'places_nearby', ['address'], unique=False)
op.create_index(op.f('ix_places_nearby_chain_name'), 'places_nearby', ['chain_name'], unique=False)
op.create_index(op.f('ix_places_nearby_created_at'), 'places_nearby', ['created_at'], unique=False)
op.create_index(op.f('ix_places_nearby_distance'), 'places_nearby', ['distance'], unique=False)
op.create_index(op.f('ix_places_nearby_id'), 'places_nearby', ['id'], unique=False)
op.create_index(op.f('ix_places_nearby_latitude'), 'places_nearby', ['latitude'], unique=False)
op.create_index(op.f('ix_places_nearby_listing_id'), 'places_nearby', ['listing_id'], unique=False)
op.create_index(op.f('ix_places_nearby_longitude'), 'places_nearby', ['longitude'], unique=False)
op.create_index(op.f('ix_places_nearby_name'), 'places_nearby', ['name'], unique=False)
op.create_index(op.f('ix_places_nearby_updated_at'), 'places_nearby', ['updated_at'], unique=False)
op.create_index(op.f('ix_places_nearby_website'), 'places_nearby', ['website'], unique=False)
op.create_index(op.f('ix_places_nearby_website_domain'), 'places_nearby', ['website_domain'], unique=False)
op.create_table('routes',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('latitude', sa.Float(), nullable=True),
sa.Column('longitude', sa.Float(), nullable=True),
sa.Column('name', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('address', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('distance', sa.Integer(), nullable=False),
sa.Column('website', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('website_domain', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('chain_name', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('listing_id', sa.Integer(), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['listing_id'], ['listings.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_routes_address'), 'routes', ['address'], unique=False)
op.create_index(op.f('ix_routes_chain_name'), 'routes', ['chain_name'], unique=False)
op.create_index(op.f('ix_routes_created_at'), 'routes', ['created_at'], unique=False)
op.create_index(op.f('ix_routes_distance'), 'routes', ['distance'], unique=False)
op.create_index(op.f('ix_routes_id'), 'routes', ['id'], unique=False)
op.create_index(op.f('ix_routes_latitude'), 'routes', ['latitude'], unique=False)
op.create_index(op.f('ix_routes_listing_id'), 'routes', ['listing_id'], unique=False)
op.create_index(op.f('ix_routes_longitude'), 'routes', ['longitude'], unique=False)
op.create_index(op.f('ix_routes_name'), 'routes', ['name'], unique=False)
op.create_index(op.f('ix_routes_updated_at'), 'routes', ['updated_at'], unique=False)
op.create_index(op.f('ix_routes_website'), 'routes', ['website'], unique=False)
op.create_index(op.f('ix_routes_website_domain'), 'routes', ['website_domain'], unique=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_routes_website_domain'), table_name='routes')
op.drop_index(op.f('ix_routes_website'), table_name='routes')
op.drop_index(op.f('ix_routes_updated_at'), table_name='routes')
op.drop_index(op.f('ix_routes_name'), table_name='routes')
op.drop_index(op.f('ix_routes_longitude'), table_name='routes')
op.drop_index(op.f('ix_routes_listing_id'), table_name='routes')
op.drop_index(op.f('ix_routes_latitude'), table_name='routes')
op.drop_index(op.f('ix_routes_id'), table_name='routes')
op.drop_index(op.f('ix_routes_distance'), table_name='routes')
op.drop_index(op.f('ix_routes_created_at'), table_name='routes')
op.drop_index(op.f('ix_routes_chain_name'), table_name='routes')
op.drop_index(op.f('ix_routes_address'), table_name='routes')
op.drop_table('routes')
op.drop_index(op.f('ix_places_nearby_website_domain'), table_name='places_nearby')
op.drop_index(op.f('ix_places_nearby_website'), table_name='places_nearby')
op.drop_index(op.f('ix_places_nearby_updated_at'), table_name='places_nearby')
op.drop_index(op.f('ix_places_nearby_name'), table_name='places_nearby')
op.drop_index(op.f('ix_places_nearby_longitude'), table_name='places_nearby')
op.drop_index(op.f('ix_places_nearby_listing_id'), table_name='places_nearby')
op.drop_index(op.f('ix_places_nearby_latitude'), table_name='places_nearby')
op.drop_index(op.f('ix_places_nearby_id'), table_name='places_nearby')
op.drop_index(op.f('ix_places_nearby_distance'), table_name='places_nearby')
op.drop_index(op.f('ix_places_nearby_created_at'), table_name='places_nearby')
op.drop_index(op.f('ix_places_nearby_chain_name'), table_name='places_nearby')
op.drop_index(op.f('ix_places_nearby_address'), table_name='places_nearby')
op.drop_table('places_nearby')
op.drop_index(op.f('ix_interest_points_website_domain'), table_name='interest_points')
op.drop_index(op.f('ix_interest_points_website'), table_name='interest_points')
op.drop_index(op.f('ix_interest_points_updated_at'), table_name='interest_points')
op.drop_index(op.f('ix_interest_points_name'), table_name='interest_points')
op.drop_index(op.f('ix_interest_points_longitude'), table_name='interest_points')
op.drop_index(op.f('ix_interest_points_listing_id'), table_name='interest_points')
op.drop_index(op.f('ix_interest_points_latitude'), table_name='interest_points')
op.drop_index(op.f('ix_interest_points_id'), table_name='interest_points')
op.drop_index(op.f('ix_interest_points_distance'), table_name='interest_points')
op.drop_index(op.f('ix_interest_points_created_at'), table_name='interest_points')
op.drop_index(op.f('ix_interest_points_chain_name'), table_name='interest_points')
op.drop_index(op.f('ix_interest_points_address'), table_name='interest_points')
op.drop_table('interest_points')
# ### end Alembic commands ###
|
[
"sqlmodel.sql.sqltypes.AutoString"
] |
[((7600, 7623), 'alembic.op.drop_table', 'op.drop_table', (['"""routes"""'], {}), "('routes')\n", (7613, 7623), False, 'from alembic import op\n'), ((8603, 8633), 'alembic.op.drop_table', 'op.drop_table', (['"""places_nearby"""'], {}), "('places_nearby')\n", (8616, 8633), False, 'from alembic import op\n'), ((9661, 9693), 'alembic.op.drop_table', 'op.drop_table', (['"""interest_points"""'], {}), "('interest_points')\n", (9674, 9693), False, 'from alembic import op\n'), ((1222, 1278), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['listing_id']", "['listings.id']"], {}), "(['listing_id'], ['listings.id'])\n", (1245, 1278), True, 'import sqlalchemy as sa\n'), ((1286, 1315), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""'], {}), "('id')\n", (1309, 1315), True, 'import sqlalchemy as sa\n'), ((1342, 1376), 'alembic.op.f', 'op.f', (['"""ix_interest_points_address"""'], {}), "('ix_interest_points_address')\n", (1346, 1376), False, 'from alembic import op\n'), ((1444, 1481), 'alembic.op.f', 'op.f', (['"""ix_interest_points_chain_name"""'], {}), "('ix_interest_points_chain_name')\n", (1448, 1481), False, 'from alembic import op\n'), ((1552, 1589), 'alembic.op.f', 'op.f', (['"""ix_interest_points_created_at"""'], {}), "('ix_interest_points_created_at')\n", (1556, 1589), False, 'from alembic import op\n'), ((1660, 1695), 'alembic.op.f', 'op.f', (['"""ix_interest_points_distance"""'], {}), "('ix_interest_points_distance')\n", (1664, 1695), False, 'from alembic import op\n'), ((1764, 1793), 'alembic.op.f', 'op.f', (['"""ix_interest_points_id"""'], {}), "('ix_interest_points_id')\n", (1768, 1793), False, 'from alembic import op\n'), ((1856, 1891), 'alembic.op.f', 'op.f', (['"""ix_interest_points_latitude"""'], {}), "('ix_interest_points_latitude')\n", (1860, 1891), False, 'from alembic import op\n'), ((1960, 1997), 'alembic.op.f', 'op.f', (['"""ix_interest_points_listing_id"""'], {}), "('ix_interest_points_listing_id')\n", (1964, 1997), False, 'from alembic import op\n'), ((2068, 2104), 'alembic.op.f', 'op.f', (['"""ix_interest_points_longitude"""'], {}), "('ix_interest_points_longitude')\n", (2072, 2104), False, 'from alembic import op\n'), ((2174, 2205), 'alembic.op.f', 'op.f', (['"""ix_interest_points_name"""'], {}), "('ix_interest_points_name')\n", (2178, 2205), False, 'from alembic import op\n'), ((2270, 2307), 'alembic.op.f', 'op.f', (['"""ix_interest_points_updated_at"""'], {}), "('ix_interest_points_updated_at')\n", (2274, 2307), False, 'from alembic import op\n'), ((2378, 2412), 'alembic.op.f', 'op.f', (['"""ix_interest_points_website"""'], {}), "('ix_interest_points_website')\n", (2382, 2412), False, 'from alembic import op\n'), ((2480, 2521), 'alembic.op.f', 'op.f', (['"""ix_interest_points_website_domain"""'], {}), "('ix_interest_points_website_domain')\n", (2484, 2521), False, 'from alembic import op\n'), ((3404, 3460), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['listing_id']", "['listings.id']"], {}), "(['listing_id'], ['listings.id'])\n", (3427, 3460), True, 'import sqlalchemy as sa\n'), ((3468, 3497), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""'], {}), "('id')\n", (3491, 3497), True, 'import sqlalchemy as sa\n'), ((3524, 3556), 'alembic.op.f', 'op.f', (['"""ix_places_nearby_address"""'], {}), "('ix_places_nearby_address')\n", (3528, 3556), False, 'from alembic import op\n'), ((3622, 3657), 'alembic.op.f', 'op.f', (['"""ix_places_nearby_chain_name"""'], {}), "('ix_places_nearby_chain_name')\n", (3626, 3657), False, 'from alembic import op\n'), ((3726, 3761), 'alembic.op.f', 'op.f', (['"""ix_places_nearby_created_at"""'], {}), "('ix_places_nearby_created_at')\n", (3730, 3761), False, 'from alembic import op\n'), ((3830, 3863), 'alembic.op.f', 'op.f', (['"""ix_places_nearby_distance"""'], {}), "('ix_places_nearby_distance')\n", (3834, 3863), False, 'from alembic import op\n'), ((3930, 3957), 'alembic.op.f', 'op.f', (['"""ix_places_nearby_id"""'], {}), "('ix_places_nearby_id')\n", (3934, 3957), False, 'from alembic import op\n'), ((4018, 4051), 'alembic.op.f', 'op.f', (['"""ix_places_nearby_latitude"""'], {}), "('ix_places_nearby_latitude')\n", (4022, 4051), False, 'from alembic import op\n'), ((4118, 4153), 'alembic.op.f', 'op.f', (['"""ix_places_nearby_listing_id"""'], {}), "('ix_places_nearby_listing_id')\n", (4122, 4153), False, 'from alembic import op\n'), ((4222, 4256), 'alembic.op.f', 'op.f', (['"""ix_places_nearby_longitude"""'], {}), "('ix_places_nearby_longitude')\n", (4226, 4256), False, 'from alembic import op\n'), ((4324, 4353), 'alembic.op.f', 'op.f', (['"""ix_places_nearby_name"""'], {}), "('ix_places_nearby_name')\n", (4328, 4353), False, 'from alembic import op\n'), ((4416, 4451), 'alembic.op.f', 'op.f', (['"""ix_places_nearby_updated_at"""'], {}), "('ix_places_nearby_updated_at')\n", (4420, 4451), False, 'from alembic import op\n'), ((4520, 4552), 'alembic.op.f', 'op.f', (['"""ix_places_nearby_website"""'], {}), "('ix_places_nearby_website')\n", (4524, 4552), False, 'from alembic import op\n'), ((4618, 4657), 'alembic.op.f', 'op.f', (['"""ix_places_nearby_website_domain"""'], {}), "('ix_places_nearby_website_domain')\n", (4622, 4657), False, 'from alembic import op\n'), ((5531, 5587), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['listing_id']", "['listings.id']"], {}), "(['listing_id'], ['listings.id'])\n", (5554, 5587), True, 'import sqlalchemy as sa\n'), ((5595, 5624), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""'], {}), "('id')\n", (5618, 5624), True, 'import sqlalchemy as sa\n'), ((5651, 5676), 'alembic.op.f', 'op.f', (['"""ix_routes_address"""'], {}), "('ix_routes_address')\n", (5655, 5676), False, 'from alembic import op\n'), ((5735, 5763), 'alembic.op.f', 'op.f', (['"""ix_routes_chain_name"""'], {}), "('ix_routes_chain_name')\n", (5739, 5763), False, 'from alembic import op\n'), ((5825, 5853), 'alembic.op.f', 'op.f', (['"""ix_routes_created_at"""'], {}), "('ix_routes_created_at')\n", (5829, 5853), False, 'from alembic import op\n'), ((5915, 5941), 'alembic.op.f', 'op.f', (['"""ix_routes_distance"""'], {}), "('ix_routes_distance')\n", (5919, 5941), False, 'from alembic import op\n'), ((6001, 6021), 'alembic.op.f', 'op.f', (['"""ix_routes_id"""'], {}), "('ix_routes_id')\n", (6005, 6021), False, 'from alembic import op\n'), ((6075, 6101), 'alembic.op.f', 'op.f', (['"""ix_routes_latitude"""'], {}), "('ix_routes_latitude')\n", (6079, 6101), False, 'from alembic import op\n'), ((6161, 6189), 'alembic.op.f', 'op.f', (['"""ix_routes_listing_id"""'], {}), "('ix_routes_listing_id')\n", (6165, 6189), False, 'from alembic import op\n'), ((6251, 6278), 'alembic.op.f', 'op.f', (['"""ix_routes_longitude"""'], {}), "('ix_routes_longitude')\n", (6255, 6278), False, 'from alembic import op\n'), ((6339, 6361), 'alembic.op.f', 'op.f', (['"""ix_routes_name"""'], {}), "('ix_routes_name')\n", (6343, 6361), False, 'from alembic import op\n'), ((6417, 6445), 'alembic.op.f', 'op.f', (['"""ix_routes_updated_at"""'], {}), "('ix_routes_updated_at')\n", (6421, 6445), False, 'from alembic import op\n'), ((6507, 6532), 'alembic.op.f', 'op.f', (['"""ix_routes_website"""'], {}), "('ix_routes_website')\n", (6511, 6532), False, 'from alembic import op\n'), ((6591, 6623), 'alembic.op.f', 'op.f', (['"""ix_routes_website_domain"""'], {}), "('ix_routes_website_domain')\n", (6595, 6623), False, 'from alembic import op\n'), ((6807, 6839), 'alembic.op.f', 'op.f', (['"""ix_routes_website_domain"""'], {}), "('ix_routes_website_domain')\n", (6811, 6839), False, 'from alembic import op\n'), ((6880, 6905), 'alembic.op.f', 'op.f', (['"""ix_routes_website"""'], {}), "('ix_routes_website')\n", (6884, 6905), False, 'from alembic import op\n'), ((6946, 6974), 'alembic.op.f', 'op.f', (['"""ix_routes_updated_at"""'], {}), "('ix_routes_updated_at')\n", (6950, 6974), False, 'from alembic import op\n'), ((7015, 7037), 'alembic.op.f', 'op.f', (['"""ix_routes_name"""'], {}), "('ix_routes_name')\n", (7019, 7037), False, 'from alembic import op\n'), ((7078, 7105), 'alembic.op.f', 'op.f', (['"""ix_routes_longitude"""'], {}), "('ix_routes_longitude')\n", (7082, 7105), False, 'from alembic import op\n'), ((7146, 7174), 'alembic.op.f', 'op.f', (['"""ix_routes_listing_id"""'], {}), "('ix_routes_listing_id')\n", (7150, 7174), False, 'from alembic import op\n'), ((7215, 7241), 'alembic.op.f', 'op.f', (['"""ix_routes_latitude"""'], {}), "('ix_routes_latitude')\n", (7219, 7241), False, 'from alembic import op\n'), ((7282, 7302), 'alembic.op.f', 'op.f', (['"""ix_routes_id"""'], {}), "('ix_routes_id')\n", (7286, 7302), False, 'from alembic import op\n'), ((7343, 7369), 'alembic.op.f', 'op.f', (['"""ix_routes_distance"""'], {}), "('ix_routes_distance')\n", (7347, 7369), False, 'from alembic import op\n'), ((7410, 7438), 'alembic.op.f', 'op.f', (['"""ix_routes_created_at"""'], {}), "('ix_routes_created_at')\n", (7414, 7438), False, 'from alembic import op\n'), ((7479, 7507), 'alembic.op.f', 'op.f', (['"""ix_routes_chain_name"""'], {}), "('ix_routes_chain_name')\n", (7483, 7507), False, 'from alembic import op\n'), ((7548, 7573), 'alembic.op.f', 'op.f', (['"""ix_routes_address"""'], {}), "('ix_routes_address')\n", (7552, 7573), False, 'from alembic import op\n'), ((7642, 7681), 'alembic.op.f', 'op.f', (['"""ix_places_nearby_website_domain"""'], {}), "('ix_places_nearby_website_domain')\n", (7646, 7681), False, 'from alembic import op\n'), ((7729, 7761), 'alembic.op.f', 'op.f', (['"""ix_places_nearby_website"""'], {}), "('ix_places_nearby_website')\n", (7733, 7761), False, 'from alembic import op\n'), ((7809, 7844), 'alembic.op.f', 'op.f', (['"""ix_places_nearby_updated_at"""'], {}), "('ix_places_nearby_updated_at')\n", (7813, 7844), False, 'from alembic import op\n'), ((7892, 7921), 'alembic.op.f', 'op.f', (['"""ix_places_nearby_name"""'], {}), "('ix_places_nearby_name')\n", (7896, 7921), False, 'from alembic import op\n'), ((7969, 8003), 'alembic.op.f', 'op.f', (['"""ix_places_nearby_longitude"""'], {}), "('ix_places_nearby_longitude')\n", (7973, 8003), False, 'from alembic import op\n'), ((8051, 8086), 'alembic.op.f', 'op.f', (['"""ix_places_nearby_listing_id"""'], {}), "('ix_places_nearby_listing_id')\n", (8055, 8086), False, 'from alembic import op\n'), ((8134, 8167), 'alembic.op.f', 'op.f', (['"""ix_places_nearby_latitude"""'], {}), "('ix_places_nearby_latitude')\n", (8138, 8167), False, 'from alembic import op\n'), ((8215, 8242), 'alembic.op.f', 'op.f', (['"""ix_places_nearby_id"""'], {}), "('ix_places_nearby_id')\n", (8219, 8242), False, 'from alembic import op\n'), ((8290, 8323), 'alembic.op.f', 'op.f', (['"""ix_places_nearby_distance"""'], {}), "('ix_places_nearby_distance')\n", (8294, 8323), False, 'from alembic import op\n'), ((8371, 8406), 'alembic.op.f', 'op.f', (['"""ix_places_nearby_created_at"""'], {}), "('ix_places_nearby_created_at')\n", (8375, 8406), False, 'from alembic import op\n'), ((8454, 8489), 'alembic.op.f', 'op.f', (['"""ix_places_nearby_chain_name"""'], {}), "('ix_places_nearby_chain_name')\n", (8458, 8489), False, 'from alembic import op\n'), ((8537, 8569), 'alembic.op.f', 'op.f', (['"""ix_places_nearby_address"""'], {}), "('ix_places_nearby_address')\n", (8541, 8569), False, 'from alembic import op\n'), ((8652, 8693), 'alembic.op.f', 'op.f', (['"""ix_interest_points_website_domain"""'], {}), "('ix_interest_points_website_domain')\n", (8656, 8693), False, 'from alembic import op\n'), ((8743, 8777), 'alembic.op.f', 'op.f', (['"""ix_interest_points_website"""'], {}), "('ix_interest_points_website')\n", (8747, 8777), False, 'from alembic import op\n'), ((8827, 8864), 'alembic.op.f', 'op.f', (['"""ix_interest_points_updated_at"""'], {}), "('ix_interest_points_updated_at')\n", (8831, 8864), False, 'from alembic import op\n'), ((8914, 8945), 'alembic.op.f', 'op.f', (['"""ix_interest_points_name"""'], {}), "('ix_interest_points_name')\n", (8918, 8945), False, 'from alembic import op\n'), ((8995, 9031), 'alembic.op.f', 'op.f', (['"""ix_interest_points_longitude"""'], {}), "('ix_interest_points_longitude')\n", (8999, 9031), False, 'from alembic import op\n'), ((9081, 9118), 'alembic.op.f', 'op.f', (['"""ix_interest_points_listing_id"""'], {}), "('ix_interest_points_listing_id')\n", (9085, 9118), False, 'from alembic import op\n'), ((9168, 9203), 'alembic.op.f', 'op.f', (['"""ix_interest_points_latitude"""'], {}), "('ix_interest_points_latitude')\n", (9172, 9203), False, 'from alembic import op\n'), ((9253, 9282), 'alembic.op.f', 'op.f', (['"""ix_interest_points_id"""'], {}), "('ix_interest_points_id')\n", (9257, 9282), False, 'from alembic import op\n'), ((9332, 9367), 'alembic.op.f', 'op.f', (['"""ix_interest_points_distance"""'], {}), "('ix_interest_points_distance')\n", (9336, 9367), False, 'from alembic import op\n'), ((9417, 9454), 'alembic.op.f', 'op.f', (['"""ix_interest_points_created_at"""'], {}), "('ix_interest_points_created_at')\n", (9421, 9454), False, 'from alembic import op\n'), ((9504, 9541), 'alembic.op.f', 'op.f', (['"""ix_interest_points_chain_name"""'], {}), "('ix_interest_points_chain_name')\n", (9508, 9541), False, 'from alembic import op\n'), ((9591, 9625), 'alembic.op.f', 'op.f', (['"""ix_interest_points_address"""'], {}), "('ix_interest_points_address')\n", (9595, 9625), False, 'from alembic import op\n'), ((451, 463), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (461, 463), True, 'import sqlalchemy as sa\n'), ((508, 518), 'sqlalchemy.Float', 'sa.Float', ([], {}), '()\n', (516, 518), True, 'import sqlalchemy as sa\n'), ((563, 573), 'sqlalchemy.Float', 'sa.Float', ([], {}), '()\n', (571, 573), True, 'import sqlalchemy as sa\n'), ((613, 647), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (645, 647), False, 'import sqlmodel\n'), ((691, 725), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (723, 725), False, 'import sqlmodel\n'), ((770, 782), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (780, 782), True, 'import sqlalchemy as sa\n'), ((826, 860), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (858, 860), False, 'import sqlmodel\n'), ((910, 944), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (942, 944), False, 'import sqlmodel\n'), ((990, 1024), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (1022, 1024), False, 'import sqlmodel\n'), ((1070, 1082), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (1080, 1082), True, 'import sqlalchemy as sa\n'), ((1128, 1141), 'sqlalchemy.DateTime', 'sa.DateTime', ([], {}), '()\n', (1139, 1141), True, 'import sqlalchemy as sa\n'), ((1187, 1200), 'sqlalchemy.DateTime', 'sa.DateTime', ([], {}), '()\n', (1198, 1200), True, 'import sqlalchemy as sa\n'), ((2633, 2645), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (2643, 2645), True, 'import sqlalchemy as sa\n'), ((2690, 2700), 'sqlalchemy.Float', 'sa.Float', ([], {}), '()\n', (2698, 2700), True, 'import sqlalchemy as sa\n'), ((2745, 2755), 'sqlalchemy.Float', 'sa.Float', ([], {}), '()\n', (2753, 2755), True, 'import sqlalchemy as sa\n'), ((2795, 2829), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (2827, 2829), False, 'import sqlmodel\n'), ((2873, 2907), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (2905, 2907), False, 'import sqlmodel\n'), ((2952, 2964), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (2962, 2964), True, 'import sqlalchemy as sa\n'), ((3008, 3042), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (3040, 3042), False, 'import sqlmodel\n'), ((3092, 3126), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (3124, 3126), False, 'import sqlmodel\n'), ((3172, 3206), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (3204, 3206), False, 'import sqlmodel\n'), ((3252, 3264), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (3262, 3264), True, 'import sqlalchemy as sa\n'), ((3310, 3323), 'sqlalchemy.DateTime', 'sa.DateTime', ([], {}), '()\n', (3321, 3323), True, 'import sqlalchemy as sa\n'), ((3369, 3382), 'sqlalchemy.DateTime', 'sa.DateTime', ([], {}), '()\n', (3380, 3382), True, 'import sqlalchemy as sa\n'), ((4760, 4772), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (4770, 4772), True, 'import sqlalchemy as sa\n'), ((4817, 4827), 'sqlalchemy.Float', 'sa.Float', ([], {}), '()\n', (4825, 4827), True, 'import sqlalchemy as sa\n'), ((4872, 4882), 'sqlalchemy.Float', 'sa.Float', ([], {}), '()\n', (4880, 4882), True, 'import sqlalchemy as sa\n'), ((4922, 4956), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (4954, 4956), False, 'import sqlmodel\n'), ((5000, 5034), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (5032, 5034), False, 'import sqlmodel\n'), ((5079, 5091), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (5089, 5091), True, 'import sqlalchemy as sa\n'), ((5135, 5169), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (5167, 5169), False, 'import sqlmodel\n'), ((5219, 5253), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (5251, 5253), False, 'import sqlmodel\n'), ((5299, 5333), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (5331, 5333), False, 'import sqlmodel\n'), ((5379, 5391), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (5389, 5391), True, 'import sqlalchemy as sa\n'), ((5437, 5450), 'sqlalchemy.DateTime', 'sa.DateTime', ([], {}), '()\n', (5448, 5450), True, 'import sqlalchemy as sa\n'), ((5496, 5509), 'sqlalchemy.DateTime', 'sa.DateTime', ([], {}), '()\n', (5507, 5509), True, 'import sqlalchemy as sa\n')]
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
from helpers import opr_test
import megengine._internal as mgb
import megengine.functional as F
from megengine import Buffer, Parameter, is_cuda_available, jit, tensor
from megengine.test import assertTensorClose
def test_flatten():
data0_shape = (2, 3, 4, 5)
data1_shape = (4, 5, 6, 7)
data0 = np.random.random(data0_shape).astype(np.float32)
data1 = np.random.random(data1_shape).astype(np.float32)
def compare_fn(x, y):
assert x.numpy().shape == y
output0 = (2 * 3 * 4 * 5,)
output1 = (4 * 5 * 6 * 7,)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn)
output0 = (2, 3 * 4 * 5)
output1 = (4, 5 * 6 * 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1)
output0 = (2, 3, 4 * 5)
output1 = (4, 5, 6 * 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=2)
output0 = (2, 3 * 4, 5)
output1 = (4, 5 * 6, 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1, end_axis=2)
def test_where():
maskv0 = np.array([[1, 0], [0, 1]], dtype=np.int32)
xv0 = np.array([[1, np.inf], [np.nan, 4]], dtype=np.float32)
yv0 = np.array([[5, 6], [7, 8]], dtype=np.float32)
maskv1 = np.array([[1, 0, 1], [1, 0, 0], [1, 1, 0]], dtype=np.int32)
xv1 = np.array([[1, np.inf, 2], [0, np.nan, 4], [1, 5, 7]], dtype=np.float32)
yv1 = np.array([[5, 6, 9], [2, 7, 8], [2, 1, 9]], dtype=np.float32)
cases = [
{"input": [maskv0, xv0, yv0]},
{"input": [maskv1, xv1, yv1]},
]
opr_test(cases, F.where, ref_fn=np.where)
maskv2 = np.array([1, 1, 1], dtype=np.int32)
xv2 = np.array([1, 3, 2], dtype=np.float32)
yv2 = np.array([5, 6, 9], dtype=np.float32)
maskv3 = np.array([0, 0, 0], dtype=np.int32)
xv3 = np.array([1, 3, 2], dtype=np.float32)
yv3 = np.array([5, 6, 9], dtype=np.float32)
cases = [
{"input": [maskv2, xv2, yv2]},
{"input": [maskv3, xv3, yv3]},
]
opr_test(cases, F.where, ref_fn=np.where)
def test_eye():
dtype = np.float32
cases = [{"input": [10, 20]}, {"input": [20, 30]}]
opr_test(cases, F.eye, ref_fn=lambda n, m: np.eye(n, m).astype(dtype), dtype=dtype)
def test_concat():
def get_data_shape(length: int):
return (length, 2, 3)
data1 = np.random.random(get_data_shape(5)).astype("float32")
data2 = np.random.random(get_data_shape(6)).astype("float32")
data3 = np.random.random(get_data_shape(7)).astype("float32")
def run(data1, data2):
return F.concat([data1, data2])
cases = [{"input": [data1, data2]}, {"input": [data1, data3]}]
opr_test(cases, run, ref_fn=lambda x, y: np.concatenate([x, y]))
def test_matrix_mul():
shape1 = (2, 3)
shape2 = (3, 4)
shape3 = (4, 5)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
data3 = np.random.random(shape3).astype("float32")
cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
opr_test(cases, F.matrix_mul, ref_fn=np.matmul)
def test_batched_matrix_mul():
batch_size = 10
shape1 = (batch_size, 2, 3)
shape2 = (batch_size, 3, 4)
shape3 = (batch_size, 4, 5)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
data3 = np.random.random(shape3).astype("float32")
cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
for i in range(0, batch_size):
def compare_fn(x, y):
            assert np.allclose(x.numpy()[i, ...], y)
opr_test(
cases,
F.batched_matrix_mul,
compare_fn=compare_fn,
ref_fn=lambda x, y: np.matmul(x[i, ...], y[i, ...]),
)
def test_sort():
data1_shape = (10, 3)
data2_shape = (12, 2)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
output0 = [np.sort(data1), np.argsort(data1).astype(np.int32)]
output1 = [np.sort(data2), np.argsort(data2).astype(np.int32)]
cases = [
{"input": data1, "output": output0},
{"input": data2, "output": output1},
]
opr_test(cases, F.sort)
def test_round():
data1_shape = (15,)
data2_shape = (25,)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
cases = [{"input": data1}, {"input": data2}]
opr_test(cases, F.round, ref_fn=np.round)
def test_broadcast_to():
input1_shape = (20, 30)
output1_shape = (30, 20, 30)
data1 = np.random.random(input1_shape).astype(np.float32)
input2_shape = (10, 20)
output2_shape = (20, 10, 20)
data2 = np.random.random(input2_shape).astype(np.float32)
def compare_fn(x, y):
assert x.numpy().shape == y
cases = [
{"input": [data1, output1_shape], "output": output1_shape},
{"input": [data2, output2_shape], "output": output2_shape},
]
opr_test(cases, F.broadcast_to, compare_fn=compare_fn)
def test_linspace():
cases = [
{"input": [1, 9, 9]},
{"input": [3, 10, 8]},
]
opr_test(
cases,
F.linspace,
ref_fn=lambda start, end, step: np.linspace(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9, 1, 9]},
{"input": [10, 3, 8]},
]
opr_test(
cases,
F.linspace,
ref_fn=lambda start, end, step: np.linspace(start, end, step, dtype=np.float32),
)
def test_arange():
cases = [
{"input": [1, 9, 1]},
{"input": [2, 10, 2]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9, 1, -1]},
{"input": [10, 2, -2]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9.3, 1.2, -0.5]},
{"input": [10.3, 2.1, -1.7]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
def test_add_update():
shape = (2, 3)
v = np.random.random(shape).astype(np.float32)
b = Buffer(v)
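    # add_update accumulates into the Buffer in place, so the two successive
    # calls below are expected to yield v + 1 and then v + 2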
u = F.add_update(b, 1)
assertTensorClose(u.numpy(), v + 1)
u = F.add_update(b, 1)
assertTensorClose(u.numpy(), v + 2)
x = np.ones((2, 2), dtype=np.float32)
y = x * 0.5
dest = tensor(x)
delta = tensor(y)
r = F.add_update(dest, delta, alpha=tensor(0.9), beta=0.1, bias=0.1)
assertTensorClose(r.numpy(), x * 0.9 + y * 0.1 + 0.1)
def test_add_update_params():
b = np.random.random((2, 3)).astype(np.float32)
y = Buffer(b)
@jit.trace
def f(x):
return F.add_update(y, x)
f(np.zeros((2, 3)).astype(np.float32))
z = Buffer(np.zeros((2, 3)).astype(np.float32))
F.add_update(y, z, beta=0.1)
res = f(np.ones((2, 3)).astype(np.float32))
assertTensorClose(res, b + 1)
def test_cross_entropy_with_softmax():
data1_shape = (1, 2)
label1_shape = (1,)
data2_shape = (1, 3)
label2_shape = (1,)
data1 = np.array([1, 0.5], dtype=np.float32).reshape(data1_shape)
label1 = np.array([1], dtype=np.int32).reshape(label1_shape)
expect1 = F.cross_entropy(F.softmax(tensor(data1)), tensor(label1)).numpy()
data2 = np.array([0.3, 0.4, 0.3], dtype=np.float32).reshape(data2_shape)
label2 = np.array([1], dtype=np.int32).reshape(label2_shape)
expect2 = F.cross_entropy(F.softmax(tensor(data2)), tensor(label2)).numpy()
cases = [
{"input": [data1, label1], "output": expect1,},
{"input": [data2, label2], "output": expect2,},
]
opr_test(cases, F.cross_entropy_with_softmax)
def test_cross_entropy():
data1_shape = (1, 2)
label1_shape = (1,)
data2_shape = (1, 3)
label2_shape = (1,)
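    # cross_entropy of an already-normalized probability vector reduces to
    # -log of the probability assigned to the true class, hence the expected values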
data1 = np.array([0.5, 0.5], dtype=np.float32).reshape(data1_shape)
label1 = np.array([1], dtype=np.int32).reshape(label1_shape)
expect1 = np.array([-np.log(0.5)], dtype=np.float32)
data2 = np.array([0.3, 0.4, 0.3], dtype=np.float32).reshape(data2_shape)
label2 = np.array([1], dtype=np.int32).reshape(label2_shape)
expect2 = np.array([-np.log(0.4)], dtype=np.float32)
cases = [
{"input": [data1, label1], "output": expect1,},
{"input": [data2, label2], "output": expect2,},
]
opr_test(cases, F.cross_entropy)
def test_binary_cross_entropy():
data1_shape = (2, 2)
label1_shape = (2, 2)
data2_shape = (2, 3)
label2_shape = (2, 3)
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def compare_fn(x, y):
assertTensorClose(x.numpy(), y, max_err=5e-4)
np.random.seed(123)
data1 = sigmoid(np.random.uniform(size=data1_shape).astype(np.float32))
label1 = np.random.uniform(size=label1_shape).astype(np.float32)
expect1 = np.array([0.6361], dtype=np.float32)
np.random.seed(123)
data2 = sigmoid(np.random.uniform(size=data2_shape).astype(np.float32))
label2 = np.random.uniform(size=label2_shape).astype(np.float32)
expect2 = np.array([0.6750], dtype=np.float32)
cases = [
{"input": [data1, label1], "output": expect1,},
{"input": [data2, label2], "output": expect2,},
]
opr_test(cases, F.binary_cross_entropy, compare_fn=compare_fn)
@pytest.mark.skip
def test_conv_bias():
inp_scale = 0.01
w_scale = 0.02
outp_scale = 0.1
inp_dtype = mgb.dtype.qint8(inp_scale)
w_dtype = mgb.dtype.qint8(w_scale)
b_dtype = mgb.dtype.qint32(inp_scale * w_scale)
out_dtype = mgb.dtype.qint8(outp_scale)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="IDENTITY",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
        w_v = np.random.normal(size=(OC, IC, KH, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = mgb.dtype.get_scale(inp_dtype)
w_scale = mgb.dtype.get_scale(w_dtype)
b_scale = mgb.dtype.get_scale(b_dtype)
inpv = mgb.dtype.convert_to_qint8(inp_v * inp_scale, inp_dtype)
wv = mgb.dtype.convert_to_qint8(w_v * w_scale, w_dtype)
bv = mgb.dtype.convert_to_qint32(b_v * b_scale, b_dtype)
inp_int8 = tensor(inpv, dtype=inp_dtype)
w_int8 = Parameter(wv, dtype=w_dtype)
b_int32 = Parameter(bv, dtype=b_dtype)
inp_fp32 = inp_int8.astype("float32")
w_fp32 = w_int8.astype("float32")
b_fp32 = b_int32.astype("float32")
jit.trace.enabled = True
b_symbolic = True
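        # helper: reshape NCHW into NCHW4 by splitting the channel axis into
        # groups of 4 and moving the sub-channel axis last; this is the layout
        # exercised below when CUDA is available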
def convert_to_nchw4(var):
return var.reshape(
var.shapeof(0), var.shapeof(1) // 4, 4, var.shapeof(2), var.shapeof(3)
).dimshuffle(0, 1, 3, 4, 2)
@jit.trace(symbolic=b_symbolic)
def run_conv2d(inp, w, b):
O = F.conv2d(
inp, w, b if has_bias else None, stride=(SH, SW), padding=(PH, PW),
)
if nonlinear_mode == "RELU":
return F.relu(O)
else:
return O
@jit.trace(symbolic=b_symbolic)
def run_conv_bias(inp, w, b, format="NCHW"):
b = b if has_bias else np.zeros_like(b)
if format == "NCHW4":
inp = convert_to_nchw4(inp)
w = convert_to_nchw4(w)
b = F.flatten(b)
return F.conv_bias_activation(
inp,
w,
b,
stride=(SH, SW),
padding=(PH, PW),
dtype=out_dtype,
nonlinear_mode=nonlinear_mode,
)
format = "NCHW4" if is_cuda_available() else "NCHW"
expected = run_conv2d(inp_fp32, w_fp32, b_fp32)
expected = expected.astype(out_dtype).astype("float32")
result = run_conv_bias(inp_int8, w_int8, b_int32, format=format).astype(
"float32"
)
if format == "NCHW4":
result = result.dimshuffle(0, 1, 4, 2, 3)
expected = F.flatten(expected)
result = F.flatten(result)
assertTensorClose(result.numpy(), expected.numpy())
if not is_cuda_available():
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1, False)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1, False)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False, "RELU")
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, True, "RELU")
|
[
"megengine.jit.trace",
"megengine.functional.add_update",
"megengine.functional.conv2d",
"megengine.tensor",
"megengine.functional.flatten",
"megengine.functional.concat",
"megengine._internal.dtype.convert_to_qint32",
"megengine._internal.dtype.convert_to_qint8",
"megengine._internal.dtype.qint8",
"megengine.Buffer",
"megengine.test.assertTensorClose",
"megengine.is_cuda_available",
"megengine.functional.relu",
"megengine._internal.dtype.get_scale",
"megengine._internal.dtype.qint32",
"megengine.Parameter",
"megengine.functional.conv_bias_activation"
] |
[((1047, 1096), 'helpers.opr_test', 'opr_test', (['cases', 'F.flatten'], {'compare_fn': 'compare_fn'}), '(cases, F.flatten, compare_fn=compare_fn)\n', (1055, 1096), False, 'from helpers import opr_test\n'), ((1247, 1310), 'helpers.opr_test', 'opr_test', (['cases', 'F.flatten'], {'compare_fn': 'compare_fn', 'start_axis': '(1)'}), '(cases, F.flatten, compare_fn=compare_fn, start_axis=1)\n', (1255, 1310), False, 'from helpers import opr_test\n'), ((1459, 1522), 'helpers.opr_test', 'opr_test', (['cases', 'F.flatten'], {'compare_fn': 'compare_fn', 'start_axis': '(2)'}), '(cases, F.flatten, compare_fn=compare_fn, start_axis=2)\n', (1467, 1522), False, 'from helpers import opr_test\n'), ((1671, 1746), 'helpers.opr_test', 'opr_test', (['cases', 'F.flatten'], {'compare_fn': 'compare_fn', 'start_axis': '(1)', 'end_axis': '(2)'}), '(cases, F.flatten, compare_fn=compare_fn, start_axis=1, end_axis=2)\n', (1679, 1746), False, 'from helpers import opr_test\n'), ((1780, 1822), 'numpy.array', 'np.array', (['[[1, 0], [0, 1]]'], {'dtype': 'np.int32'}), '([[1, 0], [0, 1]], dtype=np.int32)\n', (1788, 1822), True, 'import numpy as np\n'), ((1833, 1887), 'numpy.array', 'np.array', (['[[1, np.inf], [np.nan, 4]]'], {'dtype': 'np.float32'}), '([[1, np.inf], [np.nan, 4]], dtype=np.float32)\n', (1841, 1887), True, 'import numpy as np\n'), ((1898, 1942), 'numpy.array', 'np.array', (['[[5, 6], [7, 8]]'], {'dtype': 'np.float32'}), '([[5, 6], [7, 8]], dtype=np.float32)\n', (1906, 1942), True, 'import numpy as np\n'), ((1957, 2016), 'numpy.array', 'np.array', (['[[1, 0, 1], [1, 0, 0], [1, 1, 0]]'], {'dtype': 'np.int32'}), '([[1, 0, 1], [1, 0, 0], [1, 1, 0]], dtype=np.int32)\n', (1965, 2016), True, 'import numpy as np\n'), ((2027, 2098), 'numpy.array', 'np.array', (['[[1, np.inf, 2], [0, np.nan, 4], [1, 5, 7]]'], {'dtype': 'np.float32'}), '([[1, np.inf, 2], [0, np.nan, 4], [1, 5, 7]], dtype=np.float32)\n', (2035, 2098), True, 'import numpy as np\n'), ((2109, 2170), 'numpy.array', 'np.array', (['[[5, 6, 9], [2, 7, 8], [2, 1, 9]]'], {'dtype': 'np.float32'}), '([[5, 6, 9], [2, 7, 8], [2, 1, 9]], dtype=np.float32)\n', (2117, 2170), True, 'import numpy as np\n'), ((2274, 2315), 'helpers.opr_test', 'opr_test', (['cases', 'F.where'], {'ref_fn': 'np.where'}), '(cases, F.where, ref_fn=np.where)\n', (2282, 2315), False, 'from helpers import opr_test\n'), ((2330, 2365), 'numpy.array', 'np.array', (['[1, 1, 1]'], {'dtype': 'np.int32'}), '([1, 1, 1], dtype=np.int32)\n', (2338, 2365), True, 'import numpy as np\n'), ((2376, 2413), 'numpy.array', 'np.array', (['[1, 3, 2]'], {'dtype': 'np.float32'}), '([1, 3, 2], dtype=np.float32)\n', (2384, 2413), True, 'import numpy as np\n'), ((2424, 2461), 'numpy.array', 'np.array', (['[5, 6, 9]'], {'dtype': 'np.float32'}), '([5, 6, 9], dtype=np.float32)\n', (2432, 2461), True, 'import numpy as np\n'), ((2476, 2511), 'numpy.array', 'np.array', (['[0, 0, 0]'], {'dtype': 'np.int32'}), '([0, 0, 0], dtype=np.int32)\n', (2484, 2511), True, 'import numpy as np\n'), ((2522, 2559), 'numpy.array', 'np.array', (['[1, 3, 2]'], {'dtype': 'np.float32'}), '([1, 3, 2], dtype=np.float32)\n', (2530, 2559), True, 'import numpy as np\n'), ((2570, 2607), 'numpy.array', 'np.array', (['[5, 6, 9]'], {'dtype': 'np.float32'}), '([5, 6, 9], dtype=np.float32)\n', (2578, 2607), True, 'import numpy as np\n'), ((2711, 2752), 'helpers.opr_test', 'opr_test', (['cases', 'F.where'], {'ref_fn': 'np.where'}), '(cases, F.where, ref_fn=np.where)\n', (2719, 2752), False, 'from helpers import opr_test\n'), ((3751, 3798), 'helpers.opr_test', 
'opr_test', (['cases', 'F.matrix_mul'], {'ref_fn': 'np.matmul'}), '(cases, F.matrix_mul, ref_fn=np.matmul)\n', (3759, 3798), False, 'from helpers import opr_test\n'), ((4906, 4929), 'helpers.opr_test', 'opr_test', (['cases', 'F.sort'], {}), '(cases, F.sort)\n', (4914, 4929), False, 'from helpers import opr_test\n'), ((5174, 5215), 'helpers.opr_test', 'opr_test', (['cases', 'F.round'], {'ref_fn': 'np.round'}), '(cases, F.round, ref_fn=np.round)\n', (5182, 5215), False, 'from helpers import opr_test\n'), ((5714, 5768), 'helpers.opr_test', 'opr_test', (['cases', 'F.broadcast_to'], {'compare_fn': 'compare_fn'}), '(cases, F.broadcast_to, compare_fn=compare_fn)\n', (5722, 5768), False, 'from helpers import opr_test\n'), ((7048, 7057), 'megengine.Buffer', 'Buffer', (['v'], {}), '(v)\n', (7054, 7057), False, 'from megengine import Buffer, Parameter, is_cuda_available, jit, tensor\n'), ((7067, 7085), 'megengine.functional.add_update', 'F.add_update', (['b', '(1)'], {}), '(b, 1)\n', (7079, 7085), True, 'import megengine.functional as F\n'), ((7134, 7152), 'megengine.functional.add_update', 'F.add_update', (['b', '(1)'], {}), '(b, 1)\n', (7146, 7152), True, 'import megengine.functional as F\n'), ((7202, 7235), 'numpy.ones', 'np.ones', (['(2, 2)'], {'dtype': 'np.float32'}), '((2, 2), dtype=np.float32)\n', (7209, 7235), True, 'import numpy as np\n'), ((7263, 7272), 'megengine.tensor', 'tensor', (['x'], {}), '(x)\n', (7269, 7272), False, 'from megengine import Buffer, Parameter, is_cuda_available, jit, tensor\n'), ((7285, 7294), 'megengine.tensor', 'tensor', (['y'], {}), '(y)\n', (7291, 7294), False, 'from megengine import Buffer, Parameter, is_cuda_available, jit, tensor\n'), ((7518, 7527), 'megengine.Buffer', 'Buffer', (['b'], {}), '(b)\n', (7524, 7527), False, 'from megengine import Buffer, Parameter, is_cuda_available, jit, tensor\n'), ((7693, 7721), 'megengine.functional.add_update', 'F.add_update', (['y', 'z'], {'beta': '(0.1)'}), '(y, z, beta=0.1)\n', (7705, 7721), True, 'import megengine.functional as F\n'), ((7775, 7804), 'megengine.test.assertTensorClose', 'assertTensorClose', (['res', '(b + 1)'], {}), '(res, b + 1)\n', (7792, 7804), False, 'from megengine.test import assertTensorClose\n'), ((8520, 8565), 'helpers.opr_test', 'opr_test', (['cases', 'F.cross_entropy_with_softmax'], {}), '(cases, F.cross_entropy_with_softmax)\n', (8528, 8565), False, 'from helpers import opr_test\n'), ((9224, 9256), 'helpers.opr_test', 'opr_test', (['cases', 'F.cross_entropy'], {}), '(cases, F.cross_entropy)\n', (9232, 9256), False, 'from helpers import opr_test\n'), ((9537, 9556), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (9551, 9556), True, 'import numpy as np\n'), ((9716, 9752), 'numpy.array', 'np.array', (['[0.6361]'], {'dtype': 'np.float32'}), '([0.6361], dtype=np.float32)\n', (9724, 9752), True, 'import numpy as np\n'), ((9758, 9777), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (9772, 9777), True, 'import numpy as np\n'), ((9937, 9972), 'numpy.array', 'np.array', (['[0.675]'], {'dtype': 'np.float32'}), '([0.675], dtype=np.float32)\n', (9945, 9972), True, 'import numpy as np\n'), ((10111, 10173), 'helpers.opr_test', 'opr_test', (['cases', 'F.binary_cross_entropy'], {'compare_fn': 'compare_fn'}), '(cases, F.binary_cross_entropy, compare_fn=compare_fn)\n', (10119, 10173), False, 'from helpers import opr_test\n'), ((10293, 10319), 'megengine._internal.dtype.qint8', 'mgb.dtype.qint8', (['inp_scale'], {}), '(inp_scale)\n', (10308, 10319), True, 'import 
megengine._internal as mgb\n'), ((10334, 10358), 'megengine._internal.dtype.qint8', 'mgb.dtype.qint8', (['w_scale'], {}), '(w_scale)\n', (10349, 10358), True, 'import megengine._internal as mgb\n'), ((10373, 10410), 'megengine._internal.dtype.qint32', 'mgb.dtype.qint32', (['(inp_scale * w_scale)'], {}), '(inp_scale * w_scale)\n', (10389, 10410), True, 'import megengine._internal as mgb\n'), ((10427, 10454), 'megengine._internal.dtype.qint8', 'mgb.dtype.qint8', (['outp_scale'], {}), '(outp_scale)\n', (10442, 10454), True, 'import megengine._internal as mgb\n'), ((3267, 3291), 'megengine.functional.concat', 'F.concat', (['[data1, data2]'], {}), '([data1, data2])\n', (3275, 3291), True, 'import megengine.functional as F\n'), ((4672, 4686), 'numpy.sort', 'np.sort', (['data1'], {}), '(data1)\n', (4679, 4686), True, 'import numpy as np\n'), ((4739, 4753), 'numpy.sort', 'np.sort', (['data2'], {}), '(data2)\n', (4746, 4753), True, 'import numpy as np\n'), ((7573, 7591), 'megengine.functional.add_update', 'F.add_update', (['y', 'x'], {}), '(y, x)\n', (7585, 7591), True, 'import megengine.functional as F\n'), ((10681, 10719), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(N, IC, IH, IW)'}), '(size=(N, IC, IH, IW))\n', (10697, 10719), True, 'import numpy as np\n'), ((10734, 10773), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(OC, IC, KW, KW)'}), '(size=(OC, IC, KW, KW))\n', (10750, 10773), True, 'import numpy as np\n'), ((10788, 10824), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(1, OC, 1, 1)'}), '(size=(1, OC, 1, 1))\n', (10804, 10824), True, 'import numpy as np\n'), ((10845, 10875), 'megengine._internal.dtype.get_scale', 'mgb.dtype.get_scale', (['inp_dtype'], {}), '(inp_dtype)\n', (10864, 10875), True, 'import megengine._internal as mgb\n'), ((10894, 10922), 'megengine._internal.dtype.get_scale', 'mgb.dtype.get_scale', (['w_dtype'], {}), '(w_dtype)\n', (10913, 10922), True, 'import megengine._internal as mgb\n'), ((10941, 10969), 'megengine._internal.dtype.get_scale', 'mgb.dtype.get_scale', (['b_dtype'], {}), '(b_dtype)\n', (10960, 10969), True, 'import megengine._internal as mgb\n'), ((10986, 11042), 'megengine._internal.dtype.convert_to_qint8', 'mgb.dtype.convert_to_qint8', (['(inp_v * inp_scale)', 'inp_dtype'], {}), '(inp_v * inp_scale, inp_dtype)\n', (11012, 11042), True, 'import megengine._internal as mgb\n'), ((11056, 11106), 'megengine._internal.dtype.convert_to_qint8', 'mgb.dtype.convert_to_qint8', (['(w_v * w_scale)', 'w_dtype'], {}), '(w_v * w_scale, w_dtype)\n', (11082, 11106), True, 'import megengine._internal as mgb\n'), ((11120, 11171), 'megengine._internal.dtype.convert_to_qint32', 'mgb.dtype.convert_to_qint32', (['(b_v * b_scale)', 'b_dtype'], {}), '(b_v * b_scale, b_dtype)\n', (11147, 11171), True, 'import megengine._internal as mgb\n'), ((11192, 11221), 'megengine.tensor', 'tensor', (['inpv'], {'dtype': 'inp_dtype'}), '(inpv, dtype=inp_dtype)\n', (11198, 11221), False, 'from megengine import Buffer, Parameter, is_cuda_available, jit, tensor\n'), ((11239, 11267), 'megengine.Parameter', 'Parameter', (['wv'], {'dtype': 'w_dtype'}), '(wv, dtype=w_dtype)\n', (11248, 11267), False, 'from megengine import Buffer, Parameter, is_cuda_available, jit, tensor\n'), ((11286, 11314), 'megengine.Parameter', 'Parameter', (['bv'], {'dtype': 'b_dtype'}), '(bv, dtype=b_dtype)\n', (11295, 11314), False, 'from megengine import Buffer, Parameter, is_cuda_available, jit, tensor\n'), ((11712, 11742), 'megengine.jit.trace', 'jit.trace', ([], {'symbolic': 
'b_symbolic'}), '(symbolic=b_symbolic)\n', (11721, 11742), False, 'from megengine import Buffer, Parameter, is_cuda_available, jit, tensor\n'), ((12029, 12059), 'megengine.jit.trace', 'jit.trace', ([], {'symbolic': 'b_symbolic'}), '(symbolic=b_symbolic)\n', (12038, 12059), False, 'from megengine import Buffer, Parameter, is_cuda_available, jit, tensor\n'), ((12977, 12996), 'megengine.functional.flatten', 'F.flatten', (['expected'], {}), '(expected)\n', (12986, 12996), True, 'import megengine.functional as F\n'), ((13014, 13031), 'megengine.functional.flatten', 'F.flatten', (['result'], {}), '(result)\n', (13023, 13031), True, 'import megengine.functional as F\n'), ((13104, 13123), 'megengine.is_cuda_available', 'is_cuda_available', ([], {}), '()\n', (13121, 13123), False, 'from megengine import Buffer, Parameter, is_cuda_available, jit, tensor\n'), ((720, 749), 'numpy.random.random', 'np.random.random', (['data0_shape'], {}), '(data0_shape)\n', (736, 749), True, 'import numpy as np\n'), ((781, 810), 'numpy.random.random', 'np.random.random', (['data1_shape'], {}), '(data1_shape)\n', (797, 810), True, 'import numpy as np\n'), ((3526, 3550), 'numpy.random.random', 'np.random.random', (['shape1'], {}), '(shape1)\n', (3542, 3550), True, 'import numpy as np\n'), ((3581, 3605), 'numpy.random.random', 'np.random.random', (['shape2'], {}), '(shape2)\n', (3597, 3605), True, 'import numpy as np\n'), ((3636, 3660), 'numpy.random.random', 'np.random.random', (['shape3'], {}), '(shape3)\n', (3652, 3660), True, 'import numpy as np\n'), ((3960, 3984), 'numpy.random.random', 'np.random.random', (['shape1'], {}), '(shape1)\n', (3976, 3984), True, 'import numpy as np\n'), ((4015, 4039), 'numpy.random.random', 'np.random.random', (['shape2'], {}), '(shape2)\n', (4031, 4039), True, 'import numpy as np\n'), ((4070, 4094), 'numpy.random.random', 'np.random.random', (['shape3'], {}), '(shape3)\n', (4086, 4094), True, 'import numpy as np\n'), ((4547, 4576), 'numpy.random.random', 'np.random.random', (['data1_shape'], {}), '(data1_shape)\n', (4563, 4576), True, 'import numpy as np\n'), ((4608, 4637), 'numpy.random.random', 'np.random.random', (['data2_shape'], {}), '(data2_shape)\n', (4624, 4637), True, 'import numpy as np\n'), ((5010, 5039), 'numpy.random.random', 'np.random.random', (['data1_shape'], {}), '(data1_shape)\n', (5026, 5039), True, 'import numpy as np\n'), ((5071, 5100), 'numpy.random.random', 'np.random.random', (['data2_shape'], {}), '(data2_shape)\n', (5087, 5100), True, 'import numpy as np\n'), ((5316, 5346), 'numpy.random.random', 'np.random.random', (['input1_shape'], {}), '(input1_shape)\n', (5332, 5346), True, 'import numpy as np\n'), ((5440, 5470), 'numpy.random.random', 'np.random.random', (['input2_shape'], {}), '(input2_shape)\n', (5456, 5470), True, 'import numpy as np\n'), ((6997, 7020), 'numpy.random.random', 'np.random.random', (['shape'], {}), '(shape)\n', (7013, 7020), True, 'import numpy as np\n'), ((7335, 7346), 'megengine.tensor', 'tensor', (['(0.9)'], {}), '(0.9)\n', (7341, 7346), False, 'from megengine import Buffer, Parameter, is_cuda_available, jit, tensor\n'), ((7466, 7490), 'numpy.random.random', 'np.random.random', (['(2, 3)'], {}), '((2, 3))\n', (7482, 7490), True, 'import numpy as np\n'), ((7957, 7993), 'numpy.array', 'np.array', (['[1, 0.5]'], {'dtype': 'np.float32'}), '([1, 0.5], dtype=np.float32)\n', (7965, 7993), True, 'import numpy as np\n'), ((8028, 8057), 'numpy.array', 'np.array', (['[1]'], {'dtype': 'np.int32'}), '([1], dtype=np.int32)\n', (8036, 8057), True, 
'import numpy as np\n'), ((8173, 8216), 'numpy.array', 'np.array', (['[0.3, 0.4, 0.3]'], {'dtype': 'np.float32'}), '([0.3, 0.4, 0.3], dtype=np.float32)\n', (8181, 8216), True, 'import numpy as np\n'), ((8251, 8280), 'numpy.array', 'np.array', (['[1]'], {'dtype': 'np.int32'}), '([1], dtype=np.int32)\n', (8259, 8280), True, 'import numpy as np\n'), ((8705, 8743), 'numpy.array', 'np.array', (['[0.5, 0.5]'], {'dtype': 'np.float32'}), '([0.5, 0.5], dtype=np.float32)\n', (8713, 8743), True, 'import numpy as np\n'), ((8778, 8807), 'numpy.array', 'np.array', (['[1]'], {'dtype': 'np.int32'}), '([1], dtype=np.int32)\n', (8786, 8807), True, 'import numpy as np\n'), ((8900, 8943), 'numpy.array', 'np.array', (['[0.3, 0.4, 0.3]'], {'dtype': 'np.float32'}), '([0.3, 0.4, 0.3], dtype=np.float32)\n', (8908, 8943), True, 'import numpy as np\n'), ((8978, 9007), 'numpy.array', 'np.array', (['[1]'], {'dtype': 'np.int32'}), '([1], dtype=np.int32)\n', (8986, 9007), True, 'import numpy as np\n'), ((9646, 9682), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'label1_shape'}), '(size=label1_shape)\n', (9663, 9682), True, 'import numpy as np\n'), ((9867, 9903), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'label2_shape'}), '(size=label2_shape)\n', (9884, 9903), True, 'import numpy as np\n'), ((11794, 11870), 'megengine.functional.conv2d', 'F.conv2d', (['inp', 'w', '(b if has_bias else None)'], {'stride': '(SH, SW)', 'padding': '(PH, PW)'}), '(inp, w, b if has_bias else None, stride=(SH, SW), padding=(PH, PW))\n', (11802, 11870), True, 'import megengine.functional as F\n'), ((12335, 12456), 'megengine.functional.conv_bias_activation', 'F.conv_bias_activation', (['inp', 'w', 'b'], {'stride': '(SH, SW)', 'padding': '(PH, PW)', 'dtype': 'out_dtype', 'nonlinear_mode': 'nonlinear_mode'}), '(inp, w, b, stride=(SH, SW), padding=(PH, PW), dtype=\n out_dtype, nonlinear_mode=nonlinear_mode)\n', (12357, 12456), True, 'import megengine.functional as F\n'), ((12608, 12627), 'megengine.is_cuda_available', 'is_cuda_available', ([], {}), '()\n', (12625, 12627), False, 'from megengine import Buffer, Parameter, is_cuda_available, jit, tensor\n'), ((3405, 3427), 'numpy.concatenate', 'np.concatenate', (['[x, y]'], {}), '([x, y])\n', (3419, 3427), True, 'import numpy as np\n'), ((4688, 4705), 'numpy.argsort', 'np.argsort', (['data1'], {}), '(data1)\n', (4698, 4705), True, 'import numpy as np\n'), ((4755, 4772), 'numpy.argsort', 'np.argsort', (['data2'], {}), '(data2)\n', (4765, 4772), True, 'import numpy as np\n'), ((5962, 6009), 'numpy.linspace', 'np.linspace', (['start', 'end', 'step'], {'dtype': 'np.float32'}), '(start, end, step, dtype=np.float32)\n', (5973, 6009), True, 'import numpy as np\n'), ((6188, 6235), 'numpy.linspace', 'np.linspace', (['start', 'end', 'step'], {'dtype': 'np.float32'}), '(start, end, step, dtype=np.float32)\n', (6199, 6235), True, 'import numpy as np\n'), ((6432, 6477), 'numpy.arange', 'np.arange', (['start', 'end', 'step'], {'dtype': 'np.float32'}), '(start, end, step, dtype=np.float32)\n', (6441, 6477), True, 'import numpy as np\n'), ((6656, 6701), 'numpy.arange', 'np.arange', (['start', 'end', 'step'], {'dtype': 'np.float32'}), '(start, end, step, dtype=np.float32)\n', (6665, 6701), True, 'import numpy as np\n'), ((6892, 6937), 'numpy.arange', 'np.arange', (['start', 'end', 'step'], {'dtype': 'np.float32'}), '(start, end, step, dtype=np.float32)\n', (6901, 6937), True, 'import numpy as np\n'), ((7599, 7615), 'numpy.zeros', 'np.zeros', (['(2, 3)'], {}), '((2, 3))\n', (7607, 7615), 
True, 'import numpy as np\n'), ((7652, 7668), 'numpy.zeros', 'np.zeros', (['(2, 3)'], {}), '((2, 3))\n', (7660, 7668), True, 'import numpy as np\n'), ((7735, 7750), 'numpy.ones', 'np.ones', (['(2, 3)'], {}), '((2, 3))\n', (7742, 7750), True, 'import numpy as np\n'), ((8136, 8150), 'megengine.tensor', 'tensor', (['label1'], {}), '(label1)\n', (8142, 8150), False, 'from megengine import Buffer, Parameter, is_cuda_available, jit, tensor\n'), ((8359, 8373), 'megengine.tensor', 'tensor', (['label2'], {}), '(label2)\n', (8365, 8373), False, 'from megengine import Buffer, Parameter, is_cuda_available, jit, tensor\n'), ((8855, 8866), 'numpy.log', 'np.log', (['(0.5)'], {}), '(0.5)\n', (8861, 8866), True, 'import numpy as np\n'), ((9055, 9066), 'numpy.log', 'np.log', (['(0.4)'], {}), '(0.4)\n', (9061, 9066), True, 'import numpy as np\n'), ((9439, 9449), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (9445, 9449), True, 'import numpy as np\n'), ((9577, 9612), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'data1_shape'}), '(size=data1_shape)\n', (9594, 9612), True, 'import numpy as np\n'), ((9798, 9833), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'data2_shape'}), '(size=data2_shape)\n', (9815, 9833), True, 'import numpy as np\n'), ((11966, 11975), 'megengine.functional.relu', 'F.relu', (['O'], {}), '(O)\n', (11972, 11975), True, 'import megengine.functional as F\n'), ((12148, 12164), 'numpy.zeros_like', 'np.zeros_like', (['b'], {}), '(b)\n', (12161, 12164), True, 'import numpy as np\n'), ((12303, 12315), 'megengine.functional.flatten', 'F.flatten', (['b'], {}), '(b)\n', (12312, 12315), True, 'import megengine.functional as F\n'), ((4421, 4452), 'numpy.matmul', 'np.matmul', (['x[i, ...]', 'y[i, ...]'], {}), '(x[i, ...], y[i, ...])\n', (4430, 4452), True, 'import numpy as np\n'), ((8120, 8133), 'megengine.tensor', 'tensor', (['data1'], {}), '(data1)\n', (8126, 8133), False, 'from megengine import Buffer, Parameter, is_cuda_available, jit, tensor\n'), ((8343, 8356), 'megengine.tensor', 'tensor', (['data2'], {}), '(data2)\n', (8349, 8356), False, 'from megengine import Buffer, Parameter, is_cuda_available, jit, tensor\n'), ((2896, 2908), 'numpy.eye', 'np.eye', (['n', 'm'], {}), '(n, m)\n', (2902, 2908), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
r"""
Linear elasticity with given displacements.
Find :math:`\ul{u}` such that:
.. math::
\int_{\Omega} D_{ijkl}\ e_{ij}(\ul{v}) e_{kl}(\ul{u})
= 0
\;, \quad \forall \ul{v} \;,
where
.. math::
D_{ijkl} = \mu (\delta_{ik} \delta_{jl}+\delta_{il} \delta_{jk}) +
\lambda \ \delta_{ij} \delta_{kl}
\;.
This example models a cylinder that is fixed at one end while the second end
has a specified displacement of 0.01 in the x direction (this boundary
condition is named ``'Displaced'``). There is also a specified displacement of
0.005 in the z direction for points in the region labeled
``'SomewhereTop'``. This boundary condition is named
``'PerturbedSurface'``. The region ``'SomewhereTop'`` is specified as those
vertices for which::
(z > 0.017) & (x > 0.03) & (x < 0.07)
The displacement field (three DOFs/node) in the ``'Omega'`` region is
approximated using P1 (four-node tetrahedral) finite elements. The material is
linear elastic and its properties are specified as Lamé parameters
:math:`\lambda` and :math:`\mu` (see
http://en.wikipedia.org/wiki/Lam%C3%A9_parameters)
The output is the displacement for each vertex, saved by default to
cylinder.vtk. View the results using::
$ ./postproc.py cylinder.vtk --wireframe -b --only-names=u -d'u,plot_displacements,rel_scaling=1'
"""
from sfepy import data_dir
from sfepy.mechanics.matcoefs import stiffness_from_lame
filename_mesh = data_dir + '/meshes/3d/cylinder.mesh'
regions = {
'Omega' : 'all',
'Left' : ('vertices in (x < 0.001)', 'facet'),
'Right' : ('vertices in (x > 0.099)', 'facet'),
'SomewhereTop' : ('vertices in (z > 0.017) & (x > 0.03) & (x < 0.07)',
'vertex'),
}
materials = {
'solid' : ({'D': stiffness_from_lame(dim=3, lam=1e1, mu=1e0)},),
}
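# stiffness_from_lame assembles the isotropic elasticity tensor D_ijkl of the
# weak form above from the Lamé parameters lambda = 10 and mu = 1.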
fields = {
'displacement': ('real', 'vector', 'Omega', 1),
}
integrals = {
'i' : 1,
}
variables = {
'u' : ('unknown field', 'displacement', 0),
'v' : ('test field', 'displacement', 'u'),
}
ebcs = {
'Fixed' : ('Left', {'u.all' : 0.0}),
'Displaced' : ('Right', {'u.0' : 0.01, 'u.[1,2]' : 0.0}),
'PerturbedSurface' : ('SomewhereTop', {'u.2' : 0.005}),
}
equations = {
'balance_of_forces' :
"""dw_lin_elastic.i.Omega(solid.D, v, u) = 0""",
}
solvers = {
'ls' : ('ls.scipy_direct', {}),
'newton' : ('nls.newton', {
'i_max' : 1,
'eps_a' : 1e-10,
}),
}
|
[
"sfepy.mechanics.matcoefs.stiffness_from_lame"
] |
[((1770, 1814), 'sfepy.mechanics.matcoefs.stiffness_from_lame', 'stiffness_from_lame', ([], {'dim': '(3)', 'lam': '(10.0)', 'mu': '(1.0)'}), '(dim=3, lam=10.0, mu=1.0)\n', (1789, 1814), False, 'from sfepy.mechanics.matcoefs import stiffness_from_lame\n')]
|
# -*- coding: utf-8 -*-
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import megengine.functional as F
import megengine.module as M
import models.backbones.resnet.model as resnet
import layers
class FreeAnchor(M.Module):
"""
    Implement FreeAnchor, a RetinaNet-style detector (base architecture: https://arxiv.org/abs/1708.02002).
"""
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
self.anchor_generator = layers.AnchorBoxGenerator(
anchor_scales=self.cfg.anchor_scales,
anchor_ratios=self.cfg.anchor_ratios,
strides=self.cfg.stride,
offset=self.cfg.anchor_offset,
)
self.box_coder = layers.BoxCoder(cfg.reg_mean, cfg.reg_std)
self.in_features = cfg.in_features
# ----------------------- build backbone ------------------------ #
bottom_up = getattr(resnet, cfg.backbone)(
norm=layers.get_norm(cfg.backbone_norm), pretrained=cfg.backbone_pretrained
)
del bottom_up.fc
# ----------------------- build FPN ----------------------------- #
self.backbone = layers.FPN(
bottom_up=bottom_up,
in_features=cfg.fpn_in_features,
out_channels=cfg.fpn_out_channels,
norm=cfg.fpn_norm,
top_block=layers.LastLevelP6P7(
cfg.fpn_top_in_channel, cfg.fpn_out_channels, cfg.fpn_top_in_feature
),
strides=cfg.fpn_in_strides,
channels=cfg.fpn_in_channels,
)
backbone_shape = self.backbone.output_shape()
feature_shapes = [backbone_shape[f] for f in self.in_features]
# ----------------------- build FreeAnchor Head ----------------- #
self.head = layers.BoxHead(cfg, feature_shapes)
def preprocess_image(self, image):
padded_image = layers.get_padded_tensor(image, 32, 0.0)
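        # per-channel normalization: img_mean / img_std are indexed to shape
        # (1, C, 1, 1) so they broadcast over the NCHW batch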
normed_image = (
padded_image
- np.array(self.cfg.img_mean, dtype="float32")[None, :, None, None]
) / np.array(self.cfg.img_std, dtype="float32")[None, :, None, None]
return normed_image
def forward(self, image, im_info, gt_boxes=None):
image = self.preprocess_image(image)
features = self.backbone(image)
features = [features[f] for f in self.in_features]
box_logits, box_offsets = self.head(features)
box_logits_list = [
_.transpose(0, 2, 3, 1).reshape(image.shape[0], -1, self.cfg.num_classes)
for _ in box_logits
]
box_offsets_list = [
_.transpose(0, 2, 3, 1).reshape(image.shape[0], -1, 4) for _ in box_offsets
]
anchors_list = self.anchor_generator(features)
all_level_box_logits = F.concat(box_logits_list, axis=1)
all_level_box_offsets = F.concat(box_offsets_list, axis=1)
all_level_anchors = F.concat(anchors_list, axis=0)
if self.training:
loss_dict = self.get_losses(
all_level_anchors, all_level_box_logits,
all_level_box_offsets, gt_boxes, im_info
)
self.cfg.losses_keys = list(loss_dict.keys())
return loss_dict
else:
            # currently does not support multi-batch testing
assert image.shape[0] == 1
pred_boxes = self.box_coder.decode(
all_level_anchors, all_level_box_offsets[0]
)
pred_boxes = pred_boxes.reshape(-1, 4)
scale_w = im_info[0, 1] / im_info[0, 3]
scale_h = im_info[0, 0] / im_info[0, 2]
pred_boxes = pred_boxes / F.concat(
[scale_w, scale_h, scale_w, scale_h], axis=0
)
clipped_boxes = layers.get_clipped_boxes(
pred_boxes, im_info[0, 2:4]
).reshape(-1, 4)
pred_score = F.sigmoid(all_level_box_logits)[0]
return pred_score, clipped_boxes
def get_losses(self, anchors, pred_logits, pred_offsets, gt_boxes, im_info):
# pylint: disable=too-many-statements
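        # FreeAnchor bag losses: positive_bag_loss turns the scores of the anchors
        # in one object's bag into -log of a weighted mean (weights 1 / (1 - p), so
        # the mean leans towards the best-scoring anchor); negative_bag_loss applies
        # a focal-style penalty to anchors that should not match any object.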
def positive_bag_loss(logits, axis=1):
weight = 1.0 / (1.0 - logits)
weight /= weight.sum(axis=axis, keepdims=True)
bag_prob = (weight * logits).sum(axis=1)
return -layers.safelog(bag_prob)
def negative_bag_loss(logits, gamma):
return (logits ** gamma) * (-layers.safelog(1.0 - logits))
pred_scores = F.sigmoid(pred_logits)
box_prob_list = []
positive_losses = []
clamp_eps = 1e-7
bucket_size = self.cfg.bucket_size
for bid in range(im_info.shape[0]):
boxes_info = gt_boxes[bid, : im_info[bid, 4].astype("int32")]
# id 0 is used for background classes, so -1 first
labels = boxes_info[:, 4].astype("int32") - 1
pred_box = self.box_coder.decode(anchors, pred_offsets[bid]).detach()
overlaps = layers.get_iou(boxes_info[:, :4], pred_box).detach()
thresh1 = self.cfg.box_iou_threshold
thresh2 = F.clip(
overlaps.max(axis=1, keepdims=True),
lower=thresh1 + clamp_eps, upper=1.0
)
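            # map each anchor's IoU with a gt box linearly from
            # [box_iou_threshold, per-gt max IoU] onto [0, 1]; anchors below the
            # threshold get probability 0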
gt_pred_prob = F.clip(
(overlaps - thresh1) / (thresh2 - thresh1), lower=0, upper=1.0)
image_boxes_prob = F.zeros(pred_logits.shape[1:]).detach()
# guarantee that nonzero_idx is not empty
if gt_pred_prob.max() > clamp_eps:
_, nonzero_idx = F.cond_take(gt_pred_prob != 0, gt_pred_prob)
                # nonzero_idx is 1-D, so use num_anchors to recover (gt, anchor) index pairs
num_anchors = gt_pred_prob.shape[1]
anchors_idx = nonzero_idx % num_anchors
gt_idx = nonzero_idx // num_anchors
image_boxes_prob[anchors_idx, labels[gt_idx]] = gt_pred_prob[gt_idx, anchors_idx]
box_prob_list.append(image_boxes_prob)
# construct bags for objects
match_quality_matrix = layers.get_iou(boxes_info[:, :4], anchors).detach()
num_gt = match_quality_matrix.shape[0]
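            # each gt's bag = the bucket_size anchors with the highest IoU
            # (no_sort=True keeps them in their original anchor order)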
_, matched_idx = F.topk(
match_quality_matrix,
k=bucket_size,
descending=True,
no_sort=True,
)
matched_idx = matched_idx.detach()
matched_idx_flatten = matched_idx.reshape(-1)
gather_idx = labels.reshape(-1, 1)
gather_idx = F.broadcast_to(gather_idx, (num_gt, bucket_size))
gather_src = pred_scores[bid, matched_idx_flatten]
gather_src = gather_src.reshape(num_gt, bucket_size, -1)
matched_score = F.indexing_one_hot(gather_src, gather_idx, axis=2)
topk_anchors = anchors[matched_idx_flatten]
boxes_broad_cast = F.broadcast_to(
F.expand_dims(boxes_info[:, :4], axis=1), (num_gt, bucket_size, 4)
).reshape(-1, 4)
matched_offsets = self.box_coder.encode(topk_anchors, boxes_broad_cast)
reg_loss = layers.smooth_l1_loss(
pred_offsets[bid, matched_idx_flatten],
matched_offsets,
beta=self.cfg.smooth_l1_beta
).sum(axis=-1) * self.cfg.reg_loss_weight
matched_reg_scores = F.exp(-reg_loss)
positive_losses.append(
positive_bag_loss(
matched_score * matched_reg_scores.reshape(-1, bucket_size), axis=1
)
)
num_foreground = im_info[:, 4].sum()
pos_loss = F.concat(positive_losses).sum() / F.maximum(num_foreground, 1)
box_probs = F.stack(box_prob_list, axis=0)
neg_loss = negative_bag_loss(
pred_scores * (1 - box_probs), self.cfg.focal_loss_gamma
).sum() / F.maximum(num_foreground * bucket_size, 1)
alpha = self.cfg.focal_loss_alpha
pos_loss = pos_loss * alpha
neg_loss = neg_loss * (1 - alpha)
loss_dict = {
"total_loss": pos_loss + neg_loss,
"pos_loss": pos_loss,
"neg_loss": neg_loss,
}
return loss_dict
class FreeAnchorConfig:
# pylint: disable=too-many-statements
def __init__(self):
self.backbone = "resnet50"
self.backbone_pretrained = True
self.backbone_norm = "FrozenBN"
self.backbone_freeze_at = 2
self.fpn_norm = None
self.fpn_in_features = ["res3", "res4", "res5"]
self.fpn_in_strides = [8, 16, 32]
self.fpn_in_channels = [512, 1024, 2048]
self.fpn_out_channels = 256
self.fpn_top_in_feature = "p5"
self.fpn_top_in_channel = 256
# ------------------------ data cfg -------------------------- #
self.train_dataset = dict(
name="coco",
root="train2017",
ann_file="annotations/instances_train2017.json",
remove_images_without_annotations=True,
)
self.test_dataset = dict(
name="coco",
root="val2017",
ann_file="annotations/instances_val2017.json",
remove_images_without_annotations=False,
)
self.num_classes = 80
self.img_mean = [103.530, 116.280, 123.675] # BGR
self.img_std = [57.375, 57.120, 58.395]
# ----------------------- net cfg ------------------------- #
self.stride = [8, 16, 32, 64, 128]
self.in_features = ["p3", "p4", "p5", "p6", "p7"]
self.reg_mean = [0.0, 0.0, 0.0, 0.0]
self.reg_std = [0.1, 0.1, 0.2, 0.2]
self.anchor_scales = [
[x, x * 2 ** (1.0 / 3), x * 2 ** (2.0 / 3)] for x in [32, 64, 128, 256, 512]
]
self.anchor_ratios = [[0.5, 1, 2]]
self.anchor_offset = 0.5
self.box_iou_threshold = 0.6
self.bucket_size = 50
self.class_aware_box = False
self.cls_prior_prob = 0.02
# ------------------------ loss cfg -------------------------- #
self.focal_loss_alpha = 0.5
self.focal_loss_gamma = 2
self.smooth_l1_beta = 0 # use L1 loss
self.reg_loss_weight = 0.75
self.num_losses = 3
# ------------------------ training cfg ---------------------- #
self.train_image_short_size = (640, 672, 704, 736, 768, 800)
self.train_image_max_size = 1333
self.basic_lr = 0.01 / 16 # The basic learning rate for single-image
self.momentum = 0.9
self.weight_decay = 1e-4
self.log_interval = 20
self.nr_images_epoch = 80000
self.max_epoch = 54
self.warm_iters = 500
self.lr_decay_rate = 0.1
self.lr_decay_stages = [42, 50]
# ------------------------ testing cfg ----------------------- #
self.test_image_short_size = 800
self.test_image_max_size = 1333
self.test_max_boxes_per_image = 100
self.test_vis_threshold = 0.3
self.test_cls_threshold = 0.05
self.test_nms = 0.5
|
[
"megengine.functional.zeros",
"megengine.functional.broadcast_to",
"megengine.functional.stack",
"megengine.functional.concat",
"megengine.functional.exp",
"megengine.functional.sigmoid",
"megengine.functional.maximum",
"megengine.functional.cond_take",
"megengine.functional.topk",
"megengine.functional.expand_dims",
"megengine.functional.clip",
"megengine.functional.indexing_one_hot"
] |
[((740, 906), 'layers.AnchorBoxGenerator', 'layers.AnchorBoxGenerator', ([], {'anchor_scales': 'self.cfg.anchor_scales', 'anchor_ratios': 'self.cfg.anchor_ratios', 'strides': 'self.cfg.stride', 'offset': 'self.cfg.anchor_offset'}), '(anchor_scales=self.cfg.anchor_scales,\n anchor_ratios=self.cfg.anchor_ratios, strides=self.cfg.stride, offset=\n self.cfg.anchor_offset)\n', (765, 906), False, 'import layers\n'), ((982, 1024), 'layers.BoxCoder', 'layers.BoxCoder', (['cfg.reg_mean', 'cfg.reg_std'], {}), '(cfg.reg_mean, cfg.reg_std)\n', (997, 1024), False, 'import layers\n'), ((2048, 2083), 'layers.BoxHead', 'layers.BoxHead', (['cfg', 'feature_shapes'], {}), '(cfg, feature_shapes)\n', (2062, 2083), False, 'import layers\n'), ((2147, 2187), 'layers.get_padded_tensor', 'layers.get_padded_tensor', (['image', '(32)', '(0.0)'], {}), '(image, 32, 0.0)\n', (2171, 2187), False, 'import layers\n'), ((3049, 3082), 'megengine.functional.concat', 'F.concat', (['box_logits_list'], {'axis': '(1)'}), '(box_logits_list, axis=1)\n', (3057, 3082), True, 'import megengine.functional as F\n'), ((3115, 3149), 'megengine.functional.concat', 'F.concat', (['box_offsets_list'], {'axis': '(1)'}), '(box_offsets_list, axis=1)\n', (3123, 3149), True, 'import megengine.functional as F\n'), ((3178, 3208), 'megengine.functional.concat', 'F.concat', (['anchors_list'], {'axis': '(0)'}), '(anchors_list, axis=0)\n', (3186, 3208), True, 'import megengine.functional as F\n'), ((4751, 4773), 'megengine.functional.sigmoid', 'F.sigmoid', (['pred_logits'], {}), '(pred_logits)\n', (4760, 4773), True, 'import megengine.functional as F\n'), ((7984, 8014), 'megengine.functional.stack', 'F.stack', (['box_prob_list'], {'axis': '(0)'}), '(box_prob_list, axis=0)\n', (7991, 8014), True, 'import megengine.functional as F\n'), ((5523, 5593), 'megengine.functional.clip', 'F.clip', (['((overlaps - thresh1) / (thresh2 - thresh1))'], {'lower': '(0)', 'upper': '(1.0)'}), '((overlaps - thresh1) / (thresh2 - thresh1), lower=0, upper=1.0)\n', (5529, 5593), True, 'import megengine.functional as F\n'), ((6464, 6538), 'megengine.functional.topk', 'F.topk', (['match_quality_matrix'], {'k': 'bucket_size', 'descending': '(True)', 'no_sort': '(True)'}), '(match_quality_matrix, k=bucket_size, descending=True, no_sort=True)\n', (6470, 6538), True, 'import megengine.functional as F\n'), ((6796, 6845), 'megengine.functional.broadcast_to', 'F.broadcast_to', (['gather_idx', '(num_gt, bucket_size)'], {}), '(gather_idx, (num_gt, bucket_size))\n', (6810, 6845), True, 'import megengine.functional as F\n'), ((7007, 7057), 'megengine.functional.indexing_one_hot', 'F.indexing_one_hot', (['gather_src', 'gather_idx'], {'axis': '(2)'}), '(gather_src, gather_idx, axis=2)\n', (7025, 7057), True, 'import megengine.functional as F\n'), ((7627, 7643), 'megengine.functional.exp', 'F.exp', (['(-reg_loss)'], {}), '(-reg_loss)\n', (7632, 7643), True, 'import megengine.functional as F\n'), ((7935, 7963), 'megengine.functional.maximum', 'F.maximum', (['num_foreground', '(1)'], {}), '(num_foreground, 1)\n', (7944, 7963), True, 'import megengine.functional as F\n'), ((8141, 8183), 'megengine.functional.maximum', 'F.maximum', (['(num_foreground * bucket_size)', '(1)'], {}), '(num_foreground * bucket_size, 1)\n', (8150, 8183), True, 'import megengine.functional as F\n'), ((1214, 1248), 'layers.get_norm', 'layers.get_norm', (['cfg.backbone_norm'], {}), '(cfg.backbone_norm)\n', (1229, 1248), False, 'import layers\n'), ((1611, 1706), 'layers.LastLevelP6P7', 'layers.LastLevelP6P7', 
(['cfg.fpn_top_in_channel', 'cfg.fpn_out_channels', 'cfg.fpn_top_in_feature'], {}), '(cfg.fpn_top_in_channel, cfg.fpn_out_channels, cfg.\n fpn_top_in_feature)\n', (1631, 1706), False, 'import layers\n'), ((2330, 2373), 'numpy.array', 'np.array', (['self.cfg.img_std'], {'dtype': '"""float32"""'}), "(self.cfg.img_std, dtype='float32')\n", (2338, 2373), True, 'import numpy as np\n'), ((3919, 3973), 'megengine.functional.concat', 'F.concat', (['[scale_w, scale_h, scale_w, scale_h]'], {'axis': '(0)'}), '([scale_w, scale_h, scale_w, scale_h], axis=0)\n', (3927, 3973), True, 'import megengine.functional as F\n'), ((4156, 4187), 'megengine.functional.sigmoid', 'F.sigmoid', (['all_level_box_logits'], {}), '(all_level_box_logits)\n', (4165, 4187), True, 'import megengine.functional as F\n'), ((4585, 4609), 'layers.safelog', 'layers.safelog', (['bag_prob'], {}), '(bag_prob)\n', (4599, 4609), False, 'import layers\n'), ((5817, 5861), 'megengine.functional.cond_take', 'F.cond_take', (['(gt_pred_prob != 0)', 'gt_pred_prob'], {}), '(gt_pred_prob != 0, gt_pred_prob)\n', (5828, 5861), True, 'import megengine.functional as F\n'), ((2252, 2296), 'numpy.array', 'np.array', (['self.cfg.img_mean'], {'dtype': '"""float32"""'}), "(self.cfg.img_mean, dtype='float32')\n", (2260, 2296), True, 'import numpy as np\n'), ((4032, 4085), 'layers.get_clipped_boxes', 'layers.get_clipped_boxes', (['pred_boxes', 'im_info[0, 2:4]'], {}), '(pred_boxes, im_info[0, 2:4])\n', (4056, 4085), False, 'import layers\n'), ((4698, 4726), 'layers.safelog', 'layers.safelog', (['(1.0 - logits)'], {}), '(1.0 - logits)\n', (4712, 4726), False, 'import layers\n'), ((5244, 5287), 'layers.get_iou', 'layers.get_iou', (['boxes_info[:, :4]', 'pred_box'], {}), '(boxes_info[:, :4], pred_box)\n', (5258, 5287), False, 'import layers\n'), ((5643, 5673), 'megengine.functional.zeros', 'F.zeros', (['pred_logits.shape[1:]'], {}), '(pred_logits.shape[1:])\n', (5650, 5673), True, 'import megengine.functional as F\n'), ((6332, 6374), 'layers.get_iou', 'layers.get_iou', (['boxes_info[:, :4]', 'anchors'], {}), '(boxes_info[:, :4], anchors)\n', (6346, 6374), False, 'import layers\n'), ((7901, 7926), 'megengine.functional.concat', 'F.concat', (['positive_losses'], {}), '(positive_losses)\n', (7909, 7926), True, 'import megengine.functional as F\n'), ((7178, 7218), 'megengine.functional.expand_dims', 'F.expand_dims', (['boxes_info[:, :4]'], {'axis': '(1)'}), '(boxes_info[:, :4], axis=1)\n', (7191, 7218), True, 'import megengine.functional as F\n'), ((7383, 7495), 'layers.smooth_l1_loss', 'layers.smooth_l1_loss', (['pred_offsets[bid, matched_idx_flatten]', 'matched_offsets'], {'beta': 'self.cfg.smooth_l1_beta'}), '(pred_offsets[bid, matched_idx_flatten],\n matched_offsets, beta=self.cfg.smooth_l1_beta)\n', (7404, 7495), False, 'import layers\n')]
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import math
from typing import List
import megengine.module as M
from megengine.core import Tensor
from official.vision.detection.layers import basic
class RetinaNetHead(M.Module):
"""
The head used in RetinaNet for object classification and box regression.
"""
def __init__(self, cfg, input_shape: List[basic.ShapeSpec]):
super().__init__()
in_channels = input_shape[0].channels
num_classes = cfg.num_classes
num_convs = 4
prior_prob = cfg.cls_prior_prob
num_anchors = [9, 9, 9, 9, 9]
assert (
len(set(num_anchors)) == 1
), "not support different number of anchors between levels"
num_anchors = num_anchors[0]
cls_subnet = []
bbox_subnet = []
for _ in range(num_convs):
cls_subnet.append(
M.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1,)
)
cls_subnet.append(M.ReLU())
bbox_subnet.append(
M.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1,)
)
bbox_subnet.append(M.ReLU())
self.cls_subnet = M.Sequential(*cls_subnet)
self.bbox_subnet = M.Sequential(*bbox_subnet)
self.cls_score = M.Conv2d(
in_channels, num_anchors * num_classes, kernel_size=3, stride=1, padding=1,
)
self.bbox_pred = M.Conv2d(
in_channels, num_anchors * 4, kernel_size=3, stride=1, padding=1
)
# Initialization
for modules in [
self.cls_subnet,
self.bbox_subnet,
self.cls_score,
self.bbox_pred,
]:
for layer in modules.modules():
if isinstance(layer, M.Conv2d):
M.init.normal_(layer.weight, mean=0, std=0.01)
M.init.fill_(layer.bias, 0)
# Use prior in model initialization to improve stability
bias_value = -math.log((1 - prior_prob) / prior_prob)
M.init.fill_(self.cls_score.bias, bias_value)
def forward(self, features: List[Tensor]):
logits, bbox_reg = [], []
for feature in features:
logits.append(self.cls_score(self.cls_subnet(feature)))
bbox_reg.append(self.bbox_pred(self.bbox_subnet(feature)))
return logits, bbox_reg
|
[
"megengine.module.init.normal_",
"megengine.module.init.fill_",
"megengine.module.ReLU",
"megengine.module.Sequential",
"megengine.module.Conv2d"
] |
[((1556, 1581), 'megengine.module.Sequential', 'M.Sequential', (['*cls_subnet'], {}), '(*cls_subnet)\n', (1568, 1581), True, 'import megengine.module as M\n'), ((1609, 1635), 'megengine.module.Sequential', 'M.Sequential', (['*bbox_subnet'], {}), '(*bbox_subnet)\n', (1621, 1635), True, 'import megengine.module as M\n'), ((1661, 1749), 'megengine.module.Conv2d', 'M.Conv2d', (['in_channels', '(num_anchors * num_classes)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(in_channels, num_anchors * num_classes, kernel_size=3, stride=1,\n padding=1)\n', (1669, 1749), True, 'import megengine.module as M\n'), ((1794, 1868), 'megengine.module.Conv2d', 'M.Conv2d', (['in_channels', '(num_anchors * 4)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(in_channels, num_anchors * 4, kernel_size=3, stride=1, padding=1)\n', (1802, 1868), True, 'import megengine.module as M\n'), ((2411, 2456), 'megengine.module.init.fill_', 'M.init.fill_', (['self.cls_score.bias', 'bias_value'], {}), '(self.cls_score.bias, bias_value)\n', (2423, 2456), True, 'import megengine.module as M\n'), ((2363, 2402), 'math.log', 'math.log', (['((1 - prior_prob) / prior_prob)'], {}), '((1 - prior_prob) / prior_prob)\n', (2371, 2402), False, 'import math\n'), ((1228, 1298), 'megengine.module.Conv2d', 'M.Conv2d', (['in_channels', 'in_channels'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(in_channels, in_channels, kernel_size=3, stride=1, padding=1)\n', (1236, 1298), True, 'import megengine.module as M\n'), ((1344, 1352), 'megengine.module.ReLU', 'M.ReLU', ([], {}), '()\n', (1350, 1352), True, 'import megengine.module as M\n'), ((1402, 1472), 'megengine.module.Conv2d', 'M.Conv2d', (['in_channels', 'in_channels'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(in_channels, in_channels, kernel_size=3, stride=1, padding=1)\n', (1410, 1472), True, 'import megengine.module as M\n'), ((1519, 1527), 'megengine.module.ReLU', 'M.ReLU', ([], {}), '()\n', (1525, 1527), True, 'import megengine.module as M\n'), ((2180, 2226), 'megengine.module.init.normal_', 'M.init.normal_', (['layer.weight'], {'mean': '(0)', 'std': '(0.01)'}), '(layer.weight, mean=0, std=0.01)\n', (2194, 2226), True, 'import megengine.module as M\n'), ((2247, 2274), 'megengine.module.init.fill_', 'M.init.fill_', (['layer.bias', '(0)'], {}), '(layer.bias, 0)\n', (2259, 2274), True, 'import megengine.module as M\n')]
|
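The head above seeds the classification bias from a prior probability so that every anchor starts out predicting the rare foreground class with probability prior_prob. A minimal, standalone check of that identity (plain Python, prior value assumed, not taken from the snippet):

import math
prior_prob = 0.01                                    # assumed prior, as commonly used for focal-loss heads
bias_value = -math.log((1 - prior_prob) / prior_prob)
def sigmoid(z):
    return 1.0 / (1.0 + math.exp(-z))
# With zero-initialised conv weights the logit equals the bias, so the
# initial foreground score is exactly the prior: sigmoid(bias_value) == prior_prob.
assert abs(sigmoid(bias_value) - prior_prob) < 1e-12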
from typing import Optional
from sqlmodel import Field, SQLModel
from pydantic import validator
from datetime import datetime, date
from fastapi import HTTPException
import re
class User(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
short_name: str
first_name: str
last_name: str
email: str
role_id: int
team_id: Optional[int] = None
start_date: date
created_at: datetime
updated_at: datetime
is_active: bool
@validator("short_name", always=True)
def valid_short_name(cls, sn_input):
assert sn_input.isalpha(), "only alphabet letters allowed in short name"
        assert sn_input.islower(), "short name must contain lowercase letters only"
return sn_input
@validator("first_name", always=True)
def valid_first_name(cls, first_name):
assert first_name.replace(
" ", ""
).isalpha(), "only alphabet letters allowed in first name"
        if not first_name[0].isupper():
raise HTTPException(
status_code=400, detail="first name should start with a capital letter"
)
return first_name
@validator("last_name", always=True)
def valid_last_name(cls, ln_input):
assert ln_input.replace(
" ", ""
).isalpha(), "only alphabet letters allowed in last name"
return ln_input
@validator("email", always=True)
def valid_email(cls, email_input):
regex = r"\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Z|a-z]{2,}\b"
assert re.fullmatch(regex, email_input), "email format incorrect"
return email_input
|
[
"sqlmodel.Field"
] |
[((287, 324), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (292, 324), False, 'from sqlmodel import Field, SQLModel, Field\n'), ((547, 583), 'pydantic.validator', 'validator', (['"""short_name"""'], {'always': '(True)'}), "('short_name', always=True)\n", (556, 583), False, 'from pydantic import validator\n'), ((812, 848), 'pydantic.validator', 'validator', (['"""first_name"""'], {'always': '(True)'}), "('first_name', always=True)\n", (821, 848), False, 'from pydantic import validator\n'), ((1226, 1261), 'pydantic.validator', 'validator', (['"""last_name"""'], {'always': '(True)'}), "('last_name', always=True)\n", (1235, 1261), False, 'from pydantic import validator\n'), ((1451, 1482), 'pydantic.validator', 'validator', (['"""email"""'], {'always': '(True)'}), "('email', always=True)\n", (1460, 1482), False, 'from pydantic import validator\n'), ((1608, 1640), 're.fullmatch', 're.fullmatch', (['regex', 'email_input'], {}), '(regex, email_input)\n', (1620, 1640), False, 'import re\n'), ((1077, 1168), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': '(400)', 'detail': '"""first name should start with a capital letter"""'}), "(status_code=400, detail=\n 'first name should start with a capital letter')\n", (1090, 1168), False, 'from fastapi import HTTPException\n')]
|
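The valid_email validator above relies on a single regular expression. A standalone check of that pattern (regex copied verbatim; the sample addresses are made up):

import re
EMAIL_RE = r"\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Z|a-z]{2,}\b"
assert re.fullmatch(EMAIL_RE, "jane.doe@example.com")                    # accepted
assert re.fullmatch(EMAIL_RE, "missing-at-sign.example.com") is None    # rejected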
import math
import megengine as mge
import megengine.functional as F
import numpy as np
from megengine import Tensor
import pdb
def restore_bbox(rois, deltas, unnormalize=True, config = None):
assert deltas.ndim == 3
if unnormalize:
std_opr = mge.tensor(config.bbox_normalize_stds.reshape(1, 1, -1))
mean_opr = mge.tensor(config.bbox_normalize_means.reshape(1, 1, -1))
deltas = deltas * std_opr
deltas = deltas + mean_opr
# n = deltas.shape[1]
n, c = deltas.shape[0], deltas.shape[1]
all_rois = F.broadcast_to(F.expand_dims(rois, 1), (n, c, rois.shape[1])).reshape(-1, rois.shape[1])
deltas = deltas.reshape(-1, deltas.shape[2])
pred_bbox = bbox_transform_inv_opr(all_rois, deltas)
pred_bbox = pred_bbox.reshape(-1, c, pred_bbox.shape[1])
return pred_bbox
def filter_boxes_opr(boxes, min_size):
"""Remove all boxes with any side smaller than min_size."""
wh = boxes[:, 2:4] - boxes[:, 0:2] + 1
    keep_mask = F.prod(wh >= min_size, axis=1).astype(np.float32)
    keep_mask = keep_mask + F.equal(keep_mask.sum(), 0).astype(np.float32)
    return keep_mask
def clip_boxes_opr(boxes, im_info):
""" Clip the boxes into the image region."""
w = im_info[1] - 1
h = im_info[0] - 1
boxes[:, 0::4] = boxes[:, 0::4].clamp(min=0, max=w)
boxes[:, 1::4] = boxes[:, 1::4].clamp(min=0, max=h)
boxes[:, 2::4] = boxes[:, 2::4].clamp(min=0, max=w)
boxes[:, 3::4] = boxes[:, 3::4].clamp(min=0, max=h)
return boxes
def bbox_transform_inv_opr(bbox, deltas):
    """ Transforms the learned deltas to the final bbox coordinates, the axis is 1"""
    max_delta = math.log(1000.0 / 16)
bbox_width = bbox[:, 2] - bbox[:, 0] + 1
bbox_height = bbox[:, 3] - bbox[:, 1] + 1
bbox_ctr_x = bbox[:, 0] + 0.5 * bbox_width
bbox_ctr_y = bbox[:, 1] + 0.5 * bbox_height
pred_ctr_x = bbox_ctr_x + deltas[:, 0] * bbox_width
pred_ctr_y = bbox_ctr_y + deltas[:, 1] * bbox_height
dw = deltas[:, 2]
dh = deltas[:, 3]
dw = F.minimum(dw, max_delta)
dh = F.minimum(dh, max_delta)
pred_width = bbox_width * F.exp(dw)
pred_height = bbox_height * F.exp(dh)
pred_x1 = pred_ctr_x - 0.5 * pred_width
pred_y1 = pred_ctr_y - 0.5 * pred_height
pred_x2 = pred_ctr_x + 0.5 * pred_width
pred_y2 = pred_ctr_y + 0.5 * pred_height
# pred_boxes = F.concat((pred_x1.reshape(-1, 1), pred_y1.reshape(-1, 1),
# pred_x2.reshape(-1, 1), pred_y2.reshape(-1, 1)), axis=1)
pred_boxes = F.stack([pred_x1, pred_y1, pred_x2, pred_y2], axis = 1)
return pred_boxes
def bbox_transform_opr(bbox, gt):
""" Transform the bounding box and ground truth to the loss targets.
The 4 box coordinates are in axis 1"""
bbox_width = bbox[:, 2] - bbox[:, 0] + 1
bbox_height = bbox[:, 3] - bbox[:, 1] + 1
bbox_ctr_x = bbox[:, 0] + 0.5 * bbox_width
bbox_ctr_y = bbox[:, 1] + 0.5 * bbox_height
gt_width = gt[:, 2] - gt[:, 0] + 1
gt_height = gt[:, 3] - gt[:, 1] + 1
gt_ctr_x = gt[:, 0] + 0.5 * gt_width
gt_ctr_y = gt[:, 1] + 0.5 * gt_height
target_dx = (gt_ctr_x - bbox_ctr_x) / bbox_width
target_dy = (gt_ctr_y - bbox_ctr_y) / bbox_height
target_dw = F.log(gt_width / bbox_width)
target_dh = F.log(gt_height / bbox_height)
target = F.stack([target_dx, target_dy, target_dw, target_dh], axis=1)
return target
def box_overlap_opr(box: Tensor, gt: Tensor) -> Tensor:
"""
Given two lists of boxes of size N and M,
compute the IoU (intersection over union)
between __all__ N x M pairs of boxes.
The box order must be (xmin, ymin, xmax, ymax).
Args:
        box, gt (Tensor): two lists of boxes containing N and M boxes, respectively.
Returns:
Tensor: IoU, sized [N,M].
"""
# box = boxes1
# gt = boxes2
# target_shape = (boxes1.shape[0], boxes2.shape[0], 4)
N, K = box.shape[0], gt.shape[0]
b_box = F.broadcast_to(F.expand_dims(box, 1),(N, K, box.shape[1]))
b_gt = F.broadcast_to(F.expand_dims(gt, 0), (N, K, gt.shape[1]))
# b_gt = F.expand_dims(gt, 0).broadcast_to(N, K, gt.shape[1])
# b_box = F.expand_dims(boxes1, 1).broadcast(*target_shape)
# b_gt = F.expand_dims(boxes2, 0).broadcast(*target_shape)
iw = F.minimum(b_box[:, :, 2], b_gt[:, :, 2]) - F.maximum(
b_box[:, :, 0], b_gt[:, :, 0]
)
ih = F.minimum(b_box[:, :, 3], b_gt[:, :, 3]) - F.maximum(
b_box[:, :, 1], b_gt[:, :, 1]
)
inter = F.maximum(iw, 0) * F.maximum(ih, 0)
area_box = F.maximum(box[:, 2] - box[:, 0], 0) * F.maximum(box[:, 3] - box[:, 1], 0)
area_gt = F.maximum(gt[:, 2] - gt[:, 0], 0) * F.maximum(gt[:, 3] - gt[:, 1], 0)
# area_target_shape = (box.shape[0], gt.shapeof()[0])
b_area_box = F.broadcast_to(F.expand_dims(area_box, 1), (N, K))
b_area_gt = F.broadcast_to(F.expand_dims(area_gt, 0), (N, K))
# b_area_box = F.expand_dims(area_box, 1).broadcast_to(N, K)
# b_area_gt = F.expand_dims(area_gt, 0).broadcast_to(N, K)
# b_area_box = F.add_axis(area_box, 1).broadcast(*area_target_shape)
# b_area_gt = F.add_axis(area_gt, 0).broadcast(*area_target_shape)
union = b_area_box + b_area_gt - inter
overlaps = F.maximum(inter / union, 0)
return overlaps
def box_overlap_ignore_opr(box: Tensor, gt: Tensor, ignore_label=-1) -> Tensor:
"""
Given two lists of boxes of size N and M,
compute the IoU (intersection over union)
between __all__ N x M pairs of boxes.
The box order must be (xmin, ymin, xmax, ymax).
Args:
        box, gt (Tensor): two lists of boxes containing N and M boxes, respectively.
Returns:
Tensor: IoU, sized [N,M].
"""
# box = boxes1
# gt = boxes2
# target_shape = (boxes1.shapeof()[0], boxes2.shapeof()[0], 4)
eps = 1e-5
N, K = box.shape[0], gt.shape[0]
b_box = F.broadcast_to(F.expand_dims(box, 1), (N, K, box.shape[1]))
b_gt = F.broadcast_to(F.expand_dims(gt, 0), (N, K, gt.shape[1]))
# b_box = F.add_axis(boxes1, 1).broadcast(*target_shape)
# b_gt = F.add_axis(boxes2[:, :4], 0).broadcast(*target_shape)
iw = F.minimum(b_box[:, :, 2], b_gt[:, :, 2]) - F.maximum(
b_box[:, :, 0], b_gt[:, :, 0]
)
ih = F.minimum(b_box[:, :, 3], b_gt[:, :, 3]) - F.maximum(
b_box[:, :, 1], b_gt[:, :, 1]
)
inter = F.maximum(iw, 0) * F.maximum(ih, 0)
area_box = F.maximum(box[:, 2] - box[:, 0], 0) * F.maximum(box[:, 3] - box[:, 1], 0)
area_gt = F.maximum(gt[:, 2] - gt[:, 0], 0) * F.maximum(gt[:, 3] - gt[:, 1], 0)
# area_target_shape = (box.shapeof()[0], gt.shapeof()[0])
# b_area_box = F.add_axis(area_box, 1).broadcast(*area_target_shape)
# b_area_gt = F.add_axis(area_gt, 0).broadcast(*area_target_shape)
b_area_box = F.broadcast_to(F.expand_dims(area_box, 1), (N, K)) + eps
b_area_gt = F.broadcast_to(F.expand_dims(area_gt, 0), (N, K))
union = b_area_box + b_area_gt - inter + eps
overlaps_normal = F.maximum(inter / union, 0)
overlaps_ignore = F.maximum(inter / b_area_box, 0)
overlaps = F.maximum(inter / union, 0)
# gt_ignore_mask = F.add_axis(F.equal(gt[:, 4], ignore_label), 0).broadcast(*area_target_shape)
ignore_mask = F.equal(gt[:, 4], ignore_label)
gt_ignore_mask = F.expand_dims(ignore_mask, 0)
overlaps_normal *= (1 - gt_ignore_mask)
overlaps_ignore *= gt_ignore_mask
return overlaps_normal, overlaps_ignore
|
[
"megengine.functional.exp",
"megengine.functional.maximum",
"megengine.functional.equal",
"megengine.functional.stack",
"megengine.functional.expand_dims",
"megengine.functional.minimum",
"megengine.functional.prod",
"megengine.functional.log"
] |
[((1566, 1587), 'math.log', 'math.log', (['(1000.0 / 16)'], {}), '(1000.0 / 16)\n', (1574, 1587), False, 'import math\n'), ((2027, 2051), 'megengine.functional.minimum', 'F.minimum', (['dw', 'max_delta'], {}), '(dw, max_delta)\n', (2036, 2051), True, 'import megengine.functional as F\n'), ((2061, 2085), 'megengine.functional.minimum', 'F.minimum', (['dh', 'max_delta'], {}), '(dh, max_delta)\n', (2070, 2085), True, 'import megengine.functional as F\n'), ((2528, 2581), 'megengine.functional.stack', 'F.stack', (['[pred_x1, pred_y1, pred_x2, pred_y2]'], {'axis': '(1)'}), '([pred_x1, pred_y1, pred_x2, pred_y2], axis=1)\n', (2535, 2581), True, 'import megengine.functional as F\n'), ((3230, 3258), 'megengine.functional.log', 'F.log', (['(gt_width / bbox_width)'], {}), '(gt_width / bbox_width)\n', (3235, 3258), True, 'import megengine.functional as F\n'), ((3275, 3305), 'megengine.functional.log', 'F.log', (['(gt_height / bbox_height)'], {}), '(gt_height / bbox_height)\n', (3280, 3305), True, 'import megengine.functional as F\n'), ((3319, 3380), 'megengine.functional.stack', 'F.stack', (['[target_dx, target_dy, target_dw, target_dh]'], {'axis': '(1)'}), '([target_dx, target_dy, target_dw, target_dh], axis=1)\n', (3326, 3380), True, 'import megengine.functional as F\n'), ((5226, 5253), 'megengine.functional.maximum', 'F.maximum', (['(inter / union)', '(0)'], {}), '(inter / union, 0)\n', (5235, 5253), True, 'import megengine.functional as F\n'), ((6978, 7005), 'megengine.functional.maximum', 'F.maximum', (['(inter / union)', '(0)'], {}), '(inter / union, 0)\n', (6987, 7005), True, 'import megengine.functional as F\n'), ((7028, 7060), 'megengine.functional.maximum', 'F.maximum', (['(inter / b_area_box)', '(0)'], {}), '(inter / b_area_box, 0)\n', (7037, 7060), True, 'import megengine.functional as F\n'), ((7076, 7103), 'megengine.functional.maximum', 'F.maximum', (['(inter / union)', '(0)'], {}), '(inter / union, 0)\n', (7085, 7103), True, 'import megengine.functional as F\n'), ((7223, 7254), 'megengine.functional.equal', 'F.equal', (['gt[:, 4]', 'ignore_label'], {}), '(gt[:, 4], ignore_label)\n', (7230, 7254), True, 'import megengine.functional as F\n'), ((7276, 7305), 'megengine.functional.expand_dims', 'F.expand_dims', (['ignore_mask', '(0)'], {}), '(ignore_mask, 0)\n', (7289, 7305), True, 'import megengine.functional as F\n'), ((2116, 2125), 'megengine.functional.exp', 'F.exp', (['dw'], {}), '(dw)\n', (2121, 2125), True, 'import megengine.functional as F\n'), ((2158, 2167), 'megengine.functional.exp', 'F.exp', (['dh'], {}), '(dh)\n', (2163, 2167), True, 'import megengine.functional as F\n'), ((3958, 3979), 'megengine.functional.expand_dims', 'F.expand_dims', (['box', '(1)'], {}), '(box, 1)\n', (3971, 3979), True, 'import megengine.functional as F\n'), ((4028, 4048), 'megengine.functional.expand_dims', 'F.expand_dims', (['gt', '(0)'], {}), '(gt, 0)\n', (4041, 4048), True, 'import megengine.functional as F\n'), ((4275, 4315), 'megengine.functional.minimum', 'F.minimum', (['b_box[:, :, 2]', 'b_gt[:, :, 2]'], {}), '(b_box[:, :, 2], b_gt[:, :, 2])\n', (4284, 4315), True, 'import megengine.functional as F\n'), ((4318, 4358), 'megengine.functional.maximum', 'F.maximum', (['b_box[:, :, 0]', 'b_gt[:, :, 0]'], {}), '(b_box[:, :, 0], b_gt[:, :, 0])\n', (4327, 4358), True, 'import megengine.functional as F\n'), ((4382, 4422), 'megengine.functional.minimum', 'F.minimum', (['b_box[:, :, 3]', 'b_gt[:, :, 3]'], {}), '(b_box[:, :, 3], b_gt[:, :, 3])\n', (4391, 4422), True, 'import megengine.functional as F\n'), 
((4425, 4465), 'megengine.functional.maximum', 'F.maximum', (['b_box[:, :, 1]', 'b_gt[:, :, 1]'], {}), '(b_box[:, :, 1], b_gt[:, :, 1])\n', (4434, 4465), True, 'import megengine.functional as F\n'), ((4492, 4508), 'megengine.functional.maximum', 'F.maximum', (['iw', '(0)'], {}), '(iw, 0)\n', (4501, 4508), True, 'import megengine.functional as F\n'), ((4511, 4527), 'megengine.functional.maximum', 'F.maximum', (['ih', '(0)'], {}), '(ih, 0)\n', (4520, 4527), True, 'import megengine.functional as F\n'), ((4544, 4579), 'megengine.functional.maximum', 'F.maximum', (['(box[:, 2] - box[:, 0])', '(0)'], {}), '(box[:, 2] - box[:, 0], 0)\n', (4553, 4579), True, 'import megengine.functional as F\n'), ((4582, 4617), 'megengine.functional.maximum', 'F.maximum', (['(box[:, 3] - box[:, 1])', '(0)'], {}), '(box[:, 3] - box[:, 1], 0)\n', (4591, 4617), True, 'import megengine.functional as F\n'), ((4632, 4665), 'megengine.functional.maximum', 'F.maximum', (['(gt[:, 2] - gt[:, 0])', '(0)'], {}), '(gt[:, 2] - gt[:, 0], 0)\n', (4641, 4665), True, 'import megengine.functional as F\n'), ((4668, 4701), 'megengine.functional.maximum', 'F.maximum', (['(gt[:, 3] - gt[:, 1])', '(0)'], {}), '(gt[:, 3] - gt[:, 1], 0)\n', (4677, 4701), True, 'import megengine.functional as F\n'), ((4793, 4819), 'megengine.functional.expand_dims', 'F.expand_dims', (['area_box', '(1)'], {}), '(area_box, 1)\n', (4806, 4819), True, 'import megengine.functional as F\n'), ((4860, 4885), 'megengine.functional.expand_dims', 'F.expand_dims', (['area_gt', '(0)'], {}), '(area_gt, 0)\n', (4873, 4885), True, 'import megengine.functional as F\n'), ((5880, 5901), 'megengine.functional.expand_dims', 'F.expand_dims', (['box', '(1)'], {}), '(box, 1)\n', (5893, 5901), True, 'import megengine.functional as F\n'), ((5951, 5971), 'megengine.functional.expand_dims', 'F.expand_dims', (['gt', '(0)'], {}), '(gt, 0)\n', (5964, 5971), True, 'import megengine.functional as F\n'), ((6133, 6173), 'megengine.functional.minimum', 'F.minimum', (['b_box[:, :, 2]', 'b_gt[:, :, 2]'], {}), '(b_box[:, :, 2], b_gt[:, :, 2])\n', (6142, 6173), True, 'import megengine.functional as F\n'), ((6176, 6216), 'megengine.functional.maximum', 'F.maximum', (['b_box[:, :, 0]', 'b_gt[:, :, 0]'], {}), '(b_box[:, :, 0], b_gt[:, :, 0])\n', (6185, 6216), True, 'import megengine.functional as F\n'), ((6240, 6280), 'megengine.functional.minimum', 'F.minimum', (['b_box[:, :, 3]', 'b_gt[:, :, 3]'], {}), '(b_box[:, :, 3], b_gt[:, :, 3])\n', (6249, 6280), True, 'import megengine.functional as F\n'), ((6283, 6323), 'megengine.functional.maximum', 'F.maximum', (['b_box[:, :, 1]', 'b_gt[:, :, 1]'], {}), '(b_box[:, :, 1], b_gt[:, :, 1])\n', (6292, 6323), True, 'import megengine.functional as F\n'), ((6350, 6366), 'megengine.functional.maximum', 'F.maximum', (['iw', '(0)'], {}), '(iw, 0)\n', (6359, 6366), True, 'import megengine.functional as F\n'), ((6369, 6385), 'megengine.functional.maximum', 'F.maximum', (['ih', '(0)'], {}), '(ih, 0)\n', (6378, 6385), True, 'import megengine.functional as F\n'), ((6402, 6437), 'megengine.functional.maximum', 'F.maximum', (['(box[:, 2] - box[:, 0])', '(0)'], {}), '(box[:, 2] - box[:, 0], 0)\n', (6411, 6437), True, 'import megengine.functional as F\n'), ((6440, 6475), 'megengine.functional.maximum', 'F.maximum', (['(box[:, 3] - box[:, 1])', '(0)'], {}), '(box[:, 3] - box[:, 1], 0)\n', (6449, 6475), True, 'import megengine.functional as F\n'), ((6490, 6523), 'megengine.functional.maximum', 'F.maximum', (['(gt[:, 2] - gt[:, 0])', '(0)'], {}), '(gt[:, 2] - gt[:, 0], 0)\n', 
(6499, 6523), True, 'import megengine.functional as F\n'), ((6526, 6559), 'megengine.functional.maximum', 'F.maximum', (['(gt[:, 3] - gt[:, 1])', '(0)'], {}), '(gt[:, 3] - gt[:, 1], 0)\n', (6535, 6559), True, 'import megengine.functional as F\n'), ((6871, 6896), 'megengine.functional.expand_dims', 'F.expand_dims', (['area_gt', '(0)'], {}), '(area_gt, 0)\n', (6884, 6896), True, 'import megengine.functional as F\n'), ((991, 1021), 'megengine.functional.prod', 'F.prod', (['(wh >= min_size)'], {'axis': '(1)'}), '(wh >= min_size, axis=1)\n', (997, 1021), True, 'import megengine.functional as F\n'), ((6798, 6824), 'megengine.functional.expand_dims', 'F.expand_dims', (['area_box', '(1)'], {}), '(area_box, 1)\n', (6811, 6824), True, 'import megengine.functional as F\n'), ((565, 587), 'megengine.functional.expand_dims', 'F.expand_dims', (['rois', '(1)'], {}), '(rois, 1)\n', (578, 587), True, 'import megengine.functional as F\n')]
|
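A hand-checkable usage sketch for box_overlap_opr defined above, assuming that function and megengine are in scope. Two 10 by 10 boxes offset by 5 pixels overlap in a 5 by 5 patch, so IoU = 25 / (100 + 100 - 25), roughly 0.143; identical boxes give IoU = 1:

import numpy as np
import megengine as mge
boxes = mge.tensor(np.array([[0., 0., 10., 10.],
                              [5., 5., 15., 15.]], dtype=np.float32))
gts = mge.tensor(np.array([[0., 0., 10., 10.]], dtype=np.float32))
overlaps = box_overlap_opr(boxes, gts)   # box_overlap_opr from the snippet above
print(overlaps)                          # roughly [[1.0], [0.143]]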
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
import megengine.module as Float
import megengine.module.qat as QAT
import megengine.module.quantized as Q
from megengine.core.tensor import dtype
from megengine.quantization import min_max_fakequant_qconfig
from megengine.quantization.quantize import (
disable_fake_quant,
disable_observer,
propagate_qconfig,
)
"""
Calculate testing scales based on ``min_max_fakequant_qconfig``
"""
inp_scale = np.float32(np.random.rand() + 1)
min_val = np.random.randint(-127, 0, size=(2,)).astype("float32")
max_val = np.random.randint(1, 127, size=(2,)).astype("float32")
weight_scale = np.float32(np.max([-min_val[0], max_val[0]]) / 254 * 2)
act_scale = np.float32(np.max([-min_val[1], max_val[1]]) / 255 * 2)
def quant(x, scale):
inp_dtype = dtype.qint8(scale)
return x.astype(inp_dtype)
def fake_quant(x, scale):
x = x / scale
x = F.round(x)
x = F.clip(x, -128, 127)
x = x * scale
return x
def init_qat_net(net):
if net.with_weight:
net.weight_observer.min_val.set_value(min_val[0])
net.weight_observer.max_val.set_value(max_val[0])
if net.with_act:
net.act_observer.min_val.set_value(min_val[1])
net.act_observer.max_val.set_value(max_val[1])
def test_quant_stub():
normal_net = Float.QuantStub()
normal_net.eval()
qat_from_float = QAT.QuantStub.from_float_module(normal_net)
qat_from_float.eval()
disable_observer(qat_from_float)
disable_fake_quant(qat_from_float)
qat_net = QAT.QuantStub()
qat_net.eval()
disable_observer(qat_net)
propagate_qconfig(qat_net, min_max_fakequant_qconfig)
init_qat_net(qat_net)
q_net = Q.QuantStub.from_qat_module(qat_net)
q_net.eval()
x = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
normal = normal_net(x)
qat_without_fakequant = qat_from_float(x)
fake_quant_normal = fake_quant(normal_net(x), act_scale)
qat = qat_net(x)
q = q_net(x).numpy() * act_scale
np.testing.assert_allclose(qat_without_fakequant, normal)
np.testing.assert_allclose(qat, fake_quant_normal)
np.testing.assert_allclose(q, fake_quant_normal.numpy())
def test_dequant_stub():
normal_net = Float.DequantStub()
normal_net.eval()
qat_from_float = QAT.DequantStub.from_float_module(normal_net)
qat_from_float.eval()
disable_fake_quant(qat_from_float)
disable_observer(qat_from_float)
qat_net = QAT.DequantStub()
qat_net.eval()
disable_observer(qat_net)
propagate_qconfig(qat_net, min_max_fakequant_qconfig)
init_qat_net(qat_net)
q_net = Q.DequantStub.from_qat_module(qat_net)
q_net.eval()
x = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x = fake_quant(x, inp_scale)
x.q_dict["scale"] = inp_scale
normal = normal_net(x)
qat_without_fakequant = qat_from_float(x)
fake_quant_normal = normal_net(x)
qat = qat_net(x)
q = q_net(quant(x, inp_scale)).numpy()
np.testing.assert_allclose(qat_without_fakequant, normal)
np.testing.assert_allclose(qat, fake_quant_normal)
np.testing.assert_allclose(q, fake_quant_normal.numpy())
@pytest.mark.parametrize("kind", ["COS", "RELU", "ADD", "MUL", "FUSE_ADD_RELU"])
def test_elemwise(kind):
normal_net = Float.Elemwise(kind)
normal_net.eval()
qat_from_float = QAT.Elemwise.from_float_module(normal_net)
qat_from_float.eval()
disable_observer(qat_from_float)
disable_fake_quant(qat_from_float)
qat_net = QAT.Elemwise(kind)
qat_net.eval()
disable_observer(qat_net)
propagate_qconfig(qat_net, min_max_fakequant_qconfig)
init_qat_net(qat_net)
q_net = Q.Elemwise.from_qat_module(qat_net)
q_net.eval()
x1_scale = np.float32(np.random.rand() + 1)
x1 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x1 = fake_quant(x1, x1_scale)
x1.q_dict["scale"] = x1_scale
x2_scale = np.float32(np.random.rand() + 1)
x2 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x2 = fake_quant(x2, x2_scale)
x2.q_dict["scale"] = x2_scale
x1_int8 = quant(x1, x1_scale)
x2_int8 = quant(x2, x2_scale)
# test correctness of `Float`, `QAT` and `Quantized`
if kind in ("ADD", "MUL", "FUSE_ADD_RELU"):
normal = normal_net(x1, x2)
qat_without_fakequant = qat_from_float(x1, x2)
fake_quant_normal = fake_quant(normal_net(x1, x2), act_scale)
qat = qat_net(x1, x2)
q = q_net(x1_int8, x2_int8).numpy() * act_scale
else:
normal = normal_net(x1)
qat_without_fakequant = qat_from_float(x1)
fake_quant_normal = fake_quant(normal_net(x1), act_scale)
qat = qat_net(x1)
q = q_net(x1_int8).numpy() * act_scale
np.testing.assert_allclose(qat_without_fakequant, normal)
np.testing.assert_allclose(qat, fake_quant_normal)
np.testing.assert_allclose(q, fake_quant_normal.numpy())
def test_linear():
normal_net = Float.Linear(3, 3, bias=True)
normal_net.eval()
qat_net = QAT.Linear(3, 3, bias=True)
qat_net.eval()
disable_observer(qat_net)
propagate_qconfig(qat_net, min_max_fakequant_qconfig)
init_qat_net(qat_net)
x = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x = fake_quant(x, inp_scale)
x.q_dict["scale"] = inp_scale
x_int8 = quant(x, inp_scale)
weight = np.random.normal(size=(3, 3)).astype("float32")
bias = np.random.normal(size=(3,)).astype("float32")
normal_net.weight.set_value(fake_quant(weight, weight_scale))
normal_net.bias.set_value(fake_quant(bias, inp_scale * weight_scale))
qat_net.weight.set_value(weight)
qat_net.bias.set_value(bias)
qat_from_float = QAT.Linear.from_float_module(normal_net)
qat_from_float.eval()
disable_fake_quant(qat_from_float)
disable_observer(qat_from_float)
q_net = Q.Linear.from_qat_module(qat_net)
q_net.eval()
normal = normal_net(x)
qat_without_fakequant = qat_from_float(x)
fake_quant_normal = fake_quant(normal_net(x), act_scale)
qat = qat_net(x)
q = q_net(x_int8).numpy() * act_scale
np.testing.assert_allclose(qat_without_fakequant, normal)
np.testing.assert_allclose(qat, fake_quant_normal)
np.testing.assert_allclose(q, fake_quant_normal.numpy())
@pytest.mark.parametrize("module", ["Conv2d", "ConvBn2d", "ConvBnRelu2d"])
def test_conv(module):
normal_net = getattr(Float, module)(3, 3, 3, 1, 1, 1, bias=True)
normal_net.eval()
qat_net = getattr(QAT, module)(3, 3, 3, 1, 1, 1, bias=True)
qat_net.eval()
disable_observer(qat_net)
propagate_qconfig(qat_net, min_max_fakequant_qconfig)
init_qat_net(qat_net)
x = mge.tensor(np.random.normal(size=(1, 3, 3, 3)).astype("float32"))
x = fake_quant(x, inp_scale)
x.q_dict["scale"] = inp_scale
x_int8 = quant(x, inp_scale)
weight = np.random.normal(size=(3, 3, 3, 3)).astype("float32")
bias = np.random.normal(size=(1, 3, 1, 1)).astype("float32")
if module in ("ConvBn2d", "ConvBnRelu2d"):
normal_net.conv.weight.set_value(fake_quant(weight, weight_scale))
normal_net.conv.bias.set_value(fake_quant(bias, inp_scale * weight_scale))
qat_net.conv.weight.set_value(weight)
qat_net.conv.bias.set_value(bias)
else:
normal_net.weight.set_value(fake_quant(weight, weight_scale))
normal_net.bias.set_value(fake_quant(bias, inp_scale * weight_scale))
qat_net.weight.set_value(weight)
qat_net.bias.set_value(bias)
qat_from_float = getattr(QAT, module).from_float_module(normal_net)
qat_from_float.eval()
disable_observer(qat_from_float)
disable_fake_quant(qat_from_float)
q_net = getattr(Q, module).from_qat_module(qat_net)
q_net.eval()
normal = normal_net(x)
qat_without_fakequant = qat_from_float(x)
fake_quant_normal = fake_quant(normal_net(x), act_scale)
qat = qat_net(x)
q = q_net(x_int8).numpy() * act_scale
np.testing.assert_allclose(qat_without_fakequant, normal, atol=1e-6)
np.testing.assert_allclose(qat, fake_quant_normal)
np.testing.assert_allclose(q, fake_quant_normal.numpy())
|
[
"megengine.functional.round",
"megengine.module.quantized.Linear.from_qat_module",
"megengine.module.qat.Elemwise.from_float_module",
"megengine.module.qat.DequantStub.from_float_module",
"megengine.module.QuantStub",
"megengine.quantization.quantize.propagate_qconfig",
"megengine.module.Elemwise",
"megengine.module.qat.DequantStub",
"megengine.module.qat.QuantStub",
"megengine.module.quantized.Elemwise.from_qat_module",
"megengine.module.qat.QuantStub.from_float_module",
"megengine.module.quantized.DequantStub.from_qat_module",
"megengine.module.qat.Elemwise",
"megengine.core.tensor.dtype.qint8",
"megengine.module.qat.Linear",
"megengine.module.qat.Linear.from_float_module",
"megengine.quantization.quantize.disable_observer",
"megengine.quantization.quantize.disable_fake_quant",
"megengine.functional.clip",
"megengine.module.DequantStub",
"megengine.module.Linear",
"megengine.module.quantized.QuantStub.from_qat_module"
] |
[((3223, 3302), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""kind"""', "['COS', 'RELU', 'ADD', 'MUL', 'FUSE_ADD_RELU']"], {}), "('kind', ['COS', 'RELU', 'ADD', 'MUL', 'FUSE_ADD_RELU'])\n", (3246, 3302), False, 'import pytest\n'), ((6367, 6440), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""module"""', "['Conv2d', 'ConvBn2d', 'ConvBnRelu2d']"], {}), "('module', ['Conv2d', 'ConvBn2d', 'ConvBnRelu2d'])\n", (6390, 6440), False, 'import pytest\n'), ((845, 863), 'megengine.core.tensor.dtype.qint8', 'dtype.qint8', (['scale'], {}), '(scale)\n', (856, 863), False, 'from megengine.core.tensor import dtype\n'), ((949, 959), 'megengine.functional.round', 'F.round', (['x'], {}), '(x)\n', (956, 959), True, 'import megengine.functional as F\n'), ((968, 988), 'megengine.functional.clip', 'F.clip', (['x', '(-128)', '(127)'], {}), '(x, -128, 127)\n', (974, 988), True, 'import megengine.functional as F\n'), ((1358, 1375), 'megengine.module.QuantStub', 'Float.QuantStub', ([], {}), '()\n', (1373, 1375), True, 'import megengine.module as Float\n'), ((1420, 1463), 'megengine.module.qat.QuantStub.from_float_module', 'QAT.QuantStub.from_float_module', (['normal_net'], {}), '(normal_net)\n', (1451, 1463), True, 'import megengine.module.qat as QAT\n'), ((1494, 1526), 'megengine.quantization.quantize.disable_observer', 'disable_observer', (['qat_from_float'], {}), '(qat_from_float)\n', (1510, 1526), False, 'from megengine.quantization.quantize import disable_fake_quant, disable_observer, propagate_qconfig\n'), ((1531, 1565), 'megengine.quantization.quantize.disable_fake_quant', 'disable_fake_quant', (['qat_from_float'], {}), '(qat_from_float)\n', (1549, 1565), False, 'from megengine.quantization.quantize import disable_fake_quant, disable_observer, propagate_qconfig\n'), ((1581, 1596), 'megengine.module.qat.QuantStub', 'QAT.QuantStub', ([], {}), '()\n', (1594, 1596), True, 'import megengine.module.qat as QAT\n'), ((1620, 1645), 'megengine.quantization.quantize.disable_observer', 'disable_observer', (['qat_net'], {}), '(qat_net)\n', (1636, 1645), False, 'from megengine.quantization.quantize import disable_fake_quant, disable_observer, propagate_qconfig\n'), ((1651, 1704), 'megengine.quantization.quantize.propagate_qconfig', 'propagate_qconfig', (['qat_net', 'min_max_fakequant_qconfig'], {}), '(qat_net, min_max_fakequant_qconfig)\n', (1668, 1704), False, 'from megengine.quantization.quantize import disable_fake_quant, disable_observer, propagate_qconfig\n'), ((1744, 1780), 'megengine.module.quantized.QuantStub.from_qat_module', 'Q.QuantStub.from_qat_module', (['qat_net'], {}), '(qat_net)\n', (1771, 1780), True, 'import megengine.module.quantized as Q\n'), ((2064, 2121), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['qat_without_fakequant', 'normal'], {}), '(qat_without_fakequant, normal)\n', (2090, 2121), True, 'import numpy as np\n'), ((2126, 2176), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['qat', 'fake_quant_normal'], {}), '(qat, fake_quant_normal)\n', (2152, 2176), True, 'import numpy as np\n'), ((2282, 2301), 'megengine.module.DequantStub', 'Float.DequantStub', ([], {}), '()\n', (2299, 2301), True, 'import megengine.module as Float\n'), ((2346, 2391), 'megengine.module.qat.DequantStub.from_float_module', 'QAT.DequantStub.from_float_module', (['normal_net'], {}), '(normal_net)\n', (2379, 2391), True, 'import megengine.module.qat as QAT\n'), ((2422, 2456), 'megengine.quantization.quantize.disable_fake_quant', 'disable_fake_quant', 
(['qat_from_float'], {}), '(qat_from_float)\n', (2440, 2456), False, 'from megengine.quantization.quantize import disable_fake_quant, disable_observer, propagate_qconfig\n'), ((2461, 2493), 'megengine.quantization.quantize.disable_observer', 'disable_observer', (['qat_from_float'], {}), '(qat_from_float)\n', (2477, 2493), False, 'from megengine.quantization.quantize import disable_fake_quant, disable_observer, propagate_qconfig\n'), ((2509, 2526), 'megengine.module.qat.DequantStub', 'QAT.DequantStub', ([], {}), '()\n', (2524, 2526), True, 'import megengine.module.qat as QAT\n'), ((2550, 2575), 'megengine.quantization.quantize.disable_observer', 'disable_observer', (['qat_net'], {}), '(qat_net)\n', (2566, 2575), False, 'from megengine.quantization.quantize import disable_fake_quant, disable_observer, propagate_qconfig\n'), ((2581, 2634), 'megengine.quantization.quantize.propagate_qconfig', 'propagate_qconfig', (['qat_net', 'min_max_fakequant_qconfig'], {}), '(qat_net, min_max_fakequant_qconfig)\n', (2598, 2634), False, 'from megengine.quantization.quantize import disable_fake_quant, disable_observer, propagate_qconfig\n'), ((2674, 2712), 'megengine.module.quantized.DequantStub.from_qat_module', 'Q.DequantStub.from_qat_module', (['qat_net'], {}), '(qat_net)\n', (2703, 2712), True, 'import megengine.module.quantized as Q\n'), ((3046, 3103), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['qat_without_fakequant', 'normal'], {}), '(qat_without_fakequant, normal)\n', (3072, 3103), True, 'import numpy as np\n'), ((3108, 3158), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['qat', 'fake_quant_normal'], {}), '(qat, fake_quant_normal)\n', (3134, 3158), True, 'import numpy as np\n'), ((3345, 3365), 'megengine.module.Elemwise', 'Float.Elemwise', (['kind'], {}), '(kind)\n', (3359, 3365), True, 'import megengine.module as Float\n'), ((3410, 3452), 'megengine.module.qat.Elemwise.from_float_module', 'QAT.Elemwise.from_float_module', (['normal_net'], {}), '(normal_net)\n', (3440, 3452), True, 'import megengine.module.qat as QAT\n'), ((3483, 3515), 'megengine.quantization.quantize.disable_observer', 'disable_observer', (['qat_from_float'], {}), '(qat_from_float)\n', (3499, 3515), False, 'from megengine.quantization.quantize import disable_fake_quant, disable_observer, propagate_qconfig\n'), ((3520, 3554), 'megengine.quantization.quantize.disable_fake_quant', 'disable_fake_quant', (['qat_from_float'], {}), '(qat_from_float)\n', (3538, 3554), False, 'from megengine.quantization.quantize import disable_fake_quant, disable_observer, propagate_qconfig\n'), ((3570, 3588), 'megengine.module.qat.Elemwise', 'QAT.Elemwise', (['kind'], {}), '(kind)\n', (3582, 3588), True, 'import megengine.module.qat as QAT\n'), ((3612, 3637), 'megengine.quantization.quantize.disable_observer', 'disable_observer', (['qat_net'], {}), '(qat_net)\n', (3628, 3637), False, 'from megengine.quantization.quantize import disable_fake_quant, disable_observer, propagate_qconfig\n'), ((3643, 3696), 'megengine.quantization.quantize.propagate_qconfig', 'propagate_qconfig', (['qat_net', 'min_max_fakequant_qconfig'], {}), '(qat_net, min_max_fakequant_qconfig)\n', (3660, 3696), False, 'from megengine.quantization.quantize import disable_fake_quant, disable_observer, propagate_qconfig\n'), ((3736, 3771), 'megengine.module.quantized.Elemwise.from_qat_module', 'Q.Elemwise.from_qat_module', (['qat_net'], {}), '(qat_net)\n', (3762, 3771), True, 'import megengine.module.quantized as Q\n'), ((4819, 4876), 
'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['qat_without_fakequant', 'normal'], {}), '(qat_without_fakequant, normal)\n', (4845, 4876), True, 'import numpy as np\n'), ((4881, 4931), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['qat', 'fake_quant_normal'], {}), '(qat, fake_quant_normal)\n', (4907, 4931), True, 'import numpy as np\n'), ((5031, 5060), 'megengine.module.Linear', 'Float.Linear', (['(3)', '(3)'], {'bias': '(True)'}), '(3, 3, bias=True)\n', (5043, 5060), True, 'import megengine.module as Float\n'), ((5098, 5125), 'megengine.module.qat.Linear', 'QAT.Linear', (['(3)', '(3)'], {'bias': '(True)'}), '(3, 3, bias=True)\n', (5108, 5125), True, 'import megengine.module.qat as QAT\n'), ((5149, 5174), 'megengine.quantization.quantize.disable_observer', 'disable_observer', (['qat_net'], {}), '(qat_net)\n', (5165, 5174), False, 'from megengine.quantization.quantize import disable_fake_quant, disable_observer, propagate_qconfig\n'), ((5180, 5233), 'megengine.quantization.quantize.propagate_qconfig', 'propagate_qconfig', (['qat_net', 'min_max_fakequant_qconfig'], {}), '(qat_net, min_max_fakequant_qconfig)\n', (5197, 5233), False, 'from megengine.quantization.quantize import disable_fake_quant, disable_observer, propagate_qconfig\n'), ((5781, 5821), 'megengine.module.qat.Linear.from_float_module', 'QAT.Linear.from_float_module', (['normal_net'], {}), '(normal_net)\n', (5809, 5821), True, 'import megengine.module.qat as QAT\n'), ((5852, 5886), 'megengine.quantization.quantize.disable_fake_quant', 'disable_fake_quant', (['qat_from_float'], {}), '(qat_from_float)\n', (5870, 5886), False, 'from megengine.quantization.quantize import disable_fake_quant, disable_observer, propagate_qconfig\n'), ((5891, 5923), 'megengine.quantization.quantize.disable_observer', 'disable_observer', (['qat_from_float'], {}), '(qat_from_float)\n', (5907, 5923), False, 'from megengine.quantization.quantize import disable_fake_quant, disable_observer, propagate_qconfig\n'), ((5937, 5970), 'megengine.module.quantized.Linear.from_qat_module', 'Q.Linear.from_qat_module', (['qat_net'], {}), '(qat_net)\n', (5961, 5970), True, 'import megengine.module.quantized as Q\n'), ((6190, 6247), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['qat_without_fakequant', 'normal'], {}), '(qat_without_fakequant, normal)\n', (6216, 6247), True, 'import numpy as np\n'), ((6252, 6302), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['qat', 'fake_quant_normal'], {}), '(qat, fake_quant_normal)\n', (6278, 6302), True, 'import numpy as np\n'), ((6643, 6668), 'megengine.quantization.quantize.disable_observer', 'disable_observer', (['qat_net'], {}), '(qat_net)\n', (6659, 6668), False, 'from megengine.quantization.quantize import disable_fake_quant, disable_observer, propagate_qconfig\n'), ((6674, 6727), 'megengine.quantization.quantize.propagate_qconfig', 'propagate_qconfig', (['qat_net', 'min_max_fakequant_qconfig'], {}), '(qat_net, min_max_fakequant_qconfig)\n', (6691, 6727), False, 'from megengine.quantization.quantize import disable_fake_quant, disable_observer, propagate_qconfig\n'), ((7695, 7727), 'megengine.quantization.quantize.disable_observer', 'disable_observer', (['qat_from_float'], {}), '(qat_from_float)\n', (7711, 7727), False, 'from megengine.quantization.quantize import disable_fake_quant, disable_observer, propagate_qconfig\n'), ((7732, 7766), 'megengine.quantization.quantize.disable_fake_quant', 'disable_fake_quant', (['qat_from_float'], {}), 
'(qat_from_float)\n', (7750, 7766), False, 'from megengine.quantization.quantize import disable_fake_quant, disable_observer, propagate_qconfig\n'), ((8043, 8112), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['qat_without_fakequant', 'normal'], {'atol': '(1e-06)'}), '(qat_without_fakequant, normal, atol=1e-06)\n', (8069, 8112), True, 'import numpy as np\n'), ((8116, 8166), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['qat', 'fake_quant_normal'], {}), '(qat, fake_quant_normal)\n', (8142, 8166), True, 'import numpy as np\n'), ((513, 529), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (527, 529), True, 'import numpy as np\n'), ((546, 583), 'numpy.random.randint', 'np.random.randint', (['(-127)', '(0)'], {'size': '(2,)'}), '(-127, 0, size=(2,))\n', (563, 583), True, 'import numpy as np\n'), ((612, 648), 'numpy.random.randint', 'np.random.randint', (['(1)', '(127)'], {'size': '(2,)'}), '(1, 127, size=(2,))\n', (629, 648), True, 'import numpy as np\n'), ((693, 726), 'numpy.max', 'np.max', (['[-min_val[0], max_val[0]]'], {}), '([-min_val[0], max_val[0]])\n', (699, 726), True, 'import numpy as np\n'), ((761, 794), 'numpy.max', 'np.max', (['[-min_val[1], max_val[1]]'], {}), '([-min_val[1], max_val[1]])\n', (767, 794), True, 'import numpy as np\n'), ((3816, 3832), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (3830, 3832), True, 'import numpy as np\n'), ((4002, 4018), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (4016, 4018), True, 'import numpy as np\n'), ((5444, 5473), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(3, 3)'}), '(size=(3, 3))\n', (5460, 5473), True, 'import numpy as np\n'), ((5503, 5530), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(3,)'}), '(size=(3,))\n', (5519, 5530), True, 'import numpy as np\n'), ((6944, 6979), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(3, 3, 3, 3)'}), '(size=(3, 3, 3, 3))\n', (6960, 6979), True, 'import numpy as np\n'), ((7009, 7044), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(1, 3, 1, 1)'}), '(size=(1, 3, 1, 1))\n', (7025, 7044), True, 'import numpy as np\n'), ((1818, 1847), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(3, 3)'}), '(size=(3, 3))\n', (1834, 1847), True, 'import numpy as np\n'), ((2750, 2779), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(3, 3)'}), '(size=(3, 3))\n', (2766, 2779), True, 'import numpy as np\n'), ((3858, 3887), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(3, 3)'}), '(size=(3, 3))\n', (3874, 3887), True, 'import numpy as np\n'), ((4044, 4073), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(3, 3)'}), '(size=(3, 3))\n', (4060, 4073), True, 'import numpy as np\n'), ((5280, 5309), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(3, 3)'}), '(size=(3, 3))\n', (5296, 5309), True, 'import numpy as np\n'), ((6774, 6809), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(1, 3, 3, 3)'}), '(size=(1, 3, 3, 3))\n', (6790, 6809), True, 'import numpy as np\n')]
|
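The tests above all compare a quantized module against fake_quant applied to the float output. A NumPy-only sketch of that round trip (scale value made up): divide by the scale, round, clip to the int8 range, multiply back; for inputs inside the representable range the reconstruction error is bounded by half a scale step:

import numpy as np
def fake_quant_np(x, scale):
    q = np.clip(np.round(x / scale), -128, 127)   # simulated int8 value
    return q * scale                               # back to float
scale = 0.05
x = np.random.uniform(-6, 6, size=(4, 4)).astype("float32")
err = np.abs(fake_quant_np(x, scale) - x)
assert err.max() <= scale / 2 + 1e-6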
import numpy as nm
from sfepy.base.base import output, get_default, assert_, Struct
def get_print_info(n_step):
if n_step > 1:
n_digit = int(nm.log10(n_step - 1) + 1)
else:
n_digit = 1
format = '%%%dd of %%%dd' % (n_digit, n_digit)
suffix = '%%0%dd' % n_digit
return n_digit, format, suffix
class TimeStepper(Struct):
"""
Time stepper class.
"""
@staticmethod
def from_conf(conf):
return TimeStepper(conf.t0, conf.t1, dt=conf.dt, n_step=conf.n_step,
is_quasistatic=conf.quasistatic)
def __init__(self, t0, t1, dt=None, n_step=None, step=None,
is_quasistatic=False):
self.set_from_data(t0, t1, dt=dt, n_step=n_step, step=step)
self.is_quasistatic = is_quasistatic
self.step_start_time = None
def _get_n_step(self, t0, t1, dt):
n_step = int(round(nm.floor(((t1 - t0) / dt) + 0.5) + 1.0))
return n_step
def set_from_data(self, t0, t1, dt=None, n_step=None, step=None):
self.t0, self.t1 = t0, t1
dt = get_default(dt, t1 - t0)
self.n_step = get_default(n_step,
self._get_n_step(self.t0, self.t1, dt))
if self.n_step > 1:
self.times, self.dt = nm.linspace(self.t0, self.t1, self.n_step,
endpoint=True, retstep=True)
else:
self.times = nm.array((self.t0,), dtype=nm.float64)
self.dt = self.t1 - self.t0
self.n_digit, self.format, self.suffix = get_print_info(self.n_step)
self.set_step(step)
def set_from_ts(self, ts, step=None):
step = get_default(step, ts.step)
self.set_from_data(ts.t0, ts.t1, ts.dt, ts.n_step, step=step)
def get_state(self):
return {'step' : self.step}
def set_state(self, step=0, **kwargs):
self.set_step(step=step)
def set_substep_time(self, sub_dt):
self.step_start_time = self.time
self.time += sub_dt
def restore_step_time(self):
if self.step_start_time is not None:
self.time = self.step_start_time
self.step_start_time = None
def advance(self):
if self.step < (self.n_step - 1):
self.step += 1
self.time = self.times[self.step]
self.normalize_time()
def __iter__(self):
"""ts.step, ts.time is consistent with step, time returned here
ts.nt is normalized time in [0, 1]"""
return self.iter_from(0)
def iter_from(self, step):
self.set_step(step=step)
for time in self.times[step:]:
yield self.step, self.time
self.advance()
def normalize_time(self):
self.nt = (self.time - self.t0) / (self.t1 - self.t0)
def set_step(self, step=0, nt=0.0):
nm1 = self.n_step - 1
if step is None:
step = int(round(nt * nm1))
if step < 0:
step = self.n_step + step
if (step >= self.n_step) or (step < 0):
            output('time step must be in [%d, %d]' % (-nm1, nm1))
raise ValueError
self.step = step
self.time = self.times[step]
self.normalize_time()
def __eq__(self, other):
if type(other) == type(self):
            return (abs(self.t0 - other.t0) < 1e-15) and \
                   (abs(self.t1 - other.t1) < 1e-15) and \
(self.n_step == other.n_step)
else:
raise ValueError
class VariableTimeStepper(TimeStepper):
"""
Time stepper class with a variable time step.
"""
@staticmethod
def from_conf(conf):
return VariableTimeStepper(conf.t0, conf.t1, dt=conf.dt,
n_step=conf.n_step,
is_quasistatic=conf.quasistatic)
def set_from_data(self, t0, t1, dt=None, n_step=None, step=None):
self.t0, self.t1 = t0, t1
self.dtime = self.t1 - self.t0
dt = get_default(dt, self.dtime)
self.n_step0 = get_default(n_step,
self._get_n_step(self.t0, self.t1, dt))
if self.n_step0 > 1:
self.dt = self.dtime / (self.n_step0 - 1)
else:
self.dt = self.dtime
self.dt0 = self.dt
self.n_digit, self.format, self.suffix = get_print_info(5)
self.set_step(step)
def set_from_ts(self, ts, step=None):
self.set_from_data(ts.t0, ts.t1, ts.dt, ts.n_step0, step=0)
def get_state(self):
return {'step' : self.step, 'dts' : self.dts, 'times' : self.times}
def set_state(self, step=0, dts=None, times=None, **kwargs):
assert_(len(dts) == len(times) == (step + 1))
self.step = step
self.dts = dts
self.times = times
self.dt = self.dts[-1]
self.time = self.times[-1]
self.normalize_time()
def set_n_digit_from_min_dt(self, dt):
n_step = self._get_n_step(self.t0, self.t1, dt)
self.n_digit, self.format, self.suffix = get_print_info(n_step)
def set_step(self, step=0, nt=0.0):
if step is None:
step = 0
if (step > 0) and (step != self.step):
msg = 'cannot set step != self.step or 0 in VariableTimeStepper!'
raise ValueError(msg)
if step == 0:
self.step = 0
self.time = self.t0
self.nt = 0.0
self.dts = [self.dt]
self.times = [self.time]
self.n_step = 1
def get_default_time_step(self):
return self.dt0
def set_time_step(self, dt, update_time=False):
self.dt = dt
if update_time:
self.time = self.times[self.step - 1] + self.dt
self.times[self.step] = self.time
self.normalize_time()
def advance(self):
self.step += 1
self.time += self.dt
self.normalize_time()
self.times.append(self.time)
self.dts.append(self.dt)
self.n_step = self.step + 1
def iter_from(self, step):
self.set_step(step=step)
return self.iter_from_current()
def iter_from_current(self):
"""
ts.step, ts.time is consistent with step, time returned here
ts.nt is normalized time in [0, 1].
"""
while 1:
yield self.step, self.time
if self.nt >= 1.0:
break
self.advance()
def __iter__(self):
self.set_step(0)
return self.iter_from_current()
|
[
"sfepy.base.base.output",
"sfepy.base.base.get_default"
] |
[((1084, 1108), 'sfepy.base.base.get_default', 'get_default', (['dt', '(t1 - t0)'], {}), '(dt, t1 - t0)\n', (1095, 1108), False, 'from sfepy.base.base import output, get_default, assert_, Struct\n'), ((1689, 1715), 'sfepy.base.base.get_default', 'get_default', (['step', 'ts.step'], {}), '(step, ts.step)\n', (1700, 1715), False, 'from sfepy.base.base import output, get_default, assert_, Struct\n'), ((4024, 4051), 'sfepy.base.base.get_default', 'get_default', (['dt', 'self.dtime'], {}), '(dt, self.dtime)\n', (4035, 4051), False, 'from sfepy.base.base import output, get_default, assert_, Struct\n'), ((1288, 1359), 'numpy.linspace', 'nm.linspace', (['self.t0', 'self.t1', 'self.n_step'], {'endpoint': '(True)', 'retstep': '(True)'}), '(self.t0, self.t1, self.n_step, endpoint=True, retstep=True)\n', (1299, 1359), True, 'import numpy as nm\n'), ((1445, 1483), 'numpy.array', 'nm.array', (['(self.t0,)'], {'dtype': 'nm.float64'}), '((self.t0,), dtype=nm.float64)\n', (1453, 1483), True, 'import numpy as nm\n'), ((3069, 3122), 'sfepy.base.base.output', 'output', (["('time step must be in [%d, %d]' % (-nm1, nm1))"], {}), "('time step must be in [%d, %d]' % (-nm1, nm1))\n", (3075, 3122), False, 'from sfepy.base.base import output, get_default, assert_, Struct\n'), ((155, 175), 'numpy.log10', 'nm.log10', (['(n_step - 1)'], {}), '(n_step - 1)\n', (163, 175), True, 'import numpy as nm\n'), ((902, 932), 'numpy.floor', 'nm.floor', (['((t1 - t0) / dt + 0.5)'], {}), '((t1 - t0) / dt + 0.5)\n', (910, 932), True, 'import numpy as nm\n')]
|
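A small usage sketch for the TimeStepper above (parameters made up, class assumed to be in scope): five uniform steps over [0, 1], iterated as (step, time) pairs while the normalized time ts.nt tracks progress from 0 to 1:

ts = TimeStepper(0.0, 1.0, n_step=5)
for step, time in ts:
    print("step %d  t = %.2f  nt = %.2f" % (step, time, ts.nt))
# t runs over 0.00, 0.25, 0.50, 0.75, 1.00 as nt goes from 0 to 1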
from sqlmodel import create_engine, SQLModel, Session
from .config import settings
engine = create_engine(
settings.db.uri,
echo=settings.db.echo,
connect_args=settings.db.connect_args
)
def init_db():
SQLModel.metadata.create_all(engine)
def drop_db():
SQLModel.metadata.drop_all(engine)
def get_session():
with Session(engine) as session:
yield session
|
[
"sqlmodel.Session",
"sqlmodel.create_engine",
"sqlmodel.SQLModel.metadata.create_all",
"sqlmodel.SQLModel.metadata.drop_all"
] |
[((95, 192), 'sqlmodel.create_engine', 'create_engine', (['settings.db.uri'], {'echo': 'settings.db.echo', 'connect_args': 'settings.db.connect_args'}), '(settings.db.uri, echo=settings.db.echo, connect_args=settings\n .db.connect_args)\n', (108, 192), False, 'from sqlmodel import create_engine, SQLModel, Session\n'), ((223, 259), 'sqlmodel.SQLModel.metadata.create_all', 'SQLModel.metadata.create_all', (['engine'], {}), '(engine)\n', (251, 259), False, 'from sqlmodel import create_engine, SQLModel, Session\n'), ((281, 315), 'sqlmodel.SQLModel.metadata.drop_all', 'SQLModel.metadata.drop_all', (['engine'], {}), '(engine)\n', (307, 315), False, 'from sqlmodel import create_engine, SQLModel, Session\n'), ((346, 361), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (353, 361), False, 'from sqlmodel import create_engine, SQLModel, Session\n')]
|
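The get_session generator above has the shape FastAPI expects from a dependency: one Session is opened per request and closed once the response is done. A hypothetical wiring sketch (the route and the User table model are made up for illustration, not part of this module):

from fastapi import Depends, FastAPI
from sqlmodel import Session, select
app = FastAPI()
@app.on_event("startup")
def on_startup():
    init_db()   # create the tables once at startup
@app.get("/users")
def list_users(session: Session = Depends(get_session)):
    return session.exec(select(User)).all()   # User: hypothetical table model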
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 25 09:19:35 2020
@author: dhulls
"""
from __future__ import print_function
from __future__ import absolute_import
from argparse import ArgumentParser
import numpy as nm
import sys
sys.path.append('.')
from sfepy.base.base import IndexedStruct, Struct
from sfepy.discrete import (FieldVariable, Material, Integral, Function,
Equation, Equations, Problem)
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.terms import Term
from sfepy.discrete.conditions import Conditions, EssentialBC
from sfepy.solvers.ls import ScipyDirect
from sfepy.solvers.nls import Newton
from sfepy.postprocess.viewer import Viewer
from sfepy.postprocess.probes_vtk import ProbeFromFile, Probe
from sfepy.mechanics.matcoefs import stiffness_from_lame
from sfepy.mechanics.matcoefs import stiffness_for_transIso
from sfepy.mechanics.matcoefs import stiffness_from_youngpoisson
import numpy as np
def shift_u_fun(ts, coors, bc=None, problem=None, shift=0.0):
"""
Define a displacement depending on the y coordinate.
"""
val = shift * coors[:,1]**2
return val
helps = {
'show' : 'show the results figure',
}
def post_process(out, problem, state, extend=False):
"""
Calculate and output strain and stress for given displacements.
"""
ev = problem.evaluate
stress = ev('ev_cauchy_stress.%d.Omega(m.D, u)' % (2), mode='el_avg',
copy_materials=False, verbose=False)
out['cauchy_stress'] = Struct(name='output_data', mode='cell',
data=stress, dofs=None)
return out
from sfepy import data_dir
parser = ArgumentParser()
parser.add_argument('--version', action='version', version='%(prog)s')
parser.add_argument('-s', '--show',
action="store_true", dest='show',
default=False, help=helps['show'])
options = parser.parse_args()
mesh = Mesh.from_file(data_dir + '/meshes/3d/cylinder.inp')
domain = FEDomain('domain', mesh)
omega = domain.create_region('Omega', 'all')
Bottom = domain.create_region('Bottom', 'vertices in (z < -0.049)', 'facet')
Top = domain.create_region('Top', 'vertices in (z > 0.049)', 'facet')
field = Field.from_args('fu', nm.float64, 'vector', omega, approx_order=1)
u = FieldVariable('u', 'unknown', field)
v = FieldVariable('v', 'test', field, primary_var_name='u')
m = Material('m', D=stiffness_from_youngpoisson(dim=3, young=2, poisson=0.3))
# m = Material('m', D=stiffness_for_transIso(dim=3, Ex=2.5, Ez=2, vxy=0.25, vxz=0.3, Gxz=1.75))
integral = Integral('i', order=1)
t1 = Term.new('dw_lin_elastic(m.D, v, u)',
integral, omega, m=m, v=v, u=u)
eq = Equation('balance', t1)
eqs = Equations([eq])
Fixed = EssentialBC('Fixed', Bottom, {'u.all' : 0.0})
Displaced = EssentialBC('Displaced', Top, {'u.1' : 0.01, 'u.[0,2]' : 0.0})
ls = ScipyDirect({})
nls_status = IndexedStruct()
nls = Newton({}, lin_solver=ls, status=nls_status)
pb = Problem('elasticity', equations=eqs)
pb.save_regions_as_groups('regions')
pb.set_bcs(ebcs=Conditions([Fixed, Displaced]))
pb.set_solver(nls)
status = IndexedStruct()
#####
state = pb.solve(status=status, save_results=True, post_process_hook=post_process)
out = state.create_output_dict()
out = post_process(out, pb, state, extend=True)
pb.save_state('postprocess.vtk', out=out)
prb = Probe(out,pb.domain.mesh)
def Compute_Max_VonMises(probe,num_points,interval):
radii = np.arange(interval,0.05,interval)
lens = np.arange(interval,0.05,interval)
max_stress = 0
for ii in np.arange(0,len(lens),1):
for jj in np.arange(0,len(radii),1):
probe.add_circle_probe('Test', np.array([0,0,lens[ii]]), np.array([0,0,1]), radii[jj], num_points)
            pars, vals = probe('Test', 'cauchy_stress')
req = np.max(0.5*((vals[:,0]-vals[:,1])**2+(vals[:,2]-vals[:,1])**2+(vals[:,0]-vals[:,2])**2+6*(vals[:,3]**2+vals[:,4]**2+vals[:,5]**2)))
if max_stress < req:
max_stress = req
return max_stress
stress_von = Compute_Max_VonMises(prb,200,0.0025)
# print('Nonlinear solver status:\n', nls_status)
# print('Stationary solver status:\n', status)
# pb.save_state('linear_elasticity.vtk', state)
# # if options.show:
# view = Viewer('postprocess.vtk')
# view(rel_scaling=2,
# is_scalar_bar=True, is_wireframe=True)
|
[
"sfepy.base.base.IndexedStruct",
"sfepy.discrete.Equation",
"sfepy.mechanics.matcoefs.stiffness_from_youngpoisson",
"sfepy.discrete.conditions.EssentialBC",
"sfepy.postprocess.probes_vtk.Probe",
"sfepy.base.base.Struct",
"sfepy.discrete.fem.Mesh.from_file",
"sfepy.solvers.ls.ScipyDirect",
"sfepy.discrete.fem.Field.from_args",
"sfepy.solvers.nls.Newton",
"sfepy.terms.Term.new",
"sfepy.discrete.Equations",
"sfepy.discrete.Integral",
"sfepy.discrete.fem.FEDomain",
"sfepy.discrete.Problem",
"sfepy.discrete.FieldVariable",
"sfepy.discrete.conditions.Conditions"
] |
[((253, 273), 'sys.path.append', 'sys.path.append', (['"""."""'], {}), "('.')\n", (268, 273), False, 'import sys\n'), ((1699, 1715), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (1713, 1715), False, 'from argparse import ArgumentParser\n'), ((1970, 2022), 'sfepy.discrete.fem.Mesh.from_file', 'Mesh.from_file', (["(data_dir + '/meshes/3d/cylinder.inp')"], {}), "(data_dir + '/meshes/3d/cylinder.inp')\n", (1984, 2022), False, 'from sfepy.discrete.fem import Mesh, FEDomain, Field\n'), ((2032, 2056), 'sfepy.discrete.fem.FEDomain', 'FEDomain', (['"""domain"""', 'mesh'], {}), "('domain', mesh)\n", (2040, 2056), False, 'from sfepy.discrete.fem import Mesh, FEDomain, Field\n'), ((2259, 2325), 'sfepy.discrete.fem.Field.from_args', 'Field.from_args', (['"""fu"""', 'nm.float64', '"""vector"""', 'omega'], {'approx_order': '(1)'}), "('fu', nm.float64, 'vector', omega, approx_order=1)\n", (2274, 2325), False, 'from sfepy.discrete.fem import Mesh, FEDomain, Field\n'), ((2331, 2367), 'sfepy.discrete.FieldVariable', 'FieldVariable', (['"""u"""', '"""unknown"""', 'field'], {}), "('u', 'unknown', field)\n", (2344, 2367), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem\n'), ((2372, 2427), 'sfepy.discrete.FieldVariable', 'FieldVariable', (['"""v"""', '"""test"""', 'field'], {'primary_var_name': '"""u"""'}), "('v', 'test', field, primary_var_name='u')\n", (2385, 2427), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem\n'), ((2616, 2638), 'sfepy.discrete.Integral', 'Integral', (['"""i"""'], {'order': '(1)'}), "('i', order=1)\n", (2624, 2638), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem\n'), ((2645, 2714), 'sfepy.terms.Term.new', 'Term.new', (['"""dw_lin_elastic(m.D, v, u)"""', 'integral', 'omega'], {'m': 'm', 'v': 'v', 'u': 'u'}), "('dw_lin_elastic(m.D, v, u)', integral, omega, m=m, v=v, u=u)\n", (2653, 2714), False, 'from sfepy.terms import Term\n'), ((2734, 2757), 'sfepy.discrete.Equation', 'Equation', (['"""balance"""', 't1'], {}), "('balance', t1)\n", (2742, 2757), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem\n'), ((2764, 2779), 'sfepy.discrete.Equations', 'Equations', (['[eq]'], {}), '([eq])\n', (2773, 2779), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem\n'), ((2789, 2833), 'sfepy.discrete.conditions.EssentialBC', 'EssentialBC', (['"""Fixed"""', 'Bottom', "{'u.all': 0.0}"], {}), "('Fixed', Bottom, {'u.all': 0.0})\n", (2800, 2833), False, 'from sfepy.discrete.conditions import Conditions, EssentialBC\n'), ((2848, 2908), 'sfepy.discrete.conditions.EssentialBC', 'EssentialBC', (['"""Displaced"""', 'Top', "{'u.1': 0.01, 'u.[0,2]': 0.0}"], {}), "('Displaced', Top, {'u.1': 0.01, 'u.[0,2]': 0.0})\n", (2859, 2908), False, 'from sfepy.discrete.conditions import Conditions, EssentialBC\n'), ((2917, 2932), 'sfepy.solvers.ls.ScipyDirect', 'ScipyDirect', (['{}'], {}), '({})\n', (2928, 2932), False, 'from sfepy.solvers.ls import ScipyDirect\n'), ((2947, 2962), 'sfepy.base.base.IndexedStruct', 'IndexedStruct', ([], {}), '()\n', (2960, 2962), False, 'from sfepy.base.base import IndexedStruct, Struct\n'), ((2969, 3013), 'sfepy.solvers.nls.Newton', 'Newton', (['{}'], {'lin_solver': 'ls', 'status': 'nls_status'}), '({}, lin_solver=ls, status=nls_status)\n', (2975, 3013), False, 'from sfepy.solvers.nls 
import Newton\n'), ((3020, 3056), 'sfepy.discrete.Problem', 'Problem', (['"""elasticity"""'], {'equations': 'eqs'}), "('elasticity', equations=eqs)\n", (3027, 3056), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem\n'), ((3174, 3189), 'sfepy.base.base.IndexedStruct', 'IndexedStruct', ([], {}), '()\n', (3187, 3189), False, 'from sfepy.base.base import IndexedStruct, Struct\n'), ((3414, 3440), 'sfepy.postprocess.probes_vtk.Probe', 'Probe', (['out', 'pb.domain.mesh'], {}), '(out, pb.domain.mesh)\n', (3419, 3440), False, 'from sfepy.postprocess.probes_vtk import ProbeFromFile, Probe\n'), ((1547, 1610), 'sfepy.base.base.Struct', 'Struct', ([], {'name': '"""output_data"""', 'mode': '"""cell"""', 'data': 'stress', 'dofs': 'None'}), "(name='output_data', mode='cell', data=stress, dofs=None)\n", (1553, 1610), False, 'from sfepy.base.base import IndexedStruct, Struct\n'), ((3506, 3541), 'numpy.arange', 'np.arange', (['interval', '(0.05)', 'interval'], {}), '(interval, 0.05, interval)\n', (3515, 3541), True, 'import numpy as np\n'), ((3551, 3586), 'numpy.arange', 'np.arange', (['interval', '(0.05)', 'interval'], {}), '(interval, 0.05, interval)\n', (3560, 3586), True, 'import numpy as np\n'), ((2449, 2505), 'sfepy.mechanics.matcoefs.stiffness_from_youngpoisson', 'stiffness_from_youngpoisson', ([], {'dim': '(3)', 'young': '(2)', 'poisson': '(0.3)'}), '(dim=3, young=2, poisson=0.3)\n', (2476, 2505), False, 'from sfepy.mechanics.matcoefs import stiffness_from_youngpoisson\n'), ((3112, 3142), 'sfepy.discrete.conditions.Conditions', 'Conditions', (['[Fixed, Displaced]'], {}), '([Fixed, Displaced])\n', (3122, 3142), False, 'from sfepy.discrete.conditions import Conditions, EssentialBC\n'), ((3879, 4059), 'numpy.max', 'np.max', (['(0.5 * ((vals[:, 0] - vals[:, 1]) ** 2 + (vals[:, 2] - vals[:, 1]) ** 2 + (\n vals[:, 0] - vals[:, 2]) ** 2 + 6 * (vals[:, 3] ** 2 + vals[:, 4] ** 2 +\n vals[:, 5] ** 2)))'], {}), '(0.5 * ((vals[:, 0] - vals[:, 1]) ** 2 + (vals[:, 2] - vals[:, 1]) **\n 2 + (vals[:, 0] - vals[:, 2]) ** 2 + 6 * (vals[:, 3] ** 2 + vals[:, 4] **\n 2 + vals[:, 5] ** 2)))\n', (3885, 4059), True, 'import numpy as np\n'), ((3732, 3758), 'numpy.array', 'np.array', (['[0, 0, lens[ii]]'], {}), '([0, 0, lens[ii]])\n', (3740, 3758), True, 'import numpy as np\n'), ((3758, 3777), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (3766, 3777), True, 'import numpy as np\n')]
|
"""init
Revision ID: fb8ce6ce7c6b
Revises:
Create Date: 2021-11-27 16:52:18.035895
"""
from alembic import op
import sqlalchemy as sa
import sqlmodel
# revision identifiers, used by Alembic.
revision = "fb8ce6ce7c6b"
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"appid_error",
sa.Column("pk", sa.Integer(), nullable=True),
sa.Column("appid", sa.Integer(), nullable=False),
sa.Column("name", sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column("reason", sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.PrimaryKeyConstraint("pk"),
sa.UniqueConstraint("appid"),
)
op.create_table(
"category",
sa.Column("pk", sa.Integer(), nullable=True),
sa.Column("id", sa.Integer(), nullable=False),
sa.Column(
"description", sqlmodel.sql.sqltypes.AutoString(), nullable=False
),
sa.PrimaryKeyConstraint("pk"),
)
op.create_table(
"genre",
sa.Column("pk", sa.Integer(), nullable=True),
sa.Column("id", sa.Integer(), nullable=False),
sa.Column(
"description", sqlmodel.sql.sqltypes.AutoString(), nullable=False
),
sa.PrimaryKeyConstraint("pk"),
)
op.create_table(
"steam_app",
sa.Column("pk", sa.Integer(), nullable=True),
sa.Column("appid", sa.Integer(), nullable=False),
sa.Column("type", sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column("is_free", sa.Boolean(), nullable=True),
sa.Column("name", sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column(
"controller_support",
sqlmodel.sql.sqltypes.AutoString(),
nullable=True,
),
sa.Column("metacritic_score", sa.Integer(), nullable=True),
sa.Column(
"metacritic_url", sqlmodel.sql.sqltypes.AutoString(), nullable=True
),
sa.Column("recommendations", sa.Integer(), nullable=True),
sa.Column("achievements_total", sa.Integer(), nullable=True),
sa.Column("release_date", sa.Date(), nullable=True),
sa.Column("created", sa.DateTime(), nullable=False),
sa.Column("updated", sa.DateTime(), nullable=False),
sa.PrimaryKeyConstraint("pk"),
)
op.create_index(
op.f("ix_steam_app_appid"), "steam_app", ["appid"], unique=True
)
op.create_index(
op.f("ix_steam_app_name"), "steam_app", ["name"], unique=False
)
op.create_table(
"achievement",
sa.Column("pk", sa.Integer(), nullable=True),
sa.Column("name", sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column("percent", sa.Float(), nullable=False),
sa.Column("steam_app_pk", sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(
["steam_app_pk"],
["steam_app.pk"],
),
sa.PrimaryKeyConstraint("pk"),
)
op.create_table(
"categorysteamapplink",
sa.Column("category_pk", sa.Integer(), nullable=True),
sa.Column("steam_app_pk", sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(
["category_pk"],
["category.pk"],
),
sa.ForeignKeyConstraint(
["steam_app_pk"],
["steam_app.pk"],
),
sa.PrimaryKeyConstraint("category_pk", "steam_app_pk"),
)
op.create_table(
"genresteammapplink",
sa.Column("genre_pk", sa.Integer(), nullable=True),
sa.Column("steam_app_pk", sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(
["genre_pk"],
["genre.pk"],
),
sa.ForeignKeyConstraint(
["steam_app_pk"],
["steam_app.pk"],
),
sa.PrimaryKeyConstraint("genre_pk", "steam_app_pk"),
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table("genresteammapplink")
op.drop_table("categorysteamapplink")
op.drop_table("achievement")
op.drop_index(op.f("ix_steam_app_name"), table_name="steam_app")
op.drop_index(op.f("ix_steam_app_appid"), table_name="steam_app")
op.drop_table("steam_app")
op.drop_table("genre")
op.drop_table("category")
op.drop_table("appid_error")
# ### end Alembic commands ###
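# Usage note (not part of the generated migration): with a configured Alembic
# environment, `alembic upgrade head` applies this revision via upgrade() and
# `alembic downgrade base` reverts it via downgrade().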
|
[
"sqlmodel.sql.sqltypes.AutoString"
] |
[((4084, 4119), 'alembic.op.drop_table', 'op.drop_table', (['"""genresteammapplink"""'], {}), "('genresteammapplink')\n", (4097, 4119), False, 'from alembic import op\n'), ((4124, 4161), 'alembic.op.drop_table', 'op.drop_table', (['"""categorysteamapplink"""'], {}), "('categorysteamapplink')\n", (4137, 4161), False, 'from alembic import op\n'), ((4166, 4194), 'alembic.op.drop_table', 'op.drop_table', (['"""achievement"""'], {}), "('achievement')\n", (4179, 4194), False, 'from alembic import op\n'), ((4338, 4364), 'alembic.op.drop_table', 'op.drop_table', (['"""steam_app"""'], {}), "('steam_app')\n", (4351, 4364), False, 'from alembic import op\n'), ((4369, 4391), 'alembic.op.drop_table', 'op.drop_table', (['"""genre"""'], {}), "('genre')\n", (4382, 4391), False, 'from alembic import op\n'), ((4396, 4421), 'alembic.op.drop_table', 'op.drop_table', (['"""category"""'], {}), "('category')\n", (4409, 4421), False, 'from alembic import op\n'), ((4426, 4454), 'alembic.op.drop_table', 'op.drop_table', (['"""appid_error"""'], {}), "('appid_error')\n", (4439, 4454), False, 'from alembic import op\n'), ((687, 716), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""pk"""'], {}), "('pk')\n", (710, 716), True, 'import sqlalchemy as sa\n'), ((726, 754), 'sqlalchemy.UniqueConstraint', 'sa.UniqueConstraint', (['"""appid"""'], {}), "('appid')\n", (745, 754), True, 'import sqlalchemy as sa\n'), ((1028, 1057), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""pk"""'], {}), "('pk')\n", (1051, 1057), True, 'import sqlalchemy as sa\n'), ((1328, 1357), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""pk"""'], {}), "('pk')\n", (1351, 1357), True, 'import sqlalchemy as sa\n'), ((2380, 2409), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""pk"""'], {}), "('pk')\n", (2403, 2409), True, 'import sqlalchemy as sa\n'), ((2446, 2472), 'alembic.op.f', 'op.f', (['"""ix_steam_app_appid"""'], {}), "('ix_steam_app_appid')\n", (2450, 2472), False, 'from alembic import op\n'), ((2545, 2570), 'alembic.op.f', 'op.f', (['"""ix_steam_app_name"""'], {}), "('ix_steam_app_name')\n", (2549, 2570), False, 'from alembic import op\n'), ((2921, 2980), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['steam_app_pk']", "['steam_app.pk']"], {}), "(['steam_app_pk'], ['steam_app.pk'])\n", (2944, 2980), True, 'import sqlalchemy as sa\n'), ((3025, 3054), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""pk"""'], {}), "('pk')\n", (3048, 3054), True, 'import sqlalchemy as sa\n'), ((3250, 3307), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['category_pk']", "['category.pk']"], {}), "(['category_pk'], ['category.pk'])\n", (3273, 3307), True, 'import sqlalchemy as sa\n'), ((3352, 3411), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['steam_app_pk']", "['steam_app.pk']"], {}), "(['steam_app_pk'], ['steam_app.pk'])\n", (3375, 3411), True, 'import sqlalchemy as sa\n'), ((3456, 3510), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""category_pk"""', '"""steam_app_pk"""'], {}), "('category_pk', 'steam_app_pk')\n", (3479, 3510), True, 'import sqlalchemy as sa\n'), ((3701, 3752), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['genre_pk']", "['genre.pk']"], {}), "(['genre_pk'], ['genre.pk'])\n", (3724, 3752), True, 'import sqlalchemy as sa\n'), ((3797, 3856), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['steam_app_pk']", "['steam_app.pk']"], {}), 
"(['steam_app_pk'], ['steam_app.pk'])\n", (3820, 3856), True, 'import sqlalchemy as sa\n'), ((3901, 3952), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""genre_pk"""', '"""steam_app_pk"""'], {}), "('genre_pk', 'steam_app_pk')\n", (3924, 3952), True, 'import sqlalchemy as sa\n'), ((4213, 4238), 'alembic.op.f', 'op.f', (['"""ix_steam_app_name"""'], {}), "('ix_steam_app_name')\n", (4217, 4238), False, 'from alembic import op\n'), ((4282, 4308), 'alembic.op.f', 'op.f', (['"""ix_steam_app_appid"""'], {}), "('ix_steam_app_appid')\n", (4286, 4308), False, 'from alembic import op\n'), ((433, 445), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (443, 445), True, 'import sqlalchemy as sa\n'), ((490, 502), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (500, 502), True, 'import sqlalchemy as sa\n'), ((547, 581), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (579, 581), False, 'import sqlmodel\n'), ((627, 661), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (659, 661), False, 'import sqlmodel\n'), ((827, 839), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (837, 839), True, 'import sqlalchemy as sa\n'), ((881, 893), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (891, 893), True, 'import sqlalchemy as sa\n'), ((958, 992), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (990, 992), False, 'import sqlmodel\n'), ((1127, 1139), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (1137, 1139), True, 'import sqlalchemy as sa\n'), ((1181, 1193), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (1191, 1193), True, 'import sqlalchemy as sa\n'), ((1258, 1292), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (1290, 1292), False, 'import sqlmodel\n'), ((1431, 1443), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (1441, 1443), True, 'import sqlalchemy as sa\n'), ((1488, 1500), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (1498, 1500), True, 'import sqlalchemy as sa\n'), ((1545, 1579), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (1577, 1579), False, 'import sqlmodel\n'), ((1626, 1638), 'sqlalchemy.Boolean', 'sa.Boolean', ([], {}), '()\n', (1636, 1638), True, 'import sqlalchemy as sa\n'), ((1682, 1716), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (1714, 1716), False, 'import sqlmodel\n'), ((1800, 1834), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (1832, 1834), False, 'import sqlmodel\n'), ((1912, 1924), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (1922, 1924), True, 'import sqlalchemy as sa\n'), ((1991, 2025), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (2023, 2025), False, 'import sqlmodel\n'), ((2089, 2101), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (2099, 2101), True, 'import sqlalchemy as sa\n'), ((2159, 2171), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (2169, 2171), True, 'import sqlalchemy as sa\n'), ((2223, 2232), 'sqlalchemy.Date', 'sa.Date', ([], {}), '()\n', (2230, 2232), True, 'import sqlalchemy as sa\n'), ((2279, 2292), 'sqlalchemy.DateTime', 'sa.DateTime', ([], {}), '()\n', (2290, 2292), True, 'import sqlalchemy as sa\n'), ((2340, 2353), 'sqlalchemy.DateTime', 'sa.DateTime', ([], {}), '()\n', (2351, 2353), True, 'import sqlalchemy as sa\n'), ((2682, 
2694), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (2692, 2694), True, 'import sqlalchemy as sa\n'), ((2738, 2772), 'sqlmodel.sql.sqltypes.AutoString', 'sqlmodel.sql.sqltypes.AutoString', ([], {}), '()\n', (2770, 2772), False, 'import sqlmodel\n'), ((2820, 2830), 'sqlalchemy.Float', 'sa.Float', ([], {}), '()\n', (2828, 2830), True, 'import sqlalchemy as sa\n'), ((2883, 2895), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (2893, 2895), True, 'import sqlalchemy as sa\n'), ((3148, 3160), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (3158, 3160), True, 'import sqlalchemy as sa\n'), ((3212, 3224), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (3222, 3224), True, 'import sqlalchemy as sa\n'), ((3599, 3611), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (3609, 3611), True, 'import sqlalchemy as sa\n'), ((3663, 3675), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (3673, 3675), True, 'import sqlalchemy as sa\n')]
|
from typing import Union
from fastapi import Request
from fastapi.param_functions import Depends, Header
from fastapi.security import OAuth2PasswordBearer
from sqlmodel import Session, select
from ..core.constants import AccessLevel, ContextEnum
from ..core.helpers.database import make_session
from ..core.helpers.exceptions import NotAuthorizedError
from ..core.models import Context, ParsedToken, User
from ..core.security import load_jwt_token
reusable_oauth2 = OAuth2PasswordBearer(tokenUrl="api/v1/auth/access-token")
def get_string_token(token: str = Header(None, alias="Authorization")) -> Union[None, str]:
if token:
_, _, token = token.partition(" ")
return token
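# Illustrative behaviour (header value is hypothetical): for a header of
# "Authorization: Bearer abc123", str.partition(" ") drops the scheme and the
# function returns "abc123"; when the header is absent it returns None.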
def load_access_token(request: Request, token: str = Depends(reusable_oauth2)) -> ParsedToken:
return load_jwt_token(token)
async def get_current_user(
session: Session = Depends(make_session), token: ParsedToken = Depends(load_access_token)
) -> User:
user = session.exec(select(User).where(User.id == token.sub)).first()
if not user:
raise NotAuthorizedError("Usuário não localizado")
return user
async def login_required(current_user: User = Depends(get_current_user)) -> None:
if not current_user.is_active:
raise NotAuthorizedError("Sua licença expirou! Entre em contato com um administrador.")
async def validate_super_user(user: User = Depends(get_current_user)) -> None:
if not user.is_super_user:
raise NotAuthorizedError("Essa página só está disponível para administradores")
class ContextManager:
context: ContextEnum
def __init__(self, context: Union[str, ContextEnum]) -> None:
self.context = context if isinstance(context, ContextEnum) else ContextEnum(context)
def __call__(self, request: Request, token: str = Depends(get_string_token)) -> Context:
try:
parsed_token = load_jwt_token(token=token)
user_id = parsed_token.sub
access_level = parsed_token.access_level
authenticated = True
except NotAuthorizedError:
user_id = None
authenticated = False
access_level = AccessLevel.ANONIMOUS
return Context(
context=self.context,
user_id=user_id,
method=request.url.path,
authenticated=authenticated,
access_level=access_level,
)
context_manager = ContextManager(ContextEnum.API)
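# Sketch of how the dependency above is typically wired into a route (the
# router and path below are hypothetical, not part of this module):
#
#     from fastapi import APIRouter
#
#     router = APIRouter()
#
#     @router.get("/ping")
#     async def ping(context: Context = Depends(context_manager)):
#         return {"authenticated": context.authenticated}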
|
[
"sqlmodel.select"
] |
[((469, 526), 'fastapi.security.OAuth2PasswordBearer', 'OAuth2PasswordBearer', ([], {'tokenUrl': '"""api/v1/auth/access-token"""'}), "(tokenUrl='api/v1/auth/access-token')\n", (489, 526), False, 'from fastapi.security import OAuth2PasswordBearer\n'), ((563, 598), 'fastapi.param_functions.Header', 'Header', (['None'], {'alias': '"""Authorization"""'}), "(None, alias='Authorization')\n", (569, 598), False, 'from fastapi.param_functions import Depends, Header\n'), ((751, 775), 'fastapi.param_functions.Depends', 'Depends', (['reusable_oauth2'], {}), '(reusable_oauth2)\n', (758, 775), False, 'from fastapi.param_functions import Depends, Header\n'), ((879, 900), 'fastapi.param_functions.Depends', 'Depends', (['make_session'], {}), '(make_session)\n', (886, 900), False, 'from fastapi.param_functions import Depends, Header\n'), ((923, 949), 'fastapi.param_functions.Depends', 'Depends', (['load_access_token'], {}), '(load_access_token)\n', (930, 949), False, 'from fastapi.param_functions import Depends, Header\n'), ((1177, 1202), 'fastapi.param_functions.Depends', 'Depends', (['get_current_user'], {}), '(get_current_user)\n', (1184, 1202), False, 'from fastapi.param_functions import Depends, Header\n'), ((1389, 1414), 'fastapi.param_functions.Depends', 'Depends', (['get_current_user'], {}), '(get_current_user)\n', (1396, 1414), False, 'from fastapi.param_functions import Depends, Header\n'), ((1808, 1833), 'fastapi.param_functions.Depends', 'Depends', (['get_string_token'], {}), '(get_string_token)\n', (1815, 1833), False, 'from fastapi.param_functions import Depends, Header\n'), ((985, 997), 'sqlmodel.select', 'select', (['User'], {}), '(User)\n', (991, 997), False, 'from sqlmodel import Session, select\n')]
|
"""
Problem description file handling.
Notes
-----
Short syntax: key is suffixed with '__<number>' to prevent collisions with long
syntax keys -> both cases can be used in a single input.
"""
from __future__ import absolute_import
import re
import numpy as nm
from sfepy.base.base import (Struct, IndexedStruct, dict_to_struct,
output, copy, update_dict_recursively,
import_file, assert_, get_default, basestr)
from sfepy.base.parse_conf import create_bnf
import six
_required = ['filename_mesh|filename_domain', 'field_[0-9]+|fields',
             # TODO originally EBCs were required to be specified, but in some examples
             # (especially 1D) specifying only EPBCs is valid
'ebc_[0-9]+|ebcs|dgebc_[0-9]+|dgebcs|dgepbc_[0-9]+|dgepbcs',
'equations',
'region_[0-9]+|regions', 'variable_[0-9]+|variables',
'material_[0-9]+|materials',
'solver_[0-9]+|solvers']
_other = ['epbc_[0-9]+|epbcs',
'lcbc_[0-9]+|lcbcs', 'nbc_[0-9]+|nbcs',
'ic_[0-9]+|ics', 'function_[0-9]+|functions', 'options',
'integral_[0-9]+|integrals']
def get_standard_keywords():
return copy(_required), copy(_other)
def tuple_to_conf(name, vals, order):
"""
Convert a configuration tuple `vals` into a Struct named `name`, with
attribute names given in and ordered by `order`.
Items in `order` at indices outside the length of `vals` are ignored.
"""
conf = Struct(name=name)
for ii, key in enumerate(order[:len(vals)]):
setattr(conf, key, vals[ii])
return conf
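# For illustration (values are hypothetical): tuple_to_conf('u',
# ('unknown field', 'displacement', 0), ['kind', 'field', 'order']) returns a
# Struct with kind='unknown field', field='displacement' and order=0; entries
# of `order` beyond len(vals) are simply not set.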
def transform_variables(adict):
d2 = {}
for ii, (key, conf) in enumerate(six.iteritems(adict)):
if isinstance(conf, tuple):
c2 = tuple_to_conf(key, conf, ['kind', 'field'])
if len(conf) >= 3:
kind = c2.kind.split()[0]
if kind == 'unknown':
c2.order = conf[2]
elif kind == 'test':
c2.dual = conf[2]
elif kind == 'parameter':
if isinstance(conf[2], basestr) or (conf[2] is None):
c2.like = conf[2]
else:
c2.like = None
c2.special = conf[2]
if len(conf) == 4:
c2.history = conf[3]
d2['variable_%s__%d' % (c2.name, ii)] = c2
else:
c2 = transform_to_struct_1(conf)
d2['variable_'+c2.name] = c2
return d2
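# Short-syntax illustration (field names are hypothetical): an input entry
#     variables = {'u': ('unknown field', 'displacement', 0),
#                  'v': ('test field', 'displacement', 'u')}
# is turned into Structs keyed e.g. 'variable_u__0' and 'variable_v__1', with
# the third tuple item stored as `order` for unknowns and `dual` for tests.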
def transform_conditions(adict, prefix):
d2 = {}
for ii, (key, conf) in enumerate(six.iteritems(adict)):
if isinstance(conf, tuple):
if len(conf) == 2:
c2 = tuple_to_conf(key, conf, ['region', 'dofs'])
else:
c2 = tuple_to_conf(key, conf, ['region', 'times', 'dofs'])
d2['%s_%s__%d' % (prefix, c2.name, ii)] = c2
else:
c2 = transform_to_struct_1(conf)
d2['%s_%s' % (prefix, c2.name)] = c2
return d2
def transform_dgebcs(adict):
return transform_conditions(adict, "dgebc")
def transform_ebcs(adict):
return transform_conditions(adict, 'ebc')
def transform_ics(adict):
return transform_conditions(adict, 'ic')
def transform_lcbcs(adict):
d2 = {}
for ii, (key, conf) in enumerate(six.iteritems(adict)):
if isinstance(conf, tuple):
if len(conf) >= 4:
if isinstance(conf[1], dict):
c2 = tuple_to_conf(key, conf, ['region', 'dofs',
'dof_map_fun', 'kind'])
c2.arguments = conf[4:]
else:
c2 = tuple_to_conf(key, conf, ['region', 'times', 'dofs',
'dof_map_fun', 'kind'])
c2.arguments = conf[5:]
else:
msg = 'LCBC syntax has to be: region[, times], dofs,' \
' dof_map_fun, kind[, other arguments]'
raise SyntaxError(msg)
d2['lcbc_%s__%d' % (c2.name, ii)] = c2
else:
c2 = transform_to_struct_1(conf)
c2.set_default('dof_map_fun', None)
c2.set_default('arguments', ())
d2['lcbc_%s' % (c2.name)] = c2
return d2
def transform_epbcs(adict, prefix="epbc"):
d2 = {}
for ii, (key, conf) in enumerate(six.iteritems(adict)):
if isinstance(conf, tuple):
if len(conf) == 3:
c2 = tuple_to_conf(key, conf, ['region', 'dofs', 'match'])
else:
c2 = tuple_to_conf(key, conf,
['region', 'times', 'dofs', 'match'])
d2['%s_%s__%d' % (prefix, c2.name, ii)] = c2
else:
c2 = transform_to_struct_1(conf)
d2['%s_%s' % (prefix, c2.name)] = c2
return d2
def transform_dgepbcs(adict):
return transform_epbcs(adict, "dgepbc")
def transform_regions(adict):
d2 = {}
for ii, (key, conf) in enumerate(six.iteritems(adict)):
if isinstance(conf, basestr):
c2 = Struct(name=key, select=conf)
d2['region_%s__%d' % (c2.name, ii)] = c2
elif isinstance(conf, tuple):
c2 = tuple_to_conf(key, conf, ['select', 'kind'])
if len(conf) == 3:
c2.parent = conf[2]
if len(conf) == 4:
c2.parent = conf[2]
c2.extra_options = conf[3]
d2['region_%s__%d' % (c2.name, ii)] = c2
else:
c2 = transform_to_struct_1(conf)
d2['region_'+c2.name] = c2
return d2
def transform_integrals(adict):
d2 = {}
for ii, (key, conf) in enumerate(six.iteritems(adict)):
if isinstance(conf, int):
c2 = Struct(name=key, order=conf)
d2['integral_%s__%d' % (c2.name, ii)] = c2
elif isinstance(conf, tuple):
if len(conf) == 2: # Old tuple version with now-ignored 'kind'.
conf = conf[1]
c2 = Struct(name=key, order=conf)
elif len(conf) == 3:
c2 = tuple_to_conf(key, conf, ['order', 'vals', 'weights'])
d2['integral_%s__%d' % (c2.name, ii)] = c2
else:
c2 = transform_to_struct_1(conf)
d2['integral_'+c2.name] = c2
return d2
def transform_fields(adict):
dtypes = {'real' : nm.float64, 'complex' : nm.complex128}
d2 = {}
for ii, (key, conf) in enumerate(six.iteritems(adict)):
if isinstance(conf, tuple):
c2 = tuple_to_conf(key, conf,
['dtype', 'shape', 'region', 'approx_order',
'space', 'poly_space_base'])
if c2.dtype in dtypes:
c2.dtype = dtypes[c2.dtype]
d2['field_%s__%d' % (c2.name, ii)] = c2
else:
c2 = transform_to_struct_1(conf)
c2.set_default('dtype', nm.float64)
if c2.dtype in dtypes:
c2.dtype = dtypes[c2.dtype]
d2['field_'+c2.name] = c2
return d2
def transform_materials(adict):
d2 = {}
for ii, (key, conf) in enumerate(six.iteritems(adict)):
if isinstance(conf, basestr):
c2 = Struct(name=key, function=conf)
d2['material_%s__%d' % (c2.name, ii)] = c2
elif isinstance(conf, tuple):
c2 = tuple_to_conf(key, conf,
['values', 'function', 'kind'])
if len(conf) == 4:
c2.flags = conf[3]
d2['material_%s__%d' % (c2.name, ii)] = c2
else:
c2 = transform_to_struct_1(conf)
d2['material_'+conf['name']] = c2
return d2
def transform_solvers(adict):
d2 = {}
for ii, (key, conf) in enumerate(six.iteritems(adict)):
if isinstance(conf, tuple):
c2 = tuple_to_conf(key, conf, ['kind','params'])
for param, val in six.iteritems(c2.params):
setattr(c2, param, val)
delattr(c2, 'params')
d2['solvers_%s__%d' % (c2.name, ii)] = c2
else:
c2 = transform_to_struct_1(conf)
d2['solvers_'+c2.name] = c2
return d2
def transform_functions(adict):
d2 = {}
for ii, (key, conf) in enumerate(six.iteritems(adict)):
if isinstance(conf, tuple):
c2 = tuple_to_conf(key, conf, ['function'])
d2['function_%s__%d' % (c2.name, ii)] = c2
else:
c2 = transform_to_struct_1(conf)
d2['function_'+c2.name] = c2
return d2
def transform_to_struct_1(adict):
return dict_to_struct(adict, flag=(1,))
def transform_to_i_struct_1(adict):
return dict_to_struct(adict, flag=(1,), constructor=IndexedStruct)
def transform_to_struct_01(adict):
return dict_to_struct(adict, flag=(0,1))
def transform_to_struct_10(adict):
return dict_to_struct(adict, flag=(1,0))
transforms = {
'options' : transform_to_i_struct_1,
'solvers' : transform_solvers,
'integrals' : transform_integrals,
'regions' : transform_regions,
'fields' : transform_fields,
'variables' : transform_variables,
'ebcs' : transform_ebcs,
'epbcs' : transform_epbcs,
'dgebcs' : transform_dgebcs,
'dgepbcs' : transform_dgepbcs,
'nbcs' : transform_to_struct_01,
'lcbcs' : transform_lcbcs,
'ics' : transform_ics,
'materials' : transform_materials,
'functions' : transform_functions,
}
def dict_from_string(string, allow_tuple=False, free_word=False):
"""
Parse `string` and return a dictionary that can be used to
construct/override a ProblemConf instance.
"""
if string is None:
return {}
if isinstance(string, dict):
return string
parser = create_bnf(allow_tuple=allow_tuple, free_word=free_word)
out = {}
for r in parser.parseString(string, parseAll=True):
out.update(r)
return out
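# Hedged usage sketch (the exact key/value syntax is defined by create_bnf();
# the string below is only indicative): dict_from_string("options : {'output_dir' : '.'}")
# is expected to yield {'options': {'output_dir': '.'}}, suitable as the
# `override` argument of ProblemConf.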
def dict_from_options(options):
"""
Return a dictionary that can be used to construct/override a ProblemConf
instance based on `options`.
See ``--conf`` and ``--options`` options of the ``simple.py`` script.
"""
override = dict_from_string(options.conf)
if options.app_options:
if not 'options' in override:
override['options'] = {}
override_options = dict_from_string(options.app_options)
override['options'].update(override_options)
return override
##
# 27.10.2005, c
class ProblemConf(Struct):
"""
Problem configuration, corresponding to an input (problem description
file). It validates the input using lists of required and other keywords
that have to/can appear in the input. Default keyword lists can be obtained
by sfepy.base.conf.get_standard_keywords().
ProblemConf instance is used to construct a Problem instance via
Problem.from_conf(conf).
"""
@staticmethod
def from_file(filename, required=None, other=None, verbose=True,
define_args=None, override=None, setup=True):
"""
Loads the problem definition from a file.
The filename can either contain plain definitions, or it can contain
the define() function, in which case it will be called to return the
input definitions.
The job of the define() function is to return a dictionary of
parameters. How the dictionary is constructed is not our business, but
the usual way is to simply have a function define() along these lines
in the input file::
def define():
options = {
'save_eig_vectors' : None,
'eigen_solver' : 'eigen1',
}
region_2 = {
'name' : 'Surface',
'select' : 'nodes of surface',
}
return locals()
Optionally, the define() function can accept additional arguments
that should be defined using the `define_args` tuple or dictionary.
"""
funmod = import_file(filename, package_name=False)
if "define" in funmod.__dict__:
if define_args is None:
define_dict = funmod.__dict__["define"]()
else:
if isinstance(define_args, str):
define_args = dict_from_string(define_args)
if isinstance(define_args, dict):
define_dict = funmod.__dict__["define"](**define_args)
else:
define_dict = funmod.__dict__["define"](*define_args)
else:
define_dict = funmod.__dict__
obj = ProblemConf(define_dict, funmod=funmod, filename=filename,
required=required, other=other, verbose=verbose,
override=override, setup=setup)
return obj
@staticmethod
def from_file_and_options(filename, options, required=None, other=None,
verbose=True, define_args=None, setup=True):
"""
Utility function, a wrapper around ProblemConf.from_file() with
possible override taken from `options`.
"""
override = dict_from_options(options)
obj = ProblemConf.from_file(filename, required=required, other=other,
verbose=verbose, define_args=define_args,
override=override, setup=setup)
return obj
@staticmethod
def from_module(module, required=None, other=None, verbose=True,
override=None, setup=True):
obj = ProblemConf(module.__dict__, module, module.__name__,
required, other, verbose, override, setup=setup)
return obj
@staticmethod
def from_dict(dict_, funmod, required=None, other=None, verbose=True,
override=None, setup=True):
obj = ProblemConf(dict_, funmod, None, required, other, verbose,
override, setup=setup)
return obj
def __init__(self, define_dict, funmod=None, filename=None,
required=None, other=None, verbose=True, override=None,
setup=True):
if override:
if isinstance(override, Struct):
override = override.__dict__
define_dict = update_dict_recursively(define_dict, override, True)
self.__dict__.update(define_dict)
self.verbose = verbose
if setup:
self.setup(funmod=funmod, filename=filename,
required=required, other=other)
def setup(self, define_dict=None, funmod=None, filename=None,
required=None, other=None):
define_dict = get_default(define_dict, self.__dict__)
self._filename = filename
self.validate(required=required, other=other)
self.transform_input_trivial()
self._raw = {}
for key, val in six.iteritems(define_dict):
if isinstance(val, dict):
self._raw[key] = copy(val)
self.transform_input()
self.funmod = funmod
def _validate_helper(self, items, but_nots):
keys = list(self.__dict__.keys())
left_over = keys[:]
if but_nots is not None:
for item in but_nots:
match = re.compile('^' + item + '$').match
for key in keys:
if match(key):
left_over.remove(key)
missing = []
if items is not None:
for item in items:
found = False
match = re.compile('^' + item + '$').match
for key in keys:
if match(key):
found = True
left_over.remove(key)
if not found:
missing.append(item)
return left_over, missing
def validate(self, required=None, other=None):
required_left_over, required_missing \
= self._validate_helper(required, other)
other_left_over, other_missing \
= self._validate_helper(other, required)
assert_(required_left_over == other_left_over)
if other_left_over and self.verbose:
output('left over:', other_left_over)
if required_missing:
raise ValueError('required missing: %s' % required_missing)
return other_missing
def transform_input_trivial(self):
"""Trivial input transformations."""
##
# Unordered inputs.
tr_list = ['([a-zA-Z0-9]+)_[0-9]+']
# Keywords not in 'required', but needed even empty (e.g. for
# running tests).
for key in transforms.keys():
if key not in self.__dict__:
self.__dict__[key] = {}
keys = list(self.__dict__.keys())
for item in tr_list:
match = re.compile(item).match
for key in keys:
obj = match(key)
if obj:
new = obj.group(1) + 's'
result = {key : self.__dict__[key]}
try:
self.__dict__[new].update(result)
except:
self.__dict__[new] = result
del self.__dict__[key]
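    # Example of the trivial transformation above (names hypothetical): a
    # top-level item `material_2 = {...}` matches '([a-zA-Z0-9]+)_[0-9]+' and
    # is folded into self.materials as {'material_2': {...}}, so the numbered
    # short syntax and the long dict syntax meet in the same place before
    # transform_input() runs.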
def transform_input(self):
keys = list(self.__dict__.keys())
for key, transform in six.iteritems(transforms):
if not key in keys: continue
self.__dict__[key] = transform(self.__dict__[key])
def get_raw(self, key=None):
if key is None:
return self._raw
else:
return self._raw[key]
def get_item_by_name(self, key, item_name):
"""
Return item with name `item_name` in configuration group given
by `key`.
"""
val = getattr(self, key)
for item in six.itervalues(val):
if item.name == item_name:
return item
def get_function(self, name):
"""
Get a function object given its name.
It can be either in `ProblemConf.funmod`, or a `ProblemConf`
attribute directly.
Parameters
----------
name : str or function or None
The function name or directly the function.
Returns
-------
fun : function or None
The required function, or None if `name` was `None`.
"""
if name is None:
fun = None
elif callable(name):
import inspect
if not (inspect.isfunction(name) or inspect.ismethod(name)):
msg = '`name` has to have `str` or `function` type! (got %s)'
raise ValueError(msg % type(name))
fun = name
else:
try:
fun = getattr(self.funmod, name)
except AttributeError:
try:
fun = getattr(self, name)
except AttributeError:
raise ValueError('function %s cannot be found!' % name)
return fun
def edit(self, key, newval):
self.__dict__[key] = transforms[key](newval)
def update_conf(self, conf):
"""
Update configuration by values in another problem configuration.
Values that are dictionaries are updated in-place by ``dict.update()``.
Parameters
----------
conf : ProblemConf instance
The other configuration.
"""
for x in conf.__dict__:
his = conf.__dict__[x]
my = getattr(self, x, None)
if isinstance(my, dict) and isinstance(his, dict):
my.update(his)
else:
setattr(self, x, his)
def add_missing(self, conf):
"""
Add missing values from another problem configuration.
Missing keys/values are added also to values that are dictionaries.
Parameters
----------
conf : ProblemConf instance
The other configuration.
"""
for x in conf.__dict__:
his = conf.__dict__[x]
my = getattr(self, x, None)
if isinstance(my, dict) and isinstance(his, dict):
for key in his:
if key not in my:
my[key]=his[key]
elif my is None:
setattr(self, x, his)
|
[
"sfepy.base.base.import_file",
"sfepy.base.base.output",
"sfepy.base.base.get_default",
"sfepy.base.parse_conf.create_bnf",
"sfepy.base.base.Struct",
"sfepy.base.base.update_dict_recursively",
"sfepy.base.base.copy",
"sfepy.base.base.dict_to_struct",
"sfepy.base.base.assert_"
] |
[((1517, 1534), 'sfepy.base.base.Struct', 'Struct', ([], {'name': 'name'}), '(name=name)\n', (1523, 1534), False, 'from sfepy.base.base import Struct, IndexedStruct, dict_to_struct, output, copy, update_dict_recursively, import_file, assert_, get_default, basestr\n'), ((8742, 8774), 'sfepy.base.base.dict_to_struct', 'dict_to_struct', (['adict'], {'flag': '(1,)'}), '(adict, flag=(1,))\n', (8756, 8774), False, 'from sfepy.base.base import Struct, IndexedStruct, dict_to_struct, output, copy, update_dict_recursively, import_file, assert_, get_default, basestr\n'), ((8822, 8881), 'sfepy.base.base.dict_to_struct', 'dict_to_struct', (['adict'], {'flag': '(1,)', 'constructor': 'IndexedStruct'}), '(adict, flag=(1,), constructor=IndexedStruct)\n', (8836, 8881), False, 'from sfepy.base.base import Struct, IndexedStruct, dict_to_struct, output, copy, update_dict_recursively, import_file, assert_, get_default, basestr\n'), ((8928, 8962), 'sfepy.base.base.dict_to_struct', 'dict_to_struct', (['adict'], {'flag': '(0, 1)'}), '(adict, flag=(0, 1))\n', (8942, 8962), False, 'from sfepy.base.base import Struct, IndexedStruct, dict_to_struct, output, copy, update_dict_recursively, import_file, assert_, get_default, basestr\n'), ((9008, 9042), 'sfepy.base.base.dict_to_struct', 'dict_to_struct', (['adict'], {'flag': '(1, 0)'}), '(adict, flag=(1, 0))\n', (9022, 9042), False, 'from sfepy.base.base import Struct, IndexedStruct, dict_to_struct, output, copy, update_dict_recursively, import_file, assert_, get_default, basestr\n'), ((9925, 9981), 'sfepy.base.parse_conf.create_bnf', 'create_bnf', ([], {'allow_tuple': 'allow_tuple', 'free_word': 'free_word'}), '(allow_tuple=allow_tuple, free_word=free_word)\n', (9935, 9981), False, 'from sfepy.base.parse_conf import create_bnf\n'), ((1219, 1234), 'sfepy.base.base.copy', 'copy', (['_required'], {}), '(_required)\n', (1223, 1234), False, 'from sfepy.base.base import Struct, IndexedStruct, dict_to_struct, output, copy, update_dict_recursively, import_file, assert_, get_default, basestr\n'), ((1236, 1248), 'sfepy.base.base.copy', 'copy', (['_other'], {}), '(_other)\n', (1240, 1248), False, 'from sfepy.base.base import Struct, IndexedStruct, dict_to_struct, output, copy, update_dict_recursively, import_file, assert_, get_default, basestr\n'), ((1719, 1739), 'six.iteritems', 'six.iteritems', (['adict'], {}), '(adict)\n', (1732, 1739), False, 'import six\n'), ((2668, 2688), 'six.iteritems', 'six.iteritems', (['adict'], {}), '(adict)\n', (2681, 2688), False, 'import six\n'), ((3402, 3422), 'six.iteritems', 'six.iteritems', (['adict'], {}), '(adict)\n', (3415, 3422), False, 'import six\n'), ((4493, 4513), 'six.iteritems', 'six.iteritems', (['adict'], {}), '(adict)\n', (4506, 4513), False, 'import six\n'), ((5131, 5151), 'six.iteritems', 'six.iteritems', (['adict'], {}), '(adict)\n', (5144, 5151), False, 'import six\n'), ((5816, 5836), 'six.iteritems', 'six.iteritems', (['adict'], {}), '(adict)\n', (5829, 5836), False, 'import six\n'), ((6593, 6613), 'six.iteritems', 'six.iteritems', (['adict'], {}), '(adict)\n', (6606, 6613), False, 'import six\n'), ((7282, 7302), 'six.iteritems', 'six.iteritems', (['adict'], {}), '(adict)\n', (7295, 7302), False, 'import six\n'), ((7913, 7933), 'six.iteritems', 'six.iteritems', (['adict'], {}), '(adict)\n', (7926, 7933), False, 'import six\n'), ((8412, 8432), 'six.iteritems', 'six.iteritems', (['adict'], {}), '(adict)\n', (8425, 8432), False, 'import six\n'), ((12223, 12264), 'sfepy.base.base.import_file', 'import_file', (['filename'], 
{'package_name': '(False)'}), '(filename, package_name=False)\n', (12234, 12264), False, 'from sfepy.base.base import Struct, IndexedStruct, dict_to_struct, output, copy, update_dict_recursively, import_file, assert_, get_default, basestr\n'), ((14918, 14957), 'sfepy.base.base.get_default', 'get_default', (['define_dict', 'self.__dict__'], {}), '(define_dict, self.__dict__)\n', (14929, 14957), False, 'from sfepy.base.base import Struct, IndexedStruct, dict_to_struct, output, copy, update_dict_recursively, import_file, assert_, get_default, basestr\n'), ((15135, 15161), 'six.iteritems', 'six.iteritems', (['define_dict'], {}), '(define_dict)\n', (15148, 15161), False, 'import six\n'), ((16377, 16423), 'sfepy.base.base.assert_', 'assert_', (['(required_left_over == other_left_over)'], {}), '(required_left_over == other_left_over)\n', (16384, 16423), False, 'from sfepy.base.base import Struct, IndexedStruct, dict_to_struct, output, copy, update_dict_recursively, import_file, assert_, get_default, basestr\n'), ((17649, 17674), 'six.iteritems', 'six.iteritems', (['transforms'], {}), '(transforms)\n', (17662, 17674), False, 'import six\n'), ((18130, 18149), 'six.itervalues', 'six.itervalues', (['val'], {}), '(val)\n', (18144, 18149), False, 'import six\n'), ((5209, 5238), 'sfepy.base.base.Struct', 'Struct', ([], {'name': 'key', 'select': 'conf'}), '(name=key, select=conf)\n', (5215, 5238), False, 'from sfepy.base.base import Struct, IndexedStruct, dict_to_struct, output, copy, update_dict_recursively, import_file, assert_, get_default, basestr\n'), ((5890, 5918), 'sfepy.base.base.Struct', 'Struct', ([], {'name': 'key', 'order': 'conf'}), '(name=key, order=conf)\n', (5896, 5918), False, 'from sfepy.base.base import Struct, IndexedStruct, dict_to_struct, output, copy, update_dict_recursively, import_file, assert_, get_default, basestr\n'), ((7360, 7391), 'sfepy.base.base.Struct', 'Struct', ([], {'name': 'key', 'function': 'conf'}), '(name=key, function=conf)\n', (7366, 7391), False, 'from sfepy.base.base import Struct, IndexedStruct, dict_to_struct, output, copy, update_dict_recursively, import_file, assert_, get_default, basestr\n'), ((8063, 8087), 'six.iteritems', 'six.iteritems', (['c2.params'], {}), '(c2.params)\n', (8076, 8087), False, 'import six\n'), ((14527, 14579), 'sfepy.base.base.update_dict_recursively', 'update_dict_recursively', (['define_dict', 'override', '(True)'], {}), '(define_dict, override, True)\n', (14550, 14579), False, 'from sfepy.base.base import Struct, IndexedStruct, dict_to_struct, output, copy, update_dict_recursively, import_file, assert_, get_default, basestr\n'), ((16482, 16519), 'sfepy.base.base.output', 'output', (['"""left over:"""', 'other_left_over'], {}), "('left over:', other_left_over)\n", (16488, 16519), False, 'from sfepy.base.base import Struct, IndexedStruct, dict_to_struct, output, copy, update_dict_recursively, import_file, assert_, get_default, basestr\n'), ((15234, 15243), 'sfepy.base.base.copy', 'copy', (['val'], {}), '(val)\n', (15238, 15243), False, 'from sfepy.base.base import Struct, IndexedStruct, dict_to_struct, output, copy, update_dict_recursively, import_file, assert_, get_default, basestr\n'), ((17128, 17144), 're.compile', 're.compile', (['item'], {}), '(item)\n', (17138, 17144), False, 'import re\n'), ((6141, 6169), 'sfepy.base.base.Struct', 'Struct', ([], {'name': 'key', 'order': 'conf'}), '(name=key, order=conf)\n', (6147, 6169), False, 'from sfepy.base.base import Struct, IndexedStruct, dict_to_struct, output, copy, 
update_dict_recursively, import_file, assert_, get_default, basestr\n'), ((15516, 15544), 're.compile', 're.compile', (["('^' + item + '$')"], {}), "('^' + item + '$')\n", (15526, 15544), False, 'import re\n'), ((15802, 15830), 're.compile', 're.compile', (["('^' + item + '$')"], {}), "('^' + item + '$')\n", (15812, 15830), False, 'import re\n'), ((18809, 18833), 'inspect.isfunction', 'inspect.isfunction', (['name'], {}), '(name)\n', (18827, 18833), False, 'import inspect\n'), ((18837, 18859), 'inspect.ismethod', 'inspect.ismethod', (['name'], {}), '(name)\n', (18853, 18859), False, 'import inspect\n')]
|
import importlib
import os
from typing import Dict, List, Optional, Tuple
from types import ModuleType
import typer
from rich import inspect
from rich.prompt import Prompt
from rich.table import Table
from sqlalchemy import Column
from sqlalchemy.future.engine import Engine
from sqlmodel import SQLModel, create_engine
from ._console import console, error_console
def get_db_url(database_url: Optional[str] = None):
"""A helper function to get the database url."""
if not database_url:
database_url = os.getenv("DATABASE_URL")
if not database_url:
msg = "Please ensure that an environment variable is set for `DATABASE_URL` or pass in the url to the database_url option."
error_console.print(msg)
raise typer.Exit(code=1)
return database_url
def get_tables(models_module) -> Dict[str, SQLModel]:
"""Find all of the SQLModel tables."""
tables = {}
for name, obj in models_module.__dict__.items():
if isinstance(obj, type(SQLModel)) and name != "SQLModel":
tables[obj.__tablename__] = obj
return tables
def get_models(models_path: Optional[str] = None):
# Load the models provided by the user.
if not models_path:
models_path = os.getenv("MODELS_PATH")
if not models_path:
msg = "No modules_path specified. You can set a modules_path by either passing in a value to the -m option or by setting an environment variable `export MODELS_PATH='sqlcli_demo/models.py'`"
error_console.print(msg)
raise typer.Exit(code=1)
models_path = os.path.normpath(models_path)
path, filename = os.path.split(models_path)
    module_name, ext = os.path.splitext(filename)
spec = importlib.util.spec_from_file_location(module_name, models_path)
models = importlib.util.module_from_spec(spec)
spec.loader.exec_module(models)
return models
def is_foreign_key(obj, field_name: str) -> bool:
foreign_keys = [i for i in obj.__table__.foreign_keys]
for fk in foreign_keys:
if fk.parent.name == field_name:
return True
return False
def get_foreign_key_column_name(obj: SQLModel, field_name: str) -> str:
foreign_keys = [i for i in obj.__table__.foreign_keys]
for fk in foreign_keys:
if fk.parent.name == field_name:
return fk.column.name
def get_foreign_key_table_name(obj: SQLModel, field_name: str) -> Optional[str]:
foreign_keys = [i for i in obj.__table__.foreign_keys]
for fk in foreign_keys:
if fk.parent.name == field_name:
return fk.column.table.name
return None
def sqlmodel_setup(
models_path: str, database_url: str
) -> Tuple[ModuleType, str, Engine, Dict[str, SQLModel]]:
"""Quickstart for getting required objects"""
models = get_models(models_path)
url = get_db_url(database_url)
engine = create_engine(url)
tables = get_tables(models)
return models, url, engine, tables
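# Hypothetical quickstart (path and URL are examples only):
#     models, url, engine, tables = sqlmodel_setup(
#         "sqlcli_demo/models.py", "sqlite:///database.db"
#     )
#     # `tables` maps each __tablename__ to its SQLModel class.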
def create_rich_table(data: List[SQLModel], **kwargs) -> Table:
"""Convert a list of SQLModel objects into a rich table."""
table = Table(**kwargs)
# Note that column names are accessed via .__table__.columns._all_columns
# because it guarantees the correct order. If you were to use the more
# succinct `for col in data[0].dict().keys()` the order can change.
for col in data[0].__table__.columns._all_columns:
col_name = col.name
table.add_column(col_name)
for row in data:
row_data = []
for col in row.__table__.columns._all_columns:
row_data.append(row.dict()[col.name])
row_data = [str(i) for i in row_data]
table.add_row(*row_data)
return table
def validate_table_name(
table_name: Optional[str], tables: List[SQLModel]
) -> Tuple[SQLModel, str]:
if not table_name:
table_name = Prompt.ask("Please select a table", choices=tables.keys())
try:
obj = tables[table_name]
except KeyError:
error_console.print(
f"The provided table does not exist. Please select from one of:"
)
error_console.print(f"{list(tables.keys())}")
raise typer.Exit(code=1)
return obj, table_name
|
[
"sqlmodel.create_engine"
] |
[((1602, 1631), 'os.path.normpath', 'os.path.normpath', (['models_path'], {}), '(models_path)\n', (1618, 1631), False, 'import os\n'), ((1653, 1679), 'os.path.split', 'os.path.split', (['models_path'], {}), '(models_path)\n', (1666, 1679), False, 'import os\n'), ((1703, 1726), 'os.path.split', 'os.path.split', (['filename'], {}), '(filename)\n', (1716, 1726), False, 'import os\n'), ((1739, 1803), 'importlib.util.spec_from_file_location', 'importlib.util.spec_from_file_location', (['module_name', 'models_path'], {}), '(module_name, models_path)\n', (1777, 1803), False, 'import importlib\n'), ((1817, 1854), 'importlib.util.module_from_spec', 'importlib.util.module_from_spec', (['spec'], {}), '(spec)\n', (1848, 1854), False, 'import importlib\n'), ((2889, 2907), 'sqlmodel.create_engine', 'create_engine', (['url'], {}), '(url)\n', (2902, 2907), False, 'from sqlmodel import SQLModel, create_engine\n'), ((3121, 3136), 'rich.table.Table', 'Table', ([], {}), '(**kwargs)\n', (3126, 3136), False, 'from rich.table import Table\n'), ((522, 547), 'os.getenv', 'os.getenv', (['"""DATABASE_URL"""'], {}), "('DATABASE_URL')\n", (531, 547), False, 'import os\n'), ((1253, 1277), 'os.getenv', 'os.getenv', (['"""MODELS_PATH"""'], {}), "('MODELS_PATH')\n", (1262, 1277), False, 'import os\n'), ((769, 787), 'typer.Exit', 'typer.Exit', ([], {'code': '(1)'}), '(code=1)\n', (779, 787), False, 'import typer\n'), ((1564, 1582), 'typer.Exit', 'typer.Exit', ([], {'code': '(1)'}), '(code=1)\n', (1574, 1582), False, 'import typer\n'), ((4184, 4202), 'typer.Exit', 'typer.Exit', ([], {'code': '(1)'}), '(code=1)\n', (4194, 4202), False, 'import typer\n')]
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Copyright (c) Megvii, Inc. and its affiliates.
import argparse
from loguru import logger
import multiprocessing as mp
import megengine as mge
import megengine.distributed as dist
from yolox.core import Trainer
from yolox.exp import get_exp
from yolox.utils import configure_nccl
def make_parser():
parser = argparse.ArgumentParser("YOLOX train parser")
parser.add_argument("-expn", "--experiment-name", type=str, default=None)
parser.add_argument("-n", "--name", type=str, default=None, help="model name")
parser.add_argument("-b", "--batch-size", type=int, default=64, help="batch size")
parser.add_argument(
"-d", "--devices", default=None, type=int, help="device for training"
)
parser.add_argument(
"-f",
"--exp_file",
default=None,
type=str,
help="plz input your expriment description file",
)
parser.add_argument(
"--resume", default=False, action="store_true", help="resume training"
)
parser.add_argument("-c", "--ckpt", default=None, type=str, help="checkpoint file")
parser.add_argument(
"--num_machine", default=1, type=int, help="num of node for training"
)
parser.add_argument(
"--machine_rank", default=0, type=int, help="node rank for multi-node training"
)
parser.add_argument(
"--sync_level", type=int, default=None, help="config sync level, use 0 to debug"
)
parser.add_argument(
"opts",
help="Modify config options using the command-line",
default=None,
nargs=argparse.REMAINDER,
)
return parser
@logger.catch
def main(exp, args):
if not args.experiment_name:
args.experiment_name = exp.exp_name
# set environment variables for distributed training
configure_nccl()
# enable dtr to avoid CUDA OOM
mge.dtr.enable()
if args.sync_level is not None:
# NOTE: use sync_level = 0 to debug mge error
from megengine.core._imperative_rt.core2 import config_async_level
logger.info("Using aysnc_level {}".format(args.sync_level))
config_async_level(args.sync_level)
trainer = Trainer(exp, args)
trainer.train()
if __name__ == "__main__":
args = make_parser().parse_args()
exp = get_exp(args.exp_file, args.name)
exp.merge(args.opts)
mp.set_start_method("spawn")
num_gpus = dist.helper.get_device_count_by_fork("gpu")
if args.devices is None:
args.devices = num_gpus
assert args.devices <= num_gpus
if args.devices > 1:
train = dist.launcher(main, n_gpus=args.devices)
train(exp, args)
else:
main(exp, args)
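# Typical launch, using only the flags defined by make_parser() above (values
# are illustrative): python train.py -f <exp_file> -d 8 -b 64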
|
[
"megengine.dtr.enable",
"megengine.distributed.helper.get_device_count_by_fork",
"megengine.distributed.launcher",
"megengine.core._imperative_rt.core2.config_async_level"
] |
[((364, 409), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""YOLOX train parser"""'], {}), "('YOLOX train parser')\n", (387, 409), False, 'import argparse\n'), ((1837, 1853), 'yolox.utils.configure_nccl', 'configure_nccl', ([], {}), '()\n', (1851, 1853), False, 'from yolox.utils import configure_nccl\n'), ((1894, 1910), 'megengine.dtr.enable', 'mge.dtr.enable', ([], {}), '()\n', (1908, 1910), True, 'import megengine as mge\n'), ((2204, 2222), 'yolox.core.Trainer', 'Trainer', (['exp', 'args'], {}), '(exp, args)\n', (2211, 2222), False, 'from yolox.core import Trainer\n'), ((2320, 2353), 'yolox.exp.get_exp', 'get_exp', (['args.exp_file', 'args.name'], {}), '(args.exp_file, args.name)\n', (2327, 2353), False, 'from yolox.exp import get_exp\n'), ((2384, 2412), 'multiprocessing.set_start_method', 'mp.set_start_method', (['"""spawn"""'], {}), "('spawn')\n", (2403, 2412), True, 'import multiprocessing as mp\n'), ((2428, 2471), 'megengine.distributed.helper.get_device_count_by_fork', 'dist.helper.get_device_count_by_fork', (['"""gpu"""'], {}), "('gpu')\n", (2464, 2471), True, 'import megengine.distributed as dist\n'), ((2153, 2188), 'megengine.core._imperative_rt.core2.config_async_level', 'config_async_level', (['args.sync_level'], {}), '(args.sync_level)\n', (2171, 2188), False, 'from megengine.core._imperative_rt.core2 import config_async_level\n'), ((2613, 2653), 'megengine.distributed.launcher', 'dist.launcher', (['main'], {'n_gpus': 'args.devices'}), '(main, n_gpus=args.devices)\n', (2626, 2653), True, 'import megengine.distributed as dist\n')]
|
from __future__ import annotations
import typing as t
import strawberry
from sqlmodel import Field, Relationship, SQLModel
from .schema_generation import create_array_relationship_resolver, create_query_root
class AddressModel(SQLModel, table=True):
__tablename__ = "addresses"
id: t.Optional[int] = Field(
default=None, primary_key=True, index=True, nullable=False
)
street: str
state: str
country: str
zip: str
users: t.List["UserModel"] = Relationship(back_populates="address")
class UserModel(SQLModel, table=True):
__tablename__ = "users"
id: t.Optional[int] = Field(
default=None, primary_key=True, index=True, nullable=False
)
age: int
password: t.Optional[str]
address_id: t.Optional[int] = Field(default=None, foreign_key="addresses.id")
address: t.Optional[AddressModel] = Relationship(back_populates="users")
@strawberry.experimental.pydantic.type(
UserModel, fields=["id", "age", "password", "address_id", "address"]
)
class User:
pass
@strawberry.experimental.pydantic.type(
AddressModel, fields=["id", "street", "state", "country", "zip"]
)
class Address:
users: t.List[create_array_relationship_resolver(User)] = strawberry.field(
resolver=create_array_relationship_resolver(User)
)
Query = create_query_root([User, Address])
schema = strawberry.Schema(query=Query)
|
[
"sqlmodel.Field",
"sqlmodel.Relationship"
] |
[((906, 1017), 'strawberry.experimental.pydantic.type', 'strawberry.experimental.pydantic.type', (['UserModel'], {'fields': "['id', 'age', 'password', 'address_id', 'address']"}), "(UserModel, fields=['id', 'age',\n 'password', 'address_id', 'address'])\n", (943, 1017), False, 'import strawberry\n'), ((1044, 1151), 'strawberry.experimental.pydantic.type', 'strawberry.experimental.pydantic.type', (['AddressModel'], {'fields': "['id', 'street', 'state', 'country', 'zip']"}), "(AddressModel, fields=['id', 'street',\n 'state', 'country', 'zip'])\n", (1081, 1151), False, 'import strawberry\n'), ((1368, 1398), 'strawberry.Schema', 'strawberry.Schema', ([], {'query': 'Query'}), '(query=Query)\n', (1385, 1398), False, 'import strawberry\n'), ((313, 378), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)', 'index': '(True)', 'nullable': '(False)'}), '(default=None, primary_key=True, index=True, nullable=False)\n', (318, 378), False, 'from sqlmodel import Field, Relationship, SQLModel\n'), ((487, 525), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""address"""'}), "(back_populates='address')\n", (499, 525), False, 'from sqlmodel import Field, Relationship, SQLModel\n'), ((621, 686), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)', 'index': '(True)', 'nullable': '(False)'}), '(default=None, primary_key=True, index=True, nullable=False)\n', (626, 686), False, 'from sqlmodel import Field, Relationship, SQLModel\n'), ((778, 825), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'foreign_key': '"""addresses.id"""'}), "(default=None, foreign_key='addresses.id')\n", (783, 825), False, 'from sqlmodel import Field, Relationship, SQLModel\n'), ((866, 902), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""users"""'}), "(back_populates='users')\n", (878, 902), False, 'from sqlmodel import Field, Relationship, SQLModel\n')]
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import multiprocessing as mp
import os
import cv2
import megengine as mge
import megengine.data as data
import megengine.data.dataset as dataset
import megengine.data.transform as T
import megengine.jit as jit
import numpy as np
from tqdm import tqdm
from official.vision.segmentation.deeplabv3plus import DeepLabV3Plus
class Config:
DATA_WORKERS = 4
NUM_CLASSES = 21
IMG_SIZE = 512
IMG_MEAN = [103.530, 116.280, 123.675]
IMG_STD = [57.375, 57.120, 58.395]
VAL_BATCHES = 1
VAL_MULTISCALE = [1.0] # [0.5, 0.75, 1.0, 1.25, 1.5, 1.75]
VAL_FLIP = False
VAL_SLIP = False
VAL_SAVE = None
cfg = Config()
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"-d", "--dataset_dir", type=str, default="/data/datasets/VOC2012",
)
parser.add_argument(
"-m", "--model_path", type=str, default=None, help="eval model file"
)
args = parser.parse_args()
test_loader, test_size = build_dataloader(args.dataset_dir)
print("number of test images: %d" % (test_size))
net = DeepLabV3Plus(class_num=cfg.NUM_CLASSES)
model_dict = mge.load(args.model_path)
net.load_state_dict(model_dict["state_dict"])
print("load model %s" % (args.model_path))
net.eval()
result_list = []
for sample_batched in tqdm(test_loader):
img = sample_batched[0].squeeze()
label = sample_batched[1].squeeze()
pred = evaluate(net, img)
result_list.append({"pred": pred, "gt": label})
if cfg.VAL_SAVE:
save_results(result_list, cfg.VAL_SAVE)
compute_metric(result_list)
def pad_image_to_shape(img, shape, border_mode, value):
margin = np.zeros(4, np.uint32)
pad_height = shape[0] - img.shape[0] if shape[0] - img.shape[0] > 0 else 0
pad_width = shape[1] - img.shape[1] if shape[1] - img.shape[1] > 0 else 0
margin[0] = pad_height // 2
margin[1] = pad_height // 2 + pad_height % 2
margin[2] = pad_width // 2
margin[3] = pad_width // 2 + pad_width % 2
img = cv2.copyMakeBorder(
img, margin[0], margin[1], margin[2], margin[3], border_mode, value=value
)
return img, margin
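# Illustrative numbers (not from any config): padding a 375x500 image to shape
# (512, 512) with this helper gives margin = [68, 69, 6, 6]; evaluate() later
# uses the same margins to crop the prediction back to the original size.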
def eval_single(net, img, is_flip):
@jit.trace(symbolic=True, opt_level=2)
def pred_fun(data, net=None):
net.eval()
pred = net(data)
return pred
data = mge.tensor()
data.set_value(img.transpose(2, 0, 1)[np.newaxis])
pred = pred_fun(data, net=net)
if is_flip:
img_flip = img[:, ::-1, :]
data.set_value(img_flip.transpose(2, 0, 1)[np.newaxis])
pred_flip = pred_fun(data, net=net)
pred = (pred + pred_flip[:, :, :, ::-1]) / 2.0
del pred_flip
pred = pred.numpy().squeeze().transpose(1, 2, 0)
del data
return pred
def evaluate(net, img):
ori_h, ori_w, _ = img.shape
pred_all = np.zeros((ori_h, ori_w, cfg.NUM_CLASSES))
for rate in cfg.VAL_MULTISCALE:
if cfg.VAL_SLIP:
img_scale = cv2.resize(
img, None, fx=rate, fy=rate, interpolation=cv2.INTER_LINEAR
)
val_size = (cfg.IMG_SIZE, cfg.IMG_SIZE)
else:
out_h, out_w = int(cfg.IMG_SIZE * rate), int(cfg.IMG_SIZE * rate)
img_scale = cv2.resize(img, (out_w, out_h), interpolation=cv2.INTER_LINEAR)
val_size = (out_h, out_w)
new_h, new_w, _ = img_scale.shape
        if (new_h <= val_size[0]) and (new_w <= val_size[1]):
img_pad, margin = pad_image_to_shape(
img_scale, val_size, cv2.BORDER_CONSTANT, value=0
)
pred = eval_single(net, img_pad, cfg.VAL_FLIP)
pred = pred[
margin[0] : (pred.shape[0] - margin[1]),
margin[2] : (pred.shape[1] - margin[3]),
:,
]
else:
stride_rate = 2 / 3
stride = [int(np.ceil(i * stride_rate)) for i in val_size]
print(img_scale.shape, stride, val_size)
img_pad, margin = pad_image_to_shape(
img_scale, val_size, cv2.BORDER_CONSTANT, value=0
)
pad_h, pad_w = img_pad.shape[:2]
r_grid, c_grid = [
int(np.ceil((ps - cs) / stride)) + 1
for ps, cs, stride in zip(img_pad.shape, val_size, stride)
]
pred_scale = np.zeros((pad_h, pad_w, cfg.NUM_CLASSES))
count_scale = np.zeros((pad_h, pad_w, cfg.NUM_CLASSES))
for grid_yidx in range(r_grid):
for grid_xidx in range(c_grid):
s_x = grid_xidx * stride[1]
s_y = grid_yidx * stride[0]
e_x = min(s_x + val_size[1], pad_w)
e_y = min(s_y + val_size[0], pad_h)
s_x = e_x - val_size[1]
s_y = e_y - val_size[0]
img_sub = img_pad[s_y:e_y, s_x:e_x, :]
timg_pad, tmargin = pad_image_to_shape(
img_sub, val_size, cv2.BORDER_CONSTANT, value=0
)
print(tmargin, timg_pad.shape)
tpred = eval_single(net, timg_pad, cfg.VAL_FLIP)
                    # crop with the tile's own padding margins (tmargin),
                    # not the whole-image margins computed above
                    tpred = tpred[
                        tmargin[0] : (tpred.shape[0] - tmargin[1]),
                        tmargin[2] : (tpred.shape[1] - tmargin[3]),
                        :,
                    ]
count_scale[s_y:e_y, s_x:e_x, :] += 1
pred_scale[s_y:e_y, s_x:e_x, :] += tpred
pred_scale = pred_scale / count_scale
pred = pred_scale[
margin[0] : (pred_scale.shape[0] - margin[1]),
margin[2] : (pred_scale.shape[1] - margin[3]),
:,
]
pred = cv2.resize(pred, (ori_w, ori_h), interpolation=cv2.INTER_LINEAR)
pred_all = pred_all + pred
pred_all = pred_all / len(cfg.VAL_MULTISCALE)
result = np.argmax(pred_all, axis=2).astype(np.uint8)
return result
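# --- illustrative sketch (not part of the original script) -----------------------
# The sliding-window branch of evaluate() tiles an image that is larger than the
# crop size: per axis, the number of windows is ceil((padded - crop) / stride) + 1,
# and each window end is clamped to the image border with its start shifted back.
# The numbers below are hypothetical and only demonstrate that arithmetic
# (relies on the module's existing "numpy as np" import).
def _demo_window_grid(padded=700, crop=512, stride_rate=2 / 3):
    stride = int(np.ceil(crop * stride_rate))            # 342, as in evaluate()
    n_windows = int(np.ceil((padded - crop) / stride)) + 1  # ceil(188 / 342) + 1 == 2
    starts = []
    for k in range(n_windows):
        end = min(k * stride + crop, padded)
        starts.append(end - crop)                        # last window shifted back
    return stride, n_windows, starts                     # (342, 2, [0, 188])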
def save_results(result_list, save_dir):
if not os.path.exists(save_dir):
os.makedirs(save_dir)
for idx, sample in enumerate(result_list):
file_path = os.path.join(save_dir, "%d.png" % idx)
cv2.imwrite(file_path, sample["pred"])
file_path = os.path.join(save_dir, "%d.gt.png" % idx)
cv2.imwrite(file_path, sample["gt"])
def compute_metric(result_list):
"""
modified from https://github.com/YudeWang/deeplabv3plus-pytorch
"""
TP, P, T = [], [], []
for i in range(cfg.NUM_CLASSES):
TP.append(mp.Value("i", 0, lock=True))
P.append(mp.Value("i", 0, lock=True))
T.append(mp.Value("i", 0, lock=True))
def compare(start, step, TP, P, T):
for idx in tqdm(range(start, len(result_list), step)):
pred = result_list[idx]["pred"]
gt = result_list[idx]["gt"]
cal = gt < 255
mask = (pred == gt) * cal
for i in range(cfg.NUM_CLASSES):
P[i].acquire()
P[i].value += np.sum((pred == i) * cal)
P[i].release()
T[i].acquire()
T[i].value += np.sum((gt == i) * cal)
T[i].release()
TP[i].acquire()
TP[i].value += np.sum((gt == i) * mask)
TP[i].release()
p_list = []
for i in range(8):
p = mp.Process(target=compare, args=(i, 8, TP, P, T))
p.start()
p_list.append(p)
for p in p_list:
p.join()
class_names = dataset.PascalVOC.class_names
IoU = []
for i in range(cfg.NUM_CLASSES):
IoU.append(TP[i].value / (T[i].value + P[i].value - TP[i].value + 1e-10))
for i in range(cfg.NUM_CLASSES):
if i == 0:
print("%11s:%7.3f%%" % ("backbound", IoU[i] * 100), end="\t")
else:
if i % 2 != 1:
print("%11s:%7.3f%%" % (class_names[i - 1], IoU[i] * 100), end="\t")
else:
print("%11s:%7.3f%%" % (class_names[i - 1], IoU[i] * 100))
miou = np.mean(np.array(IoU))
print("\n======================================================")
print("%11s:%7.3f%%" % ("mIoU", miou * 100))
return miou
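# --- illustrative sketch (not part of the original script) -----------------------
# compute_metric() accumulates, per class i: P = pixels predicted as i,
# T = ground-truth pixels of i, TP = correctly predicted pixels of i, and reports
# IoU_i = TP / (T + P - TP). A toy single-class check with made-up counts:
def _demo_iou(tp=30, t=50, p=40):
    return tp / (t + p - tp + 1e-10)                     # 30 / 60 -> 0.5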
def build_dataloader(dataset_dir):
val_dataset = dataset.PascalVOC(dataset_dir, "val", order=["image", "mask"])
val_sampler = data.SequentialSampler(val_dataset, cfg.VAL_BATCHES)
val_dataloader = data.DataLoader(
val_dataset,
sampler=val_sampler,
transform=T.Normalize(
mean=cfg.IMG_MEAN, std=cfg.IMG_STD, order=["image", "mask"]
),
num_workers=cfg.DATA_WORKERS,
)
return val_dataloader, val_dataset.__len__()
if __name__ == "__main__":
main()
|
[
"megengine.data.transform.Normalize",
"megengine.data.SequentialSampler",
"megengine.jit.trace",
"megengine.tensor",
"megengine.load",
"megengine.data.dataset.PascalVOC"
] |
[((1066, 1091), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1089, 1091), False, 'import argparse\n'), ((1465, 1505), 'official.vision.segmentation.deeplabv3plus.DeepLabV3Plus', 'DeepLabV3Plus', ([], {'class_num': 'cfg.NUM_CLASSES'}), '(class_num=cfg.NUM_CLASSES)\n', (1478, 1505), False, 'from official.vision.segmentation.deeplabv3plus import DeepLabV3Plus\n'), ((1523, 1548), 'megengine.load', 'mge.load', (['args.model_path'], {}), '(args.model_path)\n', (1531, 1548), True, 'import megengine as mge\n'), ((1710, 1727), 'tqdm.tqdm', 'tqdm', (['test_loader'], {}), '(test_loader)\n', (1714, 1727), False, 'from tqdm import tqdm\n'), ((2077, 2099), 'numpy.zeros', 'np.zeros', (['(4)', 'np.uint32'], {}), '(4, np.uint32)\n', (2085, 2099), True, 'import numpy as np\n'), ((2426, 2523), 'cv2.copyMakeBorder', 'cv2.copyMakeBorder', (['img', 'margin[0]', 'margin[1]', 'margin[2]', 'margin[3]', 'border_mode'], {'value': 'value'}), '(img, margin[0], margin[1], margin[2], margin[3],\n border_mode, value=value)\n', (2444, 2523), False, 'import cv2\n'), ((2600, 2637), 'megengine.jit.trace', 'jit.trace', ([], {'symbolic': '(True)', 'opt_level': '(2)'}), '(symbolic=True, opt_level=2)\n', (2609, 2637), True, 'import megengine.jit as jit\n'), ((2748, 2760), 'megengine.tensor', 'mge.tensor', ([], {}), '()\n', (2758, 2760), True, 'import megengine as mge\n'), ((3242, 3283), 'numpy.zeros', 'np.zeros', (['(ori_h, ori_w, cfg.NUM_CLASSES)'], {}), '((ori_h, ori_w, cfg.NUM_CLASSES))\n', (3250, 3283), True, 'import numpy as np\n'), ((8680, 8742), 'megengine.data.dataset.PascalVOC', 'dataset.PascalVOC', (['dataset_dir', '"""val"""'], {'order': "['image', 'mask']"}), "(dataset_dir, 'val', order=['image', 'mask'])\n", (8697, 8742), True, 'import megengine.data.dataset as dataset\n'), ((8761, 8813), 'megengine.data.SequentialSampler', 'data.SequentialSampler', (['val_dataset', 'cfg.VAL_BATCHES'], {}), '(val_dataset, cfg.VAL_BATCHES)\n', (8783, 8813), True, 'import megengine.data as data\n'), ((6173, 6237), 'cv2.resize', 'cv2.resize', (['pred', '(ori_w, ori_h)'], {'interpolation': 'cv2.INTER_LINEAR'}), '(pred, (ori_w, ori_h), interpolation=cv2.INTER_LINEAR)\n', (6183, 6237), False, 'import cv2\n'), ((6454, 6478), 'os.path.exists', 'os.path.exists', (['save_dir'], {}), '(save_dir)\n', (6468, 6478), False, 'import os\n'), ((6488, 6509), 'os.makedirs', 'os.makedirs', (['save_dir'], {}), '(save_dir)\n', (6499, 6509), False, 'import os\n'), ((6577, 6615), 'os.path.join', 'os.path.join', (['save_dir', "('%d.png' % idx)"], {}), "(save_dir, '%d.png' % idx)\n", (6589, 6615), False, 'import os\n'), ((6624, 6662), 'cv2.imwrite', 'cv2.imwrite', (['file_path', "sample['pred']"], {}), "(file_path, sample['pred'])\n", (6635, 6662), False, 'import cv2\n'), ((6683, 6724), 'os.path.join', 'os.path.join', (['save_dir', "('%d.gt.png' % idx)"], {}), "(save_dir, '%d.gt.png' % idx)\n", (6695, 6724), False, 'import os\n'), ((6733, 6769), 'cv2.imwrite', 'cv2.imwrite', (['file_path', "sample['gt']"], {}), "(file_path, sample['gt'])\n", (6744, 6769), False, 'import cv2\n'), ((7795, 7844), 'multiprocessing.Process', 'mp.Process', ([], {'target': 'compare', 'args': '(i, 8, TP, P, T)'}), '(target=compare, args=(i, 8, TP, P, T))\n', (7805, 7844), True, 'import multiprocessing as mp\n'), ((8475, 8488), 'numpy.array', 'np.array', (['IoU'], {}), '(IoU)\n', (8483, 8488), True, 'import numpy as np\n'), ((3369, 3440), 'cv2.resize', 'cv2.resize', (['img', 'None'], {'fx': 'rate', 'fy': 'rate', 'interpolation': 'cv2.INTER_LINEAR'}), '(img, 
None, fx=rate, fy=rate, interpolation=cv2.INTER_LINEAR)\n', (3379, 3440), False, 'import cv2\n'), ((3639, 3702), 'cv2.resize', 'cv2.resize', (['img', '(out_w, out_h)'], {'interpolation': 'cv2.INTER_LINEAR'}), '(img, (out_w, out_h), interpolation=cv2.INTER_LINEAR)\n', (3649, 3702), False, 'import cv2\n'), ((4751, 4792), 'numpy.zeros', 'np.zeros', (['(pad_h, pad_w, cfg.NUM_CLASSES)'], {}), '((pad_h, pad_w, cfg.NUM_CLASSES))\n', (4759, 4792), True, 'import numpy as np\n'), ((4819, 4860), 'numpy.zeros', 'np.zeros', (['(pad_h, pad_w, cfg.NUM_CLASSES)'], {}), '((pad_h, pad_w, cfg.NUM_CLASSES))\n', (4827, 4860), True, 'import numpy as np\n'), ((6337, 6364), 'numpy.argmax', 'np.argmax', (['pred_all'], {'axis': '(2)'}), '(pred_all, axis=2)\n', (6346, 6364), True, 'import numpy as np\n'), ((6970, 6997), 'multiprocessing.Value', 'mp.Value', (['"""i"""', '(0)'], {'lock': '(True)'}), "('i', 0, lock=True)\n", (6978, 6997), True, 'import multiprocessing as mp\n'), ((7016, 7043), 'multiprocessing.Value', 'mp.Value', (['"""i"""', '(0)'], {'lock': '(True)'}), "('i', 0, lock=True)\n", (7024, 7043), True, 'import multiprocessing as mp\n'), ((7062, 7089), 'multiprocessing.Value', 'mp.Value', (['"""i"""', '(0)'], {'lock': '(True)'}), "('i', 0, lock=True)\n", (7070, 7089), True, 'import multiprocessing as mp\n'), ((8920, 8992), 'megengine.data.transform.Normalize', 'T.Normalize', ([], {'mean': 'cfg.IMG_MEAN', 'std': 'cfg.IMG_STD', 'order': "['image', 'mask']"}), "(mean=cfg.IMG_MEAN, std=cfg.IMG_STD, order=['image', 'mask'])\n", (8931, 8992), True, 'import megengine.data.transform as T\n'), ((7450, 7475), 'numpy.sum', 'np.sum', (['((pred == i) * cal)'], {}), '((pred == i) * cal)\n', (7456, 7475), True, 'import numpy as np\n'), ((7568, 7591), 'numpy.sum', 'np.sum', (['((gt == i) * cal)'], {}), '((gt == i) * cal)\n', (7574, 7591), True, 'import numpy as np\n'), ((7686, 7710), 'numpy.sum', 'np.sum', (['((gt == i) * mask)'], {}), '((gt == i) * mask)\n', (7692, 7710), True, 'import numpy as np\n'), ((4279, 4303), 'numpy.ceil', 'np.ceil', (['(i * stride_rate)'], {}), '(i * stride_rate)\n', (4286, 4303), True, 'import numpy as np\n'), ((4603, 4630), 'numpy.ceil', 'np.ceil', (['((ps - cs) / stride)'], {}), '((ps - cs) / stride)\n', (4610, 4630), True, 'import numpy as np\n')]
|
"""Governance Database Tables/Models.
Models of the Traction tables for Governance and related data. This includes ledger
related tables for schemas and credential definitions.
"""
import uuid
from datetime import datetime
from typing import List
from sqlmodel import Field, Relationship
from sqlalchemy import (
Column,
func,
String,
select,
desc,
text,
UniqueConstraint,
)
from sqlalchemy.dialects.postgresql import TIMESTAMP, ARRAY, UUID
from sqlmodel.ext.asyncio.session import AsyncSession
from api.db.models.base import BaseModel
from api.endpoints.models.v1.errors import (
NotFoundError,
)
class SchemaTemplate(BaseModel, table=True):
"""SchemaTemplate.
This is the model for the Schema table (postgresql specific dialects in use).
Schemas are registered on the ledger, so there can be only one...
    However, each Tenant can import the same schema for their own purposes. For now,
    there will be some redundancy in the Schema data; we will wait on usage to decide
    whether to normalize into a single Schema table shared by all tenants plus a join
    table for Schema/Tenant.
There is already a table for v0 API named tenantschema and this is named differently
to avoid confusion and interference. When v0 is retired/deleted, perhaps we change
the name then...
For a given tenant, the schema can be found by the schema_template_id (Traction id)
or schema_id (ledger id).
Attributes:
schema_template_id: Traction ID
tenant_id: Traction Tenant ID, owner of this Contact
schema_id: This will be the ledger schema id - this is not a UUID
name: a "pretty" name for the schema, this can be different than the name on the
ledger (schema_name).
status: Status of the schema as it is being endorsed and registered
tags: Set by tenant for arbitrary grouping of their Schemas
deleted: Schema/Tenant "soft" delete indicator.
imported: When True, this tenant imported the schema, otherwise they created it
version: version, on ledger
attributes: list of attribute names, on ledger
schema_name: name as is appears on the ledger
transaction_id: id used when schema is being endorsed and registered
state: The underlying AcaPy endorser state
created_at: Timestamp when record was created in Traction
updated_at: Timestamp when record was last modified in Traction
"""
__tablename__ = "schema_template"
__table_args__ = (UniqueConstraint("tenant_id", "schema_id"),)
schema_template_id: uuid.UUID = Field(
sa_column=Column(
UUID(as_uuid=True),
primary_key=True,
server_default=text("gen_random_uuid()"),
)
)
tenant_id: uuid.UUID = Field(foreign_key="tenant.id", index=True)
schema_id: str = Field(nullable=True, index=True)
name: str = Field(nullable=False)
status: str = Field(nullable=False)
tags: List[str] = Field(sa_column=Column(ARRAY(String)))
deleted: bool = Field(nullable=False, default=False)
imported: bool = Field(nullable=False, default=False)
state: str = Field(nullable=False)
# ledger data ---
version: str = Field(nullable=False)
attributes: List[str] = Field(sa_column=Column(ARRAY(String)))
schema_name: str = Field(nullable=True)
transaction_id: str = Field(nullable=True)
# --- ledger data
created_at: datetime = Field(
sa_column=Column(TIMESTAMP, nullable=False, server_default=func.now())
)
updated_at: datetime = Field(
sa_column=Column(
TIMESTAMP, nullable=False, server_default=func.now(), onupdate=func.now()
)
)
@classmethod
async def get_by_id(
cls: "SchemaTemplate",
db: AsyncSession,
tenant_id: uuid.UUID,
schema_template_id: uuid.UUID,
deleted: bool | None = False,
) -> "SchemaTemplate":
"""Get SchemaTemplate by schema_template_id.
Find and return the database SchemaTemplate record
Args:
db: database session
tenant_id: Traction ID of tenant making the call
schema_template_id: Traction ID of Schema Template
Returns: The Traction SchemaTemplate (db) record
Raises:
NotFoundError: if the SchemaTemplate cannot be found by ID and deleted flag
"""
q = (
select(cls)
.where(cls.tenant_id == tenant_id)
.where(cls.schema_template_id == schema_template_id)
.where(cls.deleted == deleted)
)
q_result = await db.execute(q)
db_rec = q_result.scalar_one_or_none()
if not db_rec:
raise NotFoundError(
code="schema_template.id_not_found",
title="Schema Template does not exist",
detail=f"Schema Template does not exist for id<{schema_template_id}>",
)
return db_rec
@classmethod
async def get_by_schema_id(
cls: "SchemaTemplate",
db: AsyncSession,
tenant_id: uuid.UUID,
schema_id: str,
deleted: bool | None = False,
) -> "SchemaTemplate":
"""Get SchemaTemplate by schema_id.
Find and return the database SchemaTemplate record
Args:
db: database session
tenant_id: Traction ID of tenant making the call
schema_id: Ledger Schema ID of Schema Template
Returns: The Traction SchemaTemplate (db) record
Raises:
NotFoundError: if the SchemaTemplate cannot be found by schema ID and deleted
flag
"""
q = (
select(cls)
.where(cls.tenant_id == tenant_id)
.where(cls.schema_id == schema_id)
.where(cls.deleted == deleted)
)
q_result = await db.execute(q)
db_rec = q_result.scalar_one_or_none()
if not db_rec:
raise NotFoundError(
code="schema_template.schema_id_not_found",
title="Schema Template does not exist",
detail=f"Schema Template does not exist for schema_id<{schema_id}>",
)
return db_rec
@classmethod
async def get_by_transaction_id(
cls: "SchemaTemplate",
db: AsyncSession,
tenant_id: uuid.UUID,
transaction_id: str,
deleted: bool | None = False,
) -> "SchemaTemplate":
"""Get SchemaTemplate by transaction_id.
Find and return the database SchemaTemplate record
Args:
db: database session
tenant_id: Traction ID of tenant making the call
transaction_id: Transaction ID from endorser
Returns: The Traction SchemaTemplate (db) record
Raises:
NotFoundError: if the SchemaTemplate cannot be found by schema ID and deleted
flag
"""
q = (
select(cls)
.where(cls.tenant_id == tenant_id)
.where(cls.transaction_id == transaction_id)
.where(cls.deleted == deleted)
)
q_result = await db.execute(q)
db_rec = q_result.scalar_one_or_none()
if not db_rec:
raise NotFoundError(
code="schema_template.transaction_id_not_found",
title="Schema Template does not exist",
detail=f"Schema Template does not exist for transaction_id<{transaction_id}>", # noqa: E501
)
return db_rec
@classmethod
async def list_by_tenant_id(
cls: "SchemaTemplate",
db: AsyncSession,
tenant_id: uuid.UUID,
) -> List["SchemaTemplate"]:
"""List by Tenant ID.
Find and return list of SchemaTemplate records for Tenant.
Args:
db: database session
tenant_id: Traction ID of Tenant
Returns: List of Traction SchemaTemplate (db) records in descending order
"""
q = select(cls).where(cls.tenant_id == tenant_id).order_by(desc(cls.updated_at))
q_result = await db.execute(q)
db_recs = q_result.scalars()
return db_recs
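# --- illustrative usage sketch (not part of the original module) -----------------
# As described in the class docstring, a SchemaTemplate can be resolved either by
# its Traction id (schema_template_id) or by its ledger id (schema_id). The session
# and ids below are placeholders supplied by the caller.
async def _example_schema_template_lookup(
    db: AsyncSession, tenant_id: uuid.UUID, schema_template_id: uuid.UUID, schema_id: str
):
    by_traction_id = await SchemaTemplate.get_by_id(db, tenant_id, schema_template_id)
    by_ledger_id = await SchemaTemplate.get_by_schema_id(db, tenant_id, schema_id)
    # Both lookups raise NotFoundError when no matching, non-deleted record exists.
    return by_traction_id, by_ledger_id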
class CredentialTemplate(BaseModel, table=True):
"""Credential Template.
Model for the Credential Definition table (postgresql specific dialects in use).
This will track Credential Definitions for the Tenants.
    For a given tenant, the Credential Template can be found by the
credential_template_id (Traction id) or cred_def_id (ledger id).
Attributes:
credential_template_id: Traction ID
tenant_id: Traction Tenant ID
schema_template_id: Traction ID for Schema Template
cred_def_id: Credential Definition ID from the ledger
schema_id: Ledger ID of Schema this credential definition is for
name: based on SchemaTemplate.name, but allow override here...
status: Status of the credential definition as it is being endorsed and registered
deleted: Credential Definition "soft" delete indicator.
transaction_id: id used when schema is being endorsed and registered
tags: Set by tenant for arbitrary grouping of Credential Templates/Definitions
tag: tag used to create the credential definition (on ledger)
attributes: list of attribute names (on ledger)
state: The underlying AcaPy endorser state
revocation_enabled: when True, subsequent Credentials can be revoked.
revocation_registry_size: how large the default revocation registry is
revocation_registry_state: The underlying AcaPy endorser state for revocation
created_at: Timestamp when record was created in Traction
updated_at: Timestamp when record was last modified in Traction
"""
__tablename__ = "credential_template"
credential_template_id: uuid.UUID = Field(
sa_column=Column(
UUID(as_uuid=True),
primary_key=True,
server_default=text("gen_random_uuid()"),
)
)
tenant_id: uuid.UUID = Field(foreign_key="tenant.id", nullable=False, index=True)
schema_template_id: uuid.UUID = Field(
foreign_key="schema_template.schema_template_id", nullable=False, index=True
)
cred_def_id: str = Field(nullable=True, index=True)
schema_id: str = Field(nullable=True)
name: str = Field(nullable=False)
status: str = Field(nullable=False)
tags: List[str] = Field(sa_column=Column(ARRAY(String)))
deleted: bool = Field(nullable=False, default=False)
state: str = Field(nullable=False)
# ledger(ish) data ---
transaction_id: str = Field(nullable=True)
tag: str = Field(nullable=False)
attributes: List[str] = Field(sa_column=Column(ARRAY(String)))
revocation_enabled: bool = Field(nullable=False, default=False)
revocation_registry_size: int = Field(nullable=True, default=None)
revocation_registry_state: str = Field(nullable=False)
# --- ledger data
# relationships ---
issuer_credentials: List["IssuerCredential"] = Relationship( # noqa: F821
back_populates="credential_template"
)
# --- relationships
created_at: datetime = Field(
sa_column=Column(TIMESTAMP, nullable=False, server_default=func.now())
)
updated_at: datetime = Field(
sa_column=Column(
TIMESTAMP, nullable=False, server_default=func.now(), onupdate=func.now()
)
)
@classmethod
async def get_by_id(
cls: "CredentialTemplate",
db: AsyncSession,
tenant_id: uuid.UUID,
credential_template_id: uuid.UUID,
deleted: bool | None = False,
) -> "CredentialTemplate":
"""Get CredentialDefinition by cred def id.
Find and return the database CredentialTemplate record
Args:
db: database session
tenant_id: Traction ID of tenant making the call
credential_template_id: Traction ID of CredentialTemplate
Returns: The Traction CredentialTemplate (db) record
Raises:
NotFoundError: if the CredentialTemplate cannot be found by ID and deleted
flag
"""
q = (
select(cls)
.where(cls.tenant_id == tenant_id)
.where(cls.credential_template_id == credential_template_id)
.where(cls.deleted == deleted)
)
q_result = await db.execute(q)
db_rec = q_result.scalar_one_or_none()
if not db_rec:
raise NotFoundError(
code="credential_template.id_not_found",
title="Credential Template does not exist",
detail=f"Credential Template does not exist for id<{credential_template_id}>", # noqa: E501
)
return db_rec
@classmethod
async def get_by_cred_def_id(
cls: "CredentialTemplate",
db: AsyncSession,
tenant_id: uuid.UUID,
cred_def_id: str,
deleted: bool | None = False,
) -> "CredentialTemplate":
"""Get CredentialDefinition by cred def id.
Find and return the database CredentialTemplate record
Args:
db: database session
tenant_id: Traction ID of tenant making the call
cred_def_id: Ledger Cred Def ID of CredentialTemplate
Returns: The Traction CredentialTemplate (db) record
Raises:
NotFoundError: if the CredentialTemplate cannot be found by Cred Def ID and
deleted flag
"""
q = (
select(cls)
.where(cls.tenant_id == tenant_id)
.where(cls.cred_def_id == cred_def_id)
.where(cls.deleted == deleted)
)
q_result = await db.execute(q)
db_rec = q_result.scalar_one_or_none()
if not db_rec:
raise NotFoundError(
code="credential_template.cred_def_id_not_found",
title="Credential Template does not exist",
detail=f"Credential Template does not exist for cred_def_id<{cred_def_id}>", # noqa: E501
)
return db_rec
@classmethod
async def get_by_transaction_id(
cls: "CredentialTemplate",
db: AsyncSession,
tenant_id: uuid.UUID,
transaction_id: str,
deleted: bool | None = False,
) -> "CredentialTemplate":
"""Get CredentialTemplate by transaction_id.
Find and return the database CredentialTemplate record
Args:
db: database session
tenant_id: Traction ID of tenant making the call
transaction_id: Transaction ID from endorser
Returns: The Traction CredentialTemplate (db) record
Raises:
NotFoundError: if the CredentialTemplate cannot be found by schema ID and
deleted flag
"""
q = (
select(cls)
.where(cls.tenant_id == tenant_id)
.where(cls.transaction_id == transaction_id)
.where(cls.deleted == deleted)
)
q_result = await db.execute(q)
db_rec = q_result.scalar_one_or_none()
if not db_rec:
raise NotFoundError(
code="credential_template.transaction_id_not_found",
title="Credential Template does not exist",
detail=f"Credential Template does not exist for transaction_id<{transaction_id}>", # noqa: E501
)
return db_rec
@classmethod
async def get_by_schema_and_tag(
cls: "CredentialTemplate",
db: AsyncSession,
tenant_id: uuid.UUID,
schema_id: str,
tag: str,
) -> "CredentialTemplate":
"""Get CredentialTemplate by schema id (ledger) and tag.
Use this to determine if we can create a new template. If we have a tag for
this schema, then we cannot reliably create the cred def on the ledger.
Args:
db: database session
tenant_id: Traction ID of tenant making the call
schema_id: ledger schema id
tag: cred def tag
Returns: The Traction CredentialTemplate (db) record or None
"""
q = (
select(cls)
.where(cls.tenant_id == tenant_id)
.where(cls.schema_id == schema_id)
.where(cls.tag == tag)
)
q_result = await db.execute(q)
db_rec = q_result.scalar_one_or_none()
return db_rec
@classmethod
async def list_by_schema_template_id(
cls: "CredentialTemplate",
db: AsyncSession,
tenant_id: uuid.UUID,
schema_template_id: uuid.UUID,
status: str | None = None,
) -> List["CredentialTemplate"]:
"""List by Schema Template ID.
Find and return list of Credential Template records for Schema (Tenant).
tenant_id: Traction ID of tenant making the call
schema_template_id: Traction ID of SchemaTemplate
status: optional, if provided return only items that have this status
Returns: List of Traction CredentialTemplate (db) records in descending order
"""
filters = [
cls.tenant_id == tenant_id,
cls.schema_template_id == schema_template_id,
]
if status:
filters.append(cls.status == status)
q = select(cls).filter(*filters).order_by(desc(cls.updated_at))
q_result = await db.execute(q)
db_recs = q_result.scalars()
return db_recs
@classmethod
async def list_by_schema_id(
cls: "CredentialTemplate",
db: AsyncSession,
tenant_id: uuid.UUID,
schema_id: str,
) -> List["CredentialTemplate"]:
"""List by Schema ID.
Find and return list of Credential Template records for Schema (Tenant).
tenant_id: Traction ID of tenant making the call
schema_id: Ledger ID of Schema
Returns: List of Traction CredentialTemplate (db) records in descending order
"""
q = (
select(cls)
.where(cls.schema_id == schema_id)
.where(cls.tenant_id == tenant_id)
.order_by(desc(cls.created_at))
)
q_result = await db.execute(q)
db_recs = q_result.scalars()
return db_recs
@classmethod
async def list_by_tenant_id(
cls: "CredentialTemplate",
db: AsyncSession,
tenant_id: uuid.UUID,
) -> List["CredentialTemplate"]:
"""List by Tenant ID.
Find and return list of Credential Template records for Tenant.
tenant_id: Traction ID of tenant making the call
Returns: List of Traction CredentialTemplate (db) records in descending order
"""
q = select(cls).where(cls.tenant_id == tenant_id).order_by(desc(cls.updated_at))
q_result = await db.execute(q)
db_recs = q_result.scalars()
return db_recs
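# --- illustrative usage sketch (not part of the original module) -----------------
# As noted in get_by_schema_and_tag(), an existing CredentialTemplate for a
# (schema_id, tag) pair means a new cred def cannot reliably be created on the
# ledger, so a caller would check for an existing row first. The arguments below
# are placeholders.
async def _example_can_create_cred_def(
    db: AsyncSession, tenant_id: uuid.UUID, schema_id: str, tag: str
) -> bool:
    existing = await CredentialTemplate.get_by_schema_and_tag(db, tenant_id, schema_id, tag)
    return existing is None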
|
[
"sqlmodel.Relationship",
"sqlmodel.Field"
] |
[((2795, 2837), 'sqlmodel.Field', 'Field', ([], {'foreign_key': '"""tenant.id"""', 'index': '(True)'}), "(foreign_key='tenant.id', index=True)\n", (2800, 2837), False, 'from sqlmodel import Field, Relationship\n'), ((2859, 2891), 'sqlmodel.Field', 'Field', ([], {'nullable': '(True)', 'index': '(True)'}), '(nullable=True, index=True)\n', (2864, 2891), False, 'from sqlmodel import Field, Relationship\n'), ((2909, 2930), 'sqlmodel.Field', 'Field', ([], {'nullable': '(False)'}), '(nullable=False)\n', (2914, 2930), False, 'from sqlmodel import Field, Relationship\n'), ((2950, 2971), 'sqlmodel.Field', 'Field', ([], {'nullable': '(False)'}), '(nullable=False)\n', (2955, 2971), False, 'from sqlmodel import Field, Relationship\n'), ((3053, 3089), 'sqlmodel.Field', 'Field', ([], {'nullable': '(False)', 'default': '(False)'}), '(nullable=False, default=False)\n', (3058, 3089), False, 'from sqlmodel import Field, Relationship\n'), ((3111, 3147), 'sqlmodel.Field', 'Field', ([], {'nullable': '(False)', 'default': '(False)'}), '(nullable=False, default=False)\n', (3116, 3147), False, 'from sqlmodel import Field, Relationship\n'), ((3166, 3187), 'sqlmodel.Field', 'Field', ([], {'nullable': '(False)'}), '(nullable=False)\n', (3171, 3187), False, 'from sqlmodel import Field, Relationship\n'), ((3230, 3251), 'sqlmodel.Field', 'Field', ([], {'nullable': '(False)'}), '(nullable=False)\n', (3235, 3251), False, 'from sqlmodel import Field, Relationship\n'), ((3342, 3362), 'sqlmodel.Field', 'Field', ([], {'nullable': '(True)'}), '(nullable=True)\n', (3347, 3362), False, 'from sqlmodel import Field, Relationship\n'), ((3389, 3409), 'sqlmodel.Field', 'Field', ([], {'nullable': '(True)'}), '(nullable=True)\n', (3394, 3409), False, 'from sqlmodel import Field, Relationship\n'), ((10018, 10076), 'sqlmodel.Field', 'Field', ([], {'foreign_key': '"""tenant.id"""', 'nullable': '(False)', 'index': '(True)'}), "(foreign_key='tenant.id', nullable=False, index=True)\n", (10023, 10076), False, 'from sqlmodel import Field, Relationship\n'), ((10113, 10200), 'sqlmodel.Field', 'Field', ([], {'foreign_key': '"""schema_template.schema_template_id"""', 'nullable': '(False)', 'index': '(True)'}), "(foreign_key='schema_template.schema_template_id', nullable=False,\n index=True)\n", (10118, 10200), False, 'from sqlmodel import Field, Relationship\n'), ((10234, 10266), 'sqlmodel.Field', 'Field', ([], {'nullable': '(True)', 'index': '(True)'}), '(nullable=True, index=True)\n', (10239, 10266), False, 'from sqlmodel import Field, Relationship\n'), ((10288, 10308), 'sqlmodel.Field', 'Field', ([], {'nullable': '(True)'}), '(nullable=True)\n', (10293, 10308), False, 'from sqlmodel import Field, Relationship\n'), ((10326, 10347), 'sqlmodel.Field', 'Field', ([], {'nullable': '(False)'}), '(nullable=False)\n', (10331, 10347), False, 'from sqlmodel import Field, Relationship\n'), ((10366, 10387), 'sqlmodel.Field', 'Field', ([], {'nullable': '(False)'}), '(nullable=False)\n', (10371, 10387), False, 'from sqlmodel import Field, Relationship\n'), ((10469, 10505), 'sqlmodel.Field', 'Field', ([], {'nullable': '(False)', 'default': '(False)'}), '(nullable=False, default=False)\n', (10474, 10505), False, 'from sqlmodel import Field, Relationship\n'), ((10523, 10544), 'sqlmodel.Field', 'Field', ([], {'nullable': '(False)'}), '(nullable=False)\n', (10528, 10544), False, 'from sqlmodel import Field, Relationship\n'), ((10599, 10619), 'sqlmodel.Field', 'Field', ([], {'nullable': '(True)'}), '(nullable=True)\n', (10604, 10619), False, 'from sqlmodel import 
Field, Relationship\n'), ((10635, 10656), 'sqlmodel.Field', 'Field', ([], {'nullable': '(False)'}), '(nullable=False)\n', (10640, 10656), False, 'from sqlmodel import Field, Relationship\n'), ((10755, 10791), 'sqlmodel.Field', 'Field', ([], {'nullable': '(False)', 'default': '(False)'}), '(nullable=False, default=False)\n', (10760, 10791), False, 'from sqlmodel import Field, Relationship\n'), ((10828, 10862), 'sqlmodel.Field', 'Field', ([], {'nullable': '(True)', 'default': 'None'}), '(nullable=True, default=None)\n', (10833, 10862), False, 'from sqlmodel import Field, Relationship\n'), ((10900, 10921), 'sqlmodel.Field', 'Field', ([], {'nullable': '(False)'}), '(nullable=False)\n', (10905, 10921), False, 'from sqlmodel import Field, Relationship\n'), ((11020, 11070), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""credential_template"""'}), "(back_populates='credential_template')\n", (11032, 11070), False, 'from sqlmodel import Field, Relationship\n'), ((2522, 2564), 'sqlalchemy.UniqueConstraint', 'UniqueConstraint', (['"""tenant_id"""', '"""schema_id"""'], {}), "('tenant_id', 'schema_id')\n", (2538, 2564), False, 'from sqlalchemy import Column, func, String, select, desc, text, UniqueConstraint\n'), ((4731, 4902), 'api.endpoints.models.v1.errors.NotFoundError', 'NotFoundError', ([], {'code': '"""schema_template.id_not_found"""', 'title': '"""Schema Template does not exist"""', 'detail': 'f"""Schema Template does not exist for id<{schema_template_id}>"""'}), "(code='schema_template.id_not_found', title=\n 'Schema Template does not exist', detail=\n f'Schema Template does not exist for id<{schema_template_id}>')\n", (4744, 4902), False, 'from api.endpoints.models.v1.errors import NotFoundError\n'), ((5973, 6149), 'api.endpoints.models.v1.errors.NotFoundError', 'NotFoundError', ([], {'code': '"""schema_template.schema_id_not_found"""', 'title': '"""Schema Template does not exist"""', 'detail': 'f"""Schema Template does not exist for schema_id<{schema_id}>"""'}), "(code='schema_template.schema_id_not_found', title=\n 'Schema Template does not exist', detail=\n f'Schema Template does not exist for schema_id<{schema_id}>')\n", (5986, 6149), False, 'from api.endpoints.models.v1.errors import NotFoundError\n'), ((7243, 7434), 'api.endpoints.models.v1.errors.NotFoundError', 'NotFoundError', ([], {'code': '"""schema_template.transaction_id_not_found"""', 'title': '"""Schema Template does not exist"""', 'detail': 'f"""Schema Template does not exist for transaction_id<{transaction_id}>"""'}), "(code='schema_template.transaction_id_not_found', title=\n 'Schema Template does not exist', detail=\n f'Schema Template does not exist for transaction_id<{transaction_id}>')\n", (7256, 7434), False, 'from api.endpoints.models.v1.errors import NotFoundError\n'), ((8045, 8065), 'sqlalchemy.desc', 'desc', (['cls.updated_at'], {}), '(cls.updated_at)\n', (8049, 8065), False, 'from sqlalchemy import Column, func, String, select, desc, text, UniqueConstraint\n'), ((12470, 12657), 'api.endpoints.models.v1.errors.NotFoundError', 'NotFoundError', ([], {'code': '"""credential_template.id_not_found"""', 'title': '"""Credential Template does not exist"""', 'detail': 'f"""Credential Template does not exist for id<{credential_template_id}>"""'}), "(code='credential_template.id_not_found', title=\n 'Credential Template does not exist', detail=\n f'Credential Template does not exist for id<{credential_template_id}>')\n", (12483, 12657), False, 'from api.endpoints.models.v1.errors import NotFoundError\n'), 
((13787, 13981), 'api.endpoints.models.v1.errors.NotFoundError', 'NotFoundError', ([], {'code': '"""credential_template.cred_def_id_not_found"""', 'title': '"""Credential Template does not exist"""', 'detail': 'f"""Credential Template does not exist for cred_def_id<{cred_def_id}>"""'}), "(code='credential_template.cred_def_id_not_found', title=\n 'Credential Template does not exist', detail=\n f'Credential Template does not exist for cred_def_id<{cred_def_id}>')\n", (13800, 13981), False, 'from api.endpoints.models.v1.errors import NotFoundError\n'), ((15113, 15316), 'api.endpoints.models.v1.errors.NotFoundError', 'NotFoundError', ([], {'code': '"""credential_template.transaction_id_not_found"""', 'title': '"""Credential Template does not exist"""', 'detail': 'f"""Credential Template does not exist for transaction_id<{transaction_id}>"""'}), "(code='credential_template.transaction_id_not_found', title=\n 'Credential Template does not exist', detail=\n f'Credential Template does not exist for transaction_id<{transaction_id}>')\n", (15126, 15316), False, 'from api.endpoints.models.v1.errors import NotFoundError\n'), ((17323, 17343), 'sqlalchemy.desc', 'desc', (['cls.updated_at'], {}), '(cls.updated_at)\n', (17327, 17343), False, 'from sqlalchemy import Column, func, String, select, desc, text, UniqueConstraint\n'), ((18114, 18134), 'sqlalchemy.desc', 'desc', (['cls.created_at'], {}), '(cls.created_at)\n', (18118, 18134), False, 'from sqlalchemy import Column, func, String, select, desc, text, UniqueConstraint\n'), ((18754, 18774), 'sqlalchemy.desc', 'desc', (['cls.updated_at'], {}), '(cls.updated_at)\n', (18758, 18774), False, 'from sqlalchemy import Column, func, String, select, desc, text, UniqueConstraint\n'), ((2648, 2666), 'sqlalchemy.dialects.postgresql.UUID', 'UUID', ([], {'as_uuid': '(True)'}), '(as_uuid=True)\n', (2652, 2666), False, 'from sqlalchemy.dialects.postgresql import TIMESTAMP, ARRAY, UUID\n'), ((3017, 3030), 'sqlalchemy.dialects.postgresql.ARRAY', 'ARRAY', (['String'], {}), '(String)\n', (3022, 3030), False, 'from sqlalchemy.dialects.postgresql import TIMESTAMP, ARRAY, UUID\n'), ((3303, 3316), 'sqlalchemy.dialects.postgresql.ARRAY', 'ARRAY', (['String'], {}), '(String)\n', (3308, 3316), False, 'from sqlalchemy.dialects.postgresql import TIMESTAMP, ARRAY, UUID\n'), ((9871, 9889), 'sqlalchemy.dialects.postgresql.UUID', 'UUID', ([], {'as_uuid': '(True)'}), '(as_uuid=True)\n', (9875, 9889), False, 'from sqlalchemy.dialects.postgresql import TIMESTAMP, ARRAY, UUID\n'), ((10433, 10446), 'sqlalchemy.dialects.postgresql.ARRAY', 'ARRAY', (['String'], {}), '(String)\n', (10438, 10446), False, 'from sqlalchemy.dialects.postgresql import TIMESTAMP, ARRAY, UUID\n'), ((10708, 10721), 'sqlalchemy.dialects.postgresql.ARRAY', 'ARRAY', (['String'], {}), '(String)\n', (10713, 10721), False, 'from sqlalchemy.dialects.postgresql import TIMESTAMP, ARRAY, UUID\n'), ((2725, 2750), 'sqlalchemy.text', 'text', (['"""gen_random_uuid()"""'], {}), "('gen_random_uuid()')\n", (2729, 2750), False, 'from sqlalchemy import Column, func, String, select, desc, text, UniqueConstraint\n'), ((3534, 3544), 'sqlalchemy.func.now', 'func.now', ([], {}), '()\n', (3542, 3544), False, 'from sqlalchemy import Column, func, String, select, desc, text, UniqueConstraint\n'), ((3666, 3676), 'sqlalchemy.func.now', 'func.now', ([], {}), '()\n', (3674, 3676), False, 'from sqlalchemy import Column, func, String, select, desc, text, UniqueConstraint\n'), ((3687, 3697), 'sqlalchemy.func.now', 'func.now', ([], {}), '()\n', (3695, 
3697), False, 'from sqlalchemy import Column, func, String, select, desc, text, UniqueConstraint\n'), ((9948, 9973), 'sqlalchemy.text', 'text', (['"""gen_random_uuid()"""'], {}), "('gen_random_uuid()')\n", (9952, 9973), False, 'from sqlalchemy import Column, func, String, select, desc, text, UniqueConstraint\n'), ((11225, 11235), 'sqlalchemy.func.now', 'func.now', ([], {}), '()\n', (11233, 11235), False, 'from sqlalchemy import Column, func, String, select, desc, text, UniqueConstraint\n'), ((11357, 11367), 'sqlalchemy.func.now', 'func.now', ([], {}), '()\n', (11365, 11367), False, 'from sqlalchemy import Column, func, String, select, desc, text, UniqueConstraint\n'), ((11378, 11388), 'sqlalchemy.func.now', 'func.now', ([], {}), '()\n', (11386, 11388), False, 'from sqlalchemy import Column, func, String, select, desc, text, UniqueConstraint\n'), ((7990, 8001), 'sqlalchemy.select', 'select', (['cls'], {}), '(cls)\n', (7996, 8001), False, 'from sqlalchemy import Column, func, String, select, desc, text, UniqueConstraint\n'), ((17285, 17296), 'sqlalchemy.select', 'select', (['cls'], {}), '(cls)\n', (17291, 17296), False, 'from sqlalchemy import Column, func, String, select, desc, text, UniqueConstraint\n'), ((18699, 18710), 'sqlalchemy.select', 'select', (['cls'], {}), '(cls)\n', (18705, 18710), False, 'from sqlalchemy import Column, func, String, select, desc, text, UniqueConstraint\n'), ((4427, 4438), 'sqlalchemy.select', 'select', (['cls'], {}), '(cls)\n', (4433, 4438), False, 'from sqlalchemy import Column, func, String, select, desc, text, UniqueConstraint\n'), ((5687, 5698), 'sqlalchemy.select', 'select', (['cls'], {}), '(cls)\n', (5693, 5698), False, 'from sqlalchemy import Column, func, String, select, desc, text, UniqueConstraint\n'), ((6947, 6958), 'sqlalchemy.select', 'select', (['cls'], {}), '(cls)\n', (6953, 6958), False, 'from sqlalchemy import Column, func, String, select, desc, text, UniqueConstraint\n'), ((12158, 12169), 'sqlalchemy.select', 'select', (['cls'], {}), '(cls)\n', (12164, 12169), False, 'from sqlalchemy import Column, func, String, select, desc, text, UniqueConstraint\n'), ((13497, 13508), 'sqlalchemy.select', 'select', (['cls'], {}), '(cls)\n', (13503, 13508), False, 'from sqlalchemy import Column, func, String, select, desc, text, UniqueConstraint\n'), ((14817, 14828), 'sqlalchemy.select', 'select', (['cls'], {}), '(cls)\n', (14823, 14828), False, 'from sqlalchemy import Column, func, String, select, desc, text, UniqueConstraint\n'), ((16135, 16146), 'sqlalchemy.select', 'select', (['cls'], {}), '(cls)\n', (16141, 16146), False, 'from sqlalchemy import Column, func, String, select, desc, text, UniqueConstraint\n'), ((17986, 17997), 'sqlalchemy.select', 'select', (['cls'], {}), '(cls)\n', (17992, 17997), False, 'from sqlalchemy import Column, func, String, select, desc, text, UniqueConstraint\n')]
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import copy
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine.core import Buffer, Graph, Parameter
from megengine.module import Conv2d
from megengine.test import assertTensorClose
def test_set_value():
v0 = np.random.random((2, 3)).astype(np.float32)
param = Parameter(v0)
v1 = np.random.random((2, 3)).astype(np.float32)
param.set_value(v1)
assertTensorClose(param.numpy(), v1, max_err=5e-6)
v2 = np.random.random((3, 3)).astype(np.float32)
# TODO: add this
# with pytest.raises(ValueError):
# param.set_value(v2)
assertTensorClose(param.numpy(), v1, max_err=5e-6)
def test_fill():
a = Buffer(np.zeros((2, 3), dtype=np.float32))
a.fill(3)
assertTensorClose(a.numpy(), np.full((2, 3), 3, dtype=np.float32))
a.fill(124.568)
assertTensorClose(a.numpy(), np.full((2, 3), 124.568, dtype=np.float32))
# TODO: remove or rewrite following test
# def test_attach():
# p_ = np.random.random((2, 3)).astype(np.float32)
# with Graph() as g:
# g.set_option('eager_evaluation', False)
# p = Parameter(p_)
# v = p * 2
# f = compile(v, None)
# out, = f()
# assertTensorClose(out, p_ * 2)
# F.add_update(p, p)
# out, = f()
# assertTensorClose(out, p_ * 4)
# TODO: remove or rewrite following test
# def test_module_attach():
# v = np.random.random((1, 3, 64, 64)).astype(np.float32)
# net = Conv2d(3, 16, 3)
# with Graph() as g:
# g.set_option('eager_evaluation', False)
# data0 = Input("data")
# f = compile(net(data0), None)
# out0, = f(data=v)
# data1 = Input("data", value=v)
# out1 = net(data1)
# assertTensorClose(out0, out1.numpy())
def test_shape_warning():
with Graph() as cg:
cg.set_option("eager_evaluation", False)
b = Buffer(np.ones((2, 3)).astype(np.float32))
with pytest.warns(None) as record:
print(b.shape)
if len(record) != 0:
raise ValueError(
"Getting the shape of a constant Tensor should throw no Warning"
)
|
[
"megengine.core.Graph",
"megengine.core.Parameter"
] |
[((703, 716), 'megengine.core.Parameter', 'Parameter', (['v0'], {}), '(v0)\n', (712, 716), False, 'from megengine.core import Buffer, Graph, Parameter\n'), ((1080, 1114), 'numpy.zeros', 'np.zeros', (['(2, 3)'], {'dtype': 'np.float32'}), '((2, 3), dtype=np.float32)\n', (1088, 1114), True, 'import numpy as np\n'), ((1163, 1199), 'numpy.full', 'np.full', (['(2, 3)', '(3)'], {'dtype': 'np.float32'}), '((2, 3), 3, dtype=np.float32)\n', (1170, 1199), True, 'import numpy as np\n'), ((1254, 1296), 'numpy.full', 'np.full', (['(2, 3)', '(124.568)'], {'dtype': 'np.float32'}), '((2, 3), 124.568, dtype=np.float32)\n', (1261, 1296), True, 'import numpy as np\n'), ((2186, 2193), 'megengine.core.Graph', 'Graph', ([], {}), '()\n', (2191, 2193), False, 'from megengine.core import Buffer, Graph, Parameter\n'), ((647, 671), 'numpy.random.random', 'np.random.random', (['(2, 3)'], {}), '((2, 3))\n', (663, 671), True, 'import numpy as np\n'), ((726, 750), 'numpy.random.random', 'np.random.random', (['(2, 3)'], {}), '((2, 3))\n', (742, 750), True, 'import numpy as np\n'), ((858, 882), 'numpy.random.random', 'np.random.random', (['(3, 3)'], {}), '((3, 3))\n', (874, 882), True, 'import numpy as np\n'), ((2318, 2336), 'pytest.warns', 'pytest.warns', (['None'], {}), '(None)\n', (2330, 2336), False, 'import pytest\n'), ((2269, 2284), 'numpy.ones', 'np.ones', (['(2, 3)'], {}), '((2, 3))\n', (2276, 2284), True, 'import numpy as np\n')]
|
from sfepy import data_dir
filename_meshes = ['/meshes/3d/cylinder.mesh',
'/meshes/3d/cylinder.vtk',
'/meshes/various_formats/small2d.mesh',
'/meshes/various_formats/small2d.vtk',
'/meshes/various_formats/octahedron.node',
'/meshes/various_formats/comsol_tri.txt',
'/meshes/various_formats/abaqus_hex.inp',
'/meshes/various_formats/abaqus_tet.inp',
'/meshes/various_formats/abaqus_quad.inp',
'/meshes/various_formats/abaqus_tri.inp',
'/meshes/various_formats/abaqus_quad_tri.inp',
'/meshes/various_formats/hex4.mesh3d',
'/meshes/various_formats/tetra8.mesh3d',
'/meshes/various_formats/cube.bdf',
'/meshes/various_formats/med_2d_tri_quad.med',
'/meshes/various_formats/med_3d_tet_hex.med']
filename_meshes = [data_dir + name for name in filename_meshes]
def mesh_hook(mesh, mode):
"""
Define a mesh programmatically.
"""
if mode == 'read':
nodes = [[0, 0], [1, 0], [1, 1], [0, 1]]
nod_ids = [0, 0, 1, 1]
conns = [[[0, 1, 2], [0, 2, 3]]]
mat_ids = [[0, 1]]
descs = ['2_3']
mesh._set_data(nodes, nod_ids, conns, mat_ids, descs)
## mesh.write('aux.vtk', io='auto')
elif mode == 'write':
pass
from sfepy.fem.meshio import UserMeshIO
filename_meshes.extend([mesh_hook, UserMeshIO(mesh_hook)])
same = [(0, 1), (2, 3)]
import os.path as op
from sfepy.base.testing import TestCommon, assert_
##
# c: 05.02.2008
class Test( TestCommon ):
"""Write test names explicitely to impose a given order of evaluation."""
tests = ['test_read_meshes', 'test_compare_same_meshes', 'test_read_dimension']
##
# c: 05.02.2008, r: 05.02.2008
def from_conf( conf, options ):
return Test( conf = conf, options = options )
from_conf = staticmethod( from_conf )
##
# c: 05.02.2008, r: 05.02.2008
def test_read_meshes( self ):
"""Try to read all listed meshes."""
from sfepy.fem import Mesh
conf_dir = op.dirname(__file__)
meshes = {}
for ii, filename in enumerate( filename_meshes ):
self.report( '%d. mesh: %s' % (ii + 1, filename) )
mesh = Mesh.from_file(filename, prefix_dir=conf_dir)
assert_(mesh.dim == (mesh.coors.shape[1]))
assert_(mesh.n_nod == (mesh.coors.shape[0]))
assert_(mesh.n_nod == (mesh.ngroups.shape[0]))
assert_(mesh.n_el == sum(mesh.n_els))
for ig, conn in enumerate( mesh.conns ):
assert_(conn.shape[0] == len(mesh.mat_ids[ig]))
assert_(conn.shape[0] == mesh.n_els[ig])
assert_(conn.shape[1] == mesh.n_e_ps[ig])
self.report( 'read ok' )
meshes[filename] = mesh
self.meshes = meshes
return True
##
# c: 05.02.2008, r: 05.02.2008
def test_compare_same_meshes( self ):
"""Compare same meshes in various formats."""
import numpy as nm
oks = []
for i0, i1 in same:
name0 = filename_meshes[i0]
name1 = filename_meshes[i1]
self.report( 'comparing meshes from "%s" and "%s"' % (name0, name1) )
mesh0 = self.meshes[name0]
mesh1 = self.meshes[name1]
ok0 = (mesh0.dim == mesh1.dim)
if not ok0:
self.report( 'dimension failed!' )
oks.append( ok0 )
ok0 = mesh0.n_nod == mesh1.n_nod
if not ok0:
self.report( 'number of nodes failed!' )
oks.append( ok0 )
ok0 = mesh0.n_el == mesh1.n_el
if not ok0:
self.report( 'number of elements failed!' )
oks.append( ok0 )
ok0 = mesh0.n_e_ps == mesh1.n_e_ps
if not ok0:
self.report( 'number of element points failed!' )
oks.append( ok0 )
ok0 = mesh0.descs == mesh1.descs
if not ok0:
self.report( 'element types failed!' )
oks.append( ok0 )
ok0 = nm.allclose( mesh0.coors, mesh1.coors )
if not ok0:
self.report( 'nodes failed!' )
oks.append( ok0 )
ok0 = nm.all( mesh0.ngroups == mesh1.ngroups )
if not ok0:
self.report( 'node groups failed!' )
oks.append( ok0 )
for ii in range( len( mesh0.mat_ids ) ):
ok0 = nm.all( mesh0.mat_ids[ii] == mesh1.mat_ids[ii] )
if not ok0:
self.report( 'material ids failed!' )
oks.append( ok0 )
for ii in range( len( mesh0.mat_ids ) ):
ok0 = nm.all( mesh0.conns[ii] == mesh1.conns[ii] )
if not ok0:
self.report( 'connectivities failed!' )
oks.append( ok0 )
return sum( oks ) == len( oks )
##
# c: 03.07.2008, r: 03.07.2008
def test_read_dimension( self ):
from sfepy.fem import MeshIO
meshes = {data_dir + '/meshes/various_formats/small2d.mesh' : 2,
data_dir + '/meshes/various_formats/small2d.vtk' : 2,
data_dir + '/meshes/various_formats/small3d.mesh' : 3}
ok = True
conf_dir = op.dirname(__file__)
for filename, adim in meshes.iteritems():
self.report( 'mesh: %s, dimension %d' % (filename, adim) )
io = MeshIO.any_from_filename(filename, prefix_dir=conf_dir)
dim = io.read_dimension()
if dim != adim:
self.report( 'read dimension %d -> failed' % dim )
ok = False
else:
self.report( 'read dimension %d -> ok' % dim )
return ok
|
[
"sfepy.base.testing.assert_",
"sfepy.fem.meshio.UserMeshIO",
"sfepy.fem.MeshIO.any_from_filename",
"sfepy.fem.Mesh.from_file"
] |
[((1540, 1561), 'sfepy.fem.meshio.UserMeshIO', 'UserMeshIO', (['mesh_hook'], {}), '(mesh_hook)\n', (1550, 1561), False, 'from sfepy.fem.meshio import UserMeshIO\n'), ((2230, 2250), 'os.path.dirname', 'op.dirname', (['__file__'], {}), '(__file__)\n', (2240, 2250), True, 'import os.path as op\n'), ((5533, 5553), 'os.path.dirname', 'op.dirname', (['__file__'], {}), '(__file__)\n', (5543, 5553), True, 'import os.path as op\n'), ((2411, 2456), 'sfepy.fem.Mesh.from_file', 'Mesh.from_file', (['filename'], {'prefix_dir': 'conf_dir'}), '(filename, prefix_dir=conf_dir)\n', (2425, 2456), False, 'from sfepy.fem import Mesh\n'), ((2470, 2510), 'sfepy.base.testing.assert_', 'assert_', (['(mesh.dim == mesh.coors.shape[1])'], {}), '(mesh.dim == mesh.coors.shape[1])\n', (2477, 2510), False, 'from sfepy.base.testing import TestCommon, assert_\n'), ((2525, 2567), 'sfepy.base.testing.assert_', 'assert_', (['(mesh.n_nod == mesh.coors.shape[0])'], {}), '(mesh.n_nod == mesh.coors.shape[0])\n', (2532, 2567), False, 'from sfepy.base.testing import TestCommon, assert_\n'), ((2582, 2626), 'sfepy.base.testing.assert_', 'assert_', (['(mesh.n_nod == mesh.ngroups.shape[0])'], {}), '(mesh.n_nod == mesh.ngroups.shape[0])\n', (2589, 2626), False, 'from sfepy.base.testing import TestCommon, assert_\n'), ((4322, 4359), 'numpy.allclose', 'nm.allclose', (['mesh0.coors', 'mesh1.coors'], {}), '(mesh0.coors, mesh1.coors)\n', (4333, 4359), True, 'import numpy as nm\n'), ((4482, 4520), 'numpy.all', 'nm.all', (['(mesh0.ngroups == mesh1.ngroups)'], {}), '(mesh0.ngroups == mesh1.ngroups)\n', (4488, 4520), True, 'import numpy as nm\n'), ((5692, 5747), 'sfepy.fem.MeshIO.any_from_filename', 'MeshIO.any_from_filename', (['filename'], {'prefix_dir': 'conf_dir'}), '(filename, prefix_dir=conf_dir)\n', (5716, 5747), False, 'from sfepy.fem import MeshIO\n'), ((2812, 2852), 'sfepy.base.testing.assert_', 'assert_', (['(conn.shape[0] == mesh.n_els[ig])'], {}), '(conn.shape[0] == mesh.n_els[ig])\n', (2819, 2852), False, 'from sfepy.base.testing import TestCommon, assert_\n'), ((2869, 2910), 'sfepy.base.testing.assert_', 'assert_', (['(conn.shape[1] == mesh.n_e_ps[ig])'], {}), '(conn.shape[1] == mesh.n_e_ps[ig])\n', (2876, 2910), False, 'from sfepy.base.testing import TestCommon, assert_\n'), ((4706, 4752), 'numpy.all', 'nm.all', (['(mesh0.mat_ids[ii] == mesh1.mat_ids[ii])'], {}), '(mesh0.mat_ids[ii] == mesh1.mat_ids[ii])\n', (4712, 4752), True, 'import numpy as nm\n'), ((4951, 4993), 'numpy.all', 'nm.all', (['(mesh0.conns[ii] == mesh1.conns[ii])'], {}), '(mesh0.conns[ii] == mesh1.conns[ii])\n', (4957, 4993), True, 'import numpy as nm\n')]
|
from typing import List, Optional
from functools import wraps
from uuid import UUID
from sqlalchemy.sql.schema import UniqueConstraint
from sqlmodel import Field, Relationship, SQLModel
# monkeypatch from https://github.com/tiangolo/sqlmodel/issues/9
# without this all database fields are indexed by default
def set_default_index(func):
"""Decorator to set default index for SQLModel
Can be removed when https://github.com/tiangolo/sqlmodel/pull/11 is merged
"""
@wraps(func)
def inner(*args, index=False, **kwargs):
return func(*args, index=index, **kwargs)
return inner
# monkey patch field with default index=False
# this works as long as we always call Field()
Field = set_default_index(Field)
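# --- illustrative check (not part of the original module) ------------------------
# The monkeypatch above only changes the *default* to index=False; an explicit
# index=... argument still wins. Demonstrated on a plain stand-in function rather
# than sqlmodel.Field:
def _index_probe(*args, index=True, **kwargs):
    return index
_probe = set_default_index(_index_probe)
assert _probe() is False                 # default flipped off by the monkeypatch
assert _probe(index=True) is True        # explicit value still honored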
class TopicModelBase(SQLModel):
model_id: UUID = Field()
version: int = Field(default=1)
class TopicModel(TopicModelBase, table=True):
__tablename__ = "topic_model"
__table_args__ = (UniqueConstraint("model_id", "version", name="_model_id_version_uc"),)
id: Optional[int] = Field(primary_key=True, nullable=False) # NOQA: A003
topics: List["Topic"] = Relationship(
back_populates="topic_model", sa_relationship_kwargs={"cascade": "all,delete"}
)
class WordBase(SQLModel):
name: str = Field()
score: float = Field()
class Word(WordBase, table=True):
id: Optional[int] = Field(primary_key=True, nullable=False) # NOQA: A003
topic_id: int = Field(foreign_key="topic.id")
topic: "Topic" = Relationship(
back_populates="top_words", sa_relationship_kwargs={"cascade": "all,delete"}
)
class TopicBase(SQLModel):
name: str = Field()
count: int = Field()
topic_index: int = Field()
class TopicWithWords(TopicBase):
top_words: List["WordBase"] = Field(default=[])
class Topic(TopicBase, table=True):
id: Optional[int] = Field(primary_key=True, nullable=False) # NOQA: A003
topic_model_id: int = Field(foreign_key="topic_model.id")
top_words: List[Word] = Relationship(
back_populates="topic", sa_relationship_kwargs={"cascade": "all,delete"}
)
topic_model: TopicModel = Relationship(
back_populates="topics", sa_relationship_kwargs={"cascade": "all,delete"}
)
|
[
"sqlmodel.Relationship",
"sqlmodel.Field"
] |
[((485, 496), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (490, 496), False, 'from functools import wraps\n'), ((793, 800), 'sqlmodel.Field', 'Field', ([], {}), '()\n', (798, 800), False, 'from sqlmodel import Field, Relationship, SQLModel\n'), ((820, 836), 'sqlmodel.Field', 'Field', ([], {'default': '(1)'}), '(default=1)\n', (825, 836), False, 'from sqlmodel import Field, Relationship, SQLModel\n'), ((1037, 1076), 'sqlmodel.Field', 'Field', ([], {'primary_key': '(True)', 'nullable': '(False)'}), '(primary_key=True, nullable=False)\n', (1042, 1076), False, 'from sqlmodel import Field, Relationship, SQLModel\n'), ((1119, 1216), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""topic_model"""', 'sa_relationship_kwargs': "{'cascade': 'all,delete'}"}), "(back_populates='topic_model', sa_relationship_kwargs={\n 'cascade': 'all,delete'})\n", (1131, 1216), False, 'from sqlmodel import Field, Relationship, SQLModel\n'), ((1270, 1277), 'sqlmodel.Field', 'Field', ([], {}), '()\n', (1275, 1277), False, 'from sqlmodel import Field, Relationship, SQLModel\n'), ((1297, 1304), 'sqlmodel.Field', 'Field', ([], {}), '()\n', (1302, 1304), False, 'from sqlmodel import Field, Relationship, SQLModel\n'), ((1365, 1404), 'sqlmodel.Field', 'Field', ([], {'primary_key': '(True)', 'nullable': '(False)'}), '(primary_key=True, nullable=False)\n', (1370, 1404), False, 'from sqlmodel import Field, Relationship, SQLModel\n'), ((1439, 1468), 'sqlmodel.Field', 'Field', ([], {'foreign_key': '"""topic.id"""'}), "(foreign_key='topic.id')\n", (1444, 1468), False, 'from sqlmodel import Field, Relationship, SQLModel\n'), ((1490, 1584), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""top_words"""', 'sa_relationship_kwargs': "{'cascade': 'all,delete'}"}), "(back_populates='top_words', sa_relationship_kwargs={'cascade':\n 'all,delete'})\n", (1502, 1584), False, 'from sqlmodel import Field, Relationship, SQLModel\n'), ((1640, 1647), 'sqlmodel.Field', 'Field', ([], {}), '()\n', (1645, 1647), False, 'from sqlmodel import Field, Relationship, SQLModel\n'), ((1665, 1672), 'sqlmodel.Field', 'Field', ([], {}), '()\n', (1670, 1672), False, 'from sqlmodel import Field, Relationship, SQLModel\n'), ((1696, 1703), 'sqlmodel.Field', 'Field', ([], {}), '()\n', (1701, 1703), False, 'from sqlmodel import Field, Relationship, SQLModel\n'), ((1773, 1790), 'sqlmodel.Field', 'Field', ([], {'default': '[]'}), '(default=[])\n', (1778, 1790), False, 'from sqlmodel import Field, Relationship, SQLModel\n'), ((1853, 1892), 'sqlmodel.Field', 'Field', ([], {'primary_key': '(True)', 'nullable': '(False)'}), '(primary_key=True, nullable=False)\n', (1858, 1892), False, 'from sqlmodel import Field, Relationship, SQLModel\n'), ((1933, 1968), 'sqlmodel.Field', 'Field', ([], {'foreign_key': '"""topic_model.id"""'}), "(foreign_key='topic_model.id')\n", (1938, 1968), False, 'from sqlmodel import Field, Relationship, SQLModel\n'), ((1997, 2087), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""topic"""', 'sa_relationship_kwargs': "{'cascade': 'all,delete'}"}), "(back_populates='topic', sa_relationship_kwargs={'cascade':\n 'all,delete'})\n", (2009, 2087), False, 'from sqlmodel import Field, Relationship, SQLModel\n'), ((2128, 2219), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""topics"""', 'sa_relationship_kwargs': "{'cascade': 'all,delete'}"}), "(back_populates='topics', sa_relationship_kwargs={'cascade':\n 'all,delete'})\n", (2140, 2219), False, 'from sqlmodel import Field, 
Relationship, SQLModel\n'), ((941, 1009), 'sqlalchemy.sql.schema.UniqueConstraint', 'UniqueConstraint', (['"""model_id"""', '"""version"""'], {'name': '"""_model_id_version_uc"""'}), "('model_id', 'version', name='_model_id_version_uc')\n", (957, 1009), False, 'from sqlalchemy.sql.schema import UniqueConstraint\n')]
|
# Copyright 2021 Modelyst LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from datetime import datetime
from functools import partial
from io import StringIO
from typing import (
Any,
Callable,
ClassVar,
Dict,
Iterable,
List,
Optional,
Set,
Tuple,
Type,
TypeVar,
Union,
cast,
overload,
)
from uuid import UUID
from sqlalchemy import Column, DateTime
from sqlalchemy.orm import registry
from sqlalchemy.sql import func
from sqlalchemy.sql.base import ImmutableColumnCollection
from sqlalchemy.sql.schema import Table
from sqlmodel.main import Field, FieldInfo, SQLModel, SQLModelMetaclass
from dbgen.core.args import ArgLike, Const
from dbgen.core.attribute import Attribute
from dbgen.core.base import Base, BaseMeta
from dbgen.core.node.load import Load, LoadEntity
from dbgen.core.type_registry import column_registry
from dbgen.exceptions import DBgenInvalidArgument, DBgenMissingInfo
def inherit_field(
bases, field_name: str, initial_value=set(), joiner=lambda x, y: x.union(y), type_check: bool = True
):
field_val = initial_value
for base in reversed(bases):
curr_id = getattr(base, field_name, initial_value)
if curr_id is not None:
if type_check and not isinstance(curr_id, type(initial_value)):
raise TypeError(f"Invalid {field_name} val: {curr_id}")
field_val = joiner(field_val, curr_id)
return field_val
overwrite_parent = partial(inherit_field, initial_value="", joiner=lambda x, y: y)
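# --- illustrative check (not part of this module's public API) -------------------
# inherit_field() unions a set-valued attribute across all bases, while
# overwrite_parent() keeps a single string value; because bases are walked in
# reverse, the first listed base wins. Hypothetical classes for demonstration:
class _HasA:
    __identifying__ = {"a"}
    __schema__ = "first"
class _HasB:
    __identifying__ = {"b"}
    __schema__ = "second"
assert inherit_field((_HasA, _HasB), "__identifying__") == {"a", "b"}
assert overwrite_parent((_HasA, _HasB), "__schema__") == "first"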
DEFAULT_ENTITY_REGISTRY = registry()
logger = logging.getLogger('dbgen.core.entity')
_T = TypeVar("_T")
def __dataclass_transform__(
*,
eq_default: bool = True,
order_default: bool = False,
kw_only_default: bool = False,
field_descriptors: Tuple[Union[type, Callable[..., Any]], ...] = (()),
) -> Callable[[_T], _T]:
return lambda a: a
@__dataclass_transform__(
kw_only_default=True,
field_descriptors=(
Field,
FieldInfo,
Attribute,
),
)
class EntityMetaclass(SQLModelMetaclass, BaseMeta):
def __new__(mcs, name, bases, attrs, **kwargs):
# Join the keys from all parents for __identifying__, _hashinclude_, and _hashexclude_
new_attrs = attrs.copy()
for value in ("__identifying__", "_hashexclude_", "_hashinclude_"):
starting = new_attrs.get(value, set())
if isinstance(starting, list):
starting = set(starting)
new_attrs[value] = starting.union(inherit_field(bases, value))
if kwargs.get('all_id', False):
assert (
"__identifying__" not in attrs
), f"Error with Entity {name}. Can't supply both all_id kwarg and __identifying__ attr"
new_attrs['__identifying__'] = new_attrs['__identifying__'].union(
{key for key in attrs.get('__annotations__', {})}
)
# Automatically add identifying attributes to the hashinclude
new_attrs["_hashinclude_"].update(new_attrs.get("__identifying__"))
# Set the default registry to be the default_registry
if "registry" not in kwargs:
kwargs["registry"] = DEFAULT_ENTITY_REGISTRY
# Call SQLModelMetaclass.__new__
cls = super().__new__(mcs, name, bases, new_attrs, **kwargs)
# Validate that we don't have table=True on current class and a base
current_cls_is_table = getattr(cls.__config__, "table", False) and kwargs.get("table")
setattr(cls, "_is_table", current_cls_is_table)
if current_cls_is_table:
base_is_table = False
for base in bases:
config = getattr(base, "__config__", None)
if config and getattr(config, "table", False):
base_is_table = True
offending_base_name = base.__name__
break
if base_is_table:
raise ValueError(
"Can't use table=True when inheriting from another table.\n"
f"Both {offending_base_name} and {name} have table=True set.\n"
"Create a common ancestor with table=False and mutaually inherit from that."
)
# Need to look into parents to find schema, only using most recent
schema_key = "__schema__"
schema = getattr(cls, schema_key, "") or overwrite_parent(bases, schema_key)
table_args = getattr(cls, "__table_args__", None) or dict().copy()
if not schema:
schema = "public"
if schema:
setattr(cls, schema_key, schema)
table_args = table_args.copy()
table_args.update({"schema": schema})
setattr(cls, "__table_args__", table_args)
setattr(
cls,
"__fulltablename__",
f"{schema}.{cls.__tablename__}" if schema else cls.__tablename__,
)
        # Validate __identifying__ by making sure every identifying attribute exists on the Entity
unknown_ids = list(
filter(
lambda x: x not in cls.__fields__,
new_attrs["__identifying__"],
)
)
if unknown_ids:
raise ValueError(
f"Invalid Entity Class Definition. Identifying attributes not found on class: {unknown_ids}"
)
return cls
def __init__(cls, name, bases, attrs, **kwargs):
if cls._is_table:
registry = cls._sa_registry
if cls.__fulltablename__ in registry.metadata.tables:
raise ValueError(
f"The Class {attrs.get('__module__','')}.{name}'s __table_name__ {cls.__tablename__!r} already present in the registry's metadata.\n"
"This can occur if two Entity sub-classes share a case-insensitive name or if the same table has been added to the registry twice.\n"
"To address this you can set a different __tablename__ attribute for one or to clear the registry, you can call Entity.clear_registry() prior to declaring this class."
)
super().__init__(name, bases, attrs, **kwargs)
class BaseEntity(Base, SQLModel, metaclass=EntityMetaclass):
__identifying__: ClassVar[Set[str]]
__fulltablename__: ClassVar[str]
__schema__: ClassVar[str]
__table__: ClassVar[Table]
_is_table: ClassVar[bool]
_sa_registry: ClassVar[registry]
class Config:
"""Pydantic Config"""
force_validation = True
@classmethod
def _columns(cls) -> ImmutableColumnCollection:
if isinstance(cls.__fulltablename__, str):
table = cls.metadata.tables.get(cls.__fulltablename__)
if table is not None:
return table.c
raise ValueError(
f"{cls.__fulltablename__} not in metadata, is table=True set? {cls.metadata.tables}"
)
raise ValueError(f"Can't read __fulltablename__ {cls.__fulltablename__}")
@classmethod
def _get_load_entity(cls) -> LoadEntity:
"""Returns a LoadEntity which has the bare-minimum needed to load into this table."""
# Check that entity is a table
if not cls._is_table:
raise ValueError(f"{cls.__qualname__} is not a table. Can't get LoadEntity of a non-table Entity")
columns = cls._columns()
# Search for primary key name
primary_keys = [x.name for x in cls.__table__.primary_key]
if len(primary_keys) > 1:
raise NotImplementedError(f"Multiple primary_keys found: {primary_keys}")
elif not primary_keys:
raise ValueError(f"No primary key found on {cls.__name__}'s columns:\n{columns}")
primary_key_name = primary_keys[0]
all_attrs = {col.name: col for col in columns if not col.foreign_keys}
all_fks = {col.name: col for col in columns if col.foreign_keys}
# Create the attribute dict which maps attribute name to column type
attributes = {}
for col_name, col in columns.items():
try:
dt = column_registry[col.type]
attributes[col_name] = (
f"{dt.type_name}[]" if getattr(col.type, '_is_array', False) else dt.type_name
)
except KeyError:
raise TypeError(
f"Cannot parse column {col_name} on table {cls.__tablename__} due to its unknown type {type(col.type)}"
)
foreign_keys = set(all_fks.keys())
identifying_attributes = {x for x in all_attrs if x in cls.__identifying__}
identifying_fks = [x for x in all_fks if x in cls.__identifying__]
return LoadEntity(
name=cls.__tablename__ or cls.__name__,
schema_=cls.__schema__,
entity_class_str=f"{cls.__module__}.{cls.__qualname__}",
primary_key_name=primary_key_name,
attributes=attributes,
foreign_keys=foreign_keys,
identifying_attributes=identifying_attributes,
identifying_foreign_keys=identifying_fks,
)
@classmethod
def load(cls, insert: bool = False, validation: Optional[str] = None, **kwargs) -> Load[UUID]:
name = cls.__tablename__
assert isinstance(name, str)
# TODO check if we need this anymore
key_filter = lambda keyval: keyval[0] != "insert" and not isinstance(keyval[1], (ArgLike, Load))
invalid_args = list(filter(key_filter, kwargs.items()))
JSONAble = (str, int, float, dict, tuple)
for arg_name, invalid_arg in invalid_args:
# Check Invalid args to see if a const block would be appropriate
if isinstance(invalid_arg, JSONAble):
kwargs[arg_name] = Const(invalid_arg)
else:
raise ValueError(f"Non-jsonable constant value found: {arg_name}\n{invalid_arg}")
# get PK
pk = kwargs.pop(name, None)
# if we don't have a PK reference check for missing ID info
if not pk:
missing = cls.__identifying__ - set(kwargs)
if missing:
err = (
"Cannot refer to a row in {} without a PK or essential data."
" Missing essential data: {}"
)
raise DBgenMissingInfo(err.format(name, missing))
# Iterate through the columns to ensure we have no unknown kwargs
class_columns: List[Column] = list(cls._columns()) or []
all_attrs = {col.name: col for col in class_columns if not col.foreign_keys}
all_fks = {col.name: col for col in class_columns if col.foreign_keys}
attrs = {key: val for key, val in kwargs.items() if key in all_attrs}
fks = {key: col for key, col in kwargs.items() if key not in attrs}
for fk in fks:
if fk not in all_fks:
raise DBgenInvalidArgument(f'unknown "{fk}" kwarg in Load of {name}')
for k, v in fks.items():
if isinstance(v, Load):
fks[k] = v[v.outputs[0]]
return Load(
load_entity=cls._get_load_entity(),
primary_key=pk,
inputs={**attrs, **fks},
insert=insert,
validation=validation,
)
@classmethod
def _quick_load(cls, connection, rows: Iterable[Iterable[Any]], column_names: List[str]) -> None:
"""Bulk load many rows into entity"""
from dbgen.templates import jinja_env
# Assemble rows into stringio for copy_from statement
io_obj = StringIO()
for row in rows:
io_obj.write("\t".join(map(str, row)) + "\n")
io_obj.seek(0)
# Temporary table to copy data into
# Set name to be hash of input rows to ensure uniqueness for parallelization
temp_table_name = f"{cls.__tablename__}_temp_load_table"
load_entity = cls._get_load_entity()
# Need to create a temp table to copy data into
# Add an auto_inc column so that data can be ordered by its insert location
drop_temp_table = f"DROP TABLE IF EXISTS {temp_table_name};"
create_temp_table = """
CREATE TEMPORARY TABLE {temp_table_name} AS
TABLE {schema}.{obj}
WITH NO DATA;
ALTER TABLE {temp_table_name}
ADD COLUMN auto_inc SERIAL NOT NULL;
""".format(
obj=load_entity.name,
schema=load_entity.schema_,
temp_table_name=temp_table_name,
)
insert_template = jinja_env.get_template("insert.sql.jinja")
template_args = dict(
obj=load_entity.name,
obj_pk_name=load_entity.primary_key_name,
temp_table_name=temp_table_name,
all_column_names=column_names,
schema=load_entity.schema_,
first=False,
update=True,
)
insert_statement = insert_template.render(**template_args)
with connection.cursor() as curs:
curs.execute(drop_temp_table)
connection.commit()
with connection.cursor() as curs:
curs.execute(create_temp_table)
curs.copy_from(io_obj, temp_table_name, null="None", columns=column_names)
curs.execute(insert_statement)
connection.commit()
with connection.cursor() as curs:
curs.execute(drop_temp_table)
connection.commit()
@classmethod
def clear_registry(cls):
"""Removes all Entity classes from the Entity registry"""
cls.metadata.clear()
cls._sa_registry.dispose()
@classmethod
def foreign_key(cls, primary_key: bool = False):
"""Removes all Entity classes from the Entity registry"""
load_entity = cls._get_load_entity()
return Field(
None,
foreign_key=f"{cls.__fulltablename__}.{load_entity.primary_key_name}",
primary_key=primary_key,
)
id_field = Field(
default=None,
primary_key=True,
sa_column_kwargs={"autoincrement": False, "unique": True},
)
gen_id_field = Field(
default=None,
)
get_created_at_field = lambda: Field(
None, sa_column=Column(DateTime(timezone=True), server_default=func.now())
)
class Entity(BaseEntity):
id: Optional[UUID] = id_field
gen_id: Optional[UUID]
created_at: Optional[datetime] = get_created_at_field()
Model = TypeVar("Model", bound="BaseEntity")
@overload
def create_entity(
model_name: str,
field_definitions: Dict[str, Union[Tuple[type, Any], type, Tuple[type, ...]]],
base: None = None,
identifying: Set[str] = None,
schema: Optional[str] = None,
__module__: str = __name__,
**kwargs,
) -> Type[BaseEntity]:
...
@overload
def create_entity(
model_name: str,
field_definitions: Dict[str, Union[Tuple[type, Any], type, Tuple[type, ...]]],
base: Type[Model],
identifying: Set[str] = None,
schema: Optional[str] = None,
__module__: str = __name__,
**kwargs,
) -> Type[Model]:
...
def create_entity(
model_name: str,
field_definitions: Dict[str, Union[Tuple[type, Any], type, Tuple[type, ...]]] = None,
base: Optional[Type[Model]] = None,
identifying: Set[str] = None,
schema: Optional[str] = None,
__module__: str = __name__,
**kwargs,
) -> Type[Model]:
"""
Dynamically create a model, similar to the Pydantic `create_model()` method
:param model_name: name of the created model
    :param field_definitions: data fields of the created model
:param base: base to inherit from
:param __module__: module of the created model
:param **kwargs: Other keyword arguments to pass to the metaclass constructor, e.g. table=True
"""
if base is None:
base = cast(Type["Model"], BaseEntity)
field_definitions = field_definitions or {}
fields = {}
annotations = {}
identifying = identifying or set()
for f_name, f_def in field_definitions.items():
if f_name.startswith("_"):
raise ValueError("Field names may not start with an underscore")
try:
if isinstance(f_def, tuple) and len(f_def) > 1:
f_annotation, f_value = f_def
elif isinstance(f_def, tuple):
f_annotation, f_value = f_def[0], Field(nullable=False)
else:
f_annotation, f_value = f_def, Field(nullable=False)
except ValueError as e:
raise ValueError(
"field_definitions values must be either a tuple of (<type_annotation>, <default_value>)"
"or just a type annotation [or a 1-tuple of (<type_annotation>,)]"
) from e
if f_annotation:
annotations[f_name] = f_annotation
fields[f_name] = f_value
namespace = {
"__annotations__": annotations,
"__identifying__": identifying,
"__module__": __module__,
}
if schema is not None:
namespace.update({"__schema__": schema})
if "registry" in kwargs:
assert isinstance(kwargs.get("registry"), registry), "Invalid type for registry:"
namespace.update(fields) # type: ignore
return EntityMetaclass(model_name, (base,), namespace, **kwargs) # type: ignore
|
[
"sqlmodel.main.Field"
] |
[((2008, 2071), 'functools.partial', 'partial', (['inherit_field'], {'initial_value': '""""""', 'joiner': '(lambda x, y: y)'}), "(inherit_field, initial_value='', joiner=lambda x, y: y)\n", (2015, 2071), False, 'from functools import partial\n'), ((2098, 2108), 'sqlalchemy.orm.registry', 'registry', ([], {}), '()\n', (2106, 2108), False, 'from sqlalchemy.orm import registry\n'), ((2118, 2156), 'logging.getLogger', 'logging.getLogger', (['"""dbgen.core.entity"""'], {}), "('dbgen.core.entity')\n", (2135, 2156), False, 'import logging\n'), ((2163, 2176), 'typing.TypeVar', 'TypeVar', (['"""_T"""'], {}), "('_T')\n", (2170, 2176), False, 'from typing import Any, Callable, ClassVar, Dict, Iterable, List, Optional, Set, Tuple, Type, TypeVar, Union, cast, overload\n'), ((14519, 14620), 'sqlmodel.main.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)', 'sa_column_kwargs': "{'autoincrement': False, 'unique': True}"}), "(default=None, primary_key=True, sa_column_kwargs={'autoincrement': \n False, 'unique': True})\n", (14524, 14620), False, 'from sqlmodel.main import Field, FieldInfo, SQLModel, SQLModelMetaclass\n'), ((14646, 14665), 'sqlmodel.main.Field', 'Field', ([], {'default': 'None'}), '(default=None)\n', (14651, 14665), False, 'from sqlmodel.main import Field, FieldInfo, SQLModel, SQLModelMetaclass\n'), ((14952, 14988), 'typing.TypeVar', 'TypeVar', (['"""Model"""'], {'bound': '"""BaseEntity"""'}), "('Model', bound='BaseEntity')\n", (14959, 14988), False, 'from typing import Any, Callable, ClassVar, Dict, Iterable, List, Optional, Set, Tuple, Type, TypeVar, Union, cast, overload\n'), ((9247, 9569), 'dbgen.core.node.load.LoadEntity', 'LoadEntity', ([], {'name': '(cls.__tablename__ or cls.__name__)', 'schema_': 'cls.__schema__', 'entity_class_str': 'f"""{cls.__module__}.{cls.__qualname__}"""', 'primary_key_name': 'primary_key_name', 'attributes': 'attributes', 'foreign_keys': 'foreign_keys', 'identifying_attributes': 'identifying_attributes', 'identifying_foreign_keys': 'identifying_fks'}), "(name=cls.__tablename__ or cls.__name__, schema_=cls.__schema__,\n entity_class_str=f'{cls.__module__}.{cls.__qualname__}',\n primary_key_name=primary_key_name, attributes=attributes, foreign_keys=\n foreign_keys, identifying_attributes=identifying_attributes,\n identifying_foreign_keys=identifying_fks)\n", (9257, 9569), False, 'from dbgen.core.node.load import Load, LoadEntity\n'), ((12132, 12142), 'io.StringIO', 'StringIO', ([], {}), '()\n', (12140, 12142), False, 'from io import StringIO\n'), ((13093, 13135), 'dbgen.templates.jinja_env.get_template', 'jinja_env.get_template', (['"""insert.sql.jinja"""'], {}), "('insert.sql.jinja')\n", (13115, 13135), False, 'from dbgen.templates import jinja_env\n'), ((14351, 14468), 'sqlmodel.main.Field', 'Field', (['None'], {'foreign_key': 'f"""{cls.__fulltablename__}.{load_entity.primary_key_name}"""', 'primary_key': 'primary_key'}), "(None, foreign_key=\n f'{cls.__fulltablename__}.{load_entity.primary_key_name}', primary_key=\n primary_key)\n", (14356, 14468), False, 'from sqlmodel.main import Field, FieldInfo, SQLModel, SQLModelMetaclass\n'), ((16326, 16357), 'typing.cast', 'cast', (["Type['Model']", 'BaseEntity'], {}), "(Type['Model'], BaseEntity)\n", (16330, 16357), False, 'from typing import Any, Callable, ClassVar, Dict, Iterable, List, Optional, Set, Tuple, Type, TypeVar, Union, cast, overload\n'), ((10325, 10343), 'dbgen.core.args.Const', 'Const', (['invalid_arg'], {}), '(invalid_arg)\n', (10330, 10343), False, 'from dbgen.core.args import ArgLike, 
Const\n'), ((11458, 11521), 'dbgen.exceptions.DBgenInvalidArgument', 'DBgenInvalidArgument', (['f"""unknown "{fk}" kwarg in Load of {name}"""'], {}), '(f\'unknown "{fk}" kwarg in Load of {name}\')\n', (11478, 11521), False, 'from dbgen.exceptions import DBgenInvalidArgument, DBgenMissingInfo\n'), ((14739, 14762), 'sqlalchemy.DateTime', 'DateTime', ([], {'timezone': '(True)'}), '(timezone=True)\n', (14747, 14762), False, 'from sqlalchemy import Column, DateTime\n'), ((14779, 14789), 'sqlalchemy.sql.func.now', 'func.now', ([], {}), '()\n', (14787, 14789), False, 'from sqlalchemy.sql import func\n'), ((16860, 16881), 'sqlmodel.main.Field', 'Field', ([], {'nullable': '(False)'}), '(nullable=False)\n', (16865, 16881), False, 'from sqlmodel.main import Field, FieldInfo, SQLModel, SQLModelMetaclass\n'), ((16947, 16968), 'sqlmodel.main.Field', 'Field', ([], {'nullable': '(False)'}), '(nullable=False)\n', (16952, 16968), False, 'from sqlmodel.main import Field, FieldInfo, SQLModel, SQLModelMetaclass\n')]
|
from select import select
from app.schemas.common import (
IGetResponseBase,
IPostResponseBase,
IDeleteResponseBase,
)
from app.utils.zeroshot_nlp import analyze_text
from app.schemas.zeroshot_inference import (
ZeroShotInferenceCreate,
ZeroShotInferenceRead,
)
from fastapi_pagination import Page, Params
from sqlmodel.ext.asyncio.session import AsyncSession
from fastapi import APIRouter, Depends, HTTPException, Query
from app.api import deps
from app import crud
from app.models import ZeroShotInference
from app.models import ZeroShotInferenceBase
from app.models.user import User
from sqlmodel import select
router = APIRouter()
@router.get(
"/zero-shot-classification-inferences/",
response_model=IGetResponseBase[Page[ZeroShotInference]],
)
async def get_zero_shot_classification_inferences(
params: Params = Depends(),
db_session: AsyncSession = Depends(deps.get_db),
current_user: User = Depends(deps.get_current_active_user),
):
inferences = await crud.zeroshot_inference.get_multi_paginated(
db_session, params=params
)
return IGetResponseBase[Page[ZeroShotInference]](data=inferences)
@router.get(
"/zero-shot-classification-inferences/order_by_created_at/",
response_model=IGetResponseBase[Page[ZeroShotInference]],
)
async def zero_shot_classification_inferences_order_by_created_at(
params: Params = Depends(),
db_session: AsyncSession = Depends(deps.get_db),
current_user: User = Depends(deps.get_current_active_user),
):
query = select(ZeroShotInference).order_by(ZeroShotInference.created_at)
inferences = await crud.zeroshot_inference.get_multi_paginated(
db_session, query=query, params=params
)
return IGetResponseBase[Page[ZeroShotInferenceRead]](data=inferences)
@router.post(
"/zero-shot-classification-predict/",
response_model=IPostResponseBase[ZeroShotInferenceRead],
)
async def predict(
request: ZeroShotInferenceBase,
db_session: AsyncSession = Depends(deps.get_db),
current_user: User = Depends(deps.get_current_active_user),
):
text = request.text
labels = request.candidate_labels
result = await analyze_text(text, labels)
text = result[0]
candidate_labels = result[1]
res = result[2]
inference = ZeroShotInferenceCreate(
text=text, candidate_labels=candidate_labels, result=res
)
my_inference = await crud.zeroshot_inference.create_inference(
db_session, obj_in=inference, user_id=current_user.id
)
return IPostResponseBase(data=ZeroShotInferenceRead.from_orm(my_inference))
|
[
"sqlmodel.select"
] |
[((644, 655), 'fastapi.APIRouter', 'APIRouter', ([], {}), '()\n', (653, 655), False, 'from fastapi import APIRouter, Depends, HTTPException, Query\n'), ((852, 861), 'fastapi.Depends', 'Depends', ([], {}), '()\n', (859, 861), False, 'from fastapi import APIRouter, Depends, HTTPException, Query\n'), ((894, 914), 'fastapi.Depends', 'Depends', (['deps.get_db'], {}), '(deps.get_db)\n', (901, 914), False, 'from fastapi import APIRouter, Depends, HTTPException, Query\n'), ((941, 978), 'fastapi.Depends', 'Depends', (['deps.get_current_active_user'], {}), '(deps.get_current_active_user)\n', (948, 978), False, 'from fastapi import APIRouter, Depends, HTTPException, Query\n'), ((1393, 1402), 'fastapi.Depends', 'Depends', ([], {}), '()\n', (1400, 1402), False, 'from fastapi import APIRouter, Depends, HTTPException, Query\n'), ((1435, 1455), 'fastapi.Depends', 'Depends', (['deps.get_db'], {}), '(deps.get_db)\n', (1442, 1455), False, 'from fastapi import APIRouter, Depends, HTTPException, Query\n'), ((1482, 1519), 'fastapi.Depends', 'Depends', (['deps.get_current_active_user'], {}), '(deps.get_current_active_user)\n', (1489, 1519), False, 'from fastapi import APIRouter, Depends, HTTPException, Query\n'), ((2003, 2023), 'fastapi.Depends', 'Depends', (['deps.get_db'], {}), '(deps.get_db)\n', (2010, 2023), False, 'from fastapi import APIRouter, Depends, HTTPException, Query\n'), ((2050, 2087), 'fastapi.Depends', 'Depends', (['deps.get_current_active_user'], {}), '(deps.get_current_active_user)\n', (2057, 2087), False, 'from fastapi import APIRouter, Depends, HTTPException, Query\n'), ((2292, 2377), 'app.schemas.zeroshot_inference.ZeroShotInferenceCreate', 'ZeroShotInferenceCreate', ([], {'text': 'text', 'candidate_labels': 'candidate_labels', 'result': 'res'}), '(text=text, candidate_labels=candidate_labels,\n result=res)\n', (2315, 2377), False, 'from app.schemas.zeroshot_inference import ZeroShotInferenceCreate, ZeroShotInferenceRead\n'), ((1006, 1076), 'app.crud.zeroshot_inference.get_multi_paginated', 'crud.zeroshot_inference.get_multi_paginated', (['db_session'], {'params': 'params'}), '(db_session, params=params)\n', (1049, 1076), False, 'from app import crud\n'), ((1624, 1712), 'app.crud.zeroshot_inference.get_multi_paginated', 'crud.zeroshot_inference.get_multi_paginated', (['db_session'], {'query': 'query', 'params': 'params'}), '(db_session, query=query, params\n =params)\n', (1667, 1712), False, 'from app import crud\n'), ((2174, 2200), 'app.utils.zeroshot_nlp.analyze_text', 'analyze_text', (['text', 'labels'], {}), '(text, labels)\n', (2186, 2200), False, 'from app.utils.zeroshot_nlp import analyze_text\n'), ((2414, 2513), 'app.crud.zeroshot_inference.create_inference', 'crud.zeroshot_inference.create_inference', (['db_session'], {'obj_in': 'inference', 'user_id': 'current_user.id'}), '(db_session, obj_in=inference,\n user_id=current_user.id)\n', (2454, 2513), False, 'from app import crud\n'), ((1536, 1561), 'sqlmodel.select', 'select', (['ZeroShotInference'], {}), '(ZeroShotInference)\n', (1542, 1561), False, 'from sqlmodel import select\n'), ((2559, 2603), 'app.schemas.zeroshot_inference.ZeroShotInferenceRead.from_orm', 'ZeroShotInferenceRead.from_orm', (['my_inference'], {}), '(my_inference)\n', (2589, 2603), False, 'from app.schemas.zeroshot_inference import ZeroShotInferenceCreate, ZeroShotInferenceRead\n')]
|
from __future__ import absolute_import
import numpy as nm
from sfepy.base.base import output, Struct
from sfepy.base.conf import ProblemConf, get_standard_keywords
from sfepy.homogenization.homogen_app import HomogenizationApp
from sfepy.homogenization.coefficients import Coefficients
import tables as pt
from sfepy.discrete.fem.meshio import HDF5MeshIO
import sfepy.linalg as la
import sfepy.base.multiproc as multi
import os.path as op
import six
def get_homog_coefs_linear(ts, coor, mode,
micro_filename=None, regenerate=False,
coefs_filename=None):
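    """Compute homogenized coefficients for the given micro-problem (or load cached ones
    from the HDF5 coefficients file) and, in 'qp' mode, expand them to the quadrature
    points given by `coor`."""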
oprefix = output.prefix
output.prefix = 'micro:'
required, other = get_standard_keywords()
required.remove( 'equations' )
conf = ProblemConf.from_file(micro_filename, required, other, verbose=False)
if coefs_filename is None:
coefs_filename = conf.options.get('coefs_filename', 'coefs')
coefs_filename = op.join(conf.options.get('output_dir', '.'),
coefs_filename) + '.h5'
if not regenerate:
if op.exists( coefs_filename ):
if not pt.is_hdf5_file( coefs_filename ):
regenerate = True
else:
regenerate = True
if regenerate:
options = Struct( output_filename_trunk = None )
app = HomogenizationApp( conf, options, 'micro:' )
coefs = app()
if type(coefs) is tuple:
coefs = coefs[0]
coefs.to_file_hdf5( coefs_filename )
else:
coefs = Coefficients.from_file_hdf5( coefs_filename )
out = {}
    if mode is None:
for key, val in six.iteritems(coefs.__dict__):
out[key] = val
elif mode == 'qp':
for key, val in six.iteritems(coefs.__dict__):
if type( val ) == nm.ndarray or type(val) == nm.float64:
out[key] = nm.tile( val, (coor.shape[0], 1, 1) )
elif type(val) == dict:
for key2, val2 in six.iteritems(val):
if type(val2) == nm.ndarray or type(val2) == nm.float64:
out[key+'_'+key2] = \
nm.tile(val2, (coor.shape[0], 1, 1))
else:
out = None
output.prefix = oprefix
return out
def get_homog_coefs_nonlinear(ts, coor, mode, mtx_f=None,
term=None, problem=None,
iteration=None, **kwargs):
if not (mode == 'qp'):
return
oprefix = output.prefix
output.prefix = 'micro:'
if not hasattr(problem, 'homogen_app'):
required, other = get_standard_keywords()
required.remove('equations')
micro_file = problem.conf.options.micro_filename
conf = ProblemConf.from_file(micro_file, required, other,
verbose=False)
options = Struct(output_filename_trunk=None)
app = HomogenizationApp(conf, options, 'micro:',
n_micro=coor.shape[0], update_micro_coors=True)
problem.homogen_app = app
if hasattr(app.app_options, 'use_mpi') and app.app_options.use_mpi:
multiproc, multiproc_mode = multi.get_multiproc(mpi=True)
multi_mpi = multiproc if multiproc_mode == 'mpi' else None
else:
multi_mpi = None
app.multi_mpi = multi_mpi
if multi_mpi is not None:
multi_mpi.master_send_task('init', (micro_file, coor.shape[0]))
else:
app = problem.homogen_app
multi_mpi = app.multi_mpi
def_grad = mtx_f(problem, term) if callable(mtx_f) else mtx_f
if hasattr(problem, 'def_grad_prev'):
rel_def_grad = la.dot_sequences(def_grad,
nm.linalg.inv(problem.def_grad_prev),
'AB')
else:
rel_def_grad = def_grad.copy()
problem.def_grad_prev = def_grad.copy()
app.setup_macro_deformation(rel_def_grad)
if multi_mpi is not None:
multi_mpi.master_send_task('calculate', (rel_def_grad, ts, iteration))
coefs, deps = app(ret_all=True, itime=ts.step, iiter=iteration)
if type(coefs) is tuple:
coefs = coefs[0]
out = {}
for key, val in six.iteritems(coefs.__dict__):
if isinstance(val, list):
out[key] = nm.array(val)
elif isinstance(val, dict):
for key2, val2 in six.iteritems(val):
out[key+'_'+key2] = nm.array(val2)
for key in six.iterkeys(out):
shape = out[key].shape
if len(shape) == 1:
out[key] = out[key].reshape(shape + (1, 1))
elif len(shape) == 2:
out[key] = out[key].reshape(shape + (1,))
output.prefix = oprefix
return out
def get_correctors_from_file( coefs_filename = 'coefs.h5',
dump_names = None ):
    if dump_names is None:
coefs = Coefficients.from_file_hdf5( coefs_filename )
if hasattr( coefs, 'dump_names' ):
dump_names = coefs.dump_names
else:
raise ValueError( ' "filenames" coefficient must be used!' )
out = {}
for key, val in six.iteritems(dump_names):
corr_name = op.split( val )[-1]
io = HDF5MeshIO( val+'.h5' )
data = io.read_data( 0 )
dkeys = list(data.keys())
corr = {}
for dk in dkeys:
corr[dk] = data[dk].data.reshape(data[dk].shape)
out[corr_name] = corr
return out
|
[
"sfepy.discrete.fem.meshio.HDF5MeshIO",
"sfepy.base.base.Struct",
"sfepy.homogenization.homogen_app.HomogenizationApp",
"sfepy.base.conf.ProblemConf.from_file",
"sfepy.base.multiproc.get_multiproc",
"sfepy.base.conf.get_standard_keywords",
"sfepy.homogenization.coefficients.Coefficients.from_file_hdf5"
] |
[((691, 714), 'sfepy.base.conf.get_standard_keywords', 'get_standard_keywords', ([], {}), '()\n', (712, 714), False, 'from sfepy.base.conf import ProblemConf, get_standard_keywords\n'), ((762, 831), 'sfepy.base.conf.ProblemConf.from_file', 'ProblemConf.from_file', (['micro_filename', 'required', 'other'], {'verbose': '(False)'}), '(micro_filename, required, other, verbose=False)\n', (783, 831), False, 'from sfepy.base.conf import ProblemConf, get_standard_keywords\n'), ((4269, 4298), 'six.iteritems', 'six.iteritems', (['coefs.__dict__'], {}), '(coefs.__dict__)\n', (4282, 4298), False, 'import six\n'), ((4524, 4541), 'six.iterkeys', 'six.iterkeys', (['out'], {}), '(out)\n', (4536, 4541), False, 'import six\n'), ((5195, 5220), 'six.iteritems', 'six.iteritems', (['dump_names'], {}), '(dump_names)\n', (5208, 5220), False, 'import six\n'), ((1094, 1119), 'os.path.exists', 'op.exists', (['coefs_filename'], {}), '(coefs_filename)\n', (1103, 1119), True, 'import os.path as op\n'), ((1293, 1327), 'sfepy.base.base.Struct', 'Struct', ([], {'output_filename_trunk': 'None'}), '(output_filename_trunk=None)\n', (1299, 1327), False, 'from sfepy.base.base import output, Struct\n'), ((1347, 1389), 'sfepy.homogenization.homogen_app.HomogenizationApp', 'HomogenizationApp', (['conf', 'options', '"""micro:"""'], {}), "(conf, options, 'micro:')\n", (1364, 1389), False, 'from sfepy.homogenization.homogen_app import HomogenizationApp\n'), ((1548, 1591), 'sfepy.homogenization.coefficients.Coefficients.from_file_hdf5', 'Coefficients.from_file_hdf5', (['coefs_filename'], {}), '(coefs_filename)\n', (1575, 1591), False, 'from sfepy.homogenization.coefficients import Coefficients\n'), ((1653, 1682), 'six.iteritems', 'six.iteritems', (['coefs.__dict__'], {}), '(coefs.__dict__)\n', (1666, 1682), False, 'import six\n'), ((2633, 2656), 'sfepy.base.conf.get_standard_keywords', 'get_standard_keywords', ([], {}), '()\n', (2654, 2656), False, 'from sfepy.base.conf import ProblemConf, get_standard_keywords\n'), ((2766, 2831), 'sfepy.base.conf.ProblemConf.from_file', 'ProblemConf.from_file', (['micro_file', 'required', 'other'], {'verbose': '(False)'}), '(micro_file, required, other, verbose=False)\n', (2787, 2831), False, 'from sfepy.base.conf import ProblemConf, get_standard_keywords\n'), ((2887, 2921), 'sfepy.base.base.Struct', 'Struct', ([], {'output_filename_trunk': 'None'}), '(output_filename_trunk=None)\n', (2893, 2921), False, 'from sfepy.base.base import output, Struct\n'), ((2936, 3030), 'sfepy.homogenization.homogen_app.HomogenizationApp', 'HomogenizationApp', (['conf', 'options', '"""micro:"""'], {'n_micro': 'coor.shape[0]', 'update_micro_coors': '(True)'}), "(conf, options, 'micro:', n_micro=coor.shape[0],\n update_micro_coors=True)\n", (2953, 3030), False, 'from sfepy.homogenization.homogen_app import HomogenizationApp\n'), ((4942, 4985), 'sfepy.homogenization.coefficients.Coefficients.from_file_hdf5', 'Coefficients.from_file_hdf5', (['coefs_filename'], {}), '(coefs_filename)\n', (4969, 4985), False, 'from sfepy.homogenization.coefficients import Coefficients\n'), ((5275, 5298), 'sfepy.discrete.fem.meshio.HDF5MeshIO', 'HDF5MeshIO', (["(val + '.h5')"], {}), "(val + '.h5')\n", (5285, 5298), False, 'from sfepy.discrete.fem.meshio import HDF5MeshIO\n'), ((1759, 1788), 'six.iteritems', 'six.iteritems', (['coefs.__dict__'], {}), '(coefs.__dict__)\n', (1772, 1788), False, 'import six\n'), ((3210, 3239), 'sfepy.base.multiproc.get_multiproc', 'multi.get_multiproc', ([], {'mpi': '(True)'}), '(mpi=True)\n', (3229, 3239), True, 
'import sfepy.base.multiproc as multi\n'), ((3777, 3813), 'numpy.linalg.inv', 'nm.linalg.inv', (['problem.def_grad_prev'], {}), '(problem.def_grad_prev)\n', (3790, 3813), True, 'import numpy as nm\n'), ((4357, 4370), 'numpy.array', 'nm.array', (['val'], {}), '(val)\n', (4365, 4370), True, 'import numpy as nm\n'), ((5242, 5255), 'os.path.split', 'op.split', (['val'], {}), '(val)\n', (5250, 5255), True, 'import os.path as op\n'), ((1142, 1173), 'tables.is_hdf5_file', 'pt.is_hdf5_file', (['coefs_filename'], {}), '(coefs_filename)\n', (1157, 1173), True, 'import tables as pt\n'), ((4437, 4455), 'six.iteritems', 'six.iteritems', (['val'], {}), '(val)\n', (4450, 4455), False, 'import six\n'), ((1886, 1921), 'numpy.tile', 'nm.tile', (['val', '(coor.shape[0], 1, 1)'], {}), '(val, (coor.shape[0], 1, 1))\n', (1893, 1921), True, 'import numpy as nm\n'), ((4493, 4507), 'numpy.array', 'nm.array', (['val2'], {}), '(val2)\n', (4501, 4507), True, 'import numpy as nm\n'), ((1994, 2012), 'six.iteritems', 'six.iteritems', (['val'], {}), '(val)\n', (2007, 2012), False, 'import six\n'), ((2179, 2215), 'numpy.tile', 'nm.tile', (['val2', '(coor.shape[0], 1, 1)'], {}), '(val2, (coor.shape[0], 1, 1))\n', (2186, 2215), True, 'import numpy as nm\n')]
|
from typing import List, Optional
from sqlmodel import Field, Relationship, SQLModel
class Sport(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str
athletes: List["Athlete"] = Relationship(back_populates="sport")
class Athlete(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str
sport_id: Optional[int] = Field(default=None, foreign_key="sport.id")
sport: Optional[Sport] = Relationship(back_populates="athletes")
|
[
"sqlmodel.Relationship",
"sqlmodel.Field"
] |
[((147, 184), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (152, 184), False, 'from sqlmodel import Field, Relationship, SQLModel\n'), ((231, 267), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""sport"""'}), "(back_populates='sport')\n", (243, 267), False, 'from sqlmodel import Field, Relationship, SQLModel\n'), ((331, 368), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (336, 368), False, 'from sqlmodel import Field, Relationship, SQLModel\n'), ((413, 456), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'foreign_key': '"""sport.id"""'}), "(default=None, foreign_key='sport.id')\n", (418, 456), False, 'from sqlmodel import Field, Relationship, SQLModel\n'), ((486, 525), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""athletes"""'}), "(back_populates='athletes')\n", (498, 525), False, 'from sqlmodel import Field, Relationship, SQLModel\n')]
|
from enum import Enum
from typing import Dict, Optional, Union
from sqlmodel import Field, SQLModel, create_engine
# https://stackoverflow.com/questions/65209934/pydantic-enum-field-does-not-get-converted-to-string
class EventType(str, Enum):
BUILD_IMAGE = 'build_image'
CREATE_CONTAINER = 'create_container'
class Event(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
event_type: EventType
event_payload: str
event_status: Optional[int] = Field(default=None)
|
[
"sqlmodel.Field"
] |
[((378, 415), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'primary_key': '(True)'}), '(default=None, primary_key=True)\n', (383, 415), False, 'from sqlmodel import Field, SQLModel, create_engine\n'), ((499, 518), 'sqlmodel.Field', 'Field', ([], {'default': 'None'}), '(default=None)\n', (504, 518), False, 'from sqlmodel import Field, SQLModel, create_engine\n')]
|
import os
import re
from pathlib import Path
from typing import List, Optional
import tmdbsimple as tmdb
from dotenv import load_dotenv
from models import (
Collection,
Genre,
Movie,
ProductionCompany,
ProductionCountry,
SpokenLanguage,
)
from sqlalchemy import extract
from sqlalchemy.exc import NoResultFound
from sqlmodel import Session, SQLModel, select
load_dotenv()
YEAR_PATTERN = re.compile(r"(\s\(\d{4}\))")
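# Matches a trailing " (YYYY)" year suffix in a movie file name, e.g. "Heat (1995)"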
def create_model_obj(o: dict, model_type: SQLModel, session: Session) -> SQLModel:
obj = model_type(**o)
return obj
def create_model_objs(
data: dict, model_type: SQLModel, session: Session
) -> List[SQLModel]:
objs = []
for o in data:
obj = create_model_obj(o, model_type, session)
objs.append(obj)
return objs
def tmdb_info_to_movie(info: dict, session: Session) -> Movie:
relationship_keys = {
"genres",
"belongs_to_collection",
"production_companies",
"production_countries",
"spoken_languages",
}
movie_info = {k: v for k, v in info.items() if k not in relationship_keys}
genres = create_model_objs(info["genres"], Genre, session)
collection = None
if info["belongs_to_collection"]:
collection = create_model_obj(
info["belongs_to_collection"], Collection, session
)
production_companies = create_model_objs(
info["production_companies"], ProductionCompany, session
)
production_countries = create_model_objs(
info["production_countries"], ProductionCountry, session
)
# languages
spoken_languages = create_model_objs(
info["spoken_languages"], SpokenLanguage, session
)
# create movie
movie = Movie(**movie_info)
movie.genres = genres
movie.collection = collection
movie.production_companies = production_companies
movie.production_countries = production_countries
movie.spoken_languages = spoken_languages
session.add(movie)
session.commit()
session.refresh(movie)
return movie
tmdb.API_KEY = os.getenv("TMDB_API_KEY", None)
def split_movie_path_title_and_year(path: str):
movie_path = Path(path)
movie_name = movie_path.stem
year = None
match = YEAR_PATTERN.search(movie_name)
if match:
year = match.group().strip(" ()")
movie_name = movie_name.replace(match.group(), "")
return movie_name, year
def get_movie_from_path(path: str, session: Session) -> Optional[Movie]:
movie = None
movie_name, year = split_movie_path_title_and_year(path)
# lookup in db
statement = select(Movie).where(Movie.title == movie_name)
if year is not None:
statement = statement.filter(extract("year", Movie.release_date) == int(year))
try:
movie = session.exec(statement).one()
except NoResultFound:
search = tmdb.Search()
search.movie(query=movie_name, year=year)
# take the first result:
if search.results:
id = search.results[0]["id"]
info = tmdb.Movies(id).info()
movie = tmdb_info_to_movie(info, session)
return movie
|
[
"sqlmodel.select"
] |
[((384, 397), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (395, 397), False, 'from dotenv import load_dotenv\n'), ((414, 445), 're.compile', 're.compile', (['"""(\\\\s\\\\(\\\\d{4}\\\\))"""'], {}), "('(\\\\s\\\\(\\\\d{4}\\\\))')\n", (424, 445), False, 'import re\n'), ((2087, 2118), 'os.getenv', 'os.getenv', (['"""TMDB_API_KEY"""', 'None'], {}), "('TMDB_API_KEY', None)\n", (2096, 2118), False, 'import os\n'), ((1746, 1765), 'models.Movie', 'Movie', ([], {}), '(**movie_info)\n', (1751, 1765), False, 'from models import Collection, Genre, Movie, ProductionCompany, ProductionCountry, SpokenLanguage\n'), ((2186, 2196), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (2190, 2196), False, 'from pathlib import Path\n'), ((2625, 2638), 'sqlmodel.select', 'select', (['Movie'], {}), '(Movie)\n', (2631, 2638), False, 'from sqlmodel import Session, SQLModel, select\n'), ((2882, 2895), 'tmdbsimple.Search', 'tmdb.Search', ([], {}), '()\n', (2893, 2895), True, 'import tmdbsimple as tmdb\n'), ((2734, 2769), 'sqlalchemy.extract', 'extract', (['"""year"""', 'Movie.release_date'], {}), "('year', Movie.release_date)\n", (2741, 2769), False, 'from sqlalchemy import extract\n'), ((3066, 3081), 'tmdbsimple.Movies', 'tmdb.Movies', (['id'], {}), '(id)\n', (3077, 3081), True, 'import tmdbsimple as tmdb\n')]
|
# Copyright 2021 Modelyst LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Objects related to the running of Models and Generators."""
from bdb import BdbQuit
from datetime import datetime, timedelta
from time import time
from typing import Any, Dict, List, Mapping, Optional, Set, Tuple
from uuid import UUID
from psycopg import connect as pg3_connect
from pydantic.fields import Field, PrivateAttr
from pydasher import hasher
from sqlalchemy.future import Engine
from sqlmodel import Session, select
from tqdm import tqdm
import dbgen.exceptions as exceptions
from dbgen.core.base import Base, encoders
from dbgen.core.generator import Generator
from dbgen.core.metadata import (
GeneratorEntity,
GeneratorRunEntity,
GensToRun,
ModelEntity,
Repeats,
RunEntity,
Status,
)
from dbgen.core.model import Model
from dbgen.core.node.extract import Extract
from dbgen.core.node.query import BaseQuery
from dbgen.exceptions import DBgenExternalError, DBgenSkipException, RepeatException, SerializationError
from dbgen.utils.log import LogLevel
class RunConfig(Base):
"""Configuration for the running of a Generator and Model"""
retry: bool = False
include: Set[str] = Field(default_factory=set)
exclude: Set[str] = Field(default_factory=set)
start: Optional[str]
until: Optional[str]
batch_size: Optional[int]
progress_bar: bool = True
log_level: LogLevel = LogLevel.INFO
def should_gen_run(self, generator: Generator) -> bool:
"""Check a generator against include/exclude to see if it should run."""
markers = [generator.name, *generator.tags]
should_run = any(
map(
lambda x: (not self.include or x in self.include) and x not in self.exclude,
markers,
)
)
return should_run
def get_invalid_markers(self, model: Model) -> Dict[str, List[str]]:
"""Check that all inputs to RunConfig are meaningful for the model."""
invalid_marks = {}
gen_names = model.gens().keys()
# Validate start and until
for attr in ("start", "until"):
val: str = getattr(self, attr)
if val is not None and val not in gen_names:
invalid_marks[attr] = [val]
# Validate include and exclude as sets
for attr in ("include", "exclude"):
set_val: Set[str] = getattr(self, attr)
invalid_vals = [x for x in set_val if not model.validate_marker(x)]
if invalid_vals:
invalid_marks[attr] = invalid_vals
return invalid_marks
def update_run_by_id(run_id, status: Status, session: Session):
run = session.get(RunEntity, run_id)
assert run, f"No run found with id {run_id}"
run.status = status
session.commit()
class RunInitializer(Base):
"""Intializes a run by syncing the database and getting the run_id."""
def execute(self, engine: Engine, run_config: RunConfig) -> int:
        # Use the metadatabase connection to initialize the run
# Store the details of the run on the metadatabase so downstream GeneratorRuns can pick them up
# Sync the database with the registries
with Session(engine) as session:
run = RunEntity(status=Status.initialized)
session.add(run)
session.commit()
session.refresh(run)
assert isinstance(run.id, int)
run.status = Status.running
session.commit()
run_id = run.id
return run_id
class BaseGeneratorRun(Base):
"""A lightwieght wrapper for the Generator that grabs a specific Generator from metadatabase and runs it."""
_old_repeats: Set[UUID] = PrivateAttr(default_factory=set)
_new_repeats: Set[UUID] = PrivateAttr(default_factory=set)
def get_gen(self, meta_engine: Engine, *args, **kwargs) -> Generator:
raise NotImplementedError
def execute(
self,
main_engine: Engine,
meta_engine: Engine,
run_id: Optional[int],
run_config: Optional[RunConfig],
ordering: Optional[int],
):
# Set default values for run_config if none provided
if run_config is None:
run_config = RunConfig()
generator = self.get_gen(meta_engine=meta_engine)
# Initialize the generator_row in the meta database
meta_session = Session(meta_engine)
gen_run = self._initialize_gen_run(
generator=generator, session=meta_session, run_id=run_id, ordering=ordering
)
# Check if our run config excludes our generator
if not run_config.should_gen_run(generator):
self._logger.info(f'Excluding generator {generator.name!r}')
gen_run.status = Status.excluded
meta_session.commit()
return
# Start the Generator
self._logger.info(f'Running generator {generator.name!r}...')
gen_run.status = Status.running
meta_session.commit()
start = time()
# Set the extractor
self._logger.debug('Initializing extractor')
extractor_connection = main_engine.connect()
extract = generator.extract
if isinstance(extract, BaseQuery):
extract.set_extractor(connection=extractor_connection)
else:
extract.set_extractor()
self._logger.debug('Fetching extractor length')
row_count = extract.length(connection=extractor_connection)
gen_run.inputs_extracted = row_count
meta_session.commit()
self._logger.debug('Fetching repeats')
# Query the repeats table for input_hashes that match this generator's hash
self._old_repeats = set(
meta_session.exec(select(Repeats.input_hash).where(Repeats.generator_id == generator.uuid)).all()
)
# The batch_size is set either on the run_config or the generator
batch_size = run_config.batch_size or generator.batch_size
assert batch_size is None or batch_size > 0, f"Invalid batch size batch_size must be >0: {batch_size}"
# Open raw connections for fast loading
main_raw_connection = pg3_connect(str(main_engine.url))
meta_raw_connection = meta_engine.raw_connection()
batch_done = lambda x: x % batch_size == 0 if batch_size is not None else False
# Start while loop to iterate through the nodes
self._logger.info('Looping through extracted rows...')
progress_bar = tqdm(
total=row_count,
position=1,
leave=False,
desc="Transforming...",
disable=not run_config.progress_bar,
)
try:
while True:
gen_run.inputs_processed += 1
row: Dict[str, Mapping[str, Any]] = {}
try:
for node in generator._sort_graph():
output = node.run(row)
# Extract outputs need to be fed to our repeat checker and need to be checked for stop iterations
if isinstance(node, Extract):
if output is None or batch_done(gen_run.inputs_processed):
self._logger.debug('loading batch...')
self._load_repeats(meta_raw_connection, generator)
rows_inserted, rows_updated = self._load(main_raw_connection, generator)
gen_run.rows_inserted += rows_inserted
gen_run.rows_updated += rows_updated
meta_session.commit()
self._logger.debug('done loading batch.')
self._logger.debug(f'inserted {rows_inserted} rows.')
self._logger.debug(f'updated {rows_updated} rows.')
# if we are out of rows break out of while loop
if output is None:
raise StopIteration
is_repeat, input_hash = self._check_repeat(output, generator.uuid)
if not run_config.retry and is_repeat:
raise RepeatException()
row[node.hash] = output # type: ignore
if not is_repeat:
self._new_repeats.add(input_hash)
gen_run.unique_inputs += 1
progress_bar.update()
# Stop iteration is used to catch the empty extractor
except StopIteration:
break
# A repeated input from the extract will also cause a row to be skipped
except RepeatException:
continue
# Any node can raise a skip exception to skip the input before loading
except DBgenSkipException as exc:
self._logger.debug(f"Skipped Row: {exc.msg}")
gen_run.inputs_skipped += 1
# External errors are raised whenever a node fails due to internal logic
except DBgenExternalError as e:
msg = f"\n\nError when running generator {generator.name}\n"
self._logger.error(msg)
self._logger.error(f"\n{e}")
gen_run.status = Status.failed
gen_run.error = str(e)
run = meta_session.get(RunEntity, run_id)
assert run
run.errors = run.errors + 1 if run.errors else 1
meta_session.commit()
meta_session.close()
return 2
except (
Exception,
KeyboardInterrupt,
SystemExit,
BdbQuit,
) as e:
gen_run.status = Status.failed
gen_run.error = (
f"Uncaught Error encountered during running of generator {generator.name}: {e!r}"
)
update_run_by_id(run_id, Status.failed, meta_session)
raise
# Close all connections
finally:
gen_run.runtime = round(time() - start, 3)
meta_session.commit()
main_raw_connection.close()
meta_raw_connection.close()
extractor_connection.close()
gen_run.status = Status.completed
gen_run.runtime = round(time() - start, 3)
self._logger.info(
f"Finished running generator {generator.name}({generator.uuid}) in {gen_run.runtime}(s)."
)
self._logger.info(f"Loaded approximately {gen_run.rows_inserted} rows")
meta_session.commit()
meta_session.close()
return 0
def _initialize_gen_run(
self,
session: Session,
generator: Generator,
run_id: Optional[int],
ordering: Optional[int],
) -> GeneratorRunEntity:
# if no run_id is provided create one and mark it as a testing run
if run_id is None:
run = RunEntity(status='testing')
session.add(run)
session.commit()
session.refresh(run)
ordering = 0
run_id = run.id
gen_row = generator._get_gen_row()
session.merge(gen_row)
session.commit()
query = generator.extract.query if isinstance(generator.extract, BaseQuery) else ''
gen_run = GeneratorRunEntity(
run_id=run_id,
generator_id=gen_row.id,
status=Status.initialized,
ordering=ordering,
query=query,
)
session.add(gen_run)
session.commit()
session.refresh(gen_run)
return gen_run
def _load(self, connection, generator: Generator) -> Tuple[int, int]:
rows_inserted = 0
rows_updated = 0
for load in generator._sorted_loads():
if load.insert:
rows_inserted += len(load._output)
else:
rows_updated += len(load._output)
load.load(connection, gen_id=self.uuid)
return (rows_inserted, rows_updated)
def _load_repeats(self, connection, generator: Generator) -> None:
rows = ((generator.uuid, input_hash) for input_hash in self._new_repeats)
Repeats._quick_load(connection, rows, column_names=["generator_id", "input_hash"])
self._old_repeats = self._old_repeats.union(self._new_repeats)
self._new_repeats = set()
def _check_repeat(self, extracted_dict: Dict[str, Any], generator_uuid: UUID) -> Tuple[bool, UUID]:
# Convert Row to a dictionary so we can hash it for repeat-checking
input_hash = UUID(hasher((generator_uuid, extracted_dict), encoders=encoders))
# If the input_hash has been seen and we don't have retry=True skip row
is_repeat = input_hash in self._old_repeats or input_hash in self._new_repeats
return (is_repeat, input_hash)
class GeneratorRun(BaseGeneratorRun):
generator: Generator
def get_gen(self, meta_engine: Engine, *args, **kwargs):
return self.generator
class RemoteGeneratorRun(BaseGeneratorRun):
generator_id: UUID
def get_gen(self, meta_engine, *args, **kwargs):
with Session(meta_engine) as sess:
gen_json = sess.exec(
select(GeneratorEntity.gen_json).where(GeneratorEntity.id == self.generator_id)
).one()
try:
generator = Generator.deserialize(gen_json)
except ModuleNotFoundError as exc:
import os
raise SerializationError(
f"While deserializing generator id {self.generator_id} an unknown module was encountered. Are you using custom dbgen objects reachable by your python environment? Make sure any custom extractors or code can be found in your PYTHONPATH environment variable\nError: {exc}\nPYTHONPATH={os.environ.get('PYTHONPATH')}"
) from exc
if generator.uuid != self.generator_id:
error = f"Deserialization Failed the generator hash has changed for generator named {generator.name}!\n{generator}\n{self.generator_id}"
raise exceptions.SerializationError(error)
return generator
class ModelRun(Base):
model: Model
def get_gen_run(self, generator: Generator) -> BaseGeneratorRun:
return GeneratorRun(generator=generator)
def execute(
self,
main_engine: Engine,
meta_engine: Engine,
run_config: RunConfig = None,
nuke: bool = False,
rerun_failed: bool = False,
) -> RunEntity:
start = time()
if run_config is None:
run_config = RunConfig()
        # Sync the database state with the model state
self.model.sync(main_engine, meta_engine, nuke=nuke)
        # If rerunning failed generators, query for the generators to run and add them to the include set
if rerun_failed:
with meta_engine.connect() as conn:
result = conn.execute(select(GensToRun.__table__.c.name))
for (gen_name,) in result:
run_config.include.add(gen_name)
# Initialize the run
run_init = RunInitializer()
run_id = run_init.execute(meta_engine, run_config)
sorted_generators = self.model._sort_graph()
# Add generators to metadb
with Session(meta_engine) as meta_session:
model_row = self.model._get_model_row()
model_row.last_run = datetime.now()
existing_model = meta_session.get(ModelEntity, model_row.id)
if not existing_model:
meta_session.merge(model_row)
else:
existing_model.last_run = datetime.now()
meta_session.commit()
# Apply start and until to exclude generators not between start_idx and until_idx
if run_config.start or run_config.until:
gen_names = [gen.name for gen in sorted_generators]
start_idx = gen_names.index(run_config.start) if run_config.start else 0
until_idx = gen_names.index(run_config.until) + 1 if run_config.until else len(gen_names)
# Modify include to only include the gen_names that pass the test
run_config.include = run_config.include.union(gen_names[start_idx:until_idx])
print(f"Only running generators: {gen_names[start_idx:until_idx]} due to start/until")
self._logger.debug(
f"Only running generators: {gen_names[start_idx:until_idx]} due to start/until"
)
with tqdm(total=len(sorted_generators), position=0, disable=not run_config.progress_bar) as tq:
for i, generator in enumerate(sorted_generators):
tq.set_description(generator.name)
gen_run = self.get_gen_run(generator)
gen_run.execute(main_engine, meta_engine, run_id, run_config, ordering=i)
tq.update()
# Complete run
with Session(meta_engine) as session:
update_run_by_id(run_id, Status.completed, session)
run = session.get(RunEntity, run_id)
assert run
run.runtime = timedelta(seconds=time() - start)
session.commit()
session.refresh(run)
return run
class RemoteModelRun(ModelRun):
def get_gen_run(self, generator):
return RemoteGeneratorRun(generator_id=generator.uuid)
|
[
"sqlmodel.Session",
"sqlmodel.select"
] |
[((1732, 1758), 'pydantic.fields.Field', 'Field', ([], {'default_factory': 'set'}), '(default_factory=set)\n', (1737, 1758), False, 'from pydantic.fields import Field, PrivateAttr\n'), ((1783, 1809), 'pydantic.fields.Field', 'Field', ([], {'default_factory': 'set'}), '(default_factory=set)\n', (1788, 1809), False, 'from pydantic.fields import Field, PrivateAttr\n'), ((4267, 4299), 'pydantic.fields.PrivateAttr', 'PrivateAttr', ([], {'default_factory': 'set'}), '(default_factory=set)\n', (4278, 4299), False, 'from pydantic.fields import Field, PrivateAttr\n'), ((4330, 4362), 'pydantic.fields.PrivateAttr', 'PrivateAttr', ([], {'default_factory': 'set'}), '(default_factory=set)\n', (4341, 4362), False, 'from pydantic.fields import Field, PrivateAttr\n'), ((4946, 4966), 'sqlmodel.Session', 'Session', (['meta_engine'], {}), '(meta_engine)\n', (4953, 4966), False, 'from sqlmodel import Session, select\n'), ((5577, 5583), 'time.time', 'time', ([], {}), '()\n', (5581, 5583), False, 'from time import time\n'), ((7051, 7162), 'tqdm.tqdm', 'tqdm', ([], {'total': 'row_count', 'position': '(1)', 'leave': '(False)', 'desc': '"""Transforming..."""', 'disable': '(not run_config.progress_bar)'}), "(total=row_count, position=1, leave=False, desc='Transforming...',\n disable=not run_config.progress_bar)\n", (7055, 7162), False, 'from tqdm import tqdm\n'), ((12159, 12281), 'dbgen.core.metadata.GeneratorRunEntity', 'GeneratorRunEntity', ([], {'run_id': 'run_id', 'generator_id': 'gen_row.id', 'status': 'Status.initialized', 'ordering': 'ordering', 'query': 'query'}), '(run_id=run_id, generator_id=gen_row.id, status=Status.\n initialized, ordering=ordering, query=query)\n', (12177, 12281), False, 'from dbgen.core.metadata import GeneratorEntity, GeneratorRunEntity, GensToRun, ModelEntity, Repeats, RunEntity, Status\n'), ((13037, 13123), 'dbgen.core.metadata.Repeats._quick_load', 'Repeats._quick_load', (['connection', 'rows'], {'column_names': "['generator_id', 'input_hash']"}), "(connection, rows, column_names=['generator_id',\n 'input_hash'])\n", (13056, 13123), False, 'from dbgen.core.metadata import GeneratorEntity, GeneratorRunEntity, GensToRun, ModelEntity, Repeats, RunEntity, Status\n'), ((15403, 15409), 'time.time', 'time', ([], {}), '()\n', (15407, 15409), False, 'from time import time\n'), ((3754, 3769), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (3761, 3769), False, 'from sqlmodel import Session, select\n'), ((3800, 3836), 'dbgen.core.metadata.RunEntity', 'RunEntity', ([], {'status': 'Status.initialized'}), '(status=Status.initialized)\n', (3809, 3836), False, 'from dbgen.core.metadata import GeneratorEntity, GeneratorRunEntity, GensToRun, ModelEntity, Repeats, RunEntity, Status\n'), ((11778, 11805), 'dbgen.core.metadata.RunEntity', 'RunEntity', ([], {'status': '"""testing"""'}), "(status='testing')\n", (11787, 11805), False, 'from dbgen.core.metadata import GeneratorEntity, GeneratorRunEntity, GensToRun, ModelEntity, Repeats, RunEntity, Status\n'), ((13432, 13491), 'pydasher.hasher', 'hasher', (['(generator_uuid, extracted_dict)'], {'encoders': 'encoders'}), '((generator_uuid, extracted_dict), encoders=encoders)\n', (13438, 13491), False, 'from pydasher import hasher\n'), ((13992, 14012), 'sqlmodel.Session', 'Session', (['meta_engine'], {}), '(meta_engine)\n', (13999, 14012), False, 'from sqlmodel import Session, select\n'), ((16141, 16161), 'sqlmodel.Session', 'Session', (['meta_engine'], {}), '(meta_engine)\n', (16148, 16161), False, 'from sqlmodel import Session, select\n'), 
((16264, 16278), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (16276, 16278), False, 'from datetime import datetime, timedelta\n'), ((17768, 17788), 'sqlmodel.Session', 'Session', (['meta_engine'], {}), '(meta_engine)\n', (17775, 17788), False, 'from sqlmodel import Session, select\n'), ((11151, 11157), 'time.time', 'time', ([], {}), '()\n', (11155, 11157), False, 'from time import time\n'), ((14217, 14248), 'dbgen.core.generator.Generator.deserialize', 'Generator.deserialize', (['gen_json'], {}), '(gen_json)\n', (14238, 14248), False, 'from dbgen.core.generator import Generator\n'), ((14953, 14989), 'dbgen.exceptions.SerializationError', 'exceptions.SerializationError', (['error'], {}), '(error)\n', (14982, 14989), True, 'import dbgen.exceptions as exceptions\n'), ((16493, 16507), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (16505, 16507), False, 'from datetime import datetime, timedelta\n'), ((10902, 10908), 'time.time', 'time', ([], {}), '()\n', (10906, 10908), False, 'from time import time\n'), ((15783, 15817), 'sqlmodel.select', 'select', (['GensToRun.__table__.c.name'], {}), '(GensToRun.__table__.c.name)\n', (15789, 15817), False, 'from sqlmodel import Session, select\n'), ((17981, 17987), 'time.time', 'time', ([], {}), '()\n', (17985, 17987), False, 'from time import time\n'), ((6308, 6334), 'sqlmodel.select', 'select', (['Repeats.input_hash'], {}), '(Repeats.input_hash)\n', (6314, 6334), False, 'from sqlmodel import Session, select\n'), ((8828, 8845), 'dbgen.exceptions.RepeatException', 'RepeatException', ([], {}), '()\n', (8843, 8845), False, 'from dbgen.exceptions import DBgenExternalError, DBgenSkipException, RepeatException, SerializationError\n'), ((14072, 14104), 'sqlmodel.select', 'select', (['GeneratorEntity.gen_json'], {}), '(GeneratorEntity.gen_json)\n', (14078, 14104), False, 'from sqlmodel import Session, select\n'), ((14668, 14696), 'os.environ.get', 'os.environ.get', (['"""PYTHONPATH"""'], {}), "('PYTHONPATH')\n", (14682, 14696), False, 'import os\n')]
|